author | Nick Piggin <npiggin@suse.de> | 2008-04-29 14:48:33 +0200
committer | Jens Axboe <jens.axboe@oracle.com> | 2008-04-29 14:48:33 +0200
commit | 75ad23bc0fcb4f992a5d06982bf0857ab1738e9e (patch)
tree | 8668ef63b1f420252ae41aed9e13737d49fd8054 /include
parent | 68154e90c9d1492d570671ae181d9a8f8530da55 (diff)
block: make queue flags non-atomic
We can save some atomic ops in the IO path if we clearly define
the rules for modifying the queue flags.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
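The rule the new helpers encode: q->queue_flags may only be modified while holding q->queue_lock, or before the queue is visible to other contexts, so the non-atomic __set_bit()/__clear_bit() suffice and the locked variants WARN once if called without the lock. Below is a minimal caller-side sketch of that convention, assuming the helpers added in the diff further down; example_stop_queue() and example_init_queue() are hypothetical names, not part of the patch:

	#include <linux/blkdev.h>
	#include <linux/spinlock.h>

	/* Runtime path: q is live, so queue_lock must be held around the
	 * non-atomic flag update (queue_flag_set() WARNs otherwise). */
	static void example_stop_queue(struct request_queue *q)
	{
		spin_lock_irq(q->queue_lock);
		queue_flag_set(QUEUE_FLAG_STOPPED, q);
		spin_unlock_irq(q->queue_lock);
	}

	/* Init path: the queue is not yet reachable from other contexts,
	 * so the _unlocked variant may touch the flags without the lock. */
	static void example_init_queue(struct request_queue *q)
	{
		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
	}

Because the check is WARN_ON_ONCE(), a caller that forgets the lock shows up once in the log instead of flooding it or crashing the machine.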
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/blkdev.h | 33
1 file changed, 29 insertions(+), 4 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c5065e3d2ca9..8ca481cd7d73 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -408,6 +408,30 @@ struct request_queue
 #define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
 #define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
 
+static inline void queue_flag_set_unlocked(unsigned int flag,
+					   struct request_queue *q)
+{
+	__set_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
+{
+	WARN_ON_ONCE(!spin_is_locked(q->queue_lock));
+	__set_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_clear_unlocked(unsigned int flag,
+					     struct request_queue *q)
+{
+	__clear_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
+{
+	WARN_ON_ONCE(!spin_is_locked(q->queue_lock));
+	__clear_bit(flag, &q->queue_flags);
+}
+
 enum {
 	/*
 	 * Hardbarrier is supported with one of the following methods.
@@ -496,17 +520,17 @@ static inline int blk_queue_full(struct request_queue *q, int rw)
 static inline void blk_set_queue_full(struct request_queue *q, int rw)
 {
 	if (rw == READ)
-		set_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
+		queue_flag_set(QUEUE_FLAG_READFULL, q);
 	else
-		set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+		queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
 }
 
 static inline void blk_clear_queue_full(struct request_queue *q, int rw)
 {
 	if (rw == READ)
-		clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_READFULL, q);
 	else
-		clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
 }
 
@@ -626,6 +650,7 @@ extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
+extern void __blk_run_queue(struct request_queue *);
 extern void blk_run_queue(struct request_queue *);
 extern void blk_start_queueing(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
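The newly exported __blk_run_queue() follows the kernel's usual double-underscore convention: the plain blk_run_queue() acquires q->queue_lock itself, while the __ variant is for callers that already hold it. A hedged sketch of that pairing; example_kick_queue() is a hypothetical caller, only blk_run_queue()/__blk_run_queue() and queue_flag_clear() come from this patch:

	/* Restart a stopped queue from a context that takes the lock
	 * itself; flag update and queue run happen under one hold. */
	static void example_kick_queue(struct request_queue *q)
	{
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		queue_flag_clear(QUEUE_FLAG_STOPPED, q);	/* under the lock */
		__blk_run_queue(q);				/* lock already held */
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

From a context that does not hold queue_lock, blk_run_queue() remains the entry point, acquiring the lock before doing the same work.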