| author | Thomas Gleixner <tglx@linutronix.de> | 2011-06-22 19:47:02 +0200 |
|---|---|---|
| committer | Clark Williams <williams@redhat.com> | 2012-02-15 10:32:27 -0600 |
| commit | 0a0928d99a31240488c1f1e51cf424aa2ea3eacb (patch) | |
| tree | 58f36dc75fbf80f3091ed6be6c5c5f8137eda043 /block | |
| parent | 314759d9153af63579af471858aefba290f9752d (diff) | |
block: Shorten interrupt disabled regions
Moving the blk_sched_flush_plug() call out of the interrupt/preempt
disabled region in the scheduler allows us to replace
local_irq_save/restore(flags) by local_irq_disable/enable() in
blk_flush_plug().
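For context (this sketch is not part of the patch): local_irq_save()/local_irq_restore() preserve the caller's interrupt state and are therefore safe in contexts that may already run with interrupts disabled, while local_irq_disable()/local_irq_enable() are unconditional and only correct when interrupts are known to be enabled on entry. Moving the flush call out of the scheduler's irq/preempt-disabled region establishes exactly that guarantee:

```c
#include <linux/irqflags.h>

/* Illustrative helpers, not from the patch. */

/* Safe even if the caller already runs with interrupts disabled:
 * the previous irq state is kept in 'flags' and restored on exit. */
static void may_be_nested(void)
{
	unsigned long flags;

	local_irq_save(flags);
	/* ... critical section ... */
	local_irq_restore(flags);
}

/* Only correct when interrupts are known to be enabled on entry,
 * which blk_flush_plug() can rely on after this change. */
static void irqs_enabled_on_entry(void)
{
	local_irq_disable();
	/* ... critical section ... */
	local_irq_enable();
}
```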
Now, instead of doing this, we disable interrupts explicitly when we
lock the request_queue and reenable them when we drop the lock. That
allows interrupts to be handled when the plug list contains requests
for more than one queue.
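A condensed sketch of the resulting flush loop (a paraphrase of the patched blk_flush_plug_list() in the diff below, with sorting and request accounting elided): interrupts are disabled only while an individual queue_lock is held, so pending interrupts can be serviced between two queues on the plug list.

```c
#include <linux/blkdev.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* blk-core internal helper; after this patch it drops q->queue_lock
 * with spin_unlock_irq(), i.e. it also reenables interrupts. */
static void queue_unplugged(struct request_queue *q, unsigned int depth,
			    bool from_schedule);

/* Paraphrase of the patched blk_flush_plug_list() loop. */
static void flush_sketch(struct list_head *list, bool from_schedule)
{
	struct request_queue *q = NULL;
	struct request *rq;
	unsigned int depth = 0;

	while (!list_empty(list)) {
		rq = list_entry_rq(list->next);
		list_del_init(&rq->queuelist);
		if (rq->q != q) {
			if (q)	/* drops lock, reenables interrupts */
				queue_unplugged(q, depth, from_schedule);
			q = rq->q;
			depth = 0;
			spin_lock_irq(q->queue_lock); /* irqs off again */
		}
		/* ... insert rq on the queue, depth++ ... */
	}
	if (q)
		queue_unplugged(q, depth, from_schedule);
}
```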
Aside from that, this change makes the scope of the irq-disabled
region more obvious. The current code confused the hell out of me when
looking at:
```
local_irq_save(flags);
   spin_lock(q->queue_lock);
   ...
   queue_unplugged(q...);
     scsi_request_fn();
       spin_unlock(q->queue_lock);
       spin_lock(shost->host_lock);
       spin_unlock_irq(shost->host_lock);
-------------------^^^ ????
       spin_lock_irq(q->queue_lock);
       spin_unlock(q->queue_lock);
local_irq_restore(flags);
```
Also add a comment to __blk_run_queue() documenting that
q->request_fn() can drop q->queue_lock and reenable interrupts, but
must return with q->queue_lock held and interrupts disabled.
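To make that contract concrete, here is a hypothetical strategy function honoring it (example_request_fn and issue_to_hardware() are made up for illustration; blk_fetch_request() is the real API and must be called with q->queue_lock held):

```c
#include <linux/blkdev.h>
#include <linux/spinlock.h>

static void issue_to_hardware(struct request *rq);	/* made-up helper */

/* Hypothetical request_fn obeying the rule documented at
 * __blk_run_queue(): it may drop q->queue_lock and reenable
 * interrupts for the slow part, but must return with the lock
 * held and interrupts disabled. */
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		spin_unlock_irq(q->queue_lock);	/* irqs on while issuing */
		issue_to_hardware(rq);
		spin_lock_irq(q->queue_lock);	/* restore caller's state */
	}
}
```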
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'block')
| -rw-r--r-- | block/blk-core.c | 20 |

1 file changed, 8 insertions(+), 12 deletions(-)
```diff
diff --git a/block/blk-core.c b/block/blk-core.c
index 15de223c7f93..7366ad422a5a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -300,7 +300,11 @@ void __blk_run_queue(struct request_queue *q)
 {
 	if (unlikely(blk_queue_stopped(q)))
 		return;
-
+	/*
+	 * q->request_fn() can drop q->queue_lock and reenable
+	 * interrupts, but must return with q->queue_lock held and
+	 * interrupts disabled.
+	 */
 	q->request_fn(q);
 }
 EXPORT_SYMBOL(__blk_run_queue);
@@ -2745,11 +2749,11 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 	 * this lock).
 	 */
 	if (from_schedule) {
-		spin_unlock(q->queue_lock);
+		spin_unlock_irq(q->queue_lock);
 		blk_run_queue_async(q);
 	} else {
 		__blk_run_queue(q);
-		spin_unlock(q->queue_lock);
+		spin_unlock_irq(q->queue_lock);
 	}
 }
 
@@ -2775,7 +2779,6 @@ static void flush_plug_callbacks(struct blk_plug *plug)
 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	struct request_queue *q;
-	unsigned long flags;
 	struct request *rq;
 	LIST_HEAD(list);
 	unsigned int depth;
@@ -2796,11 +2799,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	q = NULL;
 	depth = 0;
 
-	/*
-	 * Save and disable interrupts here, to avoid doing it for every
-	 * queue lock we have to take.
-	 */
-	local_irq_save(flags);
 	while (!list_empty(&list)) {
 		rq = list_entry_rq(list.next);
 		list_del_init(&rq->queuelist);
@@ -2813,7 +2811,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 			queue_unplugged(q, depth, from_schedule);
 			q = rq->q;
 			depth = 0;
-			spin_lock(q->queue_lock);
+			spin_lock_irq(q->queue_lock);
 		}
 		/*
 		 * rq is already accounted, so use raw insert
@@ -2831,8 +2829,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	 */
 	if (q)
 		queue_unplugged(q, depth, from_schedule);
-
-	local_irq_restore(flags);
 }
 
 void blk_finish_plug(struct blk_plug *plug)
```