author		Ingo Molnar <mingo@elte.hu>	2011-03-09 10:38:55 +0100
committer	Ingo Molnar <mingo@elte.hu>	2011-03-09 10:38:59 +0100
commit		c8b44163b754612fc4769fe1c5df00e98fc9d3c6 (patch)
tree		77706ff1f2a72ed294885b6cf0a7c0de0f92d6df	/block/blk-core.c
parent		ac23f25355ef53f3d14352fcff3c6817527a9749 (diff)
parent		a5abba989deceb731047425812d268daf7536575 (diff)
Merge commit 'v2.6.38-rc8' into x86/asm
Merge reason: Update with the latest fixes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	18
1 file changed, 6 insertions(+), 12 deletions(-)
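As far as blk-core.c is concerned, the change this merge brings in is the new force_kblockd argument to __blk_run_queue(): false keeps the old behaviour of calling q->request_fn() directly where safe, true always defers the queue run to the kblockd workqueue. A minimal caller-side sketch against the 2.6.38-rc8 block API (example_kick_queue and its context are hypothetical, not part of this commit):

#include <linux/blkdev.h>

/* Hypothetical driver helper: kick the queue from a context holding no
 * locks.  __blk_run_queue() itself requires q->queue_lock held and
 * interrupts disabled; passing false preserves the pre-change behaviour
 * of invoking q->request_fn() directly when recursion is safe. */
static void example_kick_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q, false);	/* note the new second argument */
	spin_unlock_irqrestore(q->queue_lock, flags);
}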
diff --git a/block/blk-core.c b/block/blk-core.c
index 2f4002f79a24..518dd423a5fe 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q)
 	WARN_ON(!irqs_disabled());
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 }
 EXPORT_SYMBOL(blk_start_queue);
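blk_start_queue() keeps its single-argument signature and simply forwards false. A hedged usage fragment (example_device_ready is illustrative; only the locking rules come from the hunk above):

#include <linux/blkdev.h>

/* Hypothetical restart path: the device can accept requests again.
 * blk_start_queue() must run with the queue lock held and interrupts
 * disabled, which is exactly what the WARN_ON above checks. */
static void example_device_ready(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blk_start_queue(q);	/* clears QUEUE_FLAG_STOPPED, then runs the queue */
	spin_unlock_irq(q->queue_lock);
}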
@@ -403,13 +403,14 @@ EXPORT_SYMBOL(blk_sync_queue);
 
 /**
  * __blk_run_queue - run a single device queue
  * @q:	The queue to run
+ * @force_kblockd: Don't run @q->request_fn directly.  Use kblockd.
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
  *    held and interrupts disabled.
  *
  */
-void __blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 {
 	blk_remove_plug(q);
@@ -423,7 +424,7 @@ void __blk_run_queue(struct request_queue *q)
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
-	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else {
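The rewritten test means request_fn runs inline only when the caller passed force_kblockd as false and the queue is not already being re-entered; in every other case the run is punted to kblockd. A hedged sketch of a call site that must never recurse (the completion-path framing is an assumption, not taken from this patch):

#include <linux/blkdev.h>

/* Hypothetical completion path: we may already be deep in the stack,
 * so ask explicitly for the kblockd workqueue instead of letting
 * __blk_run_queue() call q->request_fn() synchronously. */
static void example_restart_from_completion(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q, true);	/* true: never run request_fn here */
	spin_unlock_irqrestore(q->queue_lock, flags);
}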
@@ -446,7 +447,7 @@ void blk_run_queue(struct request_queue *q)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
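blk_run_queue() remains the lock-taking wrapper for callers that do not already hold q->queue_lock. An illustrative fragment, assuming a driver-private timer (the callback and its setup are hypothetical):

/* Hypothetical 2.6.38-era timer callback (timers then took an unsigned
 * long cookie): no locks are held on entry, so use the wrapper, which
 * acquires q->queue_lock with interrupts disabled itself. */
static void example_requeue_timer(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;

	blk_run_queue(q);	/* internally __blk_run_queue(q, false) */
}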
@@ -1053,7 +1054,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 
 	drive_stat_acct(rq, 1);
 	__elv_add_request(q, rq, where, 0);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
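blk_insert_request() is another internal call site converted to pass false. For context, a hedged caller sketch (example_issue_special and the prepared request are hypothetical):

/* Hypothetical special-command path: push an already-built request at
 * the head of the queue.  blk_insert_request() takes the queue lock,
 * queues rq, and now runs the queue with force_kblockd = false. */
static void example_issue_special(struct request_queue *q, struct request *rq)
{
	rq->cmd_type = REQ_TYPE_SPECIAL;
	blk_insert_request(q, rq, 1, NULL);	/* 1 = insert at head */
}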
@@ -2610,13 +2611,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-int kblockd_schedule_delayed_work(struct request_queue *q,
-				  struct delayed_work *dwork, unsigned long delay)
-{
-	return queue_delayed_work(kblockd_workqueue, dwork, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
 int __init blk_dev_init(void)
 {
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
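The merge also removes the exported kblockd_schedule_delayed_work(). Since kblockd_workqueue is static to blk-core.c, code that previously relied on it has to carry its own delayed work item. A hedged replacement sketch (struct example_dev and all names are hypothetical; INIT_DELAYED_WORK must be called at setup time):

#include <linux/blkdev.h>
#include <linux/workqueue.h>

struct example_dev {
	struct request_queue	*queue;
	struct delayed_work	run_work;	/* INIT_DELAYED_WORK() at probe time */
};

/* Worker: runs on the shared system workqueue instead of kblockd. */
static void example_delayed_run(struct work_struct *work)
{
	struct example_dev *dev =
		container_of(work, struct example_dev, run_work.work);

	blk_run_queue(dev->queue);
}

/* Ask for a queue run 'delay' jiffies from now. */
static void example_schedule_run(struct example_dev *dev, unsigned long delay)
{
	schedule_delayed_work(&dev->run_work, delay);
}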