author    Tejun Heo <tj@kernel.org>  2009-04-23 11:05:17 +0900
committer Jens Axboe <jens.axboe@oracle.com>  2009-04-28 07:37:33 +0200
commit    a538cd03be6f363d039daa94199c28cfbd508455 (patch)
tree      47d327e3339f5f07c4f4386537fab526310344a7 /block
parent    db2dbb12dc47a50c7a4c5678f526014063e486f6 (diff)
block: merge blk_invoke_request_fn() into __blk_run_queue()
__blk_run_queue() wraps blk_invoke_request_fn() such that it additionally removes the plug and bails out early if the queue is empty. Both extra operations have their own pending mechanisms and don't cause any harm correctness-wise when they are done superfluously.

Since blk_start_queue() is the only user of blk_invoke_request_fn(), there isn't much reason to keep both functions around. Merge blk_invoke_request_fn() into __blk_run_queue() and make blk_start_queue() use __blk_run_queue() instead.

[ Impact: merge two subtly different internal functions ]

Signed-off-by: Tejun Heo <tj@kernel.org>
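The interesting piece being merged is the reenter guard: recurse into ->request_fn() at most once, and defer another run to kblockd when the queue is already inside its handler. Below is a minimal, self-contained user-space model of that pattern; the struct, field, and function names are illustrative only and are not kernel API.

    /* Toy model of the "only recurse once" guard in __blk_run_queue(). */
    #include <stdbool.h>
    #include <stdio.h>

    struct mock_queue {
            bool stopped;       /* stands in for QUEUE_FLAG_STOPPED */
            bool reenter;       /* stands in for QUEUE_FLAG_REENTER */
            bool work_deferred; /* stands in for kblockd_schedule_work() */
    };

    static void run_queue(struct mock_queue *q);

    /* A request handler that tries to re-run the queue, as drivers sometimes do. */
    static void mock_request_fn(struct mock_queue *q)
    {
            run_queue(q); /* with the guard, this defers instead of recursing */
    }

    /* Models the post-patch __blk_run_queue(): one level of recursion only. */
    static void run_queue(struct mock_queue *q)
    {
            if (q->stopped)
                    return;

            if (!q->reenter) {               /* queue_flag_test_and_set() analogue */
                    q->reenter = true;
                    mock_request_fn(q);
                    q->reenter = false;
            } else {
                    q->work_deferred = true; /* kblockd_schedule_work() analogue */
            }
    }

    int main(void)
    {
            struct mock_queue q = {0};

            run_queue(&q);
            printf("request_fn ran, re-run deferred to worker: %s\n",
                   q.work_deferred ? "yes" : "no");
            return 0;
    }

Without the guard, mock_request_fn() and run_queue() would recurse until the stack overflowed; with it, the nested call simply flags deferred work and unwinds.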
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c | 35
1 file changed, 14 insertions(+), 21 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 41bc0ff75e2..02f53bc00e4 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -333,24 +333,6 @@ void blk_unplug(struct request_queue *q)
}
EXPORT_SYMBOL(blk_unplug);
-static void blk_invoke_request_fn(struct request_queue *q)
-{
- if (unlikely(blk_queue_stopped(q)))
- return;
-
- /*
- * one level of recursion is ok and is much faster than kicking
- * the unplug handling
- */
- if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
- q->request_fn(q);
- queue_flag_clear(QUEUE_FLAG_REENTER, q);
- } else {
- queue_flag_set(QUEUE_FLAG_PLUGGED, q);
- kblockd_schedule_work(q, &q->unplug_work);
- }
-}
-
/**
* blk_start_queue - restart a previously stopped queue
* @q: The &struct request_queue in question
@@ -365,7 +347,7 @@ void blk_start_queue(struct request_queue *q)
WARN_ON(!irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
- blk_invoke_request_fn(q);
+ __blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);
@@ -425,12 +407,23 @@ void __blk_run_queue(struct request_queue *q)
{
blk_remove_plug(q);
+ if (unlikely(blk_queue_stopped(q)))
+ return;
+
+ if (elv_queue_empty(q))
+ return;
+
/*
* Only recurse once to avoid overrunning the stack, let the unplug
* handling reinvoke the handler shortly if we already got there.
*/
- if (!elv_queue_empty(q))
- blk_invoke_request_fn(q);
+ if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+ q->request_fn(q);
+ queue_flag_clear(QUEUE_FLAG_REENTER, q);
+ } else {
+ queue_flag_set(QUEUE_FLAG_PLUGGED, q);
+ kblockd_schedule_work(q, &q->unplug_work);
+ }
}
EXPORT_SYMBOL(__blk_run_queue);
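For readability, here is __blk_run_queue() as it reads with this patch applied, assembled from the hunks above (the kerneldoc comment and surrounding file context are omitted):

    void __blk_run_queue(struct request_queue *q)
    {
            blk_remove_plug(q);

            if (unlikely(blk_queue_stopped(q)))
                    return;

            if (elv_queue_empty(q))
                    return;

            /*
             * Only recurse once to avoid overrunning the stack, let the unplug
             * handling reinvoke the handler shortly if we already got there.
             */
            if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                    q->request_fn(q);
                    queue_flag_clear(QUEUE_FLAG_REENTER, q);
            } else {
                    queue_flag_set(QUEUE_FLAG_PLUGGED, q);
                    kblockd_schedule_work(q, &q->unplug_work);
            }
    }
    EXPORT_SYMBOL(__blk_run_queue);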