author     Jens Axboe <jaxboe@fusionio.com>  2011-04-18 09:59:55 +0200
committer  Jens Axboe <jaxboe@fusionio.com>  2011-04-18 09:59:55 +0200
commit     99e22598e9a8e0a996d69c8c0f6b7027cb57720a (patch)
tree       9cf18bc4681889bdfcbc0c845e384f809fb29fce /block
parent     b4cb290e0a7d19235bd075c2ad4d60dbab0bac15 (diff)
block: drop queue lock before calling __blk_run_queue() for kblockd punt
If we know we are going to punt to kblockd, we can drop the queue lock
before calling into __blk_run_queue() since it only does a safe bit
test and a workqueue call. Since kblockd needs to grab this very lock
as one of the first things it does, it's a good optimization to drop
the lock before waking kblockd.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
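The pattern the message describes generalizes beyond the block layer. Below is a minimal user-space sketch of the same idea, with a pthread mutex and condition variable standing in for the queue lock and kblockd; all names here are hypothetical and this is an analogue, not kernel code:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t work_ready = PTHREAD_COND_INITIALIZER;
static bool pending;

/* Worker: grabbing `lock` is one of the first things it does,
 * just like kblockd re-taking the queue lock. */
static void *worker(void *arg)
{
        pthread_mutex_lock(&lock);
        while (!pending)
                pthread_cond_wait(&work_ready, &lock);
        pending = false;
        /* ... process the punted work under the lock ... */
        pthread_mutex_unlock(&lock);
        return NULL;
}

/* Called with `lock` held. Publish the work, drop the lock, and only
 * then wake the worker, so it does not wake up just to block on a
 * lock we still hold. */
static void punt_to_worker(void)
{
        pending = true;
        pthread_mutex_unlock(&lock);
        pthread_cond_signal(&work_ready);
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        pthread_mutex_lock(&lock);
        punt_to_worker();
        pthread_join(t, NULL);
        return 0;
}

Because `pending` is set before the unlock, the worker cannot miss the wakeup: either it is already waiting and gets signaled, or it checks the flag under the lock and skips the wait entirely.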
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c | 33 +++++++++++++++++++++++++++--------
1 file changed, 25 insertions(+), 8 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 09b262811fff..5e413933bc3a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -295,7 +295,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
- *    held and interrupts disabled.
+ *    held and interrupts disabled. If force_kblockd is true, then it is
+ *    safe to call this without holding the queue lock.
  *
  */
 void __blk_run_queue(struct request_queue *q, bool force_kblockd)
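As a sketch of the two call-site contracts the comment now describes (assuming a struct request_queue *q in scope; the locked form is essentially what blk_run_queue() itself did at this point in the tree):

        /* Synchronous run: queue lock held, interrupts disabled. */
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __blk_run_queue(q, false);
        spin_unlock_irqrestore(q->queue_lock, flags);

        /* Punt-only run: force_kblockd == true just does a bit test and
         * a workqueue kick, so no queue lock is needed here. */
        __blk_run_queue(q, true);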
@@ -2671,9 +2672,23 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
  */
 static void queue_unplugged(struct request_queue *q, unsigned int depth,
                             bool from_schedule)
+        __releases(q->queue_lock)
 {
         trace_block_unplug(q, depth, !from_schedule);
-        __blk_run_queue(q, from_schedule);
+
+        /*
+         * If we are punting this to kblockd, then we can safely drop
+         * the queue_lock before waking kblockd (which needs to take
+         * this lock).
+         */
+        if (from_schedule) {
+                spin_unlock(q->queue_lock);
+                __blk_run_queue(q, true);
+        } else {
+                __blk_run_queue(q, false);
+                spin_unlock(q->queue_lock);
+        }
+
 }
 
 static void flush_plug_callbacks(struct blk_plug *plug)
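The __releases(q->queue_lock) line added above is a Sparse annotation: it records that queue_unplugged() is entered with the queue lock held but returns with it dropped, so static checkers can follow the asymmetric locking. A minimal sketch of the annotation pair, with hypothetical helper names (__acquires()/__releases() themselves are the real kernel macros):

        /* Entered with the lock held, returns with it released. */
        static void finish_and_unlock(struct request_queue *q)
                __releases(q->queue_lock)
        {
                /* ... last piece of work under the lock ... */
                spin_unlock(q->queue_lock);
        }

        /* Counterpart: returns with the lock held. */
        static void lock_for_work(struct request_queue *q)
                __acquires(q->queue_lock)
        {
                spin_lock(q->queue_lock);
        }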
@@ -2729,10 +2744,11 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                 BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
                 BUG_ON(!rq->q);
                 if (rq->q != q) {
-                        if (q) {
+                        /*
+                         * This drops the queue lock
+                         */
+                        if (q)
                                 queue_unplugged(q, depth, from_schedule);
-                                spin_unlock(q->queue_lock);
-                        }
                         q = rq->q;
                         depth = 0;
                         spin_lock(q->queue_lock);
@@ -2750,10 +2766,11 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                 depth++;
         }
 
-        if (q) {
+        /*
+         * This drops the queue lock
+         */
+        if (q)
                 queue_unplugged(q, depth, from_schedule);
-                spin_unlock(q->queue_lock);
-        }
 
         local_irq_restore(flags);
 }
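The two hunks above implement a "lock handoff over a sorted list": plug_rq_cmp() has already grouped the plugged requests by queue, each queue's lock is taken once per group, and queue_unplugged() now drops it at every group boundary. A stripped-down user-space analogue of that control flow, with hypothetical types and pthread mutexes standing in for queue locks:

#include <pthread.h>
#include <stddef.h>

struct queue {
        pthread_mutex_t lock;
        int pending;
};

struct request {
        struct queue *q;
        struct request *next;
};

/* Analogue of queue_unplugged(): entered with q->lock held,
 * returns with it released. */
static void drain_queue(struct queue *q, int depth)
{
        q->pending -= depth;            /* stand-in for running the queue */
        pthread_mutex_unlock(&q->lock);
}

/* Walk a list already sorted by queue, taking each queue's lock once
 * per group; drain_queue() drops it at every group boundary, mirroring
 * the control flow of blk_flush_plug_list() after this patch. */
static void flush_list(struct request *rq)
{
        struct queue *q = NULL;
        int depth = 0;

        for (; rq != NULL; rq = rq->next) {
                if (rq->q != q) {
                        if (q)
                                drain_queue(q, depth);  /* drops q->lock */
                        q = rq->q;
                        depth = 0;
                        pthread_mutex_lock(&q->lock);
                }
                q->pending++;                           /* "queue" the request */
                depth++;
        }

        if (q)
                drain_queue(q, depth);                  /* drops the last lock */
}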