From e7e72bf641b1fc7b9df6f40bd2c36dfccd8d647c Mon Sep 17 00:00:00 2001
From: Neil Brown
Date: Wed, 14 May 2008 16:05:54 -0700
Subject: Remove blkdev warning triggered by using md

As setting and clearing queue flags now requires that we hold a spinlock
on the queue, and as blk_queue_stack_limits is called without that lock,
get the lock inside blk_queue_stack_limits.

For blk_queue_stack_limits to be able to find the right lock, each md
personality needs to set q->queue_lock to point to the appropriate lock.
Those personalities which didn't previously use a spin_lock use
q->__queue_lock, so always initialise that lock when the queue is
allocated.

With this in place, setting/clearing of the QUEUE_FLAG_PLUGGED bit will
no longer cause warnings, as it will be clear that the proper lock is
held.

Thanks to Dan Williams for review and fixing the silly bugs.

Signed-off-by: NeilBrown
Cc: Dan Williams
Cc: Jens Axboe
Cc: Alistair John Strachan
Cc: Nick Piggin
Cc: "Rafael J. Wysocki"
Cc: Jacek Luczak
Cc: Prakash Punnoor
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 block/blk-core.c     | 5 ++---
 block/blk-settings.c | 8 +++++++-
 2 files changed, 9 insertions(+), 4 deletions(-)

(limited to 'block')

diff --git a/block/blk-core.c b/block/blk-core.c
index 2987fe47b5e..6a9cc0d22a6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -482,6 +482,7 @@
 	kobject_init(&q->kobj, &blk_queue_ktype);
 
 	mutex_init(&q->sysfs_lock);
+	spin_lock_init(&q->__queue_lock);
 
 	return q;
 }
@@ -544,10 +545,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	 * if caller didn't supply a lock, they get per-queue locking with
 	 * our embedded lock
 	 */
-	if (!lock) {
-		spin_lock_init(&q->__queue_lock);
+	if (!lock)
 		lock = &q->__queue_lock;
-	}
 
 	q->request_fn = rfn;
 	q->prep_rq_fn = NULL;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index bb93d4c3277..8dd86418f35 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -286,8 +286,14 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 	t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
 	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
 	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
-	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
+	if (!t->queue_lock)
+		WARN_ON_ONCE(1);
+	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
+		unsigned long flags;
+		spin_lock_irqsave(t->queue_lock, flags);
 		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
+		spin_unlock_irqrestore(t->queue_lock, flags);
+	}
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
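
For readers tracking the md side of this change (not shown above, since the
diff view is limited to 'block'): each personality's setup points
q->queue_lock at whatever spinlock it already holds around queue activity,
and a personality with no lock of its own falls back to the embedded
q->__queue_lock, which blk_alloc_queue_node() now always initialises.  A
minimal sketch of that pattern follows, using the 2.6.25-era md types; the
conf_t layout and the conf->device_lock field are assumptions modelled on
raid1/raid5, not code from this patch:

/*
 * Sketch only, not part of this patch: how an md personality's run()
 * method would publish its lock.  conf_t and conf->device_lock are
 * assumed names modelled on raid1/raid5.
 */
static int run(mddev_t *mddev)
{
	conf_t *conf = mddev->private;

	/* A personality with its own spinlock publishes that lock ... */
	mddev->queue->queue_lock = &conf->device_lock;

	/*
	 * ... while one without a private lock would instead use the
	 * embedded lock, now initialised in blk_alloc_queue_node():
	 *
	 *	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
	 */
	return 0;
}

Publishing the lock this way is what lets blk_queue_stack_limits() take
t->queue_lock before calling queue_flag_clear(), which is exactly the path
the blk-settings.c hunk above protects.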