summaryrefslogtreecommitdiff
path: root/block/blk-mq.c
diff options
context:
space:
mode:
authorChengming Zhou <zhouchengming@bytedance.com>2023-08-21 17:56:02 +0800
committerJens Axboe <axboe@kernel.dk>2023-08-22 08:58:06 -0600
commit7222657e51b5626d10154b3e48ad441c33b5da96 (patch)
tree1cd2f2e35cb1154c3d2a460fe49d73c518c5e555 /block/blk-mq.c
parent2bc4d7a355a4d617452eaf1b21d6d261194b3667 (diff)
downloadlinux-rpi-7222657e51b5626d10154b3e48ad441c33b5da96.tar.gz
linux-rpi-7222657e51b5626d10154b3e48ad441c33b5da96.tar.bz2
linux-rpi-7222657e51b5626d10154b3e48ad441c33b5da96.zip
blk-mq: prealloc tags when increase tagset nr_hw_queues
Just like blk_mq_alloc_tag_set(), it's better to prepare all tags before using them to map to queue ctxs in blk_mq_map_swqueue(), which currently has to consider an empty set->tags[]. The good point is that we can fall back easily if increasing nr_hw_queues fails, instead of just mapping to hctx[0] on failure in blk_mq_map_swqueue(). And the fallback path already has tags free & clean handling, so all is good. Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com> Reviewed-by: Ming Lei <ming.lei@redhat.com> Link: https://lore.kernel.org/r/20230821095602.70742-3-chengming.zhou@linux.dev Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--block/blk-mq.c10
1 file changed, 10 insertions, 0 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 22397ba815ca..84400157c5f4 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4397,6 +4397,16 @@ static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
sizeof(*set->tags));
kfree(set->tags);
set->tags = new_tags;
+
+ for (i = set->nr_hw_queues; i < new_nr_hw_queues; i++) {
+ if (!__blk_mq_alloc_map_and_rqs(set, i)) {
+ while (--i >= set->nr_hw_queues)
+ __blk_mq_free_map_and_rqs(set, i);
+ return -ENOMEM;
+ }
+ cond_resched();
+ }
+
done:
set->nr_hw_queues = new_nr_hw_queues;
return 0;