author		Ming Lei <ming.lei@redhat.com>		2023-05-18 07:30:59 +0200
committer	Jens Axboe <axboe@kernel.dk>		2023-05-18 19:40:09 -0600
commit		d97217e7f024bbe9aa62aea070771234c2879358
tree		cfcb145de4b2ee8091c8e5b136072c814472d76a	/block/blk-mq.c
parent		d5fb8726f1dea70543a93ab1d7332857f157b7f3
blk-mq: don't queue plugged passthrough requests into scheduler
Passthrough requests should never be queued to the I/O scheduler,
as scheduling these opaque requests doesn't make sense, and I/O
schedulers might require req->bio to be always valid.
Passthrough requests were never inserted into the scheduler before
commit 1c2d2fff6dc0 ("block: wire-up support for passthrough plugging");
restore that behavior so that passthrough requests issued under a plug
bypass the scheduler as well.
[hch: use blk_mq_insert_requests for passthrough requests,
fix up the commit message and comments]
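
For context, blk_rq_is_passthrough() keys off the request opcode:
"passthrough" means a driver-private command (e.g. an NVMe passthrough
or SG_IO payload) that the elevator cannot meaningfully merge or sort.
A simplified, standalone paraphrase of the helpers behind it (modeled
on include/linux/blk_types.h and include/linux/blk-mq.h; the opcode
values and the exact shape here are illustrative, not a verbatim copy
of the kernel source):

	#include <stdbool.h>

	#define REQ_OP_MASK	0xff	/* low bits of cmd_flags carry the opcode */
	#define REQ_OP_DRV_IN	34	/* driver-private command, data from device */
	#define REQ_OP_DRV_OUT	35	/* driver-private command, data to device */

	/* Passthrough == a driver-private opcode the I/O scheduler cannot interpret. */
	static bool blk_op_is_passthrough(unsigned int op)
	{
		op &= REQ_OP_MASK;
		return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
	}

Because such requests also need not carry a data bio, a scheduler that
dereferences req->bio unconditionally would crash on them, which is the
second reason the message gives.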
Reported-by: Guangwu Zhang <guazhang@redhat.com>
Closes: https://lore.kernel.org/linux-block/CAGS2=YosaYaUTEMU3uaf+y=8MqSrhL7sYsJn8EwbaM=76p_4Qg@mail.gmail.com/
Investigated-by: Yu Kuai <yukuai1@huaweicloud.com>
Fixes: 1c2d2fff6dc0 ("block: wire-up support for passthrough plugging")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20230518053101.760632-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f6dad0886a2f..8b7e4daaa5b7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2711,6 +2711,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
 	struct request *requeue_list = NULL;
 	struct request **requeue_lastp = &requeue_list;
 	unsigned int depth = 0;
+	bool is_passthrough = false;
 	LIST_HEAD(list);
 
 	do {
@@ -2719,7 +2720,9 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
 		if (!this_hctx) {
 			this_hctx = rq->mq_hctx;
 			this_ctx = rq->mq_ctx;
-		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
+			is_passthrough = blk_rq_is_passthrough(rq);
+		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx ||
+			   is_passthrough != blk_rq_is_passthrough(rq)) {
 			rq_list_add_tail(&requeue_lastp, rq);
 			continue;
 		}
@@ -2731,7 +2734,8 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
 	trace_block_unplug(this_hctx->queue, depth, !from_sched);
 
 	percpu_ref_get(&this_hctx->queue->q_usage_counter);
-	if (this_hctx->queue->elevator) {
+	/* passthrough requests should never be issued to the I/O scheduler */
+	if (this_hctx->queue->elevator && !is_passthrough) {
 		this_hctx->queue->elevator->type->ops.insert_requests(this_hctx,
 				&list, 0);
 		blk_mq_run_hw_queue(this_hctx, from_sched);
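
The resulting rule in blk_mq_dispatch_plug_list() is that a flushed
batch must be homogeneous: a request only joins the current batch if
its hctx, its ctx, and its passthrough-ness all match the first request
(anything else is requeued for a later pass), so each batch takes
exactly one insertion path, the elevator for scheduled requests or
blk_mq_insert_requests() for passthrough. A standalone sketch of that
splitting rule (the struct and names are illustrative, and unlike the
kernel, which requeues mismatches, this model simply splits the list
contiguously):

	#include <stdbool.h>
	#include <stdio.h>

	struct model_rq {
		int hctx;		/* stands in for rq->mq_hctx */
		int ctx;		/* stands in for rq->mq_ctx */
		bool passthrough;	/* stands in for blk_rq_is_passthrough(rq) */
	};

	/* A request may join the current batch only if all three keys match. */
	static bool same_batch(const struct model_rq *first, const struct model_rq *rq)
	{
		return first->hctx == rq->hctx && first->ctx == rq->ctx &&
		       first->passthrough == rq->passthrough;
	}

	int main(void)
	{
		struct model_rq plug[] = {
			{ 0, 0, false },	/* file system I/O */
			{ 0, 0, false },
			{ 0, 0, true },		/* passthrough: forces a new batch */
			{ 0, 0, false },	/* back to the elevator path */
		};
		const struct model_rq *first = NULL;
		int batch = 0;

		for (unsigned int i = 0; i < sizeof(plug) / sizeof(plug[0]); i++) {
			if (!first || !same_batch(first, &plug[i])) {
				first = &plug[i];
				batch++;
			}
			printf("rq %u -> batch %d via %s\n", i, batch,
			       plug[i].passthrough ? "blk_mq_insert_requests()"
						   : "elevator insert_requests()");
		}
		return 0;
	}

Before this fix, the passthrough request in the middle of that plug
would have been handed to the elevator along with the others whenever
the queue had a scheduler attached.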