author		Christoph Hellwig <hch@lst.de>		2021-10-19 14:25:53 +0200
committer	Jens Axboe <axboe@kernel.dk>		2021-10-19 11:10:09 -0600
commit		d92ca9d8348fb12c89eac5928bd651c3a485d7b9
tree		05d9098b647cd912977e1f87fa297fd9c543ffde
parent		dc5fc361d891e089dfd9c0a975dc78041036b906
blk-mq: don't handle non-flush requests in blk_insert_flush
Return to the normal blk_mq_submit_bio flow if the bio did not actually
end up being a flush because the device doesn't support it.  Note that
this is basically impossible to hit without special instrumentation,
given that submit_bio_checks usually clears these flags already, so a
tight race would be needed to actually hit this code path.
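
For reference, the filtering that makes this path nearly unreachable is
the early flush check in submit_bio_checks(); a condensed sketch of the
v5.15-era logic in block/blk-core.c (not the verbatim source) looks
like this:

	/*
	 * Condensed sketch of the early flush filtering in
	 * submit_bio_checks() (block/blk-core.c, ~v5.15): when the
	 * device has no volatile write cache, REQ_PREFLUSH/REQ_FUA are
	 * stripped from the bio before it ever reaches
	 * blk_insert_flush, and an empty flush is completed on the
	 * spot.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!bio_sectors(bio)) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}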
With this, the call to blk_mq_run_hw_queue for flush requests can be
removed, given that the actual flush requests are always issued via the
requeue workqueue, which runs the queue unconditionally.
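
To see why that blk_mq_run_hw_queue call is redundant, note how the
flush machinery issues its requests; a condensed sketch of the relevant
v5.15-era helpers (block/blk-flush.c and block/blk-mq.c, not the
verbatim source):

	/*
	 * Flush sequence requests are always queued via the requeue
	 * list, with kick_requeue_list == true:
	 */
	static void blk_flush_queue_rq(struct request *rq, bool add_front)
	{
		blk_mq_add_to_requeue_list(rq, add_front, true);
	}

	/*
	 * ... and the requeue work finishes by running the hardware
	 * queues unconditionally, so blk_mq_submit_bio does not need
	 * to run the queue itself:
	 */
	static void blk_mq_requeue_work(struct work_struct *work)
	{
		struct request_queue *q =
			container_of(work, struct request_queue,
				     requeue_work.work);

		/* ... dispatch of the requeued requests elided ... */

		blk_mq_run_hw_queues(q, false);
	}

In other words, kicking the requeue list already guarantees the queue
is run once the flush request is dispatched.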
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211019122553.2467817-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--  block/blk-flush.c | 12
-rw-r--r--  block/blk-mq.c    | 14
-rw-r--r--  block/blk.h       |  2
3 files changed, 13 insertions, 15 deletions
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 4201728bf3a5..8e364bda5166 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -379,7 +379,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
  * @rq is being submitted. Analyze what needs to be done and put it on the
  * right queue.
  */
-void blk_insert_flush(struct request *rq)
+bool blk_insert_flush(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 	unsigned long fflags = q->queue_flags;	/* may change, cache */
@@ -409,7 +409,7 @@ void blk_insert_flush(struct request *rq)
 	 */
 	if (!policy) {
 		blk_mq_end_request(rq, 0);
-		return;
+		return true;
 	}
 
 	BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */
@@ -420,10 +420,8 @@ void blk_insert_flush(struct request *rq)
 	 * for normal execution.
 	 */
 	if ((policy & REQ_FSEQ_DATA) &&
-	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
-		blk_mq_request_bypass_insert(rq, false, false);
-		return;
-	}
+	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH)))
+		return false;
 
 	/*
 	 * @rq should go through flush machinery. Mark it part of flush
@@ -439,6 +437,8 @@ void blk_insert_flush(struct request *rq)
 	spin_lock_irq(&fq->mq_flush_lock);
 	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
 	spin_unlock_irq(&fq->mq_flush_lock);
+
+	return true;
 }
 
 /**
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 71ab7521dd3d..3481a8712234 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2532,14 +2532,12 @@ void blk_mq_submit_bio(struct bio *bio)
 		return;
 	}
 
-	if (unlikely(is_flush_fua)) {
-		struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
-		/* Bypass scheduler for flush requests */
-		blk_insert_flush(rq);
-		blk_mq_run_hw_queue(hctx, true);
-	} else if (plug && (q->nr_hw_queues == 1 ||
-	    blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
-	    q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
+	if (is_flush_fua && blk_insert_flush(rq))
+		return;
+
+	if (plug && (q->nr_hw_queues == 1 ||
+	    blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
+	    q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
 		/*
 		 * Use plugging if we have a ->commit_rqs() hook as well, as
 		 * we know the driver uses bd->last in a smart fashion.
diff --git a/block/blk.h b/block/blk.h
index b9729c12fd62..6a039e6c7d07 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -236,7 +236,7 @@ void __blk_account_io_done(struct request *req, u64 now);
  */
 #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
 
-void blk_insert_flush(struct request *rq);
+bool blk_insert_flush(struct request *rq);
 
 int elevator_switch_mq(struct request_queue *q,
 			struct elevator_type *new_e);