author     Tejun Heo <tj@kernel.org>         2009-07-03 12:56:18 +0200
committer  Jens Axboe <axboe@carl.(none)>    2009-07-03 21:06:45 +0200
commit     ab0fd1debe730ec9998678a0c53caefbd121ed10 (patch)
tree       c44de6ef7d876a32c4f733fdc26d40cdcbf7d374 /block
parent     b59e64d0ddb756af57ea032383bfd393a286a8e8 (diff)
block: don't merge requests of different failfast settings
The block layer used to merge requests and bios with different failfast settings. This caused regular IOs to fail prematurely when they were merged into failfast requests for readahead.

Niel Lambrechts could trigger the problem semi-reliably on ext4 when resuming from STR. ext4 uses readahead when reading inodes, and combined with the deterministic extra SATA PHY exception cycle during resume on that specific configuration, a non-readahead inode read would fail, causing ext4 errors. Please read the following thread for details.

http://lkml.org/lkml/2009/5/23/21

This patch makes the block layer reject merging if the failfast settings don't match. This is correct but is likely to lower IO performance by preventing regular IOs from mingling into surrounding readahead requests. Changes to allow such mixed merges and to handle errors correctly will be added later.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Niel Lambrechts <niel.lambrechts@gmail.com>
Cc: Theodore Tso <tytso@mit.edu>
Signed-off-by: Jens Axboe <axboe@carl.(none)>
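For illustration only, the following user-space sketch models the rule the patch enforces: each request carries three independent failfast bits (device, transport, driver), and two requests may be merged only when all three bits are identical. The struct and function names here (failfast_flags, can_merge_failfast) are invented for this example and are not kernel APIs; the kernel performs the same comparison with the blk_failfast_dev/transport/driver() helpers shown in the diff below.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the per-request failfast bits
 * (device, transport, driver). Hypothetical type, not a kernel API. */
struct failfast_flags {
        bool dev;
        bool transport;
        bool driver;
};

/* Mirrors the check added by the patch: merging is allowed only
 * when all three failfast settings are identical. */
static bool can_merge_failfast(const struct failfast_flags *a,
                               const struct failfast_flags *b)
{
        return a->dev == b->dev &&
               a->transport == b->transport &&
               a->driver == b->driver;
}

int main(void)
{
        struct failfast_flags regular_read = { false, false, false };
        struct failfast_flags readahead    = { true,  true,  false };

        /* A regular read and a failfast readahead must not be merged;
         * otherwise the regular read inherits failfast behaviour and
         * can fail on a transient error. Prints 0 (merge rejected). */
        printf("merge allowed: %d\n",
               can_merge_failfast(&regular_read, &readahead));
        return 0;
}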
Diffstat (limited to 'block')
-rw-r--r--  block/blk-merge.c  6
-rw-r--r--  block/elevator.c   8
2 files changed, 14 insertions, 0 deletions
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 39ce64432ba6..e1999679a4d5 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -350,6 +350,12 @@ static int attempt_merge(struct request_queue *q, struct request *req,
         if (blk_integrity_rq(req) != blk_integrity_rq(next))
                 return 0;
 
+        /* don't merge requests of different failfast settings */
+        if (blk_failfast_dev(req) != blk_failfast_dev(next) ||
+            blk_failfast_transport(req) != blk_failfast_transport(next) ||
+            blk_failfast_driver(req) != blk_failfast_driver(next))
+                return 0;
+
         /*
          * If we are allowed to merge, then append bio list
          * from next to rq and release next. merge_requests_fn
diff --git a/block/elevator.c b/block/elevator.c
index ca861927ba41..6f2375339a99 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -100,6 +100,14 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
         if (bio_integrity(bio) != blk_integrity_rq(rq))
                 return 0;
 
+        /*
+         * Don't merge if failfast settings don't match
+         */
+        if (bio_failfast_dev(bio) != blk_failfast_dev(rq) ||
+            bio_failfast_transport(bio) != blk_failfast_transport(rq) ||
+            bio_failfast_driver(bio) != blk_failfast_driver(rq))
+                return 0;
+
         if (!elv_iosched_allow_merge(rq, bio))
                 return 0;