author	Ming Lei <ming.lei@redhat.com>	2018-10-12 15:53:10 +0800
committer	Jens Axboe <axboe@kernel.dk>	2018-10-18 07:23:40 -0600
commit	744889b7cbb56a64f957e65ade7cb65fe3f35714 (patch)
tree	65972a4045c33b737862a4522271ec5705f41e2c	/block/blk-lib.c
parent	7a7080b53467ad96c0b371e9d2a2a312f96f99c2 (diff)
block: don't deal with discard limit in blkdev_issue_discard()
blk_queue_split() already respects this limit via bio splitting, so there is no need to handle it again in blkdev_issue_discard(); this lets discard follow the normal bio submit path (bio_add_page() & submit_bio()).

More importantly, this patch fixes an issue introduced in a22c4d7e34402cc ("block: re-add discard_granularity and alignment checks"), in which a zero-sector discard bio may be generated in the case of zero alignment.

Fixes: a22c4d7e34402ccdf3 ("block: re-add discard_granularity and alignment checks")
Cc: stable@vger.kernel.org
Cc: Ming Lin <ming.l@ssi.samsung.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Xiao Ni <xni@redhat.com>
Tested-by: Mariusz Dabrowski <mariusz.dabrowski@intel.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
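For reference, the "normal bio submit" pattern the message refers to looks roughly like the sketch below. This is an illustrative helper written against the 4.19-era block API, not part of this patch or the kernel tree: the caller builds the bio with bio_add_page() and submits it, and blk_queue_split() in the submission path splits it against the queue limits.

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Illustrative helper, not in the kernel tree: write one page to @bdev at
 * @sector.  The caller does not consult any queue limits here;
 * blk_queue_split() in the submission path splits the bio as needed, which
 * is the behaviour this patch extends to discard bios as well.
 */
static int example_write_page(struct block_device *bdev, sector_t sector,
			      struct page *page)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	int ret;

	if (!bio)
		return -ENOMEM;

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE;
	/* A single page always fits into a freshly allocated 1-segment bio. */
	bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(bio);	/* synchronous for simplicity */
	bio_put(bio);
	return ret;
}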
Diffstat (limited to 'block/blk-lib.c')
-rw-r--r--	block/blk-lib.c	28
1 file changed, 2 insertions(+), 26 deletions(-)
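As background for the diff below: after this change, capping a discard at max_discard_sectors and keeping it aligned to the discard granularity is left entirely to the bio splitting done in the submission path (blk_queue_split(), which hands REQ_OP_DISCARD bios to blk_bio_discard_split()). The standalone helper below is only a simplified, hand-written sketch of that kind of capping, with made-up names; it is not the kernel's actual implementation.

#include <stdint.h>

/*
 * Simplified sketch (not kernel code): how a large discard gets capped when
 * it is split in the submission path.  The chunk size is limited by the
 * queue's max_discard_sectors, kept below the bi_size overflow limit, and
 * rounded down to the discard granularity.
 */
static uint64_t discard_chunk_sectors(uint64_t nr_sects,
				      uint32_t max_discard_sectors,
				      uint32_t granularity_sectors)
{
	uint64_t max = max_discard_sectors;

	/* bi_size is a 32-bit byte count, so never exceed UINT32_MAX >> 9 sectors. */
	if (max > (UINT32_MAX >> 9))
		max = UINT32_MAX >> 9;

	/* Keep the split boundary aligned to the discard granularity. */
	if (granularity_sectors && max >= granularity_sectors)
		max -= max % granularity_sectors;

	return nr_sects < max ? nr_sects : max;
}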
diff --git a/block/blk-lib.c b/block/blk-lib.c
index d1b9dd03da25..bbd44666f2b5 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -29,9 +29,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 {
 	struct request_queue *q = bdev_get_queue(bdev);
 	struct bio *bio = *biop;
-	unsigned int granularity;
 	unsigned int op;
-	int alignment;
 	sector_t bs_mask;
 
 	if (!q)
@@ -54,38 +52,16 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	if ((sector | nr_sects) & bs_mask)
 		return -EINVAL;
 
-	/* Zero-sector (unknown) and one-sector granularities are the same. */
-	granularity = max(q->limits.discard_granularity >> 9, 1U);
-	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
-
 	while (nr_sects) {
-		unsigned int req_sects;
-		sector_t end_sect, tmp;
+		unsigned int req_sects = nr_sects;
+		sector_t end_sect;
 
-		/*
-		 * Issue in chunks of the user defined max discard setting,
-		 * ensuring that bi_size doesn't overflow
-		 */
-		req_sects = min_t(sector_t, nr_sects,
-				  q->limits.max_discard_sectors);
 		if (!req_sects)
 			goto fail;
 		if (req_sects > UINT_MAX >> 9)
 			req_sects = UINT_MAX >> 9;
 
-		/*
-		 * If splitting a request, and the next starting sector would be
-		 * misaligned, stop the discard at the previous aligned sector.
-		 */
 		end_sect = sector + req_sects;
-		tmp = end_sect;
-		if (req_sects < nr_sects &&
-		    sector_div(tmp, granularity) != alignment) {
-			end_sect = end_sect - alignment;
-			sector_div(end_sect, granularity);
-			end_sect = end_sect * granularity + alignment;
-			req_sects = end_sect - sector;
-		}
 
 		bio = next_bio(bio, 0, gfp_mask);
 		bio->bi_iter.bi_sector = sector;