summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKeith Busch <kbusch@kernel.org>2023-01-05 11:07:34 -0800
committerJens Axboe <axboe@kernel.dk>2023-01-11 10:31:49 -0700
commitd46aa786fa53cbc92593089374e49c94fd9063ae (patch)
tree7ff7c5e0d2fffecac8d4e0eec74cb5729e8b0517
parent4397a17c1dc53f436285f372432dd1aea44e7953 (diff)
downloadlinux-rpi-d46aa786fa53cbc92593089374e49c94fd9063ae.tar.gz
linux-rpi-d46aa786fa53cbc92593089374e49c94fd9063ae.tar.bz2
linux-rpi-d46aa786fa53cbc92593089374e49c94fd9063ae.zip
block: use iter_ubuf for single range
This is more efficient than iter_iov.

Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
[axboe: fold in iovec assumption fix]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--block/blk-map.c8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/block/blk-map.c b/block/blk-map.c
index 19940c978c73..f2135e6ee8f6 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -31,7 +31,8 @@ static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
return NULL;
memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
bmd->iter = *data;
- bmd->iter.iov = bmd->iov;
+ if (iter_is_iovec(data))
+ bmd->iter.iov = bmd->iov;
return bmd;
}
@@ -641,7 +642,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
copy = true;
else if (iov_iter_is_bvec(iter))
map_bvec = true;
- else if (!iter_is_iovec(iter))
+ else if (!user_backed_iter(iter))
copy = true;
else if (queue_virt_boundary(q))
copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
@@ -682,9 +683,8 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
struct rq_map_data *map_data, void __user *ubuf,
unsigned long len, gfp_t gfp_mask)
{
- struct iovec iov;
struct iov_iter i;
- int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);
+ int ret = import_ubuf(rq_data_dir(rq), ubuf, len, &i);
if (unlikely(ret < 0))
return ret;