author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>   2008-09-02 16:20:19 +0900
committer  Jens Axboe <jens.axboe@oracle.com>                2008-10-09 08:56:11 +0200
commit     818827669d85b84241696ffef2de485db46b0b5e (patch)
tree       694d09728733e65d604bf3e1f13679db73fc1d9a /block/blk-map.c
parent     839e96afba87117befd39cf4e43f156edc8047a7 (diff)
block: make blk_rq_map_user take a NULL user-space buffer
This patch changes blk_rq_map_user to accept a NULL user-space buffer with a READ command if rq_map_data is not NULL. A caller can thus pass page frames to blk_rq_map_user to just set up a request and bios with those page frames properly. bio_uncopy_user (called via blk_rq_unmap_user) does not copy data to user space for such a request.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
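For illustration, the sketch below shows how a caller might use the new calling convention: a READ request whose data pages come from a driver-owned page array, mapped with a NULL user-space pointer. This is a minimal, hypothetical example, not code from this series; the function name setup_null_mapped_read is made up, and it assumes the rq_map_data layout introduced earlier in the series (pages, page_order, nr_entries).

#include <linux/blkdev.h>
#include <linux/gfp.h>

/*
 * Hypothetical caller sketch (not part of this patch).  Builds a READ
 * request whose bios are backed by a driver-owned page array and maps
 * it with a NULL user-space buffer, which this patch now permits when
 * map_data is non-NULL and the request direction is READ.
 */
static struct request *setup_null_mapped_read(struct request_queue *q,
					      struct page **pages,
					      int nr_pages,
					      unsigned int len)
{
	struct rq_map_data map_data;
	struct request *rq;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return NULL;

	map_data.pages = pages;		/* driver-allocated page frames */
	map_data.page_order = 0;	/* order-0 pages */
	map_data.nr_entries = nr_pages;

	/*
	 * ubuf == NULL is accepted here because map_data != NULL and
	 * rq_data_dir(rq) == READ; the bios are built over the pages
	 * above and flagged BIO_NULL_MAPPED.
	 */
	ret = blk_rq_map_user(q, rq, &map_data, NULL, len, GFP_KERNEL);
	if (ret) {
		blk_put_request(rq);
		return NULL;
	}

	return rq;
}

On completion the caller still unmaps the request with blk_rq_unmap_user(), but since the bios carry BIO_NULL_MAPPED, bio_uncopy_user() skips the copy back to user space.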
Diffstat (limited to 'block/blk-map.c')
-rw-r--r--  block/blk-map.c | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/block/blk-map.c b/block/blk-map.c
index 572140cda5ff..4849fa36161e 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -42,7 +42,7 @@ static int __blk_rq_unmap_user(struct bio *bio)
 
 static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 			     struct rq_map_data *map_data, void __user *ubuf,
-			     unsigned int len, gfp_t gfp_mask)
+			     unsigned int len, int null_mapped, gfp_t gfp_mask)
 {
 	unsigned long uaddr;
 	struct bio *bio, *orig_bio;
@@ -63,6 +63,9 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
+	if (null_mapped)
+		bio->bi_flags |= (1 << BIO_NULL_MAPPED);
+
 	orig_bio = bio;
 	blk_queue_bounce(q, &bio);
@@ -111,12 +114,17 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 {
 	unsigned long bytes_read = 0;
 	struct bio *bio = NULL;
-	int ret;
+	int ret, null_mapped = 0;
 
 	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
-	if (!len || !ubuf)
+	if (!len)
 		return -EINVAL;
+	if (!ubuf) {
+		if (!map_data || rq_data_dir(rq) != READ)
+			return -EINVAL;
+		null_mapped = 1;
+	}
 
 	while (bytes_read != len) {
 		unsigned long map_len, end, start;
@@ -135,7 +143,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 			map_len -= PAGE_SIZE;
 
 		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
-					gfp_mask);
+					null_mapped, gfp_mask);
 		if (ret < 0)
 			goto unmap_rq;
 		if (!bio)