author | Tejun Heo <tj@kernel.org> | 2009-05-07 22:24:38 +0900
---|---|---
committer | Jens Axboe <jens.axboe@oracle.com> | 2009-05-11 09:50:53 +0200
commit | 5b93629b4509c03ffa87a9316412fedf6f58cb37 (patch) |
tree | ad5ceda3f718361b8df3057b4f9edcc79aa4cd9d /block |
parent | c3a4d78c580de4edc9ef0f7c59812fb02ceb037f (diff) |
download | linux-stable-5b93629b4509c03ffa87a9316412fedf6f58cb37.tar.gz linux-stable-5b93629b4509c03ffa87a9316412fedf6f58cb37.tar.bz2 linux-stable-5b93629b4509c03ffa87a9316412fedf6f58cb37.zip |
block: implement blk_rq_pos/[cur_]sectors() and convert obvious ones
Implement accessors - blk_rq_pos(), blk_rq_sectors() and
blk_rq_cur_sectors() - which return rq->hard_sector, rq->hard_nr_sectors
and rq->hard_cur_sectors respectively, and convert the obvious direct
references to those fields into calls to the accessors.
This is in preparation for the request data length handling cleanup.
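For orientation, here is a minimal sketch of the three accessors, assuming they are added as static inlines in include/linux/blkdev.h (that hunk is outside the diffstat below, which is limited to 'block'); the const-qualified parameter follows Geert's suggestion noted below:

```c
/*
 * Sketch of the accessors this patch introduces (assumed to live as
 * static inlines in include/linux/blkdev.h; that hunk is not shown in
 * the 'block'-limited diffstat below).
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->hard_sector;		/* start sector of the request */
}

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return rq->hard_nr_sectors;	/* sectors left to complete */
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return rq->hard_cur_sectors;	/* sectors left in the current segment */
}
```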
Geert: suggested adding const to the struct request * parameter of the accessors
Sergei: spotted an error in the patch description
[ Impact: cleanup ]
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Geert Uytterhoeven <Geert.Uytterhoeven@sonycom.com>
Acked-by: Stephen Rothwell <sfr@canb.auug.org.au>
Tested-by: Grant Likely <grant.likely@secretlab.ca>
Acked-by: Grant Likely <grant.likely@secretlab.ca>
Acked-by: Sergei Shtylyov <sshtylyov@ru.mvista.com>
Cc: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
Cc: Borislav Petkov <petkovbb@googlemail.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-barrier.c | 2 |
-rw-r--r-- | block/blk-core.c | 2 |
-rw-r--r-- | block/cfq-iosched.c | 2 |
3 files changed, 3 insertions, 3 deletions
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index c8d087655eff..c167de5b9eff 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -163,7 +163,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 	 * For an empty barrier, there's no actual BAR request, which
 	 * in turn makes POSTFLUSH unnecessary. Mask them off.
 	 */
-	if (!rq->hard_nr_sectors) {
+	if (!blk_rq_sectors(rq)) {
 		q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
 				QUEUE_ORDERED_DO_POSTFLUSH);
 		/*
diff --git a/block/blk-core.c b/block/blk-core.c
index 394c5bd81271..895e55b74a40 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1683,7 +1683,7 @@ static void blk_account_io_done(struct request *req)
 unsigned int blk_rq_bytes(struct request *rq)
 {
 	if (blk_fs_request(rq))
-		return rq->hard_nr_sectors << 9;
+		return blk_rq_sectors(rq) << 9;
 
 	return rq->data_len;
 }
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index def0c698a4bc..575083a9ffe4 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -760,7 +760,7 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
 	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
 		     cfqd->rq_in_driver);
 
-	cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
+	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
 }
 
 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
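For readers following the conversion, the pattern in each hunk is mechanical: every direct read of rq->hard_sector or rq->hard_nr_sectors becomes the matching accessor call, and the existing << 9 shift (multiply by the kernel's fixed 512-byte sector size) is kept unchanged. A small illustrative sketch, not part of this patch; request_end_byte() is a hypothetical helper:

```c
/*
 * Hypothetical helper, for illustration only (not part of this patch):
 * compute the byte offset just past the end of a request using the new
 * accessors instead of touching the rq->hard_* fields directly.
 */
static inline u64 request_end_byte(const struct request *rq)
{
	/* blk_rq_pos() is in 512-byte sectors; << 9 converts sectors to bytes */
	return ((u64)blk_rq_pos(rq) + blk_rq_sectors(rq)) << 9;
}
```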