author     Tejun Heo <tj@kernel.org>        2012-06-04 20:40:58 -0700
committer  Jens Axboe <axboe@kernel.dk>     2012-06-25 11:53:51 +0200
commit     8a5ecdd42862bf87ceab00bf2a15d7eabf58c02d (patch)
tree       36ff209f0655c5da9cfb7c5c6f6e9b9786841201 /block
parent     b1208b56f31408f7d8381ff5d08e970aa5ee761c (diff)
block: add q->nr_rqs[] and move q->rq.elvpriv to q->nr_rqs_elvpriv
Add q->nr_rqs[] which currently behaves the same as q->rq.count[] and
move q->rq.elvpriv to q->nr_rqs_elvpriv. blk_drain_queue() is updated
to use q->nr_rqs[] instead of q->rq.count[].
These counters separate queue-wide request statistics from the
request list and allow implementation of per-queue request allocation.
While at it, properly indent fields of struct request_list.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
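For context, the two structures end up split roughly as sketched below. The header side of this change (the actual nr_rqs[] and nr_rqs_elvpriv declarations) lives in include/linux/blkdev.h, which is outside this block/-limited diffstat, so this is a non-verbatim sketch inferred from the commit message and the hunks; the wait_queue_head_t stub and the exact field types are assumptions made only so the snippet stands on its own.

/* Stand-in for the kernel's wait_queue_head_t, only so this sketch is
 * self-contained; the real type comes from <linux/wait.h>. */
typedef struct { int dummy; } wait_queue_head_t;

/* After the patch, struct request_list keeps only per-list allocation
 * state (rl->elvpriv is gone)... */
struct request_list {
	unsigned int		count[2];	/* allocated requests, sync/async */
	unsigned int		starved[2];	/* allocation starved for lack of rqs */
	wait_queue_head_t	wait[2];	/* waiters for a free request */
};

/* ...while the queue-wide totals move onto the queue itself. */
struct request_queue {
	unsigned int		nr_rqs[2];	/* allocated requests, queue-wide */
	unsigned int		nr_rqs_elvpriv;	/* allocated rqs with elevator data */
	struct request_list	rq;		/* still a single list at this point */
	/* ...many other fields omitted... */
};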
Diffstat (limited to 'block')
 block/blk-core.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 71894e143b9..a2648153691 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -387,7 +387,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 		if (!list_empty(&q->queue_head) && q->request_fn)
 			__blk_run_queue(q);
 
-		drain |= q->rq.elvpriv;
+		drain |= q->nr_rqs_elvpriv;
 
 		/*
 		 * Unfortunately, requests are queued at and tracked from
@@ -397,7 +397,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 		if (drain_all) {
 			drain |= !list_empty(&q->queue_head);
 			for (i = 0; i < 2; i++) {
-				drain |= q->rq.count[i];
+				drain |= q->nr_rqs[i];
 				drain |= q->in_flight[i];
 				drain |= !list_empty(&q->flush_queue[i]);
 			}
@@ -526,7 +526,6 @@ static int blk_init_free_list(struct request_queue *q)
 
 	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
 	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
-	rl->elvpriv = 0;
 	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
 	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
 
@@ -791,9 +790,10 @@ static void freed_request(struct request_queue *q, unsigned int flags)
 	struct request_list *rl = &q->rq;
 	int sync = rw_is_sync(flags);
 
+	q->nr_rqs[sync]--;
 	rl->count[sync]--;
 	if (flags & REQ_ELVPRIV)
-		rl->elvpriv--;
+		q->nr_rqs_elvpriv--;
 
 	__freed_request(q, sync);
 
@@ -902,6 +902,7 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
 		if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
 			return NULL;
 
+	q->nr_rqs[is_sync]++;
 	rl->count[is_sync]++;
 	rl->starved[is_sync] = 0;
 
@@ -917,7 +918,7 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
 	 */
 	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
 		rw_flags |= REQ_ELVPRIV;
-		rl->elvpriv++;
+		q->nr_rqs_elvpriv++;
 		if (et->icq_cache && ioc)
 			icq = ioc_lookup_icq(ioc, q);
 	}
@@ -978,7 +979,7 @@ fail_elvpriv:
 	rq->elv.icq = NULL;
 
 	spin_lock_irq(q->queue_lock);
-	rl->elvpriv--;
+	q->nr_rqs_elvpriv--;
 	spin_unlock_irq(q->queue_lock);
 	goto out;
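The point of keeping the totals on the queue rather than in the request list is that drain and accounting logic no longer needs to know how many request lists feed the queue. The toy program below (plain C, invented toy_* names, not kernel code) models a queue with two request lists to show the pattern the hunks above establish: the allocation and free paths update both the per-list count[] and the queue-wide nr_rqs[], and a drain-style check reads only the queue-wide counters.

#include <stdbool.h>
#include <stdio.h>

enum { TOY_ASYNC = 0, TOY_SYNC = 1 };

/* Toy model: per-list counters plus queue-wide totals kept in step. */
struct toy_request_list {
	unsigned int count[2];
};

struct toy_queue {
	unsigned int nr_rqs[2];          /* queue-wide totals, like q->nr_rqs[] */
	struct toy_request_list rl[2];   /* pretend the queue has two lists */
};

static void toy_get_request(struct toy_queue *q, int list, int sync)
{
	q->nr_rqs[sync]++;           /* queue-wide, like q->nr_rqs[is_sync]++ */
	q->rl[list].count[sync]++;   /* per-list, like rl->count[is_sync]++ */
}

static void toy_freed_request(struct toy_queue *q, int list, int sync)
{
	q->nr_rqs[sync]--;
	q->rl[list].count[sync]--;
}

/* A drain check needs only the queue-wide counters, regardless of how
 * many request lists exist. */
static bool toy_queue_busy(const struct toy_queue *q)
{
	return q->nr_rqs[TOY_SYNC] || q->nr_rqs[TOY_ASYNC];
}

int main(void)
{
	struct toy_queue q = { 0 };

	toy_get_request(&q, 0, TOY_SYNC);
	toy_get_request(&q, 1, TOY_ASYNC);
	printf("busy: %d\n", toy_queue_busy(&q));   /* prints "busy: 1" */

	toy_freed_request(&q, 0, TOY_SYNC);
	toy_freed_request(&q, 1, TOY_ASYNC);
	printf("busy: %d\n", toy_queue_busy(&q));   /* prints "busy: 0" */
	return 0;
}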