author    | Tejun Heo <tj@kernel.org>          | 2009-04-23 11:05:18 +0900
committer | Jens Axboe <jens.axboe@oracle.com> | 2009-04-28 07:37:34 +0200
commit    | 158dbda0068e63c7cce7bd47c123bd1dfa5a902c (patch)
tree      | 2665f31350ba4f0875c7611c980b0831c22d8c98 /block
parent    | 5efccd17ceb0fc43837a331297c2c407969d7201 (diff)
download  | linux-exynos-158dbda0068e63c7cce7bd47c123bd1dfa5a902c.tar.gz
          | linux-exynos-158dbda0068e63c7cce7bd47c123bd1dfa5a902c.tar.bz2
          | linux-exynos-158dbda0068e63c7cce7bd47c123bd1dfa5a902c.zip
block: reorganize request fetching functions
Impact: code reorganization
elv_next_request() and elv_dequeue_request() are public block layer
interfaces rather than part of the actual elevator implementation. They
mostly deal with how requests interact with the block layer and low-level
drivers at the beginning of request processing, whereas __elv_next_request()
is the actual elevator request fetching interface.
Move the two functions to blk-core.c. This prepares for further
interface cleanup.
Signed-off-by: Tejun Heo <tj@kernel.org>
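For context, here is a minimal, hypothetical sketch (not part of this patch) of how a low-level driver of this era drives the two moved entry points from its request_fn: elv_next_request() peeks at the next started and prepped request, and elv_dequeue_request() removes it from the queue before the hardware is kicked (drivers commonly went through the blkdev_dequeue_request() wrapper instead). The mydev_* names are invented for illustration.

```c
#include <linux/blkdev.h>

/* Assumed driver helper, not a real kernel symbol: issues rq to the hardware. */
static void mydev_handle_rq(struct request *rq);

/* request_fn is invoked with the queue lock held. */
static void mydev_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* elv_next_request() returns the next prepared request, or NULL. */
	while ((rq = elv_next_request(q)) != NULL) {
		/* Take it off the queue before completing it asynchronously. */
		elv_dequeue_request(q, rq);
		mydev_handle_rq(rq);
	}
}
```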
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-core.c |  95
-rw-r--r-- | block/blk.h      |  37
-rw-r--r-- | block/elevator.c | 128
3 files changed, 132 insertions, 128 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 406a93e526b6..678ede23ed0a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1712,6 +1712,101 @@ unsigned int blk_rq_cur_bytes(struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
 
+struct request *elv_next_request(struct request_queue *q)
+{
+	struct request *rq;
+	int ret;
+
+	while ((rq = __elv_next_request(q)) != NULL) {
+		if (!(rq->cmd_flags & REQ_STARTED)) {
+			/*
+			 * This is the first time the device driver
+			 * sees this request (possibly after
+			 * requeueing). Notify IO scheduler.
+			 */
+			if (blk_sorted_rq(rq))
+				elv_activate_rq(q, rq);
+
+			/*
+			 * just mark as started even if we don't start
+			 * it, a request that has been delayed should
+			 * not be passed by new incoming requests
+			 */
+			rq->cmd_flags |= REQ_STARTED;
+			trace_block_rq_issue(q, rq);
+		}
+
+		if (!q->boundary_rq || q->boundary_rq == rq) {
+			q->end_sector = rq_end_sector(rq);
+			q->boundary_rq = NULL;
+		}
+
+		if (rq->cmd_flags & REQ_DONTPREP)
+			break;
+
+		if (q->dma_drain_size && rq->data_len) {
+			/*
+			 * make sure space for the drain appears we
+			 * know we can do this because max_hw_segments
+			 * has been adjusted to be one fewer than the
+			 * device can handle
+			 */
+			rq->nr_phys_segments++;
+		}
+
+		if (!q->prep_rq_fn)
+			break;
+
+		ret = q->prep_rq_fn(q, rq);
+		if (ret == BLKPREP_OK) {
+			break;
+		} else if (ret == BLKPREP_DEFER) {
+			/*
+			 * the request may have been (partially) prepped.
+			 * we need to keep this request in the front to
+			 * avoid resource deadlock. REQ_STARTED will
+			 * prevent other fs requests from passing this one.
+			 */
+			if (q->dma_drain_size && rq->data_len &&
+			    !(rq->cmd_flags & REQ_DONTPREP)) {
+				/*
+				 * remove the space for the drain we added
+				 * so that we don't add it again
+				 */
+				--rq->nr_phys_segments;
+			}
+
+			rq = NULL;
+			break;
+		} else if (ret == BLKPREP_KILL) {
+			rq->cmd_flags |= REQ_QUIET;
+			__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
+		} else {
+			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
+			break;
+		}
+	}
+
+	return rq;
+}
+EXPORT_SYMBOL(elv_next_request);
+
+void elv_dequeue_request(struct request_queue *q, struct request *rq)
+{
+	BUG_ON(list_empty(&rq->queuelist));
+	BUG_ON(ELV_ON_HASH(rq));
+
+	list_del_init(&rq->queuelist);
+
+	/*
+	 * the time frame between a request being removed from the lists
+	 * and to it is freed is accounted as io that is in progress at
+	 * the driver side.
+	 */
+	if (blk_account_rq(rq))
+		q->in_flight++;
+}
+
 /**
  * __end_that_request_first - end I/O on a request
  * @req: the request being processed
diff --git a/block/blk.h b/block/blk.h
index 79c85f7c9ff5..9b2c324e4407 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -43,6 +43,43 @@ static inline void blk_clear_rq_complete(struct request *rq)
 	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
 }
 
+/*
+ * Internal elevator interface
+ */
+#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
+
+static inline struct request *__elv_next_request(struct request_queue *q)
+{
+	struct request *rq;
+
+	while (1) {
+		while (!list_empty(&q->queue_head)) {
+			rq = list_entry_rq(q->queue_head.next);
+			if (blk_do_ordered(q, &rq))
+				return rq;
+		}
+
+		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
+			return NULL;
+	}
+}
+
+static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
+{
+	struct elevator_queue *e = q->elevator;
+
+	if (e->ops->elevator_activate_req_fn)
+		e->ops->elevator_activate_req_fn(q, rq);
+}
+
+static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
+{
+	struct elevator_queue *e = q->elevator;
+
+	if (e->ops->elevator_deactivate_req_fn)
+		e->ops->elevator_deactivate_req_fn(q, rq);
+}
+
 #ifdef CONFIG_FAIL_IO_TIMEOUT
 int blk_should_fake_timeout(struct request_queue *);
 ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
diff --git a/block/elevator.c b/block/elevator.c
index 2e0fb21485b7..b03b8752e18b 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -53,7 +53,6 @@ static const int elv_hash_shift = 6;
 	(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
 #define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
 #define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
-#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
 
 DEFINE_TRACE(block_rq_insert);
 DEFINE_TRACE(block_rq_issue);
@@ -310,22 +309,6 @@ void elevator_exit(struct elevator_queue *e)
 }
 EXPORT_SYMBOL(elevator_exit);
 
-static void elv_activate_rq(struct request_queue *q, struct request *rq)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (e->ops->elevator_activate_req_fn)
-		e->ops->elevator_activate_req_fn(q, rq);
-}
-
-static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (e->ops->elevator_deactivate_req_fn)
-		e->ops->elevator_deactivate_req_fn(q, rq);
-}
-
 static inline void __elv_rqhash_del(struct request *rq)
 {
 	hlist_del_init(&rq->hash);
@@ -758,117 +741,6 @@ void elv_add_request(struct request_queue *q, struct request *rq, int where,
 }
 EXPORT_SYMBOL(elv_add_request);
 
-static inline struct request *__elv_next_request(struct request_queue *q)
-{
-	struct request *rq;
-
-	while (1) {
-		while (!list_empty(&q->queue_head)) {
-			rq = list_entry_rq(q->queue_head.next);
-			if (blk_do_ordered(q, &rq))
-				return rq;
-		}
-
-		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
-			return NULL;
-	}
-}
-
-struct request *elv_next_request(struct request_queue *q)
-{
-	struct request *rq;
-	int ret;
-
-	while ((rq = __elv_next_request(q)) != NULL) {
-		if (!(rq->cmd_flags & REQ_STARTED)) {
-			/*
-			 * This is the first time the device driver
-			 * sees this request (possibly after
-			 * requeueing). Notify IO scheduler.
-			 */
-			if (blk_sorted_rq(rq))
-				elv_activate_rq(q, rq);
-
-			/*
-			 * just mark as started even if we don't start
-			 * it, a request that has been delayed should
-			 * not be passed by new incoming requests
-			 */
-			rq->cmd_flags |= REQ_STARTED;
-			trace_block_rq_issue(q, rq);
-		}
-
-		if (!q->boundary_rq || q->boundary_rq == rq) {
-			q->end_sector = rq_end_sector(rq);
-			q->boundary_rq = NULL;
-		}
-
-		if (rq->cmd_flags & REQ_DONTPREP)
-			break;
-
-		if (q->dma_drain_size && rq->data_len) {
-			/*
-			 * make sure space for the drain appears we
-			 * know we can do this because max_hw_segments
-			 * has been adjusted to be one fewer than the
-			 * device can handle
-			 */
-			rq->nr_phys_segments++;
-		}
-
-		if (!q->prep_rq_fn)
-			break;
-
-		ret = q->prep_rq_fn(q, rq);
-		if (ret == BLKPREP_OK) {
-			break;
-		} else if (ret == BLKPREP_DEFER) {
-			/*
-			 * the request may have been (partially) prepped.
-			 * we need to keep this request in the front to
-			 * avoid resource deadlock. REQ_STARTED will
-			 * prevent other fs requests from passing this one.
-			 */
-			if (q->dma_drain_size && rq->data_len &&
-			    !(rq->cmd_flags & REQ_DONTPREP)) {
-				/*
-				 * remove the space for the drain we added
-				 * so that we don't add it again
-				 */
-				--rq->nr_phys_segments;
-			}
-
-			rq = NULL;
-			break;
-		} else if (ret == BLKPREP_KILL) {
-			rq->cmd_flags |= REQ_QUIET;
-			__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
-		} else {
-			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
-			break;
-		}
-	}
-
-	return rq;
-}
-EXPORT_SYMBOL(elv_next_request);
-
-void elv_dequeue_request(struct request_queue *q, struct request *rq)
-{
-	BUG_ON(list_empty(&rq->queuelist));
-	BUG_ON(ELV_ON_HASH(rq));
-
-	list_del_init(&rq->queuelist);
-
-	/*
-	 * the time frame between a request being removed from the lists
-	 * and to it is freed is accounted as io that is in progress at
-	 * the driver side.
-	 */
-	if (blk_account_rq(rq))
-		q->in_flight++;
-}
-
 int elv_queue_empty(struct request_queue *q)
 {
 	struct elevator_queue *e = q->elevator;
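The elv_next_request() loop moved above is driven by the queue's prep_rq_fn and its BLKPREP_* return contract. Below is a hypothetical, minimal prep_rq_fn sketch (not from this commit) showing how a driver would return BLKPREP_OK or BLKPREP_DEFER and mark a request with REQ_DONTPREP so it is not prepped again after a requeue; struct mydev_cmd and mydev_alloc_cmd() are assumed driver helpers.

```c
#include <linux/blkdev.h>

/* Assumed driver-private command structure and allocator, not kernel symbols. */
struct mydev_cmd;
static struct mydev_cmd *mydev_alloc_cmd(struct request *rq);

static int mydev_prep_rq_fn(struct request_queue *q, struct request *rq)
{
	struct mydev_cmd *cmd = mydev_alloc_cmd(rq);

	if (!cmd)
		/* Resource shortage: elv_next_request() keeps rq at the head. */
		return BLKPREP_DEFER;

	rq->special = cmd;
	/* Prevent elv_next_request() from prepping this request again. */
	rq->cmd_flags |= REQ_DONTPREP;
	return BLKPREP_OK;
}
```

A driver would register this via blk_queue_prep_rq(q, mydev_prep_rq_fn); a BLKPREP_KILL return, by contrast, makes the moved elv_next_request() complete the request with -EIO, as the blk-core.c hunk above shows.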