author		Nick Piggin <nickpiggin@yahoo.com.au>	2005-06-28 20:45:13 -0700
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-06-28 21:20:34 -0700
commit		450991bc1026135ee30482a4a806d069915ab2f6 (patch)
tree		186359995e27df92fd6539ad0a0657df8e79322e /drivers/block/ll_rw_blk.c
parent		69f63c5c34d0b34ee2cbf10c5ff7fcff0404879e (diff)
[PATCH] blk: __make_request efficiency
In the case where the request is not able to be merged by the elevator, don't retake the lock and retry the merge mechanism after allocating a new request. Instead assume that the chance of a merge remains slim, and now that we've done most of the work allocating a request we may as well just go with it.

Also be rid of the GFP_ATOMIC allocation: we've got working mempools for the block layer now, so let's save atomic memory for things like networking.

Lastly, in get_request_wait, do an initial get_request call before going into the waitqueue. This is reported to help efficiency.

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Jens Axboe <axboe@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
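As an illustration only (not part of the commit): a minimal userspace sketch of the "try once before sleeping" idea the message describes. A counted pool guarded by a pthread mutex and condition variable stands in for the block layer's request_list and waitqueue; the pool_try_get()/pool_get_wait()/pool_put() helpers are invented names for this example, not kernel APIs.

/* build: cc -pthread pool_sketch.c */
#include <pthread.h>

struct pool {
	pthread_mutex_t lock;
	pthread_cond_t  nonempty;
	int             free;		/* free slots left */
};

/* Non-blocking attempt: returns 1 on success, 0 if the pool is empty. */
static int pool_try_get(struct pool *p)
{
	int got = 0;

	pthread_mutex_lock(&p->lock);
	if (p->free > 0) {
		p->free--;
		got = 1;
	}
	pthread_mutex_unlock(&p->lock);
	return got;
}

/*
 * Blocking variant: mirror get_request_wait() after the patch -- try the
 * cheap path first, and only fall into the sleep/retry loop if it fails.
 */
static void pool_get_wait(struct pool *p)
{
	if (pool_try_get(p))
		return;			/* common case: no waitqueue setup */

	pthread_mutex_lock(&p->lock);
	while (p->free == 0)
		pthread_cond_wait(&p->nonempty, &p->lock);
	p->free--;
	pthread_mutex_unlock(&p->lock);
}

/* Releasing a slot wakes one waiter, roughly like freeing a request. */
static void pool_put(struct pool *p)
{
	pthread_mutex_lock(&p->lock);
	p->free++;
	pthread_cond_signal(&p->nonempty);
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct pool p = {
		.lock     = PTHREAD_MUTEX_INITIALIZER,
		.nonempty = PTHREAD_COND_INITIALIZER,
		.free     = 2,
	};

	pool_get_wait(&p);		/* fast path: a slot is free, no sleep */
	pool_put(&p);
	return 0;
}

The point of the restructuring, as in the patch below, is that the common "a slot is free" case never pays for the wait setup/teardown; only the contended case enters the loop.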
Diffstat (limited to 'drivers/block/ll_rw_blk.c')
-rw-r--r--	drivers/block/ll_rw_blk.c	62
1 file changed, 21 insertions, 41 deletions
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 6c98cf04271..67431f28015 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -1971,10 +1971,11 @@ out:
static struct request *get_request_wait(request_queue_t *q, int rw,
struct bio *bio)
{
- DEFINE_WAIT(wait);
struct request *rq;
- do {
+ rq = get_request(q, rw, bio, GFP_NOIO);
+ while (!rq) {
+ DEFINE_WAIT(wait);
struct request_list *rl = &q->rq;
prepare_to_wait_exclusive(&rl->wait[rw], &wait,
@@ -1999,7 +2000,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
put_io_context(ioc);
}
finish_wait(&rl->wait[rw], &wait);
- } while (!rq);
+ }
return rq;
}
@@ -2521,7 +2522,7 @@ EXPORT_SYMBOL(blk_attempt_remerge);
static int __make_request(request_queue_t *q, struct bio *bio)
{
- struct request *req, *freereq = NULL;
+ struct request *req;
int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync;
unsigned short prio;
sector_t sector;
@@ -2549,14 +2550,9 @@ static int __make_request(request_queue_t *q, struct bio *bio)
goto end_io;
}
-again:
spin_lock_irq(q->queue_lock);
- if (elv_queue_empty(q)) {
- blk_plug_device(q);
- goto get_rq;
- }
- if (barrier)
+ if (unlikely(barrier) || elv_queue_empty(q))
goto get_rq;
el_ret = elv_merge(q, &req, bio);
@@ -2601,40 +2597,23 @@ again:
elv_merged_request(q, req);
goto out;
- /*
- * elevator says don't/can't merge. get new request
- */
- case ELEVATOR_NO_MERGE:
- break;
-
+ /* ELV_NO_MERGE: elevator says don't/can't merge. */
default:
- printk("elevator returned crap (%d)\n", el_ret);
- BUG();
+ ;
}
+get_rq:
/*
- * Grab a free request from the freelist - if that is empty, check
- * if we are doing read ahead and abort instead of blocking for
- * a free slot.
+ * Grab a free request. This may sleep but cannot fail.
+ */
+ spin_unlock_irq(q->queue_lock);
+ req = get_request_wait(q, rw, bio);
+ /*
+ * After dropping the lock and possibly sleeping here, our request
+ * may now be mergeable after it had proven unmergeable (above).
+ * We don't worry about that case for efficiency. It won't happen
+ * often, and the elevators are able to handle it.
*/
-get_rq:
- if (freereq) {
- req = freereq;
- freereq = NULL;
- } else {
- spin_unlock_irq(q->queue_lock);
- if ((freereq = get_request(q, rw, bio, GFP_ATOMIC)) == NULL) {
- /*
- * READA bit set
- */
- err = -EWOULDBLOCK;
- if (bio_rw_ahead(bio))
- goto end_io;
-
- freereq = get_request_wait(q, rw, bio);
- }
- goto again;
- }
req->flags |= REQ_CMD;
@@ -2663,10 +2642,11 @@ get_rq:
req->rq_disk = bio->bi_bdev->bd_disk;
req->start_time = jiffies;
+ spin_lock_irq(q->queue_lock);
+ if (elv_queue_empty(q))
+ blk_plug_device(q);
add_request(q, req);
out:
- if (freereq)
- __blk_put_request(q, freereq);
if (sync)
__generic_unplug_device(q);