author | Tejun Heo <htejun@gmail.com> | 2005-06-23 00:08:49 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-06-23 09:45:15 -0700 |
commit | fa72b903f75e4f0f0b2c2feed093005167da4023 (patch) | |
tree | 12087e87fb8d41d10013946e5b2c91e57265c29e /drivers/block/ll_rw_blk.c | |
parent | 2bf0fdad51c6710bf15d0bf4b9b30b8498fe4ddd (diff) | |
[PATCH] blk: remove blk_queue_tag->real_max_depth optimization
blk_queue_tag->real_max_depth was used to optimize out unnecessary
allocations/frees on tag resize. However, the whole thing was very broken:
tag_map was never allocated to real_max_depth, resulting in access beyond the
end of the map, and the bits in [max_depth..real_max_depth] were set when
initializing a map and copied when resizing, resulting in pre-occupied tags.
As the gain of the optimization is very small, almost nil, remove the
whole thing.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Acked-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
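
For readers curious about the arithmetic, the patch replaces the old
`bits = (depth / BLK_TAGS_PER_LONG) + 1` sizing with an exact round-up,
`ALIGN(depth, BLK_TAGS_PER_LONG) / BLK_TAGS_PER_LONG`. Below is a minimal
user-space sketch, not kernel code: `ALIGN`, `BITS_PER_LONG`, and
`BLK_TAGS_PER_LONG` are redefined locally to mirror their kernel-era
definitions, and the depth values are arbitrary.

```c
#include <stdio.h>

/* local stand-ins for the kernel macros of this era */
#define BITS_PER_LONG		(8 * (int)sizeof(unsigned long))
#define BLK_TAGS_PER_LONG	BITS_PER_LONG
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int depths[] = { 1, 63, 64, 65, 128 };

	for (int i = 0; i < 5; i++) {
		int depth = depths[i];

		/* old sizing: allocates an extra long whenever depth is an
		 * exact multiple of the word size, and the bits in
		 * [depth..old_longs * BLK_TAGS_PER_LONG) then had to be
		 * pre-set to keep them out of circulation */
		int old_longs = depth / BLK_TAGS_PER_LONG + 1;

		/* new sizing: exactly enough longs to hold depth bits */
		int new_longs = ALIGN(depth, BLK_TAGS_PER_LONG)
					/ BLK_TAGS_PER_LONG;

		printf("depth %3d: old %d longs, new %d longs\n",
		       depth, old_longs, new_longs);
	}
	return 0;
}
```

Note that the old formula merely over-allocates; the real damage came from
those pre-set upper bits interacting with the resize path, as the commit
message describes and the diff below removes.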
Diffstat (limited to 'drivers/block/ll_rw_blk.c')
-rw-r--r-- | drivers/block/ll_rw_blk.c | 35 |
1 file changed, 10 insertions(+), 25 deletions(-)
```diff
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 808390c7420..896d17c28f4 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -717,7 +717,7 @@ struct request *blk_queue_find_tag(request_queue_t *q, int tag)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
 
-	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
+	if (unlikely(bqt == NULL || tag >= bqt->max_depth))
 		return NULL;
 
 	return bqt->tag_index[tag];
@@ -775,9 +775,9 @@ EXPORT_SYMBOL(blk_queue_free_tags);
 static int
 init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 {
-	int bits, i;
 	struct request **tag_index;
 	unsigned long *tag_map;
+	int nr_ulongs;
 
 	if (depth > q->nr_requests * 2) {
 		depth = q->nr_requests * 2;
@@ -789,24 +789,17 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 	if (!tag_index)
 		goto fail;
 
-	bits = (depth / BLK_TAGS_PER_LONG) + 1;
-	tag_map = kmalloc(bits * sizeof(unsigned long), GFP_ATOMIC);
+	nr_ulongs = ALIGN(depth, BLK_TAGS_PER_LONG) / BLK_TAGS_PER_LONG;
+	tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
 	if (!tag_map)
 		goto fail;
 
 	memset(tag_index, 0, depth * sizeof(struct request *));
-	memset(tag_map, 0, bits * sizeof(unsigned long));
+	memset(tag_map, 0, nr_ulongs * sizeof(unsigned long));
 	tags->max_depth = depth;
-	tags->real_max_depth = bits * BITS_PER_LONG;
 	tags->tag_index = tag_index;
 	tags->tag_map = tag_map;
 
-	/*
-	 * set the upper bits if the depth isn't a multiple of the word size
-	 */
-	for (i = depth; i < bits * BLK_TAGS_PER_LONG; i++)
-		__set_bit(i, tag_map);
-
 	return 0;
 fail:
 	kfree(tag_index);
@@ -871,32 +864,24 @@ int blk_queue_resize_tags(request_queue_t *q, int new_depth)
 	struct blk_queue_tag *bqt = q->queue_tags;
 	struct request **tag_index;
 	unsigned long *tag_map;
-	int bits, max_depth;
+	int max_depth, nr_ulongs;
 
 	if (!bqt)
 		return -ENXIO;
 
 	/*
-	 * don't bother sizing down
-	 */
-	if (new_depth <= bqt->real_max_depth) {
-		bqt->max_depth = new_depth;
-		return 0;
-	}
-
-	/*
 	 * save the old state info, so we can copy it back
 	 */
 	tag_index = bqt->tag_index;
 	tag_map = bqt->tag_map;
-	max_depth = bqt->real_max_depth;
+	max_depth = bqt->max_depth;
 
 	if (init_tag_map(q, bqt, new_depth))
 		return -ENOMEM;
 
 	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
-	bits = max_depth / BLK_TAGS_PER_LONG;
-	memcpy(bqt->tag_map, tag_map, bits * sizeof(unsigned long));
+	nr_ulongs = ALIGN(max_depth, BLK_TAGS_PER_LONG) / BLK_TAGS_PER_LONG;
+	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));
 
 	kfree(tag_index);
 	kfree(tag_map);
@@ -926,7 +911,7 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
 
 	BUG_ON(tag == -1);
 
-	if (unlikely(tag >= bqt->real_max_depth))
+	if (unlikely(tag >= bqt->max_depth))
 		return;
 
 	if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
```
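
A companion sketch of the second failure mode the message describes: the old
`init_tag_map()` pre-set the bits in `[depth..real_max_depth)`, and the old
`blk_queue_resize_tags()` copied the bitmap verbatim, so after growing the
queue those tags looked permanently busy. Again plain user-space C, not
kernel code: `set_bit()`/`test_bit()` here are simplified local stand-ins
for the kernel bit helpers, and the depths are arbitrary.

```c
#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG		(8 * (int)sizeof(unsigned long))
#define BLK_TAGS_PER_LONG	BITS_PER_LONG

/* simplified stand-ins for the kernel's __set_bit()/test_bit() */
static void set_bit(int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int test_bit(int nr, const unsigned long *map)
{
	return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
	int depth = 48, new_depth = 100;

	/* old init_tag_map(): size the map to bits longs, then pre-set
	 * the tags in [depth..bits * BLK_TAGS_PER_LONG) */
	int bits = depth / BLK_TAGS_PER_LONG + 1;
	unsigned long old_map[4] = { 0 };
	for (int i = depth; i < bits * BLK_TAGS_PER_LONG; i++)
		set_bit(i, old_map);

	/* old resize path: copy the old bitmap verbatim into the
	 * freshly zeroed, larger map */
	unsigned long new_map[4] = { 0 };
	memcpy(new_map, old_map, bits * sizeof(unsigned long));

	/* the pre-set bits survive the copy: these tags can never be
	 * handed out even though nothing owns them */
	for (int tag = depth; tag < new_depth; tag++)
		if (test_bit(tag, new_map))
			printf("tag %d pre-occupied after resize\n", tag);
	return 0;
}
```

Under the scheme the patch adopts, neither problem can arise: no bit past
max_depth is ever set, and resize copies exactly the longs that hold live
tags.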