author    | Linus Torvalds <torvalds@linux-foundation.org> | 2012-04-13 18:07:19 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-04-13 18:07:19 -0700
commit    | d8dd0b6d4836bce81cece60509ef3b157a420776 (patch)
tree      | 7a28f327a15443d6c9d091f3d272abd107251ab7
parent    | 2d59dcfb54ade45cacc59a6e7bd96b8c19088c3d (diff)
parent    | 1b2e19f17ed327af6add02978efdf354e4f8e4df (diff)
Merge branch 'for-3.4/core' of git://git.kernel.dk/linux-block
Pull block core bits from Jens Axboe:
"It's a nice and quiet round this time, since most of the tricky stuff
has been pushed to 3.5 to give it more time to mature. After a few
hectic block IO core changes for 3.3 and 3.2, I'm quite happy with a
slow round.
Really minor stuff in here, the only real functional change is making
the auto-unplug threshold a per-queue entity. The threshold is set so
that it's low enough that we don't hold off IO for too long, but still
big enough to get a nice benefit from the batched insert (and hence
queue lock cost reduction). For raid configurations, this currently
breaks down."
* 'for-3.4/core' of git://git.kernel.dk/linux-block:
block: make auto block plug flush threshold per-disk based
Documentation: Add sysfs ABI change for cfq's target latency.
block: Make cfq_target_latency tunable through sysfs.
block: use lockdep_assert_held for queue locking
block: blk_alloc_queue_node(): use caller's GFP flags instead of GFP_KERNEL
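
The two cfq_target_latency commits above expose the latency budget that cfq divides among groups by weight (see cfq_group_slice() in the diff below). As a rough illustration of that arithmetic, here is a hedged user-space model; the structs are reduced to the fields needed here, and the 300 ms figure mirrors cfq's usual default but is only an example value:

```c
/* Simplified model of how cfq splits target_latency across groups. */
#include <stdio.h>

struct cfq_group { unsigned int weight; };

/* Each group's slice is the target latency scaled by its share of the total weight. */
static unsigned int cfq_group_slice(unsigned int target_latency_ms,
                                    const struct cfq_group *grp,
                                    unsigned int total_weight)
{
    return target_latency_ms * grp->weight / total_weight;
}

int main(void)
{
    unsigned int target_latency_ms = 300;            /* example: cfq's usual default */
    struct cfq_group a = { .weight = 500 }, b = { .weight = 100 };
    unsigned int total = a.weight + b.weight;

    printf("group a slice: %u ms\n", cfq_group_slice(target_latency_ms, &a, total));
    printf("group b slice: %u ms\n", cfq_group_slice(target_latency_ms, &b, total));
    return 0;
}
```

Raising target_latency therefore lengthens every group's slice proportionally, trading latency for throughput.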
-rw-r--r-- | Documentation/ABI/testing/sysfs-cfq-target-latency |  8
-rw-r--r-- | block/blk-core.c                                    |  5
-rw-r--r-- | block/blk-throttle.c                                |  2
-rw-r--r-- | block/cfq-iosched.c                                 | 10
-rw-r--r-- | include/linux/blkdev.h                              | 18
5 files changed, 27 insertions(+), 16 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-cfq-target-latency b/Documentation/ABI/testing/sysfs-cfq-target-latency
new file mode 100644
index 00000000000..df0f7828c5e
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-cfq-target-latency
@@ -0,0 +1,8 @@
+What:          /sys/block/<device>/iosched/target_latency
+Date:          March 2012
+contact:       Tao Ma <boyu.mt@taobao.com>
+Description:
+               The /sys/block/<device>/iosched/target_latency only exists
+               when the user sets cfq to /sys/block/<device>/scheduler.
+               It contains an estimated latency time for the cfq. cfq will
+               use it to calculate the time slice used for every task.
diff --git a/block/blk-core.c b/block/blk-core.c
index 3a78b00edd7..1f61b74867e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -483,7 +483,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	if (!q)
 		return NULL;
 
-	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
+	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
 	if (q->id < 0)
 		goto fail_q;
 
@@ -1277,7 +1277,8 @@ static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
 	list_for_each_entry_reverse(rq, &plug->list, queuelist) {
 		int el_ret;
 
-		(*request_count)++;
+		if (rq->q == q)
+			(*request_count)++;
 
 		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
 			continue;
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 5eed6a76721..f2ddb94626b 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1218,7 +1218,7 @@ void blk_throtl_drain(struct request_queue *q)
 	struct bio_list bl;
 	struct bio *bio;
 
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 
 	bio_list_init(&bl);
 
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 45729525356..3c38536bd52 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -295,6 +295,7 @@ struct cfq_data {
 	unsigned int cfq_slice_idle;
 	unsigned int cfq_group_idle;
 	unsigned int cfq_latency;
+	unsigned int cfq_target_latency;
 
 	/*
 	 * Fallback dummy cfqq for extreme OOM conditions
@@ -604,7 +605,7 @@ cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
 
-	return cfq_target_latency * cfqg->weight / st->total_weight;
+	return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
 }
 
 static inline unsigned
@@ -2271,7 +2272,8 @@ new_workload:
 		 * to have higher weight. A more accurate thing would be to
 		 * calculate system wide asnc/sync ratio.
 		 */
-		tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
+		tmp = cfqd->cfq_target_latency *
+			cfqg_busy_async_queues(cfqd, cfqg);
 		tmp = tmp/cfqd->busy_queues;
 		slice = min_t(unsigned, slice, tmp);
 
@@ -3737,6 +3739,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->cfq_back_penalty = cfq_back_penalty;
 	cfqd->cfq_slice[0] = cfq_slice_async;
 	cfqd->cfq_slice[1] = cfq_slice_sync;
+	cfqd->cfq_target_latency = cfq_target_latency;
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_idle = cfq_slice_idle;
 	cfqd->cfq_group_idle = cfq_group_idle;
@@ -3788,6 +3791,7 @@ SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
+SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
@@ -3821,6 +3825,7 @@ STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
 		UINT_MAX, 0);
 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
+STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
 #undef STORE_FUNCTION
 
 #define CFQ_ATTR(name) \
@@ -3838,6 +3843,7 @@ static struct elv_fs_entry cfq_attrs[] = {
 	CFQ_ATTR(slice_idle),
 	CFQ_ATTR(group_idle),
 	CFQ_ATTR(low_latency),
+	CFQ_ATTR(target_latency),
 	__ATTR_NULL
 };
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 606cf339bb5..2aa24664a5b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -426,14 +426,10 @@ struct request_queue {
 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
 				 (1 << QUEUE_FLAG_ADD_RANDOM))
 
-static inline int queue_is_locked(struct request_queue *q)
+static inline void queue_lockdep_assert_held(struct request_queue *q)
 {
-#ifdef CONFIG_SMP
-	spinlock_t *lock = q->queue_lock;
-	return lock && spin_is_locked(lock);
-#else
-	return 1;
-#endif
+	if (q->queue_lock)
+		lockdep_assert_held(q->queue_lock);
 }
 
 static inline void queue_flag_set_unlocked(unsigned int flag,
@@ -445,7 +441,7 @@ static inline void queue_flag_set_unlocked(unsigned int flag,
 static inline int queue_flag_test_and_clear(unsigned int flag,
 					    struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 
 	if (test_bit(flag, &q->queue_flags)) {
 		__clear_bit(flag, &q->queue_flags);
@@ -458,7 +454,7 @@ static inline int queue_flag_test_and_clear(unsigned int flag,
 static inline int queue_flag_test_and_set(unsigned int flag,
 					   struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 
 	if (!test_bit(flag, &q->queue_flags)) {
 		__set_bit(flag, &q->queue_flags);
@@ -470,7 +466,7 @@ static inline int queue_flag_test_and_set(unsigned int flag,
 
 static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 	__set_bit(flag, &q->queue_flags);
 }
 
@@ -487,7 +483,7 @@ static inline int queue_in_flight(struct request_queue *q)
 
 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 	__clear_bit(flag, &q->queue_flags);
 }
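
For completeness, a hedged user-space sketch of reading and adjusting the new tunable. The device name "sda" and the exact sysfs path are placeholders (on typical systems the iosched attributes sit under /sys/block/<dev>/queue/iosched/ once cfq is the active scheduler), and per the SHOW/STORE conversion above the value is exchanged in milliseconds:

```c
/* Read cfq's target_latency for one device, then raise it (needs root). */
#include <stdio.h>
#include <stdlib.h>

#define TARGET_LATENCY_PATH "/sys/block/sda/queue/iosched/target_latency"

int main(void)
{
    FILE *f = fopen(TARGET_LATENCY_PATH, "r");
    if (!f) {
        perror("open (is cfq the active scheduler for this device?)");
        return EXIT_FAILURE;
    }

    unsigned int latency_ms = 0;
    if (fscanf(f, "%u", &latency_ms) == 1)
        printf("current target_latency: %u ms\n", latency_ms);
    fclose(f);

    /* A larger latency budget favours throughput over per-task fairness. */
    f = fopen(TARGET_LATENCY_PATH, "w");
    if (!f) {
        perror("open for write (needs root)");
        return EXIT_FAILURE;
    }
    fprintf(f, "%u\n", 600u);
    fclose(f);
    return 0;
}
```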