author    Tejun Heo <tj@kernel.org>       2012-03-05 13:15:06 -0800
committer Jens Axboe <axboe@kernel.dk>    2012-03-06 21:27:22 +0100
commit    cd1604fab4f95f7cfc227d3955fd7ae14da61f38
tree      021881faedc1c2468730f9f54d364083e70dce76 /block/cfq-iosched.c
parent    f51b802c17e2a21926b29911493f5e7ddf6eee87
blkcg: factor out blkio_group creation
Currently both blk-throttle and cfq-iosched implement their own
blkio_group creation code in throtl_get_tg() and cfq_get_cfqg(). This
patch factors out the common code into blkg_lookup_create(), which
returns an ERR_PTR value so that transient failures due to queue
bypass can be distinguished from other failures.
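For illustration, a policy-side caller can now do the lookup and
creation in one step and tell the two failure modes apart (a minimal
sketch assuming the caller holds queue_lock; only the ERR_PTR
convention comes from this patch, the specific errnos do not):

	struct blkio_group *blkg;

	blkg = blkg_lookup_create(blkcg, q, BLKIO_POLICY_PROP, false);
	if (IS_ERR(blkg)) {
		/*
		 * An ERR_PTR here distinguishes a transient failure
		 * (queue bypassing) from a hard one; a plain NULL
		 * return could not.
		 */
		return PTR_ERR(blkg);
	}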
* New blkio_policy_ops methods blkio_alloc_group_fn() and
  blkio_link_group_fn() are added. Both are transitional and will be
  removed once the blkg management code is fully moved into
  blk-cgroup.c. (A sketch of a policy wiring these up follows this
  list.)
* blkio_alloc_group_fn() allocates the policy-specific blkg, which is
  usually a larger data structure with the blkg as its first entry,
  and initializes it. Note that initialization of the blkg proper,
  including percpu stats, is the responsibility of blk-cgroup proper.

  Note that default config (weight, bps...) initialization is done
  from this method; otherwise, we would end up violating the locking
  order between the blkcg and q locks via the blkcg_get_CONF()
  functions.
* blkio_link_group_fn() is called under queue_lock and is responsible
  for linking the blkg to the queue. The blkcg side is handled by
  blk-cgroup proper.
* The common blkg creation function is named blkg_lookup_create() and
  blkiocg_lookup_group() is renamed to blkg_lookup() for consistency.
  throtl / cfq related functions are similarly [re]named.
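As a sketch of the transitional interface: a hypothetical policy "foo"
(foo_data, foo_group, and the list fields are invented for
illustration; the hook signatures mirror the cfq conversion in the
diff below) would now only provide the following:

	struct foo_data {
		struct hlist_head group_list;	/* foo_groups on this queue */
	};

	struct foo_group {
		struct blkio_group blkg;	/* must come first */
		struct hlist_node group_node;
		unsigned int weight;
	};

	/*
	 * Allocate the policy-specific blkg; the blkg proper (percpu
	 * stats etc.) is initialized by blk-cgroup proper.
	 */
	static struct blkio_group *foo_alloc_blkio_group(struct request_queue *q,
							 struct blkio_cgroup *blkcg)
	{
		struct foo_group *fg;

		fg = kzalloc_node(sizeof(*fg), GFP_ATOMIC, q->node);
		if (!fg)
			return NULL;
		/*
		 * Default config is picked up here; doing it from the
		 * link fn would violate the blkcg -> queue lock order.
		 */
		fg->weight = blkcg_get_weight(blkcg, fg->blkg.dev);
		return &fg->blkg;
	}

	/* called under queue_lock; attach the new group to the queue side */
	static void foo_link_blkio_group(struct request_queue *q,
					 struct blkio_group *blkg)
	{
		struct foo_data *fd = q->elevator->elevator_data;
		struct foo_group *fg = container_of(blkg, struct foo_group, blkg);

		hlist_add_head(&fg->group_node, &fd->group_list);
	}

	static struct blkio_policy_type blkio_policy_foo = {
		.ops = {
			.blkio_alloc_group_fn	= foo_alloc_blkio_group,
			.blkio_link_group_fn	= foo_link_blkio_group,
		},
	};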
This simplifies blkcg policy implementations and enables further
cleanup.
-v2: Vivek noticed that blkg_lookup_create() incorrectly tested
     blk_queue_dead() instead of blk_queue_bypass(), leading a user of
     the function to create a new blkg on a bypassing queue. This is a
     bug introduced while relocating the bypass patches before this
     one. Fixed.
-v3: ERR_PTR patch folded into this one. @for_root added to
     blkg_lookup_create() to allow creating the root group on a
     bypassed queue during elevator switch.
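The cfq_init_queue() hunk below shows the @for_root usage; during
elevator switch the queue is bypassed, so root group creation must be
allowed explicitly:

	blkg = blkg_lookup_create(&blkio_root_cgroup, q, BLKIO_POLICY_PROP,
				  true);	/* @for_root */
	if (!IS_ERR(blkg))
		cfqd->root_group = cfqg_of_blkg(blkg);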
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c | 131
1 file changed, 40 insertions(+), 91 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1c3f41b9d5d..acef564578c 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1048,10 +1048,12 @@ static void cfq_update_blkio_group_weight(struct request_queue *q,
 	cfqg->needs_update = true;
 }
 
-static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
-			struct cfq_group *cfqg, struct blkio_cgroup *blkcg)
+static void cfq_link_blkio_group(struct request_queue *q,
+				 struct blkio_group *blkg)
 {
-	struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	struct backing_dev_info *bdi = &q->backing_dev_info;
+	struct cfq_group *cfqg = cfqg_of_blkg(blkg);
 	unsigned int major, minor;
 
 	/*
@@ -1062,34 +1064,26 @@ static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
 	 */
 	if (bdi->dev) {
 		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
-					    cfqd->queue, MKDEV(major, minor));
-	} else
-		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
-					    cfqd->queue, 0);
+		blkg->dev = MKDEV(major, minor);
+	}
 
 	cfqd->nr_blkcg_linked_grps++;
-	cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
 
 	/* Add group on cfqd list */
 	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
 }
 
-/*
- * Should be called from sleepable context. No request queue lock as per
- * cpu stats are allocated dynamically and alloc_percpu needs to be called
- * from sleepable context.
- */
-static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
+static struct blkio_group *cfq_alloc_blkio_group(struct request_queue *q,
+						 struct blkio_cgroup *blkcg)
 {
 	struct cfq_group *cfqg;
-	int ret;
 
-	cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
+	cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, q->node);
 	if (!cfqg)
 		return NULL;
 
 	cfq_init_cfqg_base(cfqg);
+	cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
 
 	/*
 	 * Take the initial reference that will be released on destroy
@@ -1099,90 +1093,38 @@ static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
 	 */
 	cfqg->ref = 1;
 
-	ret = blkio_alloc_blkg_stats(&cfqg->blkg);
-	if (ret) {
-		kfree(cfqg);
-		return NULL;
-	}
-
-	return cfqg;
-}
-
-static struct cfq_group *
-cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
-{
-	struct cfq_group *cfqg = NULL;
-	struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
-	unsigned int major, minor;
-
-	/*
-	 * This is the common case when there are no blkio cgroups.
-	 * Avoid lookup in this case
-	 */
-	if (blkcg == &blkio_root_cgroup)
-		cfqg = cfqd->root_group;
-	else
-		cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, cfqd->queue,
-							 BLKIO_POLICY_PROP));
-
-	if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
-		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-		cfqg->blkg.dev = MKDEV(major, minor);
-	}
-
-	return cfqg;
+	return &cfqg->blkg;
 }
 
 /*
  * Search for the cfq group current task belongs to. request_queue lock must
  * be held.
  */
-static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd,
-				      struct blkio_cgroup *blkcg)
+static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
+						struct blkio_cgroup *blkcg)
 {
-	struct cfq_group *cfqg = NULL, *__cfqg = NULL;
 	struct request_queue *q = cfqd->queue;
+	struct backing_dev_info *bdi = &q->backing_dev_info;
+	struct cfq_group *cfqg = NULL;
 
-	cfqg = cfq_find_cfqg(cfqd, blkcg);
-	if (cfqg)
-		return cfqg;
-
-	if (!css_tryget(&blkcg->css))
-		return NULL;
-
-	/*
-	 * Need to allocate a group. Allocation of group also needs allocation
-	 * of per cpu stats which in-turn takes a mutex() and can block. Hence
-	 * we need to drop rcu lock and queue_lock before we call alloc.
-	 *
-	 * Not taking any queue reference here and assuming that queue is
-	 * around by the time we return. CFQ queue allocation code does
-	 * the same. It might be racy though.
-	 */
-	rcu_read_unlock();
-	spin_unlock_irq(q->queue_lock);
-
-	cfqg = cfq_alloc_cfqg(cfqd);
+	/* avoid lookup for the common case where there's no blkio cgroup */
+	if (blkcg == &blkio_root_cgroup) {
+		cfqg = cfqd->root_group;
+	} else {
+		struct blkio_group *blkg;
 
-	spin_lock_irq(q->queue_lock);
-	rcu_read_lock();
-	css_put(&blkcg->css);
+		blkg = blkg_lookup_create(blkcg, q, BLKIO_POLICY_PROP, false);
+		if (!IS_ERR(blkg))
+			cfqg = cfqg_of_blkg(blkg);
+	}
 
-	/*
-	 * If some other thread already allocated the group while we were
-	 * not holding queue lock, free up the group
-	 */
-	__cfqg = cfq_find_cfqg(cfqd, blkcg);
+	if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
+		unsigned int major, minor;
 
-	if (__cfqg) {
-		kfree(cfqg);
-		return __cfqg;
+		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
+		cfqg->blkg.dev = MKDEV(major, minor);
 	}
 
-	if (!cfqg)
-		cfqg = cfqd->root_group;
-
-	cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg);
 	return cfqg;
 }
 
@@ -1294,8 +1236,8 @@ static bool cfq_clear_queue(struct request_queue *q)
 }
 
 #else /* GROUP_IOSCHED */
-static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd,
-				      struct blkio_cgroup *blkcg)
+static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
+						struct blkio_cgroup *blkcg)
 {
 	return cfqd->root_group;
 }
@@ -2887,7 +2829,8 @@ retry:
 
 	blkcg = task_blkio_cgroup(current);
 
-	cfqg = cfq_get_cfqg(cfqd, blkcg);
+	cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
+
 	cic = cfq_cic_lookup(cfqd, ioc);
 	/* cic always exists here */
 	cfqq = cic_to_cfqq(cic, is_sync);
@@ -3694,6 +3637,7 @@ static void cfq_exit_queue(struct elevator_queue *e)
 static int cfq_init_queue(struct request_queue *q)
 {
 	struct cfq_data *cfqd;
+	struct blkio_group *blkg __maybe_unused;
 	int i;
 
 	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
@@ -3711,7 +3655,10 @@ static int cfq_init_queue(struct request_queue *q)
 	rcu_read_lock();
 	spin_lock_irq(q->queue_lock);
 
-	cfqd->root_group = cfq_get_cfqg(cfqd, &blkio_root_cgroup);
+	blkg = blkg_lookup_create(&blkio_root_cgroup, q, BLKIO_POLICY_PROP,
+				  true);
+	if (!IS_ERR(blkg))
+		cfqd->root_group = cfqg_of_blkg(blkg);
 
 	spin_unlock_irq(q->queue_lock);
 	rcu_read_unlock();
@@ -3897,6 +3844,8 @@ static struct elevator_type iosched_cfq = {
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 static struct blkio_policy_type blkio_policy_cfq = {
 	.ops = {
+		.blkio_alloc_group_fn =		cfq_alloc_blkio_group,
+		.blkio_link_group_fn =		cfq_link_blkio_group,
 		.blkio_unlink_group_fn =	cfq_unlink_blkio_group,
 		.blkio_clear_queue_fn =		cfq_clear_queue,
 		.blkio_update_group_weight_fn =	cfq_update_blkio_group_weight,