author    Tejun Heo <tj@kernel.org>	2013-04-14 11:36:57 -0700
committer Maciej Wereski <m.wereski@partner.samsung.com>	2014-12-29 09:31:37 +0100
commit    6e39060d794da7636c7aef0398d7f5a3ad76f444 (patch)
tree      969ebe9aec1c69f72da6d9ef523ce1e65c2bc1a1
parent    fa57f74d4219054ed604fc6149d992f26186b6a7 (diff)
cgroup: drop hierarchy_id_lock
Now that hierarchy_id alloc / free are protected by the cgroup mutexes,
there's no need for this separate lock. Drop it.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
Change-Id: I03fbc8bba08a785c6082a9b5bb1087c53c506c60
Origin: https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=54e7b4eb15fc4354d5ada5469e3db4a220ddb3ed
Backported-by: Maciej Wereski <m.wereski@partner.samsung.com>
Signed-off-by: Maciej Wereski <m.wereski@partner.samsung.com>
-rw-r--r--	kernel/cgroup.c	23
1 file changed, 17 insertions, 6 deletions
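The exclusion rule this patch leans on - take both cgroup_mutex and cgroup_root_mutex to modify the hierarchy ID state, hold either one of them to read it - can be sketched outside the kernel. The userspace analogue below is purely illustrative; lock_a, lock_b, update_id and read_id_holding_a are made-up names standing in for the cgroup mutexes and the ID write/read paths, not code from this commit:

/*
 * Illustrative userspace sketch (not kernel code) of the rule documented
 * by the new comment: writers take both mutexes, readers take either one.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* stands in for cgroup_mutex */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* stands in for cgroup_root_mutex */
static int hierarchy_id;                                   /* the protected state */

static void update_id(int new_id)
{
	/* Writers must hold both mutexes, always in the same order. */
	pthread_mutex_lock(&lock_a);
	pthread_mutex_lock(&lock_b);
	hierarchy_id = new_id;
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

static int read_id_holding_a(void)
{
	/* Readers may hold either mutex; lock_a alone suffices here. */
	int id;

	pthread_mutex_lock(&lock_a);
	id = hierarchy_id;
	pthread_mutex_unlock(&lock_a);
	return id;
}

int main(void)
{
	update_id(42);
	printf("hierarchy_id = %d\n", read_id_holding_a());
	return 0;
}

Because a writer must own both mutexes, a reader owning either one is guaranteed the writer is excluded. That is why cgroup_init_root_id() and cgroup_exit_root_id() can drop the private spinlock once they assert that both cgroup mutexes are held, as the diff below does.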
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 8ea49c7c59a..62f95510950 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -197,9 +197,13 @@ struct cgroup_event {
 static LIST_HEAD(roots);
 static int root_count;
 
+/*
+ * Hierarchy ID allocation and mapping. It follows the same exclusion
+ * rules as other root ops - both cgroup_mutex and cgroup_root_mutex for
+ * writes, either for reads.
+ */
 static DEFINE_IDA(hierarchy_ida);
 static int next_hierarchy_id;
-static DEFINE_SPINLOCK(hierarchy_id_lock);
 
 /* dummytop is a shorthand for the dummy hierarchy's top cgroup */
 #define dummytop (&rootnode.top_cgroup)
@@ -1438,10 +1442,12 @@ static int cgroup_init_root_id(struct cgroupfs_root *root)
 {
 	int ret;
 
+	lockdep_assert_held(&cgroup_mutex);
+	lockdep_assert_held(&cgroup_root_mutex);
+
 	do {
 		if (!ida_pre_get(&hierarchy_ida, GFP_KERNEL))
 			return -ENOMEM;
-		spin_lock(&hierarchy_id_lock);
 		/* Try to allocate the next unused ID */
 		ret = ida_get_new_above(&hierarchy_ida, next_hierarchy_id,
 					&root->hierarchy_id);
@@ -1454,18 +1460,17 @@ static int cgroup_init_root_id(struct cgroupfs_root *root)
 			/* Can only get here if the 31-bit IDR is full ... */
 			BUG_ON(ret);
 		}
-		spin_unlock(&hierarchy_id_lock);
 	} while (ret);
 	return 0;
 }
 
 static void cgroup_exit_root_id(struct cgroupfs_root *root)
 {
+	lockdep_assert_held(&cgroup_mutex);
+	lockdep_assert_held(&cgroup_root_mutex);
+
 	if (root->hierarchy_id) {
-		spin_lock(&hierarchy_id_lock);
 		ida_remove(&hierarchy_ida, root->hierarchy_id);
-		spin_unlock(&hierarchy_id_lock);
-
 		root->hierarchy_id = 0;
 	}
 }
@@ -4685,8 +4690,14 @@ int __init cgroup_init(void)
 	hash_add(css_set_table, &init_css_set.hlist, key);
 
 	/* allocate id for the dummy hierarchy */
+	mutex_lock(&cgroup_mutex);
+	mutex_lock(&cgroup_root_mutex);
+
 	BUG_ON(cgroup_init_root_id(&rootnode));
 
+	mutex_unlock(&cgroup_root_mutex);
+	mutex_unlock(&cgroup_mutex);
+
 	cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);
 	if (!cgroup_kobj) {
 		err = -ENOMEM;