path: root/mm
author    Michal Hocko <mhocko@suse.cz>    2013-02-22 16:35:39 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-02-23 17:50:21 -0800
commit 8787a1df30c7b75521fe8cbed42895d47e6b8d52 (patch)
tree   12129663393911726999884a370199d7b09b66bd /mm
parent 0e50ce3b50fb4ffc38c98fe7622361da4d0808c1 (diff)
memcg: move mem_cgroup_soft_limit_tree_init to mem_cgroup_init
The per-node-zone soft limit tree is currently initialized when the root cgroup is created, which works but pointlessly pollutes the memcg allocation code with something that can instead be done when the memcg subsystem is initialized by mem_cgroup_init(), along with the other controller-specific parts.

While we are at it, make mem_cgroup_soft_limit_tree_init() void: there is little point in reporting an allocation failure that early during boot, because if we cannot allocate memory at that point we are screwed anyway (this also saves some code).

Signed-off-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Tejun Heo <htejun@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
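For illustration, a minimal sketch of the pattern the patch adopts, not the actual mm/memcontrol.c code: per-node state is allocated from an __init function that is driven by an initcall registered with subsys_initcall(), and allocation failure is treated as fatal with BUG_ON() instead of an error-unwinding path, since a kzalloc() failure this early in boot is unrecoverable anyway. All identifiers here (example_tree_init, example_state, etc.) are hypothetical.

	/*
	 * Sketch only: hypothetical per-node state initialized from an
	 * initcall, mirroring the shape of the patched code.
	 */
	#include <linux/init.h>
	#include <linux/slab.h>
	#include <linux/nodemask.h>
	#include <linux/bug.h>

	struct example_per_node_state {		/* hypothetical per-node structure */
		int dummy;
	};

	static struct example_per_node_state *example_state[MAX_NUMNODES];

	static void __init example_tree_init(void)
	{
		int node;

		for_each_node(node) {
			struct example_per_node_state *s;
			/* fall back to any node if this one has no normal memory */
			int tmp = node_state(node, N_NORMAL_MEMORY) ? node : -1;

			s = kzalloc_node(sizeof(*s), GFP_KERNEL, tmp);
			BUG_ON(!s);	/* no error path: too early in boot to recover */
			example_state[node] = s;
		}
	}

	static int __init example_init(void)
	{
		example_tree_init();
		return 0;
	}
	subsys_initcall(example_init);

Because the function now runs from an initcall rather than from css_alloc of the root cgroup, it can be marked __init and its memory discarded after boot, and the caller no longer needs a failure branch.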
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c | 19 +++----------------
1 file changed, 3 insertions(+), 16 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index af4b04f4d744..bb894b071f59 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6052,7 +6052,7 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
}
EXPORT_SYMBOL(parent_mem_cgroup);
-static int mem_cgroup_soft_limit_tree_init(void)
+static void __init mem_cgroup_soft_limit_tree_init(void)
{
struct mem_cgroup_tree_per_node *rtpn;
struct mem_cgroup_tree_per_zone *rtpz;
@@ -6063,8 +6063,7 @@ static int mem_cgroup_soft_limit_tree_init(void)
if (!node_state(node, N_NORMAL_MEMORY))
tmp = -1;
rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
- if (!rtpn)
- goto err_cleanup;
+ BUG_ON(!rtpn);
soft_limit_tree.rb_tree_per_node[node] = rtpn;
@@ -6074,17 +6073,6 @@ static int mem_cgroup_soft_limit_tree_init(void)
spin_lock_init(&rtpz->lock);
}
}
- return 0;
-
-err_cleanup:
- for_each_node(node) {
- if (!soft_limit_tree.rb_tree_per_node[node])
- break;
- kfree(soft_limit_tree.rb_tree_per_node[node]);
- soft_limit_tree.rb_tree_per_node[node] = NULL;
- }
- return 1;
-
}
static struct cgroup_subsys_state * __ref
@@ -6106,8 +6094,6 @@ mem_cgroup_css_alloc(struct cgroup *cont)
if (cont->parent == NULL) {
int cpu;
- if (mem_cgroup_soft_limit_tree_init())
- goto free_out;
root_mem_cgroup = memcg;
for_each_possible_cpu(cpu) {
struct memcg_stock_pcp *stock =
@@ -6850,6 +6836,7 @@ static int __init mem_cgroup_init(void)
{
hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
enable_swap_cgroup();
+ mem_cgroup_soft_limit_tree_init();
return 0;
}
subsys_initcall(mem_cgroup_init);