Diffstat (limited to 'mm')
 mm/memcontrol.c | 80 +++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 49 insertions(+), 31 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b8dc8e4cbf6a..0f1d92163f30 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3321,52 +3321,53 @@ void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
 	schedule_work(&cachep->memcg_params->destroy);
 }
 
-static char *memcg_cache_name(struct mem_cgroup *memcg, struct kmem_cache *s)
-{
-	char *name;
-	struct dentry *dentry;
-
-	rcu_read_lock();
-	dentry = rcu_dereference(memcg->css.cgroup->dentry);
-	rcu_read_unlock();
-
-	BUG_ON(dentry == NULL);
-
-	name = kasprintf(GFP_KERNEL, "%s(%d:%s)", s->name,
-			 memcg_cache_id(memcg), dentry->d_name.name);
-
-	return name;
-}
+/*
+ * This lock protects updaters, not readers. We want readers to be as fast as
+ * they can, and they will either see NULL or a valid cache value. Our model
+ * allow them to see NULL, in which case the root memcg will be selected.
+ *
+ * We need this lock because multiple allocations to the same cache from a non
+ * will span more than one worker. Only one of them can create the cache.
+ */
+static DEFINE_MUTEX(memcg_cache_mutex);
 
+/*
+ * Called with memcg_cache_mutex held
+ */
 static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
 					 struct kmem_cache *s)
 {
-	char *name;
 	struct kmem_cache *new;
+	static char *tmp_name = NULL;
 
-	name = memcg_cache_name(memcg, s);
-	if (!name)
-		return NULL;
+	lockdep_assert_held(&memcg_cache_mutex);
+
+	/*
+	 * kmem_cache_create_memcg duplicates the given name and
+	 * cgroup_name for this name requires RCU context.
+	 * This static temporary buffer is used to prevent from
+	 * pointless shortliving allocation.
+	 */
+	if (!tmp_name) {
+		tmp_name = kmalloc(PATH_MAX, GFP_KERNEL);
+		if (!tmp_name)
+			return NULL;
+	}
+
+	rcu_read_lock();
+	snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", s->name,
+		 memcg_cache_id(memcg), cgroup_name(memcg->css.cgroup));
+	rcu_read_unlock();
 
-	new = kmem_cache_create_memcg(memcg, name, s->object_size, s->align,
+	new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align,
 				      (s->flags & ~SLAB_PANIC), s->ctor, s);
 
 	if (new)
 		new->allocflags |= __GFP_KMEMCG;
 
-	kfree(name);
 	return new;
 }
 
-/*
- * This lock protects updaters, not readers. We want readers to be as fast as
- * they can, and they will either see NULL or a valid cache value. Our model
- * allow them to see NULL, in which case the root memcg will be selected.
- *
- * We need this lock because multiple allocations to the same cache from a non
- * will span more than one worker. Only one of them can create the cache.
- */
-static DEFINE_MUTEX(memcg_cache_mutex);
 static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 						  struct kmem_cache *cachep)
 {
@@ -5912,6 +5913,7 @@ static struct cftype mem_cgroup_files[] = {
 	},
 	{
 		.name = "use_hierarchy",
+		.flags = CFTYPE_INSANE,
 		.write_u64 = mem_cgroup_hierarchy_write,
 		.read_u64 = mem_cgroup_hierarchy_read,
 	},
@@ -6907,6 +6909,21 @@ static void mem_cgroup_move_task(struct cgroup *cont,
 }
 #endif
 
+/*
+ * Cgroup retains root cgroups across [un]mount cycles making it necessary
+ * to verify sane_behavior flag on each mount attempt.
+ */
+static void mem_cgroup_bind(struct cgroup *root)
+{
+	/*
+	 * use_hierarchy is forced with sane_behavior.  cgroup core
+	 * guarantees that @root doesn't have any children, so turning it
+	 * on for the root memcg is enough.
+	 */
+	if (cgroup_sane_behavior(root))
+		mem_cgroup_from_cont(root)->use_hierarchy = true;
+}
+
 struct cgroup_subsys mem_cgroup_subsys = {
 	.name = "memory",
 	.subsys_id = mem_cgroup_subsys_id,
@@ -6917,6 +6934,7 @@ struct cgroup_subsys mem_cgroup_subsys = {
 	.can_attach = mem_cgroup_can_attach,
 	.cancel_attach = mem_cgroup_cancel_attach,
 	.attach = mem_cgroup_move_task,
+	.bind = mem_cgroup_bind,
 	.base_cftypes = mem_cgroup_files,
 	.early_init = 0,
 	.use_id = 1,
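
Two points in the first hunk are worth pulling out: the cache name is now formatted under rcu_read_lock() because cgroup_name() requires RCU context, and the scratch buffer tmp_name is allocated lazily, once, and then reused on every call, which is safe only because memcg_cache_mutex serializes every caller of kmem_cache_dup(). Below is a minimal userspace analogue of that pattern, assuming a pthread mutex in place of the kernel mutex; make_cache_name() and its PATH_MAX-sized buffer are illustrative stand-ins, not the kernel API.

/* Userspace sketch of the tmp_name pattern from the hunk above: a
 * scratch buffer allocated once, lazily, and only touched while the
 * serializing mutex is held.  All names here are hypothetical. */
#include <limits.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Must be called with cache_mutex held, like kmem_cache_dup(). */
static const char *make_cache_name(const char *base, int id, const char *group)
{
	static char *tmp_name;	/* lazily allocated, reused across calls */

	if (!tmp_name) {
		tmp_name = malloc(PATH_MAX);
		if (!tmp_name)
			return NULL;
	}
	snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", base, id, group);
	return tmp_name;
}

int main(void)
{
	pthread_mutex_lock(&cache_mutex);
	const char *name = make_cache_name("dentry", 3, "workers");
	if (name)
		printf("%s\n", name);	/* prints: dentry(3:workers) */
	pthread_mutex_unlock(&cache_mutex);
	return 0;
}

The trade-off mirrors the commit: one persistent PATH_MAX allocation replaces a kasprintf()/kfree() pair on every cache creation, at the price of only being usable under the lock.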
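
The second half of the diff wires up a .bind callback so that use_hierarchy is re-forced on every mount when sane_behavior is set, because cgroup core keeps the root group alive across unmount/mount cycles and derived state can go stale. A small standalone sketch of that idea follows; struct group, bind_root(), and the flag fields are hypothetical stand-ins for the kernel's types, not its API.

/* Sketch: state derived from mount options must be recomputed on every
 * bind, not just at creation, since the root survives unmounts. */
#include <stdbool.h>
#include <stdio.h>

struct group {
	bool sane_behavior;	/* set from mount options */
	bool use_hierarchy;	/* derived, must follow sane_behavior */
};

/* Called on every (re)mount, mirroring mem_cgroup_bind(). */
static void bind_root(struct group *root)
{
	/* The root has no children at bind time, so flipping the
	 * flag on the root alone is sufficient. */
	if (root->sane_behavior)
		root->use_hierarchy = true;
}

int main(void)
{
	struct group root = { .sane_behavior = true, .use_hierarchy = false };

	bind_root(&root);			/* first mount */
	printf("%d\n", root.use_hierarchy);	/* 1 */

	root.use_hierarchy = false;		/* stale state after unmount */
	bind_root(&root);			/* remount re-applies the rule */
	printf("%d\n", root.use_hierarchy);	/* 1 again */
	return 0;
}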