path: root/mm/memcontrol.c
author     Kirill A. Shutemov <kirill@shutemov.name>          2010-05-26 14:42:46 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>     2010-05-27 09:12:44 -0700
commit     907860ed381a31b0102f362df67c1c5cae6ef050 (patch)
tree       4f47a6fe898b1f45da505fc0c27d98e66d42aa46 /mm/memcontrol.c
parent     ac39cf8cb86c45eeac6a592ce0d58f9021a97235 (diff)
cgroups: make cftype.unregister_event() void-returning
Since we are unable to handle an error returned by cftype.unregister_event() properly, let's make the callback void-returning.

mem_cgroup_unregister_event() has been rewritten to be a "never fail" function. In mem_cgroup_usage_register_event() we save the old buffer for the thresholds array and reuse it in mem_cgroup_usage_unregister_event() to avoid the allocation there.

Signed-off-by: Kirill A. Shutemov <kirill@shutemov.name>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Phil Carmody <ext-phil.2.carmody@nokia.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Paul Menage <menage@google.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
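The pattern the patch adopts — do every allocation on the register side and keep the replaced array around as a spare, so the unregister side only copies entries and swaps pointers — can be sketched in plain C. This is an illustrative sketch, not the memcg code: the names (threshold_set, ts_register, ts_unregister) are made up, and the thresholds_lock mutex and RCU publication that the real code uses are omitted.

/*
 * Illustrative sketch of the "never-fail unregister" pattern described
 * above.  Hypothetical names; locking and RCU publication omitted.
 */
#include <stdlib.h>
#include <string.h>

struct threshold_ary {
	size_t size;
	unsigned long entries[];	/* thresholds */
};

struct threshold_set {
	struct threshold_ary *cur;	/* current array */
	struct threshold_ary *spare;	/* preallocated buffer for unregister */
};

/* Register: every allocation happens here, where a failure can still be
 * reported to the caller. */
static int ts_register(struct threshold_set *s, unsigned long value)
{
	size_t n = s->cur ? s->cur->size : 0;
	struct threshold_ary *neu;

	neu = malloc(sizeof(*neu) + (n + 1) * sizeof(unsigned long));
	if (!neu)
		return -1;
	if (n)
		memcpy(neu->entries, s->cur->entries,
		       n * sizeof(unsigned long));
	neu->entries[n] = value;
	neu->size = n + 1;

	/* Keep the replaced array as the spare: it can hold at least
	 * neu->size - 1 entries, which is all a later unregister needs. */
	free(s->spare);
	s->spare = s->cur;
	s->cur = neu;
	return 0;
}

/* Unregister: no allocation, so it cannot fail.  Assumes @value is
 * currently registered, mirroring the kernel-side invariant. */
static void ts_unregister(struct threshold_set *s, unsigned long value)
{
	struct threshold_ary *neu = s->spare;
	size_t i, j = 0;

	if (!s->cur)
		return;

	for (i = 0; i < s->cur->size; i++)
		if (s->cur->entries[i] != value)
			neu->entries[j++] = s->cur->entries[i];

	if (j) {
		neu->size = j;
	} else {
		/* Nothing left: drop the spare and publish an empty set. */
		free(neu);
		neu = NULL;
	}

	/* Swap buffers: the old current array becomes the next spare. */
	s->spare = s->cur;
	s->cur = neu;
}

In the actual patch the same swap happens under memcg->thresholds_lock, and the new array is published with rcu_assign_pointer() followed by synchronize_rcu() before the old one is reused as the spare, as the diff below shows.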
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  65
1 file changed, 41 insertions(+), 24 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index df1234c0dac..a4172a861b3 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -226,9 +226,19 @@ struct mem_cgroup {
/* thresholds for memory usage. RCU-protected */
struct mem_cgroup_threshold_ary *thresholds;
+ /*
+ * Preallocated buffer to be used in mem_cgroup_unregister_event()
+ * to make it "never fail".
+ * It must be able to store at least thresholds->size - 1 entries.
+ */
+ struct mem_cgroup_threshold_ary *__thresholds;
+
/* thresholds for mem+swap usage. RCU-protected */
struct mem_cgroup_threshold_ary *memsw_thresholds;
+ /* the same as __thresholds, but for memsw_thresholds */
+ struct mem_cgroup_threshold_ary *__memsw_thresholds;
+
/* For oom notifier event fd */
struct list_head oom_notify;
@@ -3604,17 +3614,27 @@ static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
else
rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new);
- /* To be sure that nobody uses thresholds before freeing it */
+ /* To be sure that nobody uses thresholds */
synchronize_rcu();
- kfree(thresholds);
+ /*
+ * Free old preallocated buffer and use thresholds as new
+ * preallocated buffer.
+ */
+ if (type == _MEM) {
+ kfree(memcg->__thresholds);
+ memcg->__thresholds = thresholds;
+ } else {
+ kfree(memcg->__memsw_thresholds);
+ memcg->__memsw_thresholds = thresholds;
+ }
unlock:
mutex_unlock(&memcg->thresholds_lock);
return ret;
}
-static int mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
+static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
struct cftype *cft, struct eventfd_ctx *eventfd)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
@@ -3622,7 +3642,7 @@ static int mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
int type = MEMFILE_TYPE(cft->private);
u64 usage;
int size = 0;
- int i, j, ret = 0;
+ int i, j;
mutex_lock(&memcg->thresholds_lock);
if (type == _MEM)
@@ -3649,20 +3669,19 @@ static int mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
size++;
}
+ /* Use preallocated buffer for new array of thresholds */
+ if (type == _MEM)
+ thresholds_new = memcg->__thresholds;
+ else
+ thresholds_new = memcg->__memsw_thresholds;
+
/* Set thresholds array to NULL if we don't have thresholds */
if (!size) {
+ kfree(thresholds_new);
thresholds_new = NULL;
- goto assign;
+ goto swap_buffers;
}
- /* Allocate memory for new array of thresholds */
- thresholds_new = kmalloc(sizeof(*thresholds_new) +
- size * sizeof(struct mem_cgroup_threshold),
- GFP_KERNEL);
- if (!thresholds_new) {
- ret = -ENOMEM;
- goto unlock;
- }
thresholds_new->size = size;
/* Copy thresholds and find current threshold */
@@ -3683,20 +3702,20 @@ static int mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
j++;
}
-assign:
- if (type == _MEM)
+swap_buffers:
+ /* Swap thresholds array and preallocated buffer */
+ if (type == _MEM) {
+ memcg->__thresholds = thresholds;
rcu_assign_pointer(memcg->thresholds, thresholds_new);
- else
+ } else {
+ memcg->__memsw_thresholds = thresholds;
rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new);
+ }
- /* To be sure that nobody uses thresholds before freeing it */
+ /* To be sure that nobody uses thresholds */
synchronize_rcu();
- kfree(thresholds);
-unlock:
mutex_unlock(&memcg->thresholds_lock);
-
- return ret;
}
static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
@@ -3724,7 +3743,7 @@ static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
return 0;
}
-static int mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
+static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
struct cftype *cft, struct eventfd_ctx *eventfd)
{
struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
@@ -3743,8 +3762,6 @@ static int mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
}
mutex_unlock(&memcg_oom_mutex);
-
- return 0;
}
static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
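For context on how these callbacks get exercised: with cgroup v1, userspace arms a threshold by writing "<eventfd> <fd of memory.usage_in_bytes> <threshold>" to the group's cgroup.event_control file, and the event is torn down again when the eventfd is closed or the group is removed — the unregister path this patch makes unable to fail. A minimal userspace sketch follows; the /sys/fs/cgroup/memory mount point and the group name "mygroup" are assumptions about the local setup, and error handling is kept to a minimum.

/* Hedged userspace sketch: arm a 64 MiB usage threshold on a cgroup v1
 * memory controller.  Paths and the group name are assumptions. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	uint64_t ticks;
	char cmd[64];
	int efd = eventfd(0, 0);
	int ufd = open("/sys/fs/cgroup/memory/mygroup/memory.usage_in_bytes",
		       O_RDONLY);
	int cfd = open("/sys/fs/cgroup/memory/mygroup/cgroup.event_control",
		       O_WRONLY);

	if (efd < 0 || ufd < 0 || cfd < 0)
		return 1;

	/* "<eventfd> <usage fd> <threshold in bytes>" registers the event. */
	snprintf(cmd, sizeof(cmd), "%d %d %llu", efd, ufd, 64ULL << 20);
	if (write(cfd, cmd, strlen(cmd)) < 0)
		return 1;

	/* Blocks until memory usage crosses the threshold. */
	if (read(efd, &ticks, sizeof(ticks)) == sizeof(ticks))
		printf("threshold crossed %llu time(s)\n",
		       (unsigned long long)ticks);

	/* Closing the descriptors unregisters the event in the kernel. */
	close(cfd);
	close(ufd);
	close(efd);
	return 0;
}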