author     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>   2009-10-01 15:44:12 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>       2009-10-01 16:11:13 -0700
commit     ef8745c1e7fc5413d760b3b958f3fd3a0beaad72 (patch)
tree       a1f1998dbcf06e84fe3539192e440e9d1bb876f2 /mm/memcontrol.c
parent     4e649152cbaa1aedd01821d200ab9d597fe469e4 (diff)
memcg: reduce check for softlimit excess
In the charge/uncharge/reclaim paths, usage_in_excess is calculated repeatedly, and each calculation takes the res_counter's spin_lock. This patch removes the unnecessary calls to res_counter_soft_limit_excess().

Reviewed-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Paul Menage <menage@google.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
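For background, res_counter_soft_limit_excess() has to read usage and soft_limit under the res_counter's internal spinlock, so every extra call in the charge/uncharge/reclaim paths pays for a lock round trip. The sketch below is a minimal user-space illustration of that cost model and of the compute-once pattern the patch switches to; the struct and function names only mirror the kernel ones, and a pthread mutex stands in for the kernel spinlock.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for struct res_counter: usage and soft_limit guarded by a lock. */
struct res_counter {
	pthread_mutex_t lock;
	unsigned long long usage;
	unsigned long long soft_limit;
};

/*
 * Stand-in for res_counter_soft_limit_excess(): the counter must be read
 * under its lock, so every call costs a lock/unlock round trip.
 */
static unsigned long long soft_limit_excess(struct res_counter *cnt)
{
	unsigned long long excess;

	pthread_mutex_lock(&cnt->lock);
	excess = cnt->usage > cnt->soft_limit ?
		 cnt->usage - cnt->soft_limit : 0;
	pthread_mutex_unlock(&cnt->lock);
	return excess;
}

int main(void)
{
	struct res_counter rc = { .usage = 300, .soft_limit = 200 };

	pthread_mutex_init(&rc.lock, NULL);

	/* Old pattern: the excess is recomputed at every use site. */
	if (soft_limit_excess(&rc))				/* locked read #1 */
		printf("excess=%llu\n", soft_limit_excess(&rc));/* locked read #2 */

	/* New pattern: read once, reuse the cached value everywhere. */
	unsigned long long excess = soft_limit_excess(&rc);	/* single locked read */
	if (excess)
		printf("excess=%llu\n", excess);

	pthread_mutex_destroy(&rc.lock);
	return 0;
}

The point is not the arithmetic but the number of locked reads: after the patch, each iteration of the update and reclaim loops performs exactly one.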
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c | 31
1 file changed, 15 insertions(+), 16 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1ae8c439584..f99f5991d6b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -313,7 +313,8 @@ soft_limit_tree_from_page(struct page *page)
static void
__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
struct mem_cgroup_per_zone *mz,
- struct mem_cgroup_tree_per_zone *mctz)
+ struct mem_cgroup_tree_per_zone *mctz,
+ unsigned long long new_usage_in_excess)
{
struct rb_node **p = &mctz->rb_root.rb_node;
struct rb_node *parent = NULL;
@@ -322,7 +323,9 @@ __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
if (mz->on_tree)
return;
- mz->usage_in_excess = res_counter_soft_limit_excess(&mem->res);
+ mz->usage_in_excess = new_usage_in_excess;
+ if (!mz->usage_in_excess)
+ return;
while (*p) {
parent = *p;
mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
@@ -382,7 +385,7 @@ static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem)
static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
{
- unsigned long long new_usage_in_excess;
+ unsigned long long excess;
struct mem_cgroup_per_zone *mz;
struct mem_cgroup_tree_per_zone *mctz;
int nid = page_to_nid(page);
@@ -395,25 +398,21 @@ static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
*/
for (; mem; mem = parent_mem_cgroup(mem)) {
mz = mem_cgroup_zoneinfo(mem, nid, zid);
- new_usage_in_excess =
- res_counter_soft_limit_excess(&mem->res);
+ excess = res_counter_soft_limit_excess(&mem->res);
/*
* We have to update the tree if mz is on RB-tree or
* mem is over its softlimit.
*/
- if (new_usage_in_excess || mz->on_tree) {
+ if (excess || mz->on_tree) {
spin_lock(&mctz->lock);
/* if on-tree, remove it */
if (mz->on_tree)
__mem_cgroup_remove_exceeded(mem, mz, mctz);
/*
- * if over soft limit, insert again. mz->usage_in_excess
- * will be updated properly.
+ * Insert again. mz->usage_in_excess will be updated.
+ * If excess is 0, no tree ops.
*/
- if (new_usage_in_excess)
- __mem_cgroup_insert_exceeded(mem, mz, mctz);
- else
- mz->usage_in_excess = 0;
+ __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
spin_unlock(&mctz->lock);
}
}
@@ -2221,6 +2220,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
unsigned long reclaimed;
int loop = 0;
struct mem_cgroup_tree_per_zone *mctz;
+ unsigned long long excess;
if (order > 0)
return 0;
@@ -2272,9 +2272,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
break;
} while (1);
}
- mz->usage_in_excess =
- res_counter_soft_limit_excess(&mz->mem->res);
__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
+ excess = res_counter_soft_limit_excess(&mz->mem->res);
/*
* One school of thought says that we should not add
* back the node to the tree if reclaim returns 0.
@@ -2283,8 +2282,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
* memory to reclaim from. Consider this as a longer
* term TODO.
*/
- if (mz->usage_in_excess)
- __mem_cgroup_insert_exceeded(mz->mem, mz, mctz);
+ /* If excess == 0, no tree ops */
+ __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
spin_unlock(&mctz->lock);
css_put(&mz->mem->css);
loop++;
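The other half of the change is the new calling convention of __mem_cgroup_insert_exceeded(): it receives the precomputed excess and returns early when it is zero, so both call sites can invoke it unconditionally instead of branching on the value. The toy C program below illustrates that contract only; the RB-tree is replaced by a simple on_tree flag and the names are hypothetical.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct mem_cgroup_per_zone's tree bookkeeping. */
struct zone_info {
	bool on_tree;
	unsigned long long usage_in_excess;
};

/*
 * Mirrors the new __mem_cgroup_insert_exceeded() contract: store the
 * caller-supplied excess and bail out early when it is zero, so the
 * caller can call this unconditionally after removing the node.
 */
static void insert_exceeded(struct zone_info *mz,
			    unsigned long long new_usage_in_excess)
{
	if (mz->on_tree)
		return;
	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;		/* not over the soft limit: no tree op */
	mz->on_tree = true;	/* the real code links the node into the RB-tree */
}

int main(void)
{
	struct zone_info mz = { 0 };

	insert_exceeded(&mz, 0);	/* zero excess: stays off the tree */
	assert(!mz.on_tree);

	insert_exceeded(&mz, 100);	/* non-zero excess: goes on the tree */
	assert(mz.on_tree && mz.usage_in_excess == 100);

	printf("on_tree=%d excess=%llu\n", mz.on_tree, mz.usage_in_excess);
	return 0;
}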