| author | KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> | 2011-06-15 15:08:13 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-06-15 20:03:59 -0700 |
| commit | a433658c30974fc87ba3ff52d7e4e6299762aa3d (patch) | |
| tree | 8df65e22af520ca5c020281763e6874d0bb51bc5 /mm | |
| parent | e1bbd19bc4afef7adb80cca163800391c4f5773d (diff) | |
| download | linux-3.10-a433658c30974fc87ba3ff52d7e4e6299762aa3d.tar.gz linux-3.10-a433658c30974fc87ba3ff52d7e4e6299762aa3d.tar.bz2 linux-3.10-a433658c30974fc87ba3ff52d7e4e6299762aa3d.zip | |
vmscan,memcg: memcg aware swap token
Currently, memcg reclaim can disable the swap token even when the swap token's mm does not belong to that memory cgroup. That is slightly risky: if an admin creates a very small mem-cgroup and someone runs a contentious, memory-hungry workload inside it, every task ends up losing the swap token and the whole system may become unresponsive. That's bad.

This patch adds a 'memcg' parameter to disable_swap_token(); if the parameter does not match the memcg of the current swap token holder, the VM leaves the token alone.
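To make the new rule concrete, here is a minimal userspace sketch (not kernel code) of the matching semantics the patch introduces: match_memcg() below mirrors the helper added to mm/thrash.c, while the dummy cgroup pointers and the printf driver are purely illustrative. A NULL memcg on either side stands for global reclaim (or an untracked token holder) and always matches, so global reclaim can still revoke any token, while reclaim triggered inside one cgroup can no longer revoke a token held in an unrelated one.

```c
#include <stdbool.h>
#include <stdio.h>

struct mem_cgroup;	/* opaque here; the real definition lives in mm/memcontrol.c */

/* Mirrors the match_memcg() helper the patch adds to mm/thrash.c. */
static bool match_memcg(struct mem_cgroup *a, struct mem_cgroup *b)
{
	/* NULL means global reclaim (or no tracked holder): always a match. */
	if (!a || !b)
		return true;
	return a == b;
}

int main(void)
{
	/* Dummy, distinct "cgroups" for illustration only; never dereferenced. */
	struct mem_cgroup *cgroup_a = (struct mem_cgroup *)0x1;
	struct mem_cgroup *cgroup_b = (struct mem_cgroup *)0x2;

	printf("global reclaim vs holder in A: %d\n", match_memcg(NULL, cgroup_a));    /* 1: may disable */
	printf("reclaim in A   vs holder in A: %d\n", match_memcg(cgroup_a, cgroup_a)); /* 1: may disable */
	printf("reclaim in A   vs holder in B: %d\n", match_memcg(cgroup_a, cgroup_b)); /* 0: token left alone */
	return 0;
}
```

In the reclaim paths touched below, do_try_to_free_pages() passes sc->mem_cgroup (non-NULL only for memcg reclaim), while kswapd's balance_pgdat() passes NULL, so global reclaim keeps its old behaviour.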
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
| -rw-r--r-- | mm/memcontrol.c | 16 |
| -rw-r--r-- | mm/thrash.c | 87 |
| -rw-r--r-- | mm/vmscan.c | 4 |
3 files changed, 77 insertions, 30 deletions
```diff
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index bd9052a5d3a..e37c44d88d4 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -735,7 +735,7 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 				struct mem_cgroup, css);
 }
 
-static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
+struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
 {
 	struct mem_cgroup *mem = NULL;
 
@@ -5414,18 +5414,16 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
 				struct cgroup *old_cont,
 				struct task_struct *p)
 {
-	struct mm_struct *mm;
+	struct mm_struct *mm = get_task_mm(p);
 
-	if (!mc.to)
-		/* no need to move charge */
-		return;
-
-	mm = get_task_mm(p);
 	if (mm) {
-		mem_cgroup_move_charge(mm);
+		if (mc.to)
+			mem_cgroup_move_charge(mm);
+		put_swap_token(mm);
 		mmput(mm);
 	}
-	mem_cgroup_clear_mc();
+	if (mc.to)
+		mem_cgroup_clear_mc();
 }
 #else	/* !CONFIG_MMU */
 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
diff --git a/mm/thrash.c b/mm/thrash.c
index 2372d4ed5dd..6cdf8651138 100644
--- a/mm/thrash.c
+++ b/mm/thrash.c
@@ -21,11 +21,31 @@
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/swap.h>
+#include <linux/memcontrol.h>
 
 static DEFINE_SPINLOCK(swap_token_lock);
 struct mm_struct *swap_token_mm;
+struct mem_cgroup *swap_token_memcg;
 static unsigned int global_faults;
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
+{
+	struct mem_cgroup *memcg;
+
+	memcg = try_get_mem_cgroup_from_mm(mm);
+	if (memcg)
+		css_put(mem_cgroup_css(memcg));
+
+	return memcg;
+}
+#else
+static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
+{
+	return NULL;
+}
+#endif
+
 void grab_swap_token(struct mm_struct *mm)
 {
 	int current_interval;
@@ -38,40 +58,69 @@ void grab_swap_token(struct mm_struct *mm)
 		return;
 
 	/* First come first served */
-	if (swap_token_mm == NULL) {
-		mm->token_priority = mm->token_priority + 2;
-		swap_token_mm = mm;
+	if (!swap_token_mm)
+		goto replace_token;
+
+	if (mm == swap_token_mm) {
+		mm->token_priority += 2;
 		goto out;
 	}
 
-	if (mm != swap_token_mm) {
-		if (current_interval < mm->last_interval)
-			mm->token_priority++;
-		else {
-			if (likely(mm->token_priority > 0))
-				mm->token_priority--;
-		}
-		/* Check if we deserve the token */
-		if (mm->token_priority > swap_token_mm->token_priority) {
-			mm->token_priority += 2;
-			swap_token_mm = mm;
-		}
-	} else {
-		/* Token holder came in again! */
-		mm->token_priority += 2;
+	if (current_interval < mm->last_interval)
+		mm->token_priority++;
+	else {
+		if (likely(mm->token_priority > 0))
+			mm->token_priority--;
 	}
 
+	/* Check if we deserve the token */
+	if (mm->token_priority > swap_token_mm->token_priority)
+		goto replace_token;
+
 out:
 	mm->faultstamp = global_faults;
 	mm->last_interval = current_interval;
 	spin_unlock(&swap_token_lock);
+	return;
+
+replace_token:
+	mm->token_priority += 2;
+	swap_token_mm = mm;
+	swap_token_memcg = swap_token_memcg_from_mm(mm);
+	goto out;
 }
 
 /* Called on process exit. */
 void __put_swap_token(struct mm_struct *mm)
 {
 	spin_lock(&swap_token_lock);
-	if (likely(mm == swap_token_mm))
+	if (likely(mm == swap_token_mm)) {
 		swap_token_mm = NULL;
+		swap_token_memcg = NULL;
+	}
 	spin_unlock(&swap_token_lock);
 }
+
+static bool match_memcg(struct mem_cgroup *a, struct mem_cgroup *b)
+{
+	if (!a)
+		return true;
+	if (!b)
+		return true;
+	if (a == b)
+		return true;
+	return false;
+}
+
+void disable_swap_token(struct mem_cgroup *memcg)
+{
+	/* memcg reclaim don't disable unrelated mm token. */
+	if (match_memcg(memcg, swap_token_memcg)) {
+		spin_lock(&swap_token_lock);
+		if (match_memcg(memcg, swap_token_memcg)) {
+			swap_token_mm = NULL;
+			swap_token_memcg = NULL;
+		}
+		spin_unlock(&swap_token_lock);
+	}
+}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index faa0a088f9c..dbe6ea321df 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2081,7 +2081,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
 		sc->nr_scanned = 0;
 		if (!priority)
-			disable_swap_token();
+			disable_swap_token(sc->mem_cgroup);
 		total_scanned += shrink_zones(priority, zonelist, sc);
 		/*
 		 * Don't shrink slabs when reclaiming memory from
@@ -2407,7 +2407,7 @@ loop_again:
 
 		/* The swap token gets in the way of swapout... */
 		if (!priority)
-			disable_swap_token();
+			disable_swap_token(NULL);
 
 		all_zones_ok = 1;
 		balanced = 0;
```