author    Johannes Weiner <hannes@cmpxchg.org>        2011-03-23 16:42:35 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-03-23 19:46:30 -0700
commit    7ffd4ca7a2cdd7a18f0b499a4e9e0e7cf36ba018 (patch)
tree      1a2678ce91b9dfb2118d19e0ec457ca5582793f9
parent    11c9ea4e80fc3be83485667204c68d0a732f3757 (diff)
memcg: convert uncharge batching from bytes to page granularity
We never uncharge subpage quantities.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 include/linux/sched.h |  4 ++--
 mm/memcontrol.c       | 18 ++++++++++--------
 2 files changed, 12 insertions(+), 10 deletions(-)
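The idea behind the conversion can be illustrated with a minimal userspace sketch (this is not the kernel code; PAGE_SIZE, batch_one_page() and batch_bytes() are illustrative stand-ins): the per-task batch counts whole pages, and bytes are only derived when the batch is flushed back to the resource counter.

/* Simplified sketch of page-granular uncharge batching; illustrative only. */
#include <stdio.h>

#define PAGE_SIZE 4096UL  /* assumption for the sketch; the kernel uses the arch value */

struct memcg_batch_info {
	int do_batch;                 /* nesting depth of batched uncharge */
	unsigned long nr_pages;       /* uncharged usage, in pages */
	unsigned long memsw_nr_pages; /* uncharged mem+swap usage, in pages */
};

/* Record one uncharged page; subpage quantities are never batched. */
static void batch_one_page(struct memcg_batch_info *batch, int uncharge_memsw)
{
	batch->nr_pages++;
	if (uncharge_memsw)
		batch->memsw_nr_pages++;
}

/* Convert back to bytes only when the batch is drained. */
static unsigned long batch_bytes(const struct memcg_batch_info *batch)
{
	return batch->nr_pages * PAGE_SIZE;
}

int main(void)
{
	struct memcg_batch_info batch = { .do_batch = 1 };

	batch_one_page(&batch, 1);
	batch_one_page(&batch, 0);
	printf("flush %lu bytes (%lu pages)\n",
	       batch_bytes(&batch), batch.nr_pages);
	return 0;
}

The patch below makes exactly this switch: the accumulation sites increment a page count instead of adding PAGE_SIZE, and the flush path multiplies by PAGE_SIZE once.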
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4b601be3dace..98fc7ed4b191 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1524,8 +1524,8 @@ struct task_struct {
struct memcg_batch_info {
int do_batch; /* incremented when batch uncharge started */
struct mem_cgroup *memcg; /* target memcg of uncharge */
- unsigned long bytes; /* uncharged usage */
- unsigned long memsw_bytes; /* uncharged mem+swap usage */
+ unsigned long nr_pages; /* uncharged usage */
+ unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
} memcg_batch;
#endif
};
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 91120a04f935..9dfbed2aacc9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2579,9 +2579,9 @@ __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype,
if (batch->memcg != mem)
goto direct_uncharge;
/* remember freed charge and uncharge it later */
- batch->bytes += PAGE_SIZE;
+ batch->nr_pages++;
if (uncharge_memsw)
- batch->memsw_bytes += PAGE_SIZE;
+ batch->memsw_nr_pages++;
return;
direct_uncharge:
res_counter_uncharge(&mem->res, page_size);
@@ -2708,8 +2708,8 @@ void mem_cgroup_uncharge_start(void)
/* We can do nest. */
if (current->memcg_batch.do_batch == 1) {
current->memcg_batch.memcg = NULL;
- current->memcg_batch.bytes = 0;
- current->memcg_batch.memsw_bytes = 0;
+ current->memcg_batch.nr_pages = 0;
+ current->memcg_batch.memsw_nr_pages = 0;
}
}
@@ -2730,10 +2730,12 @@ void mem_cgroup_uncharge_end(void)
* This "batch->memcg" is valid without any css_get/put etc...
* bacause we hide charges behind us.
*/
- if (batch->bytes)
- res_counter_uncharge(&batch->memcg->res, batch->bytes);
- if (batch->memsw_bytes)
- res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
+ if (batch->nr_pages)
+ res_counter_uncharge(&batch->memcg->res,
+ batch->nr_pages * PAGE_SIZE);
+ if (batch->memsw_nr_pages)
+ res_counter_uncharge(&batch->memcg->memsw,
+ batch->memsw_nr_pages * PAGE_SIZE);
memcg_oom_recover(batch->memcg);
/* forget this pointer (for sanity check) */
batch->memcg = NULL;