author	Fernando Luis Vazquez Cao <fernando@oss.ntt.co.jp>	2009-01-07 18:07:51 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-08 08:31:04 -0800
commit	0753b0ef3b301895234fed02bea2c099c7ff4feb (patch)
tree	35d01368e76d0e5ad21b38fc80274154f8a35d62 /mm/page_cgroup.c
parent	01b1ae63c2270cbacfd43fea94578c17950eb548 (diff)
memcg: do not recalculate section unnecessarily in init_section_page_cgroup
In init_section_page_cgroup() the section a given pfn belongs to is calculated at the top of the function and, despite the fact that the pfn/section correspondence does not change, it is recalculated further down the same function. By computing this just once and reusing that value we save some bytes in the object file and do not waste CPU cycles.

Signed-off-by: Fernando Luis Vazquez Cao <fernando@oss.ntt.co.jp>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
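The optimization is ordinary hoisting of a function-invariant computation. A minimal standalone sketch of the same pattern, where section_of(), SECTION_SHIFT, NSECTIONS and struct section are hypothetical stand-ins for the kernel's __pfn_to_section() and struct mem_section:

/*
 * Sketch only: a value that cannot change for the duration of the
 * function (the pfn -> section mapping) is computed once at the top
 * and reused, instead of being recomputed further down.
 */
#include <stdio.h>
#include <stdlib.h>

#define SECTION_SHIFT	10	/* assumed: 1024 pfns per section */
#define NSECTIONS	4

struct section { long *table; };

static struct section sections[NSECTIONS];

static struct section *section_of(unsigned long pfn)
{
	return &sections[pfn >> SECTION_SHIFT];
}

static int init_section(unsigned long pfn)
{
	/* compute once; the pfn/section correspondence does not change */
	struct section *section = section_of(pfn);

	if (!section->table)
		section->table = calloc(1 << SECTION_SHIFT, sizeof(long));

	/* before the patch, the equivalent of section_of(pfn) was
	 * recomputed at this point */
	section->table[pfn & ((1 << SECTION_SHIFT) - 1)] = 1;
	return 0;
}

int main(void)
{
	return init_section(2048);
}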
Diffstat (limited to 'mm/page_cgroup.c')
-rw-r--r--	mm/page_cgroup.c	5
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index d6507a660ed..df1e54a5ed1 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -103,13 +103,11 @@ struct page_cgroup *lookup_page_cgroup(struct page *page)
 /* __alloc_bootmem...() is protected by !slab_available() */
 static int __init_refok init_section_page_cgroup(unsigned long pfn)
 {
-	struct mem_section *section;
+	struct mem_section *section = __pfn_to_section(pfn);
 	struct page_cgroup *base, *pc;
 	unsigned long table_size;
 	int nid, index;
 
-	section = __pfn_to_section(pfn);
-
 	if (!section->page_cgroup) {
 		nid = page_to_nid(pfn_to_page(pfn));
 		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
@@ -145,7 +143,6 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
 		__init_page_cgroup(pc, pfn + index);
 	}
 
-	section = __pfn_to_section(pfn);
 	section->page_cgroup = base - pfn;
 	total_usage += table_size;
 	return 0;