author     Hugh Dickins <hugh@veritas.com>                      2009-01-06 14:39:25 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>       2009-01-06 15:59:02 -0800
commit     b5934c531849ff4a51ce0f290141efe564290e40 (patch)
tree       0c741a5bd6178db11f1147875f59e7a1a9d1754d /mm/memory.c
parent     51726b1222863852c46ca21ed0115b85d1edfd89 (diff)
mm: add_active_or_unevictable into rmap
lru_cache_add_active_or_unevictable() and page_add_new_anon_rmap() always
appear together.  Save some symbol table space and some jumping around by
removing lru_cache_add_active_or_unevictable(), folding its code into
page_add_new_anon_rmap(): like how we add file pages to lru just after
adding them to page cache.

Remove the nearby "TODO: is this safe?" comments (yes, it is safe), and
change page_add_new_anon_rmap()'s address BUG_ON to VM_BUG_ON as
originally intended.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
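The diff below covers only the mm/memory.c callers; the folded-in LRU placement
lands in page_add_new_anon_rmap() in mm/rmap.c, which is outside this diffstat.
A minimal sketch of roughly what the combined function looks like after the fold,
assuming the 2.6.29-era helpers (lru_cache_add_lru(), add_page_to_unevictable_list(),
the two-argument page_evictable()) and eliding __page_set_anon_rmap()'s body; this is
an illustration, not the exact committed code:

/*
 * Sketch only: page_add_new_anon_rmap() now also does the LRU placement
 * that lru_cache_add_active_or_unevictable() used to do in the callers.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	/* was BUG_ON; relaxed to VM_BUG_ON as originally intended */
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(&page->_mapcount, 0);	/* new page: count starts at -1 */
	__page_set_anon_rmap(page, vma, address);
	if (page_evictable(page, vma))
		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
	else
		add_page_to_unevictable_list(page);
}

With this in place the callers shrink to SetPageSwapBacked() followed by a single
page_add_new_anon_rmap() call, as the hunks below show.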
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  6
1 file changed, 0 insertions(+), 6 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index b5af358b8b2..a138c50dc39 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1949,10 +1949,7 @@ gotten:
*/
ptep_clear_flush_notify(vma, address, page_table);
SetPageSwapBacked(new_page);
- lru_cache_add_active_or_unevictable(new_page, vma);
page_add_new_anon_rmap(new_page, vma, address);
-
-//TODO: is this safe? do_anonymous_page() does it this way.
set_pte_at(mm, address, page_table, entry);
update_mmu_cache(vma, address, entry);
if (old_page) {
@@ -2448,7 +2445,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto release;
inc_mm_counter(mm, anon_rss);
SetPageSwapBacked(page);
- lru_cache_add_active_or_unevictable(page, vma);
page_add_new_anon_rmap(page, vma, address);
set_pte_at(mm, address, page_table, entry);
@@ -2597,7 +2593,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (anon) {
inc_mm_counter(mm, anon_rss);
SetPageSwapBacked(page);
- lru_cache_add_active_or_unevictable(page, vma);
page_add_new_anon_rmap(page, vma, address);
} else {
inc_mm_counter(mm, file_rss);
@@ -2607,7 +2602,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
get_page(dirty_page);
}
}
-//TODO: is this safe? do_anonymous_page() does it this way.
set_pte_at(mm, address, page_table, entry);
/* no need to invalidate: a not-present page won't be cached */