author		Andrea Arcangeli <aarcange@redhat.com>	2012-01-12 17:19:29 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-12 20:13:08 -0800
commit		5013473152d1ac9d44d787fb02edda845fdf2cb3 (patch)
tree		639d241cdc804f09d56caf8ff844e09291b99170 /mm
parent		b16d3d5a5219d01e9be5e586e5d50fbf1ca955ea (diff)
mm: vmscan: check if we isolated a compound page during lumpy scan
Properly take into account if we isolated a compound page during the lumpy scan in reclaim and skip over the tail pages when encountered. This corrects the values given to the tracepoint for the number of lumpy pages isolated and avoids breaking the loop early if compound pages smaller than the requested allocation size are encountered.

[mgorman@suse.de: updated changelog]
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Dave Jones <davej@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Andy Isaacson <adi@hexapodia.org>
Cc: Nai Xia <nai.xia@gmail.com>
Cc: Johannes Weiner <jweiner@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
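To make the skip logic easier to follow outside the kernel, below is a minimal, stand-alone user-space sketch, not kernel code: when a compound head made up of N base pages is isolated, the scan cursor is advanced by N - 1 so the tail pages are never revisited, and the counters are bumped by base pages rather than by one. The pages_at array, NR_PFNS constant and main() harness are illustrative assumptions; only the pfn += isolated_pages - 1 step mirrors the patch shown further down.

/*
 * Minimal user-space sketch (not kernel code) of the tail-page skip.
 * pages_at[pfn] == N means a compound page of N base pages starts at
 * that pfn; its tail pages are marked 0. All names are illustrative;
 * only the "pfn += isolated_pages - 1" step mirrors the patch.
 */
#include <stdio.h>

#define NR_PFNS 16

static const unsigned int pages_at[NR_PFNS] = {
	1, 1, 4, 0, 0, 0, 1, 2, 0, 1, 1, 8, 0, 0, 0, 0
};

int main(void)
{
	unsigned long nr_taken = 0, nr_lumpy_taken = 0;

	for (unsigned long pfn = 0; pfn < NR_PFNS; pfn++) {
		unsigned int isolated_pages = pages_at[pfn];

		if (!isolated_pages) {
			/* Only reached if the skip below is removed:
			 * without it every tail page is rescanned. */
			fprintf(stderr, "rescanned tail page at pfn %lu\n", pfn);
			continue;
		}

		/* Count base pages, as hpage_nr_pages() does in the patch. */
		nr_taken += isolated_pages;
		nr_lumpy_taken += isolated_pages;

		/* Skip the tail pages of the compound page just isolated. */
		pfn += isolated_pages - 1;
	}

	printf("taken=%lu lumpy_taken=%lu\n", nr_taken, nr_lumpy_taken);
	return 0;
}

Removing the pfn += isolated_pages - 1 line makes the stderr branch fire once per tail page, which is the kind of wasted rescanning of tail pages the patch avoids.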
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmscan.c	10
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a85a261bf8f9..866ab27c52a5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1196,13 +1196,17 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 				break;
 
 			if (__isolate_lru_page(cursor_page, mode, file) == 0) {
+				unsigned int isolated_pages;
+
 				mem_cgroup_lru_del(cursor_page);
 				list_move(&cursor_page->lru, dst);
-				nr_taken += hpage_nr_pages(cursor_page);
-				nr_lumpy_taken++;
+				isolated_pages = hpage_nr_pages(cursor_page);
+				nr_taken += isolated_pages;
+				nr_lumpy_taken += isolated_pages;
 				if (PageDirty(cursor_page))
-					nr_lumpy_dirty++;
+					nr_lumpy_dirty += isolated_pages;
 				scan++;
+				pfn += isolated_pages - 1;
 			} else {
 				/*
 				 * Check if the page is freed already.