author    Rik van Riel <riel@redhat.com>                    2011-01-13 15:47:13 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>   2011-01-13 17:32:46 -0800
commit    9992af102974f3f8a02a1f2729c3461881539e26 (patch)
tree      40958e1a8bd7efc7c9a4d28e2b77d86bb8688734 /mm
parent    2c888cfbc1b45508a44763d85ba2e8ac43faff5f (diff)
download  linux-3.10-9992af102974f3f8a02a1f2729c3461881539e26.tar.gz
          linux-3.10-9992af102974f3f8a02a1f2729c3461881539e26.tar.bz2
          linux-3.10-9992af102974f3f8a02a1f2729c3461881539e26.zip
thp: scale nr_rotated to balance memory pressure
Make sure we scale up nr_rotated when we encounter a referenced
transparent huge page.  This ensures pageout scanning balance is not
distorted when there are huge pages on the LRU.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
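For context: hpage_nr_pages() was added by the parent commit (2c888cfbc1b4)
and returns the number of base pages a page represents, so a referenced
transparent huge page now contributes its full weight to nr_rotated and
recent_rotated rather than counting as a single page, which would otherwise
skew the rotation statistics used to balance reclaim pressure.  A minimal
sketch of the helper, assuming the x86 defaults of this era (4KB base pages,
2MB PMD-sized THP):

	/* sketch of the helper used below, per include/linux/huge_mm.h of this era */
	static inline int hpage_nr_pages(struct page *page)
	{
		if (unlikely(PageTransHuge(page)))
			return HPAGE_PMD_NR;	/* 2MB / 4KB = 512 base pages */
		return 1;			/* ordinary page */
	}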
Diffstat (limited to 'mm')
-rw-r--r--  mm/vmscan.c  |  5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0882014d2ce..47a50962ce8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1276,7 +1276,8 @@ putback_lru_pages(struct zone *zone, struct scan_control *sc,
 		add_page_to_lru_list(zone, page, lru);
 		if (is_active_lru(lru)) {
 			int file = is_file_lru(lru);
-			reclaim_stat->recent_rotated[file]++;
+			int numpages = hpage_nr_pages(page);
+			reclaim_stat->recent_rotated[file] += numpages;
 		}
 		if (!pagevec_add(&pvec, page)) {
 			spin_unlock_irq(&zone->lru_lock);
@@ -1552,7 +1553,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		}
 
 		if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
-			nr_rotated++;
+			nr_rotated += hpage_nr_pages(page);
 			/*
 			 * Identify referenced, file-backed active pages and
 			 * give them one more trip around the active list. So