Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  20
1 file changed, 20 insertions, 0 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 676aec93d69..e1d87ee1d9c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -765,6 +765,23 @@ int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
 	return move_freepages(zone, start_page, end_page, migratetype);
 }
 
+/* Return the page with the lowest PFN in the list */
+static struct page *min_page(struct list_head *list)
+{
+	unsigned long min_pfn = -1UL;
+	struct page *min_page = NULL, *page;
+
+	list_for_each_entry(page, list, lru) {
+		unsigned long pfn = page_to_pfn(page);
+		if (pfn < min_pfn) {
+			min_pfn = pfn;
+			min_page = page;
+		}
+	}
+
+	return min_page;
+}
+
 /* Remove an element from the buddy allocator from the fallback list */
 static struct page *__rmqueue_fallback(struct zone *zone, int order,
 						int start_migratetype)
@@ -795,8 +812,11 @@ retry:
 			if (list_empty(&area->free_list[migratetype]))
 				continue;
 
+			/* Bias kernel allocations towards low pfns */
 			page = list_entry(area->free_list[migratetype].next,
 					struct page, lru);
+			if (unlikely(start_migratetype != MIGRATE_MOVABLE))
+				page = min_page(&area->free_list[migratetype]);
 			area->nr_free--;
 
 			/*
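
The hunks above add two pieces: an O(n) helper that scans a free list for the
entry with the lowest PFN, and a call site in __rmqueue_fallback() that uses it
whenever the fallback allocation is not MIGRATE_MOVABLE. A minimal standalone
sketch of that selection rule follows (userspace C, not kernel code: the
struct fake_page type, the lowest_pfn_page() helper and the sample PFN values
are all invented for illustration).

#include <stdio.h>
#include <stddef.h>

/* Userspace stand-in for struct page: just a PFN and a next pointer. */
struct fake_page {
	unsigned long pfn;
	struct fake_page *next;
};

/*
 * Same idea as the patch's min_page() helper: walk the whole list and
 * return the entry with the lowest PFN, or NULL if the list is empty.
 */
static struct fake_page *lowest_pfn_page(struct fake_page *head)
{
	unsigned long min_pfn = -1UL;	/* largest unsigned long, as in the patch */
	struct fake_page *min = NULL;

	for (struct fake_page *p = head; p; p = p->next) {
		if (p->pfn < min_pfn) {
			min_pfn = p->pfn;
			min = p;
		}
	}
	return min;
}

int main(void)
{
	/* Three free "pages" with made-up PFNs, linked head-first. */
	struct fake_page c = { .pfn = 4096, .next = NULL };
	struct fake_page b = { .pfn = 128,  .next = &c };
	struct fake_page a = { .pfn = 9000, .next = &b };
	struct fake_page *head = &a;

	/* Pretend this is a non-movable fallback, as in the patch's check. */
	int movable = 0;

	/* Movable allocations keep the old head-of-list pick; others scan. */
	struct fake_page *pick = movable ? head : lowest_pfn_page(head);

	printf("picked pfn %lu\n", pick ? pick->pfn : 0UL);
	return 0;
}

Gating the scan on start_migratetype keeps the common movable fallback path
O(1); only non-movable fallbacks pay the linear walk, which is the cost the
patch accepts in order to bias kernel allocations towards low PFNs.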