author		Mel Gorman <mel@csn.ul.ie>	2009-09-21 17:03:20 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-22 07:17:39 -0700
commit		a6f9edd65beaef24836e8934c8912c1e974dd45c (patch)
tree		041c60ed559d3bc1f289d0040e75cfdd78f0acd0
parent		5f8dcc21211a3d4e3a7a5ca366b469fb88117f61 (diff)
page-allocator: maintain rolling count of pages to free from the PCP
When round-robin freeing pages from the PCP lists, empty lists may be
encountered. If one list has many more pages than another, there may be
numerous checks for list_empty(), which is undesirable. This patch
maintains a count of pages to free that is incremented when empty lists
are encountered. The intention is that more pages will then be freed
from fuller lists than from the empty ones, reducing the number of
empty-list checks in the free path.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/page_alloc.c	24
1 file changed, 15 insertions, 9 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1b1c39e6a9b8..6877e22e3aa1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -525,32 +525,38 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 					struct per_cpu_pages *pcp)
 {
 	int migratetype = 0;
+	int batch_free = 0;
 
 	spin_lock(&zone->lock);
 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 	zone->pages_scanned = 0;
 
 	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
-	while (count--) {
+	while (count) {
 		struct page *page;
 		struct list_head *list;
 
 		/*
-		 * Remove pages from lists in a round-robin fashion. This spinning
-		 * around potentially empty lists is bloody awful, alternatives that
-		 * don't suck are welcome
+		 * Remove pages from lists in a round-robin fashion. A
+		 * batch_free count is maintained that is incremented when an
+		 * empty list is encountered.  This is so more pages are freed
+		 * off fuller lists instead of spinning excessively around empty
+		 * lists
 		 */
 		do {
+			batch_free++;
 			if (++migratetype == MIGRATE_PCPTYPES)
 				migratetype = 0;
 			list = &pcp->lists[migratetype];
 		} while (list_empty(list));
 
-		page = list_entry(list->prev, struct page, lru);
-		/* have to delete it as __free_one_page list manipulates */
-		list_del(&page->lru);
-		trace_mm_page_pcpu_drain(page, 0, migratetype);
-		__free_one_page(page, zone, 0, migratetype);
+		do {
+			page = list_entry(list->prev, struct page, lru);
+			/* must delete as __free_one_page list manipulates */
+			list_del(&page->lru);
+			__free_one_page(page, zone, 0, migratetype);
+			trace_mm_page_pcpu_drain(page, 0, migratetype);
+		} while (--count && --batch_free && !list_empty(list));
 	}
 	spin_unlock(&zone->lock);
 }
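
To see the batch_free behaviour in isolation, the following is a minimal
userspace sketch of the same control flow. Every name in it (NTYPES, lists,
drain_bulk) is invented for illustration; only the loop structure mirrors
free_pcppages_bulk(), and like the kernel caller it assumes count never
exceeds the total number of pages queued across the lists.

/*
 * Toy model of the drain loop above: per-type page counts stand in
 * for pcp->lists, and decrementing a count stands in for freeing a
 * page back to the buddy allocator.
 */
#include <stdio.h>

#define NTYPES 3		/* stands in for MIGRATE_PCPTYPES */

static int lists[NTYPES];	/* pages queued per migratetype */

static void drain_bulk(int count)
{
	int migratetype = 0;
	int batch_free = 0;

	while (count) {
		/*
		 * Walk the lists round-robin. Each list examined bumps
		 * batch_free, so skipping an empty list entitles the
		 * next non-empty list to donate one extra page.
		 */
		do {
			batch_free++;
			if (++migratetype == NTYPES)
				migratetype = 0;
		} while (lists[migratetype] == 0);

		/* Free up to batch_free pages from the list we found. */
		do {
			lists[migratetype]--;
			printf("freed a page from list %d\n", migratetype);
		} while (--count && --batch_free && lists[migratetype]);
	}
}

int main(void)
{
	lists[0] = 1;	/* nearly empty */
	lists[1] = 0;	/* empty: always skipped */
	lists[2] = 6;	/* fuller list */

	drain_bulk(5);
	return 0;
}

With lists sized {1, 0, 6} and count = 5, the sketch frees two pages from
list 2, one from list 0, then two more from list 2: each time the empty
list 1 is skipped, batch_free rises to 2 and the fuller list donates an
extra page, instead of list 1 being re-examined once per page freed as the
old while (count--) loop did.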