field | value | date |
---|---|---|
author | Rafael J. Wysocki <rjw@sisk.pl> | 2006-09-25 23:32:49 -0700 |
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-09-26 08:48:59 -0700 |
commit | f623f0db8e6aa86a37be86167e4ff478821a9f4f (patch) | |
tree | fab12e8dc57d14101e9e512ba708b83f74551dd9 /mm | |
parent | e3920fb42c8ddfe63befb54d95c0e13eabacea9b (diff) | |
[PATCH] swsusp: Fix mark_free_pages
Clean up mm/page_alloc.c#mark_free_pages() and make it avoid clearing
PageNosaveFree for PageNosave pages. This allows us to get rid of an ugly
hack in kernel/power/snapshot.c#copy_data_pages().
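For readers without the surrounding code, the flag protocol can be modeled in a few lines of plain C. This is a minimal illustrative sketch, not kernel code: `page_model`, `clear_free_mark()`, and `saveable()` are hypothetical stand-ins for `struct page` and the `PageNosave()`/`PageNosaveFree()` test macros.

```c
#include <stdbool.h>

/* Hypothetical stand-in for the two swsusp bits in struct page. */
struct page_model {
	bool nosave;       /* PageNosave: never include in the image */
	bool nosave_free;  /* PageNosaveFree: page is free, skip it  */
};

/*
 * What the fixed mark_free_pages() does while rescanning a zone:
 * clear the stale "free" mark, but leave PageNosave pages untouched.
 */
static void clear_free_mark(struct page_model *page)
{
	if (!page->nosave)
		page->nosave_free = false;
}

/*
 * With the fix, the copy loop's "save this page?" test needs no
 * special case: either flag set simply means "skip".
 */
static bool saveable(const struct page_model *page)
{
	return !page->nosave && !page->nosave_free;
}
```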
Additionally, the page-copying loop in copy_data_pages() is moved to an
inline function.
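The kernel/power/snapshot.c side of the change is outside this mm-limited diffstat, so it is not shown here. As a hedged sketch only, the extracted helper the message refers to could have roughly this shape; the name `copy_data_page`, the word-at-a-time body, and the fixed `PAGE_SIZE` are assumptions for illustration:

```c
#include <stddef.h>

#define PAGE_SIZE 4096UL  /* illustrative; the kernel defines this per arch */

/* Hypothetical inline helper: copy one page from src to dst, one
 * long-sized word at a time. */
static inline void copy_data_page(long *dst, const long *src)
{
	size_t n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}
```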
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/page_alloc.c | 24 |
1 file changed, 16 insertions(+), 8 deletions(-)
```diff
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 51070b6d593..9810f0a60db 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -694,7 +694,8 @@ static void __drain_pages(unsigned int cpu)
 
 void mark_free_pages(struct zone *zone)
 {
-	unsigned long zone_pfn, flags;
+	unsigned long pfn, max_zone_pfn;
+	unsigned long flags;
 	int order;
 	struct list_head *curr;
 
@@ -702,18 +703,25 @@ void mark_free_pages(struct zone *zone)
 		return;
 
 	spin_lock_irqsave(&zone->lock, flags);
-	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
-		ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));
+
+	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
+		if (pfn_valid(pfn)) {
+			struct page *page = pfn_to_page(pfn);
+
+			if (!PageNosave(page))
+				ClearPageNosaveFree(page);
+		}
 
 	for (order = MAX_ORDER - 1; order >= 0; --order)
 		list_for_each(curr, &zone->free_area[order].free_list) {
-			unsigned long start_pfn, i;
+			unsigned long i;
 
-			start_pfn = page_to_pfn(list_entry(curr, struct page, lru));
+			pfn = page_to_pfn(list_entry(curr, struct page, lru));
+			for (i = 0; i < (1UL << order); i++)
+				SetPageNosaveFree(pfn_to_page(pfn + i));
+		}
 
-			for (i=0; i < (1<<order); i++)
-				SetPageNosaveFree(pfn_to_page(start_pfn+i));
-		}
 	spin_unlock_irqrestore(&zone->lock, flags);
 }
```
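Two design points in the rewritten loop are worth noting. Walking absolute page frame numbers from `zone->zone_start_pfn` up to `max_zone_pfn` and checking `pfn_valid()` before `pfn_to_page()` keeps the scan safe on zones that contain holes, where the old unconditional `pfn_to_page(zone_pfn + zone->zone_start_pfn)` could be handed a frame number with no backing `struct page`. The free-list walk also now computes the block size as `1UL << order` rather than `1 << order`, keeping the arithmetic in `unsigned long` to match the pfn it is added to.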