path: root/mm/page_alloc.c
author     Ingo Molnar <mingo@elte.hu>  2008-07-08 10:32:56 +0200
committer  Ingo Molnar <mingo@elte.hu>  2008-07-08 10:32:56 +0200
commit     896395c290f902576270d84291c1f7f8bfbe339d (patch)
tree       650114bff3a5f808ee1d713ecc443b0eaab2e1c3 /mm/page_alloc.c
parent     af1cf204ba2fd8135933a2e4df523fb1112dc0e2 (diff)
parent     1b40a895df6c7d5a80e71f65674060b03d84bbef (diff)
Merge branch 'linus' into tmp.x86.mpparse.new
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  43
1 file changed, 9 insertions, 34 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 215408684076..eee5ba7509c1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -237,16 +237,7 @@ static void bad_page(struct page *page)
printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
KERN_EMERG "Backtrace:\n");
dump_stack();
- page->flags &= ~(1 << PG_lru |
- 1 << PG_private |
- 1 << PG_locked |
- 1 << PG_active |
- 1 << PG_dirty |
- 1 << PG_reclaim |
- 1 << PG_slab |
- 1 << PG_swapcache |
- 1 << PG_writeback |
- 1 << PG_buddy );
+ page->flags &= ~PAGE_FLAGS_CLEAR_WHEN_BAD;
set_page_count(page, 0);
reset_page_mapcount(page);
page->mapping = NULL;
@@ -463,16 +454,7 @@ static inline int free_pages_check(struct page *page)
(page->mapping != NULL) |
(page_get_page_cgroup(page) != NULL) |
(page_count(page) != 0) |
- (page->flags & (
- 1 << PG_lru |
- 1 << PG_private |
- 1 << PG_locked |
- 1 << PG_active |
- 1 << PG_slab |
- 1 << PG_swapcache |
- 1 << PG_writeback |
- 1 << PG_reserved |
- 1 << PG_buddy ))))
+ (page->flags & PAGE_FLAGS_CHECK_AT_FREE)))
bad_page(page);
if (PageDirty(page))
__ClearPageDirty(page);
@@ -616,17 +598,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
(page->mapping != NULL) |
(page_get_page_cgroup(page) != NULL) |
(page_count(page) != 0) |
- (page->flags & (
- 1 << PG_lru |
- 1 << PG_private |
- 1 << PG_locked |
- 1 << PG_active |
- 1 << PG_dirty |
- 1 << PG_slab |
- 1 << PG_swapcache |
- 1 << PG_writeback |
- 1 << PG_reserved |
- 1 << PG_buddy ))))
+ (page->flags & PAGE_FLAGS_CHECK_AT_PREP)))
bad_page(page);
/*
@@ -1396,6 +1368,9 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
(void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
&preferred_zone);
+ if (!preferred_zone)
+ return NULL;
+
classzone_idx = zone_idx(preferred_zone);
zonelist_scan:
@@ -2353,7 +2328,6 @@ static void build_zonelists(pg_data_t *pgdat)
static void build_zonelist_cache(pg_data_t *pgdat)
{
pgdat->node_zonelists[0].zlcache_ptr = NULL;
- pgdat->node_zonelists[1].zlcache_ptr = NULL;
}
#endif /* CONFIG_NUMA */
@@ -2804,7 +2778,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
alloc_size = zone->wait_table_hash_nr_entries
* sizeof(wait_queue_head_t);
- if (system_state == SYSTEM_BOOTING) {
+ if (!slab_is_available()) {
zone->wait_table = (wait_queue_head_t *)
alloc_bootmem_node(pgdat, alloc_size);
} else {
@@ -3378,7 +3352,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
* is used by this zone for memmap. This affects the watermark
* and per-cpu initialisations
*/
- memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT;
+ memmap_pages =
+ PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
if (realsize >= memmap_pages) {
realsize -= memmap_pages;
printk(KERN_DEBUG