-rw-r--r--   include/linux/mm_inline.h |  13
-rw-r--r--   include/linux/mmzone.h    |   4
-rw-r--r--   include/linux/vmstat.h    |   9
-rw-r--r--   mm/page_alloc.c           |   6
-rw-r--r--   mm/vmscan.c               |  51
-rw-r--r--   mm/vmstat.c               |  28
6 files changed, 60 insertions(+), 51 deletions(-)
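The patch below replaces the open-coded zone->nr_active and zone->nr_inactive counters with two new zone VM counters (ZVC), NR_ACTIVE and NR_INACTIVE: the LRU list lengths are now maintained through the vmstat helpers (__inc_zone_state, __dec_zone_state, __mod_zone_page_state) and read back through zone_page_state(), node_page_state() and global_page_state(). A minimal sketch of the read side, not part of the patch itself (zone_lru_pages() and all_lru_pages() are illustrative names, assuming kernel context after this change):

#include <linux/mmzone.h>
#include <linux/vmstat.h>

/* Per-zone LRU size, read from the ZVC counters this patch introduces. */
static unsigned long zone_lru_pages(struct zone *zone)
{
	return zone_page_state(zone, NR_ACTIVE) +
		zone_page_state(zone, NR_INACTIVE);
}

/* System-wide LRU size, as count_lru_pages() computes it after the patch. */
static unsigned long all_lru_pages(void)
{
	return global_page_state(NR_ACTIVE) + global_page_state(NR_INACTIVE);
}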
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 3b6723dfaff..895bc4e9303 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -1,30 +1,29 @@
-
 static inline void
 add_page_to_active_list(struct zone *zone, struct page *page)
 {
 	list_add(&page->lru, &zone->active_list);
-	zone->nr_active++;
+	__inc_zone_state(zone, NR_ACTIVE);
 }
 
 static inline void
 add_page_to_inactive_list(struct zone *zone, struct page *page)
 {
 	list_add(&page->lru, &zone->inactive_list);
-	zone->nr_inactive++;
+	__inc_zone_state(zone, NR_INACTIVE);
 }
 
 static inline void
 del_page_from_active_list(struct zone *zone, struct page *page)
 {
 	list_del(&page->lru);
-	zone->nr_active--;
+	__dec_zone_state(zone, NR_ACTIVE);
 }
 
 static inline void
 del_page_from_inactive_list(struct zone *zone, struct page *page)
 {
 	list_del(&page->lru);
-	zone->nr_inactive--;
+	__dec_zone_state(zone, NR_INACTIVE);
 }
 
 static inline void
@@ -33,9 +32,9 @@ del_page_from_lru(struct zone *zone, struct page *page)
 	list_del(&page->lru);
 	if (PageActive(page)) {
 		__ClearPageActive(page);
-		zone->nr_active--;
+		__dec_zone_state(zone, NR_ACTIVE);
 	} else {
-		zone->nr_inactive--;
+		__dec_zone_state(zone, NR_INACTIVE);
 	}
 }
 
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index b262f47961f..9137d1b9735 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -47,6 +47,8 @@ struct zone_padding {
 #endif
 
 enum zone_stat_item {
+	NR_INACTIVE,
+	NR_ACTIVE,
 	NR_ANON_PAGES,	/* Mapped anonymous pages */
 	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
 			   only modified from process context */
@@ -197,8 +199,6 @@ struct zone {
 	struct list_head	inactive_list;
 	unsigned long		nr_scan_active;
 	unsigned long		nr_scan_inactive;
-	unsigned long		nr_active;
-	unsigned long		nr_inactive;
 	unsigned long		pages_scanned;	   /* since last reclaim */
 	int			all_unreclaimable; /* All pages pinned */
 
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 5e9803ed17f..c8d55bcc09b 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -186,6 +186,9 @@ void inc_zone_page_state(struct page *, enum zone_stat_item);
 void dec_zone_page_state(struct page *, enum zone_stat_item);
 
 extern void inc_zone_state(struct zone *, enum zone_stat_item);
+extern void __inc_zone_state(struct zone *, enum zone_stat_item);
+extern void dec_zone_state(struct zone *, enum zone_stat_item);
+extern void __dec_zone_state(struct zone *, enum zone_stat_item);
 
 void refresh_cpu_vm_stats(int);
 void refresh_vm_stats(void);
@@ -214,6 +217,12 @@ static inline void __inc_zone_page_state(struct page *page,
 	__inc_zone_state(page_zone(page), item);
 }
 
+static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
+{
+	atomic_long_dec(&zone->vm_stat[item]);
+	atomic_long_dec(&vm_stat[item]);
+}
+
 static inline void __dec_zone_page_state(struct page *page,
 			enum zone_stat_item item)
 {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f26fdc94393..07c954e5327 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1616,8 +1616,8 @@ void show_free_areas(void)
 			K(zone->pages_min),
 			K(zone->pages_low),
 			K(zone->pages_high),
-			K(zone->nr_active),
-			K(zone->nr_inactive),
+			K(zone_page_state(zone, NR_ACTIVE)),
+			K(zone_page_state(zone, NR_INACTIVE)),
 			K(zone->present_pages),
 			zone->pages_scanned,
 			(zone->all_unreclaimable ? "yes" : "no")
@@ -2684,8 +2684,6 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 		INIT_LIST_HEAD(&zone->inactive_list);
 		zone->nr_scan_active = 0;
 		zone->nr_scan_inactive = 0;
-		zone->nr_active = 0;
-		zone->nr_inactive = 0;
 		zap_zone_vm_stats(zone);
 		atomic_set(&zone->reclaim_in_progress, 0);
 		if (!size)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7430df68cb6..0655d5fe73e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -679,7 +679,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		nr_taken = isolate_lru_pages(sc->swap_cluster_max,
 			     &zone->inactive_list,
 			     &page_list, &nr_scan);
-		zone->nr_inactive -= nr_taken;
+		__mod_zone_page_state(zone, NR_INACTIVE, -nr_taken);
 		zone->pages_scanned += nr_scan;
 		spin_unlock_irq(&zone->lru_lock);
 
@@ -740,7 +740,8 @@ static inline void note_zone_scanning_priority(struct zone *zone, int priority)
 
 static inline int zone_is_near_oom(struct zone *zone)
 {
-	return zone->pages_scanned >= (zone->nr_active + zone->nr_inactive)*3;
+	return zone->pages_scanned >= (zone_page_state(zone, NR_ACTIVE)
+				+ zone_page_state(zone, NR_INACTIVE))*3;
 }
 
 /*
@@ -825,7 +826,7 @@ force_reclaim_mapped:
 	pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
 			    &l_hold, &pgscanned);
 	zone->pages_scanned += pgscanned;
-	zone->nr_active -= pgmoved;
+	__mod_zone_page_state(zone, NR_ACTIVE, -pgmoved);
 	spin_unlock_irq(&zone->lru_lock);
 
 	while (!list_empty(&l_hold)) {
@@ -857,7 +858,7 @@ force_reclaim_mapped:
 		list_move(&page->lru, &zone->inactive_list);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
-			zone->nr_inactive += pgmoved;
+			__mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
 			spin_unlock_irq(&zone->lru_lock);
 			pgdeactivate += pgmoved;
 			pgmoved = 0;
@@ -867,7 +868,7 @@ force_reclaim_mapped:
 			spin_lock_irq(&zone->lru_lock);
 		}
 	}
-	zone->nr_inactive += pgmoved;
+	__mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
 	pgdeactivate += pgmoved;
 	if (buffer_heads_over_limit) {
 		spin_unlock_irq(&zone->lru_lock);
@@ -885,14 +886,14 @@ force_reclaim_mapped:
 		list_move(&page->lru, &zone->active_list);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
-			zone->nr_active += pgmoved;
+			__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
 			pgmoved = 0;
 			spin_unlock_irq(&zone->lru_lock);
 			__pagevec_release(&pvec);
 			spin_lock_irq(&zone->lru_lock);
 		}
 	}
-	zone->nr_active += pgmoved;
+	__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
 
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
 	__count_vm_events(PGDEACTIVATE, pgdeactivate);
@@ -918,14 +919,16 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
 	 * Add one to `nr_to_scan' just to make sure that the kernel will
 	 * slowly sift through the active list.
 	 */
-	zone->nr_scan_active += (zone->nr_active >> priority) + 1;
+	zone->nr_scan_active +=
+		(zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
 	nr_active = zone->nr_scan_active;
 	if (nr_active >= sc->swap_cluster_max)
 		zone->nr_scan_active = 0;
 	else
 		nr_active = 0;
 
-	zone->nr_scan_inactive += (zone->nr_inactive >> priority) + 1;
+	zone->nr_scan_inactive +=
+		(zone_page_state(zone, NR_INACTIVE) >> priority) + 1;
 	nr_inactive = zone->nr_scan_inactive;
 	if (nr_inactive >= sc->swap_cluster_max)
 		zone->nr_scan_inactive = 0;
@@ -1037,7 +1040,8 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 		if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 			continue;
 
-		lru_pages += zone->nr_active + zone->nr_inactive;
+		lru_pages += zone_page_state(zone, NR_ACTIVE)
+				+ zone_page_state(zone, NR_INACTIVE);
 	}
 
 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
@@ -1182,7 +1186,8 @@ loop_again:
 		for (i = 0; i <= end_zone; i++) {
 			struct zone *zone = pgdat->node_zones + i;
 
-			lru_pages += zone->nr_active + zone->nr_inactive;
+			lru_pages += zone_page_state(zone, NR_ACTIVE)
+				     + zone_page_state(zone, NR_INACTIVE);
 		}
 
 		/*
@@ -1219,8 +1224,9 @@ loop_again:
 			if (zone->all_unreclaimable)
 				continue;
 			if (nr_slab == 0 && zone->pages_scanned >=
-				    (zone->nr_active + zone->nr_inactive) * 6)
-					zone->all_unreclaimable = 1;
+				(zone_page_state(zone, NR_ACTIVE)
+				+ zone_page_state(zone, NR_INACTIVE)) * 6)
+					zone->all_unreclaimable = 1;
 			/*
 			 * If we've done a decent amount of scanning and
 			 * the reclaim ratio is low, start doing writepage
@@ -1385,18 +1391,22 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 
 		/* For pass = 0 we don't shrink the active list */
 		if (pass > 0) {
-			zone->nr_scan_active += (zone->nr_active >> prio) + 1;
+			zone->nr_scan_active +=
+				(zone_page_state(zone, NR_ACTIVE) >> prio) + 1;
 			if (zone->nr_scan_active >= nr_pages || pass > 3) {
 				zone->nr_scan_active = 0;
-				nr_to_scan = min(nr_pages, zone->nr_active);
+				nr_to_scan = min(nr_pages,
+					zone_page_state(zone, NR_ACTIVE));
 				shrink_active_list(nr_to_scan, zone, sc, prio);
 			}
 		}
 
-		zone->nr_scan_inactive += (zone->nr_inactive >> prio) + 1;
+		zone->nr_scan_inactive +=
+			(zone_page_state(zone, NR_INACTIVE) >> prio) + 1;
 		if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
 			zone->nr_scan_inactive = 0;
-			nr_to_scan = min(nr_pages, zone->nr_inactive);
+			nr_to_scan = min(nr_pages,
+				zone_page_state(zone, NR_INACTIVE));
 			ret += shrink_inactive_list(nr_to_scan, zone, sc);
 			if (ret >= nr_pages)
 				return ret;
@@ -1408,12 +1418,7 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 
 static unsigned long count_lru_pages(void)
 {
-	struct zone *zone;
-	unsigned long ret = 0;
-
-	for_each_zone(zone)
-		ret += zone->nr_active + zone->nr_inactive;
-	return ret;
+	return global_page_state(NR_ACTIVE) + global_page_state(NR_INACTIVE);
 }
 
 /*
diff --git a/mm/vmstat.c b/mm/vmstat.c
index bf62a823210..5462106725d 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -19,12 +19,10 @@ void __get_zone_counts(unsigned long *active, unsigned long *inactive,
 	struct zone *zones = pgdat->node_zones;
 	int i;
 
-	*active = 0;
-	*inactive = 0;
+	*active = node_page_state(pgdat->node_id, NR_ACTIVE);
+	*inactive = node_page_state(pgdat->node_id, NR_INACTIVE);
 	*free = 0;
 	for (i = 0; i < MAX_NR_ZONES; i++) {
-		*active += zones[i].nr_active;
-		*inactive += zones[i].nr_inactive;
 		*free += zones[i].free_pages;
 	}
 }
@@ -34,14 +32,12 @@ void get_zone_counts(unsigned long *active,
 {
 	struct pglist_data *pgdat;
 
-	*active = 0;
-	*inactive = 0;
+	*active = global_page_state(NR_ACTIVE);
+	*inactive = global_page_state(NR_INACTIVE);
 	*free = 0;
 	for_each_online_pgdat(pgdat) {
 		unsigned long l, m, n;
 		__get_zone_counts(&l, &m, &n, pgdat);
-		*active += l;
-		*inactive += m;
 		*free += n;
 	}
 }
@@ -239,7 +235,7 @@ EXPORT_SYMBOL(mod_zone_page_state);
  * in between and therefore the atomicity vs. interrupt cannot be exploited
  * in a useful way here.
  */
-static void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
+void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 {
 	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
 	s8 *p = pcp->vm_stat_diff + item;
@@ -260,9 +256,8 @@ void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
 }
 EXPORT_SYMBOL(__inc_zone_page_state);
 
-void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
+void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 {
-	struct zone *zone = page_zone(page);
 	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
 	s8 *p = pcp->vm_stat_diff + item;
 
@@ -275,6 +270,11 @@ void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
 		*p = overstep;
 	}
 }
+
+void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
+{
+	__dec_zone_state(page_zone(page), item);
+}
 EXPORT_SYMBOL(__dec_zone_page_state);
 
 void inc_zone_state(struct zone *zone, enum zone_stat_item item)
@@ -454,6 +454,8 @@ const struct seq_operations fragmentation_op = {
 
 static const char * const vmstat_text[] = {
 	/* Zoned VM counters */
+	"nr_active",
+	"nr_inactive",
 	"nr_anon_pages",
 	"nr_mapped",
 	"nr_file_pages",
@@ -529,8 +531,6 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
 		   "\n        min      %lu"
 		   "\n        low      %lu"
 		   "\n        high     %lu"
-		   "\n        active   %lu"
-		   "\n        inactive %lu"
 		   "\n        scanned  %lu (a: %lu i: %lu)"
 		   "\n        spanned  %lu"
 		   "\n        present  %lu",
@@ -538,8 +538,6 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
 		   zone->pages_min,
 		   zone->pages_low,
 		   zone->pages_high,
-		   zone->nr_active,
-		   zone->nr_inactive,
 		   zone->pages_scanned,
 		   zone->nr_scan_active, zone->nr_scan_inactive,
 		   zone->spanned_pages,
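On the update side the patch relies on the __-prefixed ZVC helpers, which batch changes in a per-cpu s8 differential (pcp->vm_stat_diff) and fold them into the atomic vm_stat counters only when a threshold is overstepped; as the comment retained in mm/vmstat.c notes, they assume the caller is serialized against preemption and interrupts, which the LRU paths ensure by holding zone->lru_lock. A usage sketch under that assumption (move_page_to_active() is a hypothetical helper, not part of the patch):

#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/vmstat.h>

/*
 * Hypothetical example: promote a page from the inactive to the active
 * list. Caller must hold zone->lru_lock, matching the locking that the
 * patched helpers in mm_inline.h expect.
 */
static void move_page_to_active(struct zone *zone, struct page *page)
{
	list_move(&page->lru, &zone->active_list);	/* off inactive, onto active */
	SetPageActive(page);
	__inc_zone_state(zone, NR_ACTIVE);		/* per-cpu diff, cheap */
	__dec_zone_state(zone, NR_INACTIVE);
}

Keeping the hot path as a per-cpu differential is what makes it affordable to touch these counters on every LRU movement while still giving readers a cheap, approximately current global value.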