author		KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>	2011-05-24 17:11:32 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-25 08:39:09 -0700
commit		1b79acc91115ba47e744b70bb166b77bd94f5855 (patch)
tree		9097834522de3840845368312c09b5ad4a98e5e5 /mm
parent		839a4fcc8af7412be2efd11f0bd0504757f79f08 (diff)
mm, mem-hotplug: recalculate lowmem_reserve when memory hotplug occurs
Currently, memory hotplug calls setup_per_zone_wmarks() and calculate_zone_inactive_ratio(), but does not call setup_per_zone_lowmem_reserve(). This means the number of reserved pages is not updated when memory hotplug occurs. This patch fixes it by having both the online and offline paths call init_per_zone_wmark_min(), which recalculates the watermarks, the inactive ratio, and the lowmem reserves together.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
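For reference, a minimal sketch of the consolidated helper both hotplug paths now call (simplified from the mm/page_alloc.c of this era; the exact mainline body may differ slightly):

	int __meminit init_per_zone_wmark_min(void)
	{
		unsigned long lowmem_kbytes;

		lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);

		/* min_free_kbytes scales as sqrt(16 * lowmem), clamped to [128kB, 64MB] */
		min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
		if (min_free_kbytes < 128)
			min_free_kbytes = 128;
		if (min_free_kbytes > 65536)
			min_free_kbytes = 65536;

		setup_per_zone_wmarks();		/* per-zone min/low/high watermarks */
		setup_per_zone_lowmem_reserve();	/* the call hotplug was missing */
		setup_per_zone_inactive_ratio();	/* active:inactive anon list target */
		return 0;
	}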
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory_hotplug.c	9
-rw-r--r--	mm/page_alloc.c		4
2 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 2c4edc459fb..59ac18fefd6 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -459,8 +459,9 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
 		zone_pcp_update(zone);
 	mutex_unlock(&zonelists_mutex);
-	setup_per_zone_wmarks();
-	calculate_zone_inactive_ratio(zone);
+
+	init_per_zone_wmark_min();
+
 	if (onlined_pages) {
 		kswapd_run(zone_to_nid(zone));
 		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
@@ -893,8 +894,8 @@ repeat:
 	zone->zone_pgdat->node_present_pages -= offlined_pages;
 	totalram_pages -= offlined_pages;
-	setup_per_zone_wmarks();
-	calculate_zone_inactive_ratio(zone);
+	init_per_zone_wmark_min();
+
 	if (!node_present_pages(node)) {
 		node_clear_state(node, N_HIGH_MEMORY);
 		kswapd_stop(node);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 56d0be36be9..e133cea3693 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5094,7 +5094,7 @@ void setup_per_zone_wmarks(void)
  *    1TB     101        10GB
  *   10TB     320        32GB
  */
-void __meminit calculate_zone_inactive_ratio(struct zone *zone)
+static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
 {
 	unsigned int gb, ratio;
@@ -5140,7 +5140,7 @@ static void __meminit setup_per_zone_inactive_ratio(void)
  *  8192MB:	11584k
  * 16384MB:	16384k
  */
-static int __init init_per_zone_wmark_min(void)
+int __meminit init_per_zone_wmark_min(void)
 {
 	unsigned long lowmem_kbytes;
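As a sanity check on the comment table above init_per_zone_wmark_min() (assuming the min_free_kbytes formula sketched earlier): with 16384MB of lowmem, lowmem_kbytes = 16384 * 1024 = 16777216, and int_sqrt(16777216 * 16) = int_sqrt(268435456) = 16384, matching the "16384MB: 16384k" row. The table values are rounded; 8192MB gives int_sqrt(134217728) = 11585, listed as 11584k.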