author	Ben Hutchings <ben@decadent.org.uk>	2013-04-29 15:07:49 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-04-29 15:54:35 -0700
commit	055e4fd96e95b0eee0d92fd54a26be7f0d3bcad0 (patch)
tree	48f7e2d9a829daf6fef897e7d34dfa01f8cf83b3 /mm/sparse-vmemmap.c
parent	949f7ec5760b021da3cccc1eaeb0671270e4238f (diff)
mm: try harder to allocate vmemmap blocks
Hot-adding memory on x86_64 normally requires huge page allocation.
When this is done to a VM guest, it's usually because the system is
already tight on memory, so the request tends to fail. Try to avoid
this by adding __GFP_REPEAT to the allocation flags.

Addresses http://bugs.debian.org/699913

Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reported-by: Bernhard Schmidt <Bernhard.Schmidt@lrz.de>
Tested-by: Bernhard Schmidt <Bernhard.Schmidt@lrz.de>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: David Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
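For context, a minimal sketch of the technique the patch applies: __GFP_REPEAT
asks the page allocator to retry a costly higher-order allocation more
persistently before giving up, while still allowing the attempt to fail. The
helper name alloc_retrying_block below is hypothetical and not part of the
patch; the flags match the patched code in the diff.

    /* Sketch only: illustrates __GFP_REPEAT on a higher-order allocation.
     * alloc_retrying_block() is a hypothetical helper, not from this patch. */
    #include <linux/gfp.h>
    #include <linux/mm.h>

    static void * __meminit alloc_retrying_block(unsigned long size, int node)
    {
    	struct page *page;

    	/* __GFP_REPEAT: retry harder, but the allocation may still fail. */
    	page = alloc_pages_node(node,
    				GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
    				get_order(size));
    	return page ? page_address(page) : NULL;
    }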
Diffstat (limited to 'mm/sparse-vmemmap.c')
-rw-r--r--	mm/sparse-vmemmap.c	8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 1b7e22ab9b09..22b7e18e9dea 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -53,10 +53,12 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 	struct page *page;
 
 	if (node_state(node, N_HIGH_MEMORY))
-		page = alloc_pages_node(node,
-			GFP_KERNEL | __GFP_ZERO, get_order(size));
+		page = alloc_pages_node(
+			node, GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
+			get_order(size));
 	else
-		page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
+		page = alloc_pages(
+			GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
 			get_order(size));
 	if (page)
 		return page_address(page);