author     Johannes Weiner <hannes@cmpxchg.org>          2013-04-29 15:07:56 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-04-29 15:54:35 -0700
commit     8e2cdbcb86b0abefc3d07922c48edb01fece3c56 (patch)
tree       4d666e779a1c6954313ea50abcadae20bb8ff230 /arch/x86/mm
parent     e8216da5c719c3bfec12779b6faf456009f01c44 (diff)
x86-64: fall back to regular page vmemmap on allocation failure
Memory hotplug can happen on a machine under load, with memory shortage
and fragmentation, so huge page allocations for the vmemmap are not
guaranteed to succeed.  Try to fall back to regular pages before failing
the hotplug event completely.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Bernhard Schmidt <Bernhard.Schmidt@lrz.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: David Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
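The change follows a common allocation pattern: attempt the large,
physically contiguous backing first, and degrade to regular pages rather
than failing the whole operation.  As an illustration only, here is a
minimal user-space sketch of that pattern using mmap(2); it is an analogy,
not the kernel code, and HUGE_SIZE merely stands in for PMD_SIZE:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define HUGE_SIZE (2UL << 20)   /* stand-in for PMD_SIZE (2 MiB) */

static void *map_backing(void)
{
        /* Preferred path: one 2 MiB huge-page mapping. */
        void *p = mmap(NULL, HUGE_SIZE, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
        if (p != MAP_FAILED)
                return p;

        /*
         * Fallback path: back the same range with regular 4 KiB pages,
         * mirroring the patch's vmemmap_populate_basepages() fallback
         * instead of returning -ENOMEM on the first failure.
         */
        fprintf(stderr, "falling back to regular page backing\n");
        return mmap(NULL, HUGE_SIZE, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

int main(void)
{
        void *p = map_backing();

        if (p == MAP_FAILED)
                return 1;
        memset(p, 0, HUGE_SIZE);        /* touch the range to prove it is usable */
        munmap(p, HUGE_SIZE);
        return 0;
}

On a system without preallocated hugetlb pages the MAP_HUGETLB request
typically fails, which exercises exactly the fallback branch this patch
adds for the vmemmap.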
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--   arch/x86/mm/init_64.c   46
1 file changed, 26 insertions(+), 20 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 9f6347c468b..71ff55a1b28 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1303,31 +1303,37 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start,
 
                 pmd = pmd_offset(pud, addr);
                 if (pmd_none(*pmd)) {
-                        pte_t entry;
                         void *p;
 
                         p = vmemmap_alloc_block_buf(PMD_SIZE, node);
-                        if (!p)
-                                return -ENOMEM;
-
-                        entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
-                                        PAGE_KERNEL_LARGE);
-                        set_pmd(pmd, __pmd(pte_val(entry)));
-
-                        /* check to see if we have contiguous blocks */
-                        if (p_end != p || node_start != node) {
-                                if (p_start)
-                                        printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
-                                               addr_start, addr_end-1, p_start, p_end-1, node_start);
-                                addr_start = addr;
-                                node_start = node;
-                                p_start = p;
-                        }
+                        if (p) {
+                                pte_t entry;
+
+                                entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
+                                                PAGE_KERNEL_LARGE);
+                                set_pmd(pmd, __pmd(pte_val(entry)));
+
+                                /* check to see if we have contiguous blocks */
+                                if (p_end != p || node_start != node) {
+                                        if (p_start)
+                                                printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
+                                                       addr_start, addr_end-1, p_start, p_end-1, node_start);
+                                        addr_start = addr;
+                                        node_start = node;
+                                        p_start = p;
+                                }
 
-                        addr_end = addr + PMD_SIZE;
-                        p_end = p + PMD_SIZE;
-                } else
+                                addr_end = addr + PMD_SIZE;
+                                p_end = p + PMD_SIZE;
+                                continue;
+                        }
+                } else if (pmd_large(*pmd)) {
                         vmemmap_verify((pte_t *)pmd, node, addr, next);
+                        continue;
+                }
+                pr_warn_once("vmemmap: falling back to regular page backing\n");
+                if (vmemmap_populate_basepages(addr, next, node))
+                        return -ENOMEM;
         }
         return 0;
 }