author    Tejun Heo <tj@kernel.org>  2009-06-22 11:56:24 +0900
committer Tejun Heo <tj@kernel.org>  2009-06-22 11:56:24 +0900
commit    0017c869ddcb73069905d09f9e98e68627466237 (patch)
tree      380248938561c53ff174ffba72f22cbc3e8a5438
parent    fa8a7094ba1679b4b9b443e0ac9f5e046c79ee8d (diff)
x86: ensure percpu lpage doesn't consume too much vmalloc space
On extreme configurations (e.g. a 32bit 32-way NUMA machine), the lpage percpu first chunk allocator can consume too much of the vmalloc space.  Make it fall back to the 4k allocator if the consumption goes over 20%.

[ Impact: add sanity check for lpage percpu first chunk allocator ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Jan Beulich <JBeulich@novell.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ingo Molnar <mingo@elte.hu>
 arch/x86/kernel/setup_percpu.c | 18 ++++++++++++++----
 1 file changed, 15 insertions(+), 3 deletions(-)
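The check added by this patch is simple arithmetic: the lpage first chunk reserves PMD_SIZE of vmalloc address space per possible CPU, which on a 32-bit many-way box can already eat a large share of the default vmalloc area. Below is a minimal user-space sketch of that comparison; the constants (2MB PMD_SIZE under PAE, a 128MB vmalloc area, 32 possible CPUs) are illustrative assumptions, not values read from a real kernel.

/*
 * Sketch of the 20% vmalloc sanity check added by this patch.
 * All sizes below are assumed example values for illustration.
 */
#include <stdio.h>
#include <stddef.h>

#define MB               (1024UL * 1024UL)
#define PMD_SIZE         (2 * MB)       /* assumed: 2MB large pages (PAE) */
#define VMALLOC_SIZE     (128 * MB)     /* assumed: default 32-bit vmalloc area */
#define NR_CPUS_POSSIBLE 32             /* assumed: 32-way machine */

int main(void)
{
	size_t vm_size  = VMALLOC_SIZE;
	size_t tot_size = (size_t)NR_CPUS_POSSIBLE * PMD_SIZE;

	/* same comparison as the patch: reject above 20% of vmalloc */
	if (tot_size > vm_size / 5) {
		printf("too large chunk size %zuMB for large page remap\n",
		       tot_size >> 20);
		return 1;   /* the kernel would fall back to the 4k allocator */
	}
	printf("lpage first chunk OK: %zuMB of %zuMB vmalloc\n",
	       tot_size >> 20, vm_size >> 20);
	return 0;
}

With these assumed numbers, 32 * 2MB = 64MB, i.e. 50% of a 128MB vmalloc area and well over the 20% cutoff, so setup_pcpu_lpage() returns -EINVAL and percpu setup falls back to the 4k allocator.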
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 165ebd5ba83..29a3eef7cf4 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -163,9 +163,21 @@ static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
 	int i, j;
 	ssize_t ret;
 
-	/* on non-NUMA, embedding is better */
-	if (!chosen && !pcpu_need_numa())
-		return -EINVAL;
+	if (!chosen) {
+		size_t vm_size = VMALLOC_END - VMALLOC_START;
+		size_t tot_size = num_possible_cpus() * PMD_SIZE;
+
+		/* on non-NUMA, embedding is better */
+		if (!pcpu_need_numa())
+			return -EINVAL;
+
+		/* don't consume more than 20% of vmalloc area */
+		if (tot_size > vm_size / 5) {
+			pr_info("PERCPU: too large chunk size %zuMB for "
+				"large page remap\n", tot_size >> 20);
+			return -EINVAL;
+		}
+	}
 
 	/* need PSE */
 	if (!cpu_has_pse) {