author		Anton Blanchard <anton@samba.org>	2010-05-10 18:59:18 +0000
committer	Greg Kroah-Hartman <gregkh@suse.de>	2011-03-21 12:45:31 -0700
commit		8f52e5db24ab6b73f0a33e5748b9462be650e7c9 (patch)
tree		cd25cd718d1c1c584469cb0441b5164591dbd6ae
parent		1ed9c543aa80a4e42b61ddd02b2a66986660165f (diff)
powerpc: Use more accurate limit for first segment memory allocations
commit 095c7965f4dc870ed2b65143b1e2610de653416c upstream.

Author: Milton Miller <miltonm@bga.com>

On large machines we are running out of room below 256MB. In some
cases we only need to ensure the allocation is in the first segment,
which may be 256MB or 1TB.

Add slb0_limit and use it to specify the upper limit for the irqstack
and emergency stacks.

On a large ppc64 box, this fixes a panic at boot when the crashkernel=
option is specified (previously we would run out of memory below
256MB).

Signed-off-by: Milton Miller <miltonm@bga.com>
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Kamalesh Babulal <kamalesh@linux.vnet.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-rw-r--r--	arch/powerpc/kernel/setup_64.c	17
1 file changed, 13 insertions, 4 deletions
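
As a rough illustration of the limit selection this patch introduces, here is a
minimal, standalone user-space C sketch (not kernel code). SID_SHIFT (28) and
SID_SHIFT_1T (40) match the powerpc segment sizes of 256MB and 1TB; the
has_1t_segments flag is a made-up stand-in for
cpu_has_feature(CPU_FTR_1T_SEGMENT), and rmo_size is an arbitrary example value
standing in for lmb.rmo_size.

/*
 * Sketch of the first-segment limit selection added by this patch.
 * Assumptions: has_1t_segments replaces cpu_has_feature(CPU_FTR_1T_SEGMENT);
 * rmo_size is an example value in place of lmb.rmo_size.
 */
#include <stdint.h>
#include <stdio.h>

#define SID_SHIFT	28	/* 256MB segments */
#define SID_SHIFT_1T	40	/* 1TB segments */

static int has_1t_segments = 1;	/* pretend the CPU supports 1TB segments */

/* Upper bound that keeps an allocation within the first SLB segment. */
static uint64_t slb0_limit(void)
{
	if (has_1t_segments)
		return 1ULL << SID_SHIFT_1T;
	return 1ULL << SID_SHIFT;
}

int main(void)
{
	uint64_t rmo_size = 0x30000000ULL;	/* example RMO size (768MB) */
	uint64_t irqstack_limit = slb0_limit();
	/* Emergency stacks must also stay inside the RMO region. */
	uint64_t emergency_limit =
		irqstack_limit < rmo_size ? irqstack_limit : rmo_size;

	printf("irq stack limit:       0x%llx\n",
	       (unsigned long long)irqstack_limit);
	printf("emergency stack limit: 0x%llx\n",
	       (unsigned long long)emergency_limit);
	return 0;
}

With 1TB segments the limit is no longer hard-coded to 0x10000000 (256MB), so
the stacks can be placed anywhere in the first segment, which is what avoids
running out of low memory when crashkernel= reserves a large region. The
actual kernel change is in the diff below.
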
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 6568406b2a30..21e529e57c56 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -427,9 +427,18 @@ void __init setup_system(void)
DBG(" <- setup_system()\n");
}
+static u64 slb0_limit(void)
+{
+ if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
+ return 1UL << SID_SHIFT_1T;
+ }
+ return 1UL << SID_SHIFT;
+}
+
#ifdef CONFIG_IRQSTACKS
static void __init irqstack_early_init(void)
{
+ u64 limit = slb0_limit();
unsigned int i;
/*
@@ -439,10 +448,10 @@ static void __init irqstack_early_init(void)
for_each_possible_cpu(i) {
softirq_ctx[i] = (struct thread_info *)
__va(lmb_alloc_base(THREAD_SIZE,
- THREAD_SIZE, 0x10000000));
+ THREAD_SIZE, limit));
hardirq_ctx[i] = (struct thread_info *)
__va(lmb_alloc_base(THREAD_SIZE,
- THREAD_SIZE, 0x10000000));
+ THREAD_SIZE, limit));
}
}
#else
@@ -473,7 +482,7 @@ static void __init exc_lvl_early_init(void)
*/
static void __init emergency_stack_init(void)
{
- unsigned long limit;
+ u64 limit;
unsigned int i;
/*
@@ -485,7 +494,7 @@ static void __init emergency_stack_init(void)
* bringup, we need to get at them in real mode. This means they
* must also be within the RMO region.
*/
- limit = min(0x10000000ULL, lmb.rmo_size);
+ limit = min(slb0_limit(), lmb.rmo_size);
for_each_possible_cpu(i) {
unsigned long sp;