author     Olof Johansson <olof@lixom.net>     2007-10-16 00:58:59 +1000
committer  Paul Mackerras <paulus@samba.org>   2007-10-17 22:30:09 +1000
commit     f66bce5e6aa1388289c04496c3fcae7bebf5f905 (patch)
tree       7e788739a51947f1caff47f9b5226cad739e3805
parent     8129535b6bcf40be62af2ae6b9234494f39725dd (diff)
[POWERPC] Add 1TB workaround for PA6T
PA6T has a bug where the slbie instruction does not honor the large
segment bit.  As a result, we have to always use slbia when switching
context.

We don't have to worry about changing the slbie's during fault processing,
since they should never be replacing one VSID with another using the
same ESID.  I.e. there's no risk of inserting duplicate entries due to a
failed slbie of the old entry.  So as long as we clear it out on context
switch we should be fine.

Signed-off-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r--  arch/powerpc/kernel/entry_64.S    6
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c   1
-rw-r--r--  arch/powerpc/mm/slb.c             3
-rw-r--r--  include/asm-powerpc/cputable.h    3
4 files changed, 11 insertions, 2 deletions
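For readers skimming the patch, the effect of the slb.c hunk below can be summarized in a short C sketch.  This is illustrative only, not the kernel source: cpu_has_feature(), SLB_CACHE_ENTRIES and get_paca()->slb_cache_ptr are taken from the surrounding kernel code, while switch_slb_sketch() and invalidate_cached_user_slbs() are hypothetical names standing in for the real switch_slb() body.

	/*
	 * Minimal sketch of the workaround: when CPU_FTR_NO_SLBIE_B is set
	 * (PA6T), the slbie-based fast path is skipped, so context switch
	 * falls back to the existing slbia path and flushes the whole SLB.
	 */
	static void switch_slb_sketch(void)
	{
		unsigned long offset = get_paca()->slb_cache_ptr;

		if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
		    offset <= SLB_CACHE_ENTRIES) {
			/* Invalidate only the cached user entries with slbie. */
			invalidate_cached_user_slbs(offset);	/* hypothetical helper */
		} else {
			/* PA6T, or the SLB cache overflowed: wipe the entire SLB. */
			asm volatile("isync; slbia; isync" : : : "memory");
		}

		/* ... kernel, stack and user segments are then re-entered as before ... */
	}

The hash_utils_64.c hunk complements this: if the device tree reports 1TB segment support, the feature bit is cleared again, since (per the comment added in entry_64.S) no affected CPU supports 1TB segments.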
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 0ec13403489..148a3547c9a 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -408,6 +408,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */
std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */
+ /* No need to check for CPU_FTR_NO_SLBIE_B here, since when
+ * we have 1TB segments, the only CPUs known to have the errata
+ * only support less than 1TB of system memory and we'll never
+ * actually hit this code path.
+ */
+
slbie r6
slbie r6 /* Workaround POWER5 < DD2.1 issue */
slbmte r7,r0
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 09da90b5385..c78dc912411 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -212,6 +212,7 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
return 1;
}
}
+ cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B;
return 0;
}
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 6c164cec9d2..bbd2c512ee0 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -157,7 +157,8 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
unsigned long stack = KSTK_ESP(tsk);
unsigned long unmapped_base;
- if (offset <= SLB_CACHE_ENTRIES) {
+ if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
+ offset <= SLB_CACHE_ENTRIES) {
int i;
asm volatile("isync" : : : "memory");
for (i = 0; i < offset; i++) {
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index ae093ef6836..9d74338e3de 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -165,6 +165,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
#define CPU_FTR_SPURR LONG_ASM_CONST(0x0001000000000000)
#define CPU_FTR_DSCR LONG_ASM_CONST(0x0002000000000000)
#define CPU_FTR_1T_SEGMENT LONG_ASM_CONST(0x0004000000000000)
+#define CPU_FTR_NO_SLBIE_B LONG_ASM_CONST(0x0008000000000000)
#ifndef __ASSEMBLY__
@@ -367,7 +368,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | \
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \
- CPU_FTR_PURR | CPU_FTR_REAL_LE)
+ CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B)
#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | \
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2)