Diffstat (limited to 'arch/powerpc/mm')

 arch/powerpc/mm/hash_low_64.S   |  6 ++++--
 arch/powerpc/mm/hash_utils_64.c |  2 +-
 arch/powerpc/mm/numa.c          |  4 ++--
 arch/powerpc/mm/slb.c           | 28 ++++++++++++++++++----------
 4 files changed, 25 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 4762ff7c14d..35eabfb5072 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -472,10 +472,12 @@ _GLOBAL(htab_call_hpte_insert1)
/* Now try secondary slot */
/* real page number in r5, PTE RPN value + index */
- rldicl r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
+ andis. r0,r31,_PAGE_4K_PFN@h
+ srdi r5,r31,PTE_RPN_SHIFT
+ bne- 3f
sldi r5,r5,PAGE_SHIFT-HW_PAGE_SHIFT
add r5,r5,r25
- sldi r5,r5,HW_PAGE_SHIFT
+3: sldi r5,r5,HW_PAGE_SHIFT
/* Calculate secondary group hash */
andc r0,r27,r28
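In the fixed sequence above, _PAGE_4K_PFN is tested first: when it is set, the RPN held in the PTE is already a 4K hardware-page frame number, so the scale-by-(PAGE_SHIFT - HW_PAGE_SHIFT) and the add of the subpage index in r25 are branched around, and only the final shift by HW_PAGE_SHIFT is applied. A minimal C sketch of the same computation, assuming the kernel's PTE_RPN_SHIFT, PAGE_SHIFT, HW_PAGE_SHIFT and _PAGE_4K_PFN definitions; hpte_real_addr and subpage are invented names for illustration only:

	/* Sketch only: mirrors the branch structure of the assembly above. */
	static unsigned long hpte_real_addr(unsigned long pte, unsigned long subpage)
	{
		unsigned long rpn = pte >> PTE_RPN_SHIFT;	/* srdi r5,r31,... */

		if (!(pte & _PAGE_4K_PFN)) {
			/* RPN counts 64K pages: scale to 4K units, add subpage */
			rpn <<= PAGE_SHIFT - HW_PAGE_SHIFT;	/* sldi */
			rpn += subpage;				/* add r5,r5,r25 */
		}
		/* with _PAGE_4K_PFN the RPN is already in 4K units */
		return rpn << HW_PAGE_SHIFT;			/* 3: sldi */
	}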
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index bc7b0cedae5..f1789578747 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -759,7 +759,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
mmu_psize_defs[mmu_vmalloc_psize].sllp) {
get_paca()->vmalloc_sllp =
mmu_psize_defs[mmu_vmalloc_psize].sllp;
- slb_flush_and_rebolt();
+ slb_vmalloc_update();
}
#endif /* CONFIG_PPC_64K_PAGES */
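The one-line change above pairs with the new slb_vmalloc_update() helper introduced in the slb.c hunk below: when hash_page() notices that the vmalloc segment's page-size (sllp) flags have changed, it now refreshes the SLB shadow entry for the vmalloc region as well, rather than only flushing and rebolting the live SLB.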
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index de45aa82d97..c12adc3ddff 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -307,9 +307,9 @@ static void __init parse_drconf_memory(struct device_node *memory)
const unsigned int *lm, *dm, *aa;
unsigned int ls, ld, la;
unsigned int n, aam, aalen;
- unsigned long lmb_size, size;
+ unsigned long lmb_size, size, start;
int nid, default_nid = 0;
- unsigned int start, ai, flags;
+ unsigned int ai, flags;
lm = of_get_property(memory, "ibm,lmb-size", &ls);
dm = of_get_property(memory, "ibm,dynamic-memory", &ld);
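Here start moves from unsigned int to unsigned long: an LMB start address is a full 64-bit quantity on ppc64, and a 32-bit variable silently truncates it for memory at or above 4GB. A two-line illustration with an invented value:

	unsigned int  bad  = 0x100000000UL;	/* LMB at 4GB: truncates to 0 */
	unsigned long good = 0x100000000UL;	/* full address preserved */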
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 304375a7357..b0697017d0e 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -53,7 +53,8 @@ static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
}

-static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
+static inline void slb_shadow_update(unsigned long ea,
+ unsigned long flags,
unsigned long entry)
{
/*
@@ -61,11 +62,11 @@ static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
* updating it.
*/
get_slb_shadow()->save_area[entry].esid = 0;
- barrier();
- get_slb_shadow()->save_area[entry].vsid = vsid;
- barrier();
- get_slb_shadow()->save_area[entry].esid = esid;
-
+ smp_wmb();
+ get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
+ smp_wmb();
+ get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
+ smp_wmb();
}

static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
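The rewrite above turns slb_shadow_update() into the classic invalidate/update/revalidate publish pattern, taking the effective address and flags and deriving the ESID/VSID itself: clear the valid ESID, write the new VSID, then rewrite the ESID, with an smp_wmb() after each store so the hypervisor, which may read the shadow buffer at any time, never sees a valid entry paired with a stale VSID. A generic sketch of the same ordering, with invented names (struct shadow, shadow_publish); kernel context assumed, smp_wmb() as in <asm/barrier.h>:

	/* Sketch only: the ordering discipline, not the kernel's structures. */
	struct shadow {
		unsigned long esid;	/* the valid bit lives in the ESID word */
		unsigned long vsid;
	};

	static void shadow_publish(struct shadow *s,
				   unsigned long esid, unsigned long vsid)
	{
		s->esid = 0;		/* 1: invalidate the entry */
		smp_wmb();		/* order before the payload write */
		s->vsid = vsid;		/* 2: install the new payload */
		smp_wmb();		/* order before revalidation */
		s->esid = esid;		/* 3: make the entry valid again */
		smp_wmb();		/* order against later SLB updates */
	}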
@@ -76,8 +77,7 @@ static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
* we don't get a stale entry here if we get preempted by PHYP
* between these two statements.
*/
- slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags),
- entry);
+ slb_shadow_update(ea, flags, entry);
asm volatile("slbmte %0,%1" :
: "r" (mk_vsid_data(ea, flags)),
@@ -104,8 +104,7 @@ void slb_flush_and_rebolt(void)
ksp_esid_data &= ~SLB_ESID_V;
/* Only third entry (stack) may change here so only resave that */
- slb_shadow_update(ksp_esid_data,
- mk_vsid_data(ksp_esid_data, lflags), 2);
+ slb_shadow_update(get_paca()->kstack, lflags, 2);
/* We need to do this all in asm, so we're sure we don't touch
* the stack between the slbia and rebolting it. */
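With slb_shadow_update() now taking a virtual address, the stack entry (bolted entry 2) is resaved directly from get_paca()->kstack rather than from the precomputed ksp_esid_data, and the helper derives the ESID and VSID itself.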
@@ -123,6 +122,15 @@ void slb_flush_and_rebolt(void)
: "memory");
}

+void slb_vmalloc_update(void)
+{
+ unsigned long vflags;
+
+ vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
+ slb_shadow_update(VMALLOC_START, vflags, 1);
+ slb_flush_and_rebolt();
+}
+
/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
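The new slb_vmalloc_update() first refreshes bolted shadow entry 1, the VMALLOC_START segment, with the current vmalloc sllp flags, and only then calls slb_flush_and_rebolt() so the live SLB is rebuilt to match the shadow; this is the helper hash_page() now calls when the vmalloc page size changes.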