Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/abort-ev6.S       | 17
-rw-r--r--  arch/arm/mm/cache-l2x0.c      | 25
-rw-r--r--  arch/arm/mm/context.c         | 57
-rw-r--r--  arch/arm/mm/fault.c           |  4
-rw-r--r--  arch/arm/mm/init.c            |  4
-rw-r--r--  arch/arm/mm/mmu.c             |  7
-rw-r--r--  arch/arm/mm/proc-v7-2level.S  |  9
7 files changed, 64 insertions, 59 deletions
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index ff1f7cc11f8..80741992a9f 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -26,18 +26,23 @@ ENTRY(v6_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
/*
- * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR (erratum 326103).
- * The test below covers all the write situations, including Java bytecodes
+ * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR.
*/
- bic r1, r1, #1 << 11 @ clear bit 11 of FSR
+#ifdef CONFIG_ARM_ERRATA_326103
+ ldr ip, =0x4107b36
+ mrc p15, 0, r3, c0, c0, 0 @ get processor id
+ teq ip, r3, lsr #4 @ r0 ARM1136?
+ bne do_DataAbort
tst r5, #PSR_J_BIT @ Java?
+ tsteq r5, #PSR_T_BIT @ Thumb?
bne do_DataAbort
- do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
- ldreq r3, [r4] @ read aborted ARM instruction
+ bic r1, r1, #1 << 11 @ clear bit 11 of FSR
+ ldr r3, [r4] @ read aborted ARM instruction
#ifdef CONFIG_CPU_ENDIAN_BE8
- reveq r3, r3
+ rev r3, r3
#endif
do_ldrd_abort tmp=ip, insn=r3
tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes.
+#endif
b do_DataAbort
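
Note on the hunk above: DFSR bit 11 normally flags a write abort, but a faulting SWP on ARM1136 (erratum 326103) leaves it clear, so the handler reads the aborted instruction and derives the flag from its L bit. The workaround only runs when the CPU ID matches ARM1136 and the aborting mode is neither Jazelle nor Thumb, as the teq/tst tests show. A minimal C sketch of the fixup, illustrative only and not part of the patch (the constants mirror the bit tests in the assembly):

#include <stdint.h>

#define FSR_WRITE   (1u << 11)   /* DFSR bit 11: abort caused by a write */
#define INSN_L_BIT  (1u << 20)   /* load/store bit: L = 0 means a store  */

/* mirrors: bic r1, #1<<11 ... tst r3, #1<<20 ... orreq r1, #1<<11 */
static uint32_t fixup_swp_fsr(uint32_t fsr, uint32_t aborted_insn)
{
	fsr &= ~FSR_WRITE;                 /* clear the unreliable write flag */
	if (!(aborted_insn & INSN_L_BIT))
		fsr |= FSR_WRITE;          /* L = 0 -> the access was a write */
	return fsr;
}
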
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index a53fd2aaa2f..2a8e380501e 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -32,6 +32,7 @@ static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask; /* Bitmask of active ways */
static u32 l2x0_size;
+static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
struct l2x0_regs l2x0_saved_regs;
@@ -61,12 +62,7 @@ static inline void cache_sync(void)
{
void __iomem *base = l2x0_base;
-#ifdef CONFIG_PL310_ERRATA_753970
- /* write to an unmmapped register */
- writel_relaxed(0, base + L2X0_DUMMY_REG);
-#else
- writel_relaxed(0, base + L2X0_CACHE_SYNC);
-#endif
+ writel_relaxed(0, base + sync_reg_offset);
cache_wait(base + L2X0_CACHE_SYNC, 1);
}
@@ -85,10 +81,13 @@ static inline void l2x0_inv_line(unsigned long addr)
}
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
+static inline void debug_writel(unsigned long val)
+{
+ if (outer_cache.set_debug)
+ outer_cache.set_debug(val);
+}
-#define debug_writel(val) outer_cache.set_debug(val)
-
-static void l2x0_set_debug(unsigned long val)
+static void pl310_set_debug(unsigned long val)
{
writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
@@ -98,7 +97,7 @@ static inline void debug_writel(unsigned long val)
{
}
-#define l2x0_set_debug NULL
+#define pl310_set_debug NULL
#endif
#ifdef CONFIG_PL310_ERRATA_588369
@@ -331,6 +330,11 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
else
ways = 8;
type = "L310";
+#ifdef CONFIG_PL310_ERRATA_753970
+ /* Unmapped register. */
+ sync_reg_offset = L2X0_DUMMY_REG;
+#endif
+ outer_cache.set_debug = pl310_set_debug;
break;
case L2X0_CACHE_ID_PART_L210:
ways = (aux >> 13) & 0xf;
@@ -379,7 +383,6 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
outer_cache.flush_all = l2x0_flush_all;
outer_cache.inv_all = l2x0_inv_all;
outer_cache.disable = l2x0_disable;
- outer_cache.set_debug = l2x0_set_debug;
printk(KERN_INFO "%s cache controller enabled\n", type);
printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
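
Taken together, the cache-l2x0.c hunks turn two compile-time #ifdef workarounds into run-time choices made in l2x0_init() once the controller is known to be a PL310: the cache-sync write is redirected to L2X0_DUMMY_REG only for erratum 753970, and the debug hook is installed only for PL310 parts. A hedged, abbreviated sketch of the resulting pattern (identifiers as in the patch):

static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;   /* default sync register */

static inline void cache_sync(void)
{
	/* l2x0_init() points this at L2X0_DUMMY_REG on PL310 + erratum 753970 */
	writel_relaxed(0, l2x0_base + sync_reg_offset);
	cache_wait(l2x0_base + L2X0_CACHE_SYNC, 1);
}

static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)         /* set to pl310_set_debug only for PL310 */
		outer_cache.set_debug(val);
}
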
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index ee9bb363d60..806cc4f6351 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -18,30 +18,39 @@
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;
-#ifdef CONFIG_SMP
-DEFINE_PER_CPU(struct mm_struct *, current_mm);
-#endif
#ifdef CONFIG_ARM_LPAE
-#define cpu_set_asid(asid) { \
- unsigned long ttbl, ttbh; \
- asm volatile( \
- " mrrc p15, 0, %0, %1, c2 @ read TTBR0\n" \
- " mov %1, %2, lsl #(48 - 32) @ set ASID\n" \
- " mcrr p15, 0, %0, %1, c2 @ set TTBR0\n" \
- : "=&r" (ttbl), "=&r" (ttbh) \
- : "r" (asid & ~ASID_MASK)); \
+void cpu_set_reserved_ttbr0(void)
+{
+ unsigned long ttbl = __pa(swapper_pg_dir);
+ unsigned long ttbh = 0;
+
+ /*
+ * Set TTBR0 to swapper_pg_dir which contains only global entries. The
+ * ASID is set to 0.
+ */
+ asm volatile(
+ " mcrr p15, 0, %0, %1, c2 @ set TTBR0\n"
+ :
+ : "r" (ttbl), "r" (ttbh));
+ isb();
}
#else
-#define cpu_set_asid(asid) \
- asm(" mcr p15, 0, %0, c13, c0, 1\n" : : "r" (asid))
+void cpu_set_reserved_ttbr0(void)
+{
+ u32 ttb;
+ /* Copy TTBR1 into TTBR0 */
+ asm volatile(
+ " mrc p15, 0, %0, c2, c0, 1 @ read TTBR1\n"
+ " mcr p15, 0, %0, c2, c0, 0 @ set TTBR0\n"
+ : "=r" (ttb));
+ isb();
+}
#endif
/*
* We fork()ed a process, and we need a new context for the child
- * to run in. We reserve version 0 for initial tasks so we will
- * always allocate an ASID. The ASID 0 is reserved for the TTBR
- * register changing sequence.
+ * to run in.
*/
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -51,9 +60,7 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
static void flush_context(void)
{
- /* set the reserved ASID before flushing the TLB */
- cpu_set_asid(0);
- isb();
+ cpu_set_reserved_ttbr0();
local_flush_tlb_all();
if (icache_is_vivt_asid_tagged()) {
__flush_icache_all();
@@ -98,14 +105,7 @@ static void reset_context(void *info)
{
unsigned int asid;
unsigned int cpu = smp_processor_id();
- struct mm_struct *mm = per_cpu(current_mm, cpu);
-
- /*
- * Check if a current_mm was set on this CPU as it might still
- * be in the early booting stages and using the reserved ASID.
- */
- if (!mm)
- return;
+ struct mm_struct *mm = current->active_mm;
smp_rmb();
asid = cpu_last_asid + cpu + 1;
@@ -114,8 +114,7 @@ static void reset_context(void *info)
set_mm_context(mm, asid);
/* set the new ASID */
- cpu_set_asid(mm->context.id);
- isb();
+ cpu_switch_mm(mm->pgd, mm);
}
#else
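
The removed LPAE cpu_set_asid() macro shifted the ASID left by (48 - 32) into the high word of TTBR0, i.e. the ASID sits in the upper bits of the 64-bit register (bits [55:48] on ARMv7, which uses an 8-bit ASID). A small illustrative helper, not part of the patch, showing that layout; the new cpu_set_reserved_ttbr0() sidesteps it by installing swapper_pg_dir, which contains only global entries, with ASID 0 before the TLB flush:

#include <stdint.h>

/* LPAE: the 64-bit TTBR0 carries the translation table base plus the ASID high up. */
static uint64_t lpae_ttbr0_value(uint64_t pgd_phys, unsigned int asid)
{
	return pgd_phys | ((uint64_t)(asid & 0xff) << 48);   /* ASID in bits [55:48] */
}
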
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index f0746753336..5bb48356d21 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -247,7 +247,9 @@ good_area:
return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
check_stack:
- if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
+ /* Don't allow expansion below FIRST_USER_ADDRESS */
+ if (vma->vm_flags & VM_GROWSDOWN &&
+ addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
goto good_area;
out:
return fault;
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 595079fa9d1..8f5813bbffb 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -293,11 +293,11 @@ EXPORT_SYMBOL(pfn_valid);
#endif
#ifndef CONFIG_SPARSEMEM
-static void arm_memory_present(void)
+static void __init arm_memory_present(void)
{
}
#else
-static void arm_memory_present(void)
+static void __init arm_memory_present(void)
{
struct memblock_region *reg;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index b86f8933ff9..aa78de8bfdd 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -489,7 +489,8 @@ static void __init build_mem_type_table(void)
*/
for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
mem_types[i].prot_pte |= PTE_EXT_AF;
- mem_types[i].prot_sect |= PMD_SECT_AF;
+ if (mem_types[i].prot_sect)
+ mem_types[i].prot_sect |= PMD_SECT_AF;
}
kern_pgprot |= PTE_EXT_AF;
vecs_pgprot |= PTE_EXT_AF;
@@ -618,8 +619,8 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
}
}
-static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
- unsigned long phys, const struct mem_type *type)
+static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
+ unsigned long end, unsigned long phys, const struct mem_type *type)
{
pud_t *pud = pud_offset(pgd, addr);
unsigned long next;
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 3a4b3e7b888..42ac069c801 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -49,15 +49,10 @@ ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_ARM_ERRATA_754322
dsb
#endif
- mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID
- isb
-1: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
- isb
-#ifdef CONFIG_ARM_ERRATA_754322
- dsb
-#endif
mcr p15, 0, r1, c13, c0, 1 @ set context ID
isb
+ mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
+ isb
#endif
mov pc, lr
ENDPROC(cpu_v7_switch_mm)
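
The proc-v7-2level.S change drops the intermediate "reserved context ID" step from cpu_v7_switch_mm: the new context ID (ASID) is written first, then TTBR0, each followed by an isb, relying on the rollover path in context.c above, which now parks TTBR0 on the global-only tables instead. A hedged C-level paraphrase of the new ordering (abbreviated; the erratum-754322 dsb and the earlier parts of the routine are omitted):

static inline void v7_switch_mm_sketch(unsigned long pgd_phys, unsigned long context_id)
{
	asm volatile("mcr p15, 0, %0, c13, c0, 1" : : "r" (context_id)); /* set context ID */
	asm volatile("isb");
	asm volatile("mcr p15, 0, %0, c2, c0, 0" : : "r" (pgd_phys));    /* set TTB 0 */
	asm volatile("isb");
}
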