author | Nicolas Pitre <nico@org.rmk.(none)> | 2005-06-08 19:00:47 +0100 |
---|---|---|
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2005-06-08 19:00:47 +0100 |
commit | dcef1f634657dabe7905af3ccda12cf7f0b6fcc1 (patch) | |
tree | e1b8bf471c3c268d0be919561268c24f298d031b /arch/arm | |
parent | aeabbbbe126f3d5e61e2db07629443cd10932bb2 (diff) | |
[PATCH] ARM: 2664/2: add support for atomic ops on pre-ARMv6 SMP systems
Patch from Nicolas Pitre
Not that there might be many of them on the planet, but at least RMK
apparently has one.
Signed-off-by: Nicolas Pitre
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm')
-rw-r--r-- | arch/arm/kernel/entry-armv.S | 16 |
-rw-r--r-- | arch/arm/kernel/traps.c | 49 |
-rw-r--r-- | arch/arm/mm/Kconfig | 8 |
3 files changed, 68 insertions, 5 deletions
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 4eb36155dc9..e14278d5988 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -269,7 +269,7 @@ __pabt_svc:
 	add	r5, sp, #S_PC
 	ldmia	r7, {r2 - r4}			@ Get USR pc, cpsr
-#if __LINUX_ARM_ARCH__ < 6
+#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 	@ make sure our user space atomic helper is aborted
 	cmp	r2, #VIRT_OFFSET
 	bichs	r3, r3, #PSR_Z_BIT
@@ -616,11 +616,17 @@ __kuser_helper_start:
 __kuser_cmpxchg:				@ 0xffff0fc0
 
-#if __LINUX_ARM_ARCH__ < 6
+#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 
-#ifdef CONFIG_SMP  /* sanity check */
-#error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?"
-#endif
+	/*
+	 * Poor you. No fast solution possible...
+	 * The kernel itself must perform the operation.
+	 * A special ghost syscall is used for that (see traps.c).
+	 */
+	swi	#0x9ffff0
+	mov	pc, lr
+
+#elif __LINUX_ARM_ARCH__ < 6
 
 	/*
 	 * Theory of operation:
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 14df16b983f..45d2a032d89 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -464,6 +464,55 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 #endif
 		return 0;
 
+#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
+	/*
+	 * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
+	 * Return zero in r0 if *MEM was changed or non-zero if no exchange
+	 * happened. Also set the user C flag accordingly.
+	 * If access permissions have to be fixed up then non-zero is
+	 * returned and the operation has to be re-attempted.
+	 *
+	 * *NOTE*: This is a ghost syscall private to the kernel. Only the
+	 * __kuser_cmpxchg code in entry-armv.S should be aware of its
+	 * existence. Don't ever use this from user code.
+	 */
+	case 0xfff0:
+	{
+		extern void do_DataAbort(unsigned long addr, unsigned int fsr,
+					 struct pt_regs *regs);
+		unsigned long val;
+		unsigned long addr = regs->ARM_r2;
+		struct mm_struct *mm = current->mm;
+		pgd_t *pgd; pmd_t *pmd; pte_t *pte;
+
+		regs->ARM_cpsr &= ~PSR_C_BIT;
+		spin_lock(&mm->page_table_lock);
+		pgd = pgd_offset(mm, addr);
+		if (!pgd_present(*pgd))
+			goto bad_access;
+		pmd = pmd_offset(pgd, addr);
+		if (!pmd_present(*pmd))
+			goto bad_access;
+		pte = pte_offset_map(pmd, addr);
+		if (!pte_present(*pte) || !pte_write(*pte))
+			goto bad_access;
+		val = *(unsigned long *)addr;
+		val -= regs->ARM_r0;
+		if (val == 0) {
+			*(unsigned long *)addr = regs->ARM_r1;
+			regs->ARM_cpsr |= PSR_C_BIT;
+		}
+		spin_unlock(&mm->page_table_lock);
+		return val;
+
+		bad_access:
+		spin_unlock(&mm->page_table_lock);
+		/* simulate a read access fault */
+		do_DataAbort(addr, 15 + (1 << 11), regs);
+		return -1;
+	}
+#endif
+
 	default:
 		/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
 		   if not implemented, rather than raising SIGILL. This
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index ade0e2222f5..3fefb43c67f 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -422,3 +422,11 @@ config HAS_TLS_REG
 	  assume directly accessing that register and always obtain the
 	  expected value only on ARMv7 and above.
 
+config NEEDS_SYSCALL_FOR_CMPXCHG
+	bool
+	default y if SMP && (CPU_32v5 || CPU_32v4 || CPU_32v3)
+	help
+	  SMP on a pre-ARMv6 processor? Well OK then.
+	  Forget about fast user space cmpxchg support.
+	  It is just not possible.
+
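For readers unfamiliar with the kuser helper mechanism, here is a minimal user-space sketch of how the cmpxchg helper touched by this patch is meant to be called. The fixed address 0xffff0fc0 and the semantics (store the new value only if *ptr still holds the expected value; return zero and set the C flag on success, non-zero otherwise) come from the code and comments above; the kuser_cmpxchg_t typedef and the atomic_add_return() wrapper are illustrative names only, not part of this patch.

/*
 * Illustrative only: user space branches to the fixed address 0xffff0fc0,
 * where the kernel places __kuser_cmpxchg. With this patch applied on a
 * pre-ARMv6 SMP configuration, that helper traps into the kernel via the
 * swi #0x9ffff0 ghost syscall instead of doing the work inline.
 */
typedef int (kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define kuser_cmpxchg (*(kuser_cmpxchg_t *)0xffff0fc0)

static int atomic_add_return(volatile int *ptr, int val)
{
	int old, new;

	/* retry until no other thread raced with the compare-and-store */
	do {
		old = *ptr;
		new = old + val;
	} while (kuser_cmpxchg(old, new, ptr) != 0);

	return new;
}

The retry loop relies only on the return convention documented in the traps.c comment: a non-zero result means no exchange happened (including the access fix-up case), so the caller simply reloads the value and tries again.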