From d24944d924d051281b276be8e5749b45785563c8 Mon Sep 17 00:00:00 2001
From: Carlos O'Donell
Date: Wed, 31 Mar 2010 20:42:33 +0000
Subject: parisc: Use of align_frame provides stack frame.

Any assembly constant generated with the use of align_frame
includes size for a full stack frame.

Signed-off-by: Carlos O'Donell
Signed-off-by: Kyle McMartin
---
 arch/parisc/kernel/asm-offsets.c | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index ec787b411e9..dcd55103a4b 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -45,8 +45,12 @@
 #else
 #define FRAME_SIZE	64
 #endif
+#define FRAME_ALIGN	64
 
-#define align(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
+/* Add FRAME_SIZE to the size x and align it to y. All definitions
+ * that use align_frame will include space for a frame.
+ */
+#define align_frame(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
 
 int main(void)
 {
@@ -146,7 +150,8 @@ int main(void)
 	DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior));
 	BLANK();
 	DEFINE(TASK_SZ, sizeof(struct task_struct));
-	DEFINE(TASK_SZ_ALGN, align(sizeof(struct task_struct), 64));
+	/* TASK_SZ_ALGN includes space for a stack frame. */
+	DEFINE(TASK_SZ_ALGN, align_frame(sizeof(struct task_struct), FRAME_ALIGN));
 	BLANK();
 	DEFINE(PT_PSW, offsetof(struct pt_regs, gr[ 0]));
 	DEFINE(PT_GR1, offsetof(struct pt_regs, gr[ 1]));
@@ -233,7 +238,8 @@ int main(void)
 	DEFINE(PT_ISR, offsetof(struct pt_regs, isr));
 	DEFINE(PT_IOR, offsetof(struct pt_regs, ior));
 	DEFINE(PT_SIZE, sizeof(struct pt_regs));
-	DEFINE(PT_SZ_ALGN, align(sizeof(struct pt_regs), 64));
+	/* PT_SZ_ALGN includes space for a stack frame. */
+	DEFINE(PT_SZ_ALGN, align_frame(sizeof(struct pt_regs), FRAME_ALIGN));
 	BLANK();
 	DEFINE(TI_TASK, offsetof(struct thread_info, task));
 	DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
@@ -242,7 +248,8 @@ int main(void)
 	DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit));
 	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
 	DEFINE(THREAD_SZ, sizeof(struct thread_info));
-	DEFINE(THREAD_SZ_ALGN, align(sizeof(struct thread_info), 64));
+	/* THREAD_SZ_ALGN includes space for a stack frame. */
+	DEFINE(THREAD_SZ_ALGN, align_frame(sizeof(struct thread_info), FRAME_ALIGN));
 	BLANK();
 	DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
 	DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
-- cgit v1.2.3
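Note on the macro above: align_frame(x, y) rounds x up to a multiple of y and
then reserves a further FRAME_SIZE bytes on top, which is why every *_SZ_ALGN
constant now includes room for a full stack frame. A minimal userspace C
sketch of the arithmetic, using the 32-bit constants from asm-offsets.c and
not part of the patch itself:

#include <assert.h>
#include <stdio.h>

#define FRAME_SIZE  64	/* 32-bit value used above */
#define FRAME_ALIGN 64

/* Same expression as the kernel macro. */
#define align_frame(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))

int main(void)
{
	/* 1234 rounds up to 1280 (a multiple of 64), plus a 64-byte frame. */
	assert(align_frame(1234, FRAME_ALIGN) == 1280 + FRAME_SIZE);

	/* An already aligned size still gains a full frame. */
	assert(align_frame(1280, FRAME_ALIGN) == 1280 + FRAME_SIZE);

	printf("align_frame(1234, 64) = %d\n", align_frame(1234, FRAME_ALIGN));
	return 0;
}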
From 550f0d922286556c7ea43974bb7921effb5a5278 Mon Sep 17 00:00:00 2001
From: Helge Deller
Date: Mon, 3 May 2010 20:44:21 +0000
Subject: parisc: clear floating point exception flag on SIGFPE signal

Clear the floating point exception flag before returning to
user space. This is needed, else the libc trampoline handler
may hit the same SIGFPE again while building up a trampoline
to a signal handler.

Fixes debian bug #559406.

Signed-off-by: Helge Deller
Signed-off-by: Kyle McMartin
---
 arch/parisc/math-emu/decode_exc.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch')

diff --git a/arch/parisc/math-emu/decode_exc.c b/arch/parisc/math-emu/decode_exc.c
index 3ca1c614921..27a7492ddb0 100644
--- a/arch/parisc/math-emu/decode_exc.c
+++ b/arch/parisc/math-emu/decode_exc.c
@@ -342,6 +342,7 @@ decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[])
 		return SIGNALCODE(SIGFPE, FPE_FLTINV);
 	  case DIVISIONBYZEROEXCEPTION:
 		update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
+		Clear_excp_register(exception_index);
 		return SIGNALCODE(SIGFPE, FPE_FLTDIV);
 	  case INEXACTEXCEPTION:
 		update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
-- cgit v1.2.3
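From userspace the bug above shows up as a second, spurious SIGFPE while the
first one is being delivered to a handler. A hypothetical glibc test program
along these lines (illustrative only, not part of the patch) exercises the
delivery path the fix is concerned with:

#define _GNU_SOURCE
#include <fenv.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void fpe_handler(int sig)
{
	/* With the fix, the handler runs exactly once; without it, setting
	 * up this very handler frame could re-raise SIGFPE on parisc. */
	(void)sig;
	write(2, "caught SIGFPE\n", 14);
	_exit(0);
}

int main(void)
{
	volatile double zero = 0.0;

	signal(SIGFPE, fpe_handler);
	feenableexcept(FE_DIVBYZERO);	/* glibc extension: trap on divide-by-zero */

	printf("%f\n", 1.0 / zero);	/* traps before printf is reached */
	return 1;			/* not reached */
}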
From 53e30d022769434327a682d65031f129cd5d9c33 Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Thu, 22 Apr 2010 16:06:23 +0000
Subject: parisc: invoke oom-killer from page fault

As explained in commit 1c0fe6e3bd, we want to call the architecture
independent oom killer when getting an unexplained OOM from
handle_mm_fault, rather than simply killing current.

Cc: linux-parisc@vger.kernel.org
Cc: linux-arch@vger.kernel.org
Signed-off-by: Nick Piggin
Acked-by: David Rientjes
Signed-off-by: Kyle McMartin
---
 arch/parisc/mm/fault.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index c6afbfc9577..18162ce4261 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -264,8 +264,7 @@ no_context:
 
   out_of_memory:
 	up_read(&mm->mmap_sem);
-	printk(KERN_CRIT "VM: killing process %s\n", current->comm);
-	if (user_mode(regs))
-		do_group_exit(SIGKILL);
-	goto no_context;
+	if (!user_mode(regs))
+		goto no_context;
+	pagefault_out_of_memory();
 }
-- cgit v1.2.3

From 8f6c0c2bf1d4cc626588ca6f8dc642df34c0d26d Mon Sep 17 00:00:00 2001
From: John David Anglin
Date: Sun, 11 Apr 2010 17:12:56 +0000
Subject: parisc: Avoid interruption in critical region in entry.S

Signed-off-by: John David Anglin
Signed-off-by: Kyle McMartin
---
 arch/parisc/kernel/entry.S | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 3a44f7f704f..ba86f610e76 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -2076,9 +2076,10 @@ syscall_restore:
 	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */
 
 	/* NOTE: We use rsm/ssm pair to make this operation atomic */
+	LDREG	TASK_PT_GR30(%r1),%r1	   /* Get user sp */
 	rsm     PSW_SM_I, %r0
-	LDREG   TASK_PT_GR30(%r1),%r30	   /* restore user sp */
-	mfsp	%sr3,%r1		   /* Get users space id */
+	copy    %r1,%r30		   /* Restore user sp */
+	mfsp	%sr3,%r1		   /* Get user space id */
 	mtsp    %r1,%sr7		   /* Restore sr7 */
 	ssm     PSW_SM_I, %r0
 
-- cgit v1.2.3

From c2dc988ec566429841dd83644479aca78a6251e7 Mon Sep 17 00:00:00 2001
From: John David Anglin
Date: Sun, 11 Apr 2010 17:08:51 +0000
Subject: parisc: Delete unnecessary nop's in entry.S

Signed-off-by: John David Anglin
Signed-off-by: Kyle McMartin
---
 arch/parisc/kernel/entry.S | 7 -------
 1 file changed, 7 deletions(-)

(limited to 'arch')

diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index ba86f610e76..293ba44f40a 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -997,13 +997,6 @@ intr_restore:
 	rfi
 	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
 
 #ifndef CONFIG_PREEMPT
 # define intr_do_preempt	intr_restore
-- cgit v1.2.3

From f4c0346c6f350d51aac7ed87e266a4257bdbe506 Mon Sep 17 00:00:00 2001
From: John David Anglin
Date: Sun, 11 Apr 2010 17:26:34 +0000
Subject: parisc: LWS fixes for syscall.S

1) Gate immediately and save a branch.
2) Fix off by one error in checking entry number.
3) Use sr7 instead of sr3 in error return path as sr3 might not
   contain correct value.
4) Enable locking on UP systems to prevent incorrect operation of
   the cas_action critical region on page faults.

Tested on several systems, including UP c3750 with 2.6.33.2 kernel.

Signed-off-by: John David Anglin
Signed-off-by: Kyle McMartin
---
 arch/parisc/kernel/syscall.S | 32 +++++++++-----------------------
 1 file changed, 9 insertions(+), 23 deletions(-)

(limited to 'arch')

diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index f5f96021caa..68e75ce838d 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -47,18 +47,17 @@ ENTRY(linux_gateway_page)
 	KILL_INSN
 	.endr
 
-	/* ADDRESS 0xb0 to 0xb4, lws uses 1 insns for entry */
+	/* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
 	/* Light-weight-syscall entry must always be located at 0xb0 */
 	/* WARNING: Keep this number updated with table size changes */
#define __NR_lws_entries (2)
 
 lws_entry:
-	/* Unconditional branch to lws_start, located on the
-	   same gateway page */
-	b,n	lws_start
+	gate	lws_start, %r0		/* increase privilege */
+	depi	3, 31, 2, %r31		/* Ensure we return into user mode. */
 
-	/* Fill from 0xb4 to 0xe0 */
-	.rept 11
+	/* Fill from 0xb8 to 0xe0 */
+	.rept 10
 	KILL_INSN
 	.endr
 
@@ -423,9 +422,6 @@ tracesys_sigexit:
 
 	*********************************************************/
 lws_start:
-	/* Gate and ensure we return to userspace */
-	gate	.+8, %r0
-	depi	3, 31, 2, %r31	/* Ensure we return to userspace */
 
 #ifdef CONFIG_64BIT
 	/* FIXME: If we are a 64-bit kernel just
@@ -442,7 +438,7 @@ lws_start:
 #endif
 
 	/* Is the lws entry number valid? */
-	comiclr,>>=	__NR_lws_entries, %r20, %r0
+	comiclr,>>	__NR_lws_entries, %r20, %r0
 	b,n	lws_exit_nosys
 
 	/* WARNING: Trashing sr2 and sr3 */
@@ -473,7 +469,7 @@ lws_exit:
 	/* now reset the lowest bit of sp if it was set */
 	xor	%r30,%r1,%r30
 #endif
-	be,n	0(%sr3, %r31)
+	be,n	0(%sr7, %r31)
 
 
 
@@ -529,7 +525,6 @@ lws_compare_and_swap32:
 #endif
 
 lws_compare_and_swap:
-#ifdef CONFIG_SMP
 	/* Load start of lock table */
 	ldil	L%lws_lock_start, %r20
 	ldo	R%lws_lock_start(%r20), %r28
@@ -572,8 +567,6 @@ cas_wouldblock:
 	ldo	2(%r0), %r28			/* 2nd case */
 	b	lws_exit			/* Contended... */
 	ldo	-EAGAIN(%r0), %r21		/* Spin in userspace */
-#endif
-/* CONFIG_SMP */
 
 
 	/* prev = *addr;
@@ -601,13 +594,11 @@ cas_action:
1:	ldw	0(%sr3,%r26), %r28
 	sub,<>	%r28, %r25, %r0
2:	stw	%r24, 0(%sr3,%r26)
-#ifdef CONFIG_SMP
 	/* Free lock */
 	stw	%r20, 0(%sr2,%r20)
-# if ENABLE_LWS_DEBUG
+#if ENABLE_LWS_DEBUG
 	/* Clear thread register indicator */
 	stw	%r0, 4(%sr2,%r20)
-# endif
 #endif
 	/* Return to userspace, set no error */
 	b	lws_exit
@@ -615,12 +606,10 @@ cas_action:
 
3:
 	/* Error occured on load or store */
-#ifdef CONFIG_SMP
 	/* Free lock */
 	stw	%r20, 0(%sr2,%r20)
-# if ENABLE_LWS_DEBUG
+#if ENABLE_LWS_DEBUG
 	stw	%r0, 4(%sr2,%r20)
-# endif
 #endif
 	b	lws_exit
 	ldo	-EFAULT(%r0),%r21	/* set errno */
@@ -672,7 +661,6 @@ ENTRY(sys_call_table64)
END(sys_call_table64)
 #endif
 
-#ifdef CONFIG_SMP
 	/*
 		All light-weight-syscall atomic operations
 		will use this set of locks
@@ -694,8 +682,6 @@ ENTRY(lws_lock_start)
 	.endr
END(lws_lock_start)
 	.previous
-#endif
-/* CONFIG_SMP for lws_lock_start */
 
 .end
-- cgit v1.2.3
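For orientation, the cas_action fragment patched above is the kernel side of
the light-weight-syscall compare-and-swap that userspace enters through the
gateway page at 0xb0. A rough C rendering of its semantics — illustrative
only; the function name is made up here, and the per-address lock handling
and fault fix-up are paraphrased rather than the real interface:

/*
 * Sketch of the LWS compare-and-swap semantics (illustrative).
 * On entry: uaddr in %r26, old_val in %r25, new_val in %r24.
 * On exit:  the value read is returned in %r28, and %r21 holds
 *           0 on success or -EFAULT if the user address faulted.
 */
int lws_cas_sketch(unsigned int *uaddr, unsigned int old_val,
		   unsigned int new_val, unsigned int *observed)
{
	*observed = *uaddr;		/* 1: ldw  0(%sr3,%r26), %r28 */
	if (*observed == old_val)	/*    sub,<> %r28, %r25, %r0  */
		*uaddr = new_val;	/* 2: stw  %r24, 0(%sr3,%r26) */
	return 0;			/*    %r21 = 0 (-EFAULT on fault) */
}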
From 9b437bca163c44d4ec6544241f29451675693cd6 Mon Sep 17 00:00:00 2001
From: John David Anglin
Date: Sun, 11 Apr 2010 17:03:54 +0000
Subject: parisc: Remove unnecessary macros from entry.S

The EXTR, DEP and DEPI macros are unnecessary. There are PA 1.X
mnemonics available with the same functionality, and the DEP and DEPI
macros conflict with assembler mnemonics.

Tested on a variety of 32 and 64-bit systems.

Signed-off-by: John David Anglin
Signed-off-by: Kyle McMartin
---
 arch/parisc/kernel/entry.S | 40 +++++++---------------------------------
 1 file changed, 7 insertions(+), 33 deletions(-)

(limited to 'arch')

diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 293ba44f40a..6337adef30f 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -364,32 +364,6 @@
 	.align		32
 	.endm
 
-	/* The following are simple 32 vs 64 bit instruction
-	 * abstractions for the macros */
-	.macro		EXTR	reg1,start,length,reg2
-#ifdef CONFIG_64BIT
-	extrd,u		\reg1,32+(\start),\length,\reg2
-#else
-	extrw,u		\reg1,\start,\length,\reg2
-#endif
-	.endm
-
-	.macro		DEP	reg1,start,length,reg2
-#ifdef CONFIG_64BIT
-	depd		\reg1,32+(\start),\length,\reg2
-#else
-	depw		\reg1,\start,\length,\reg2
-#endif
-	.endm
-
-	.macro		DEPI	val,start,length,reg
-#ifdef CONFIG_64BIT
-	depdi		\val,32+(\start),\length,\reg
-#else
-	depwi		\val,\start,\length,\reg
-#endif
-	.endm
-
 	/* In LP64, the space contains part of the upper 32 bits of the
 	 * fault.  We have to extract this and place it in the va,
 	 * zeroing the corresponding bits in the space register */
@@ -442,19 +416,19 @@
 	 */
 	.macro		L2_ptep	pmd,pte,index,va,fault
 #if PT_NLEVELS == 3
-	EXTR		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
+	extru		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
 #else
-	EXTR		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
+	extru		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
 #endif
-	DEP		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
+	dep		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
 	copy		%r0,\pte
 	ldw,s		\index(\pmd),\pmd
 	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
-	DEP		%r0,31,PxD_FLAG_SHIFT,\pmd	/* clear flags */
+	dep		%r0,31,PxD_FLAG_SHIFT,\pmd	/* clear flags */
 	copy		\pmd,%r9
 	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
-	EXTR		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
-	DEP		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
+	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
+	dep		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
 	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
 	LDREG		%r0(\pmd),\pte		/* pmd is now pte */
 	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
@@ -605,7 +579,7 @@
 	depdi		0,31,32,\tmp
 #endif
 	copy		\va,\tmp1
-	DEPI		0,31,23,\tmp1
+	depi		0,31,23,\tmp1
 	cmpb,COND(<>),n	\tmp,\tmp1,\fault
 	ldi		(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
 	depd,z		\prot,8,7,\prot
-- cgit v1.2.3

From 210501aa570fdaa8b06e56fd1c04f31f2d3f368b Mon Sep 17 00:00:00 2001
From: John David Anglin
Date: Sun, 11 Apr 2010 16:36:14 +0000
Subject: parisc: Call pagefault_disable/pagefault_enable in kmap_atomic/kunmap_atomic

Based on the generic implementation of kmap_atomic and kunmap_atomic,
we should call pagefault_disable and pagefault_enable in our PA8000
implementation. The define for kmap_atomic_prot was also missing, and
I updated kmap_atomic_pfn to use the generic implementation because of
the change to kmap_atomic.

I believe that this change is needed to fix the fork copy-on-write bug.

Signed-off-by: John David Anglin
Signed-off-by: Kyle McMartin
---
 arch/parisc/include/asm/cacheflush.h | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

(limited to 'arch')

diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 477277739da..4556d820128 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -2,6 +2,7 @@
 #define _PARISC_CACHEFLUSH_H
 
 #include
+#include
 
 /* The usual comment is "Caches aren't brain-dead on the ".
  * Unfortunately, that doesn't apply to PA-RISC. */
@@ -125,11 +126,20 @@ static inline void *kmap(struct page *page)
 
 #define kunmap(page)			kunmap_parisc(page_address(page))
 
-#define kmap_atomic(page, idx)		page_address(page)
+static inline void *kmap_atomic(struct page *page, enum km_type idx)
+{
+	pagefault_disable();
+	return page_address(page);
+}
 
-#define kunmap_atomic(addr, idx)	kunmap_parisc(addr)
+static inline void kunmap_atomic(void *addr, enum km_type idx)
+{
+	kunmap_parisc(addr);
+	pagefault_enable();
+}
 
-#define kmap_atomic_pfn(pfn, idx)	page_address(pfn_to_page(pfn))
+#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)
+#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
 #define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
 
 #endif
-- cgit v1.2.3
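As background, kmap_atomic()/kunmap_atomic() provide short-lived per-CPU
mappings, and the generic versions rely on page faults (and hence preemption)
being disabled between the two calls. A hedged sketch of a typical caller
against the 2.6.34-era two-argument API — illustrative only,
copy_from_page_example is not a kernel function:

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Copy the first len bytes out of a (possibly highmem) page through a
 * temporary atomic mapping.  Nothing between kmap_atomic() and
 * kunmap_atomic() may sleep or take a page fault, which is what the added
 * pagefault_disable()/pagefault_enable() calls now enforce on parisc too.
 */
static void copy_from_page_example(struct page *page, void *dst, size_t len)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	memcpy(dst, vaddr, len);
	kunmap_atomic(vaddr, KM_USER0);
}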