From 1b0e235cc9bfae4bc0f5cd0cba929206fb0f6a64 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Wed, 11 Feb 2009 00:54:07 -0800
Subject: sparc64: Fix crashes in jbusmc_print_dimm()

Return was missing for the case where there is no dimm info match.

Signed-off-by: David S. Miller
---
 arch/sparc/kernel/chmc.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch')

diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c
index 3b9f4d6e14a..e1a9598e2a4 100644
--- a/arch/sparc/kernel/chmc.c
+++ b/arch/sparc/kernel/chmc.c
@@ -306,6 +306,7 @@ static int jbusmc_print_dimm(int syndrome_code,
 		buf[1] = '?';
 		buf[2] = '?';
 		buf[3] = '\0';
+		return 0;
 	}
 	p = dp->controller;
 	prop = &p->layout;
-- cgit v1.2.3

From 6b1ff036d4cde7834ef2f9dbea5747adaaac24e0 Mon Sep 17 00:00:00 2001
From: Kyle McMartin
Date: Sat, 14 Feb 2009 04:11:29 -0500
Subject: [IA64] enable setting DMAR on by default

The previous commit which introduced the DMAR_DEFAULT_ON setting in
drivers/pci/dmar.c neglected to add the ability for ia64 to enable the
IOMMU by default. Rectify that mistake, doh!

Signed-off-by: Kyle McMartin
Signed-off-by: Tony Luck
---
 arch/ia64/Kconfig | 11 +++++++++++
 1 file changed, 11 insertions(+)

(limited to 'arch')

diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 4eb45c01249..153e727a6e8 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -638,6 +638,17 @@ config DMAR
 	  and include PCI device scope covered by these DMA
 	  remapping devices.
 
+config DMAR_DEFAULT_ON
+	def_bool y
+	prompt "Enable DMA Remapping Devices by default"
+	depends on DMAR
+	help
+	  Selecting this option will enable a DMAR device at boot time if
+	  one is found. If this option is not selected, DMAR support can
+	  be enabled by passing intel_iommu=on to the kernel. It is
+	  recommended you say N here while the DMAR code remains
+	  experimental.
+
 endmenu
 
 endif
-- cgit v1.2.3

From aa2f63c95439a11dfac35c60d9160dcd0189aed3 Mon Sep 17 00:00:00 2001
From: Roel Kluin
Date: Sun, 22 Feb 2009 02:33:28 +0100
Subject: [IA64] Do not go beyond ARRAY_SIZE of unw.hash

static struct {
	...
:114	unsigned short hash[UNW_HASH_SIZE];
	...
:2152	for (index = 0; index <= UNW_HASH_SIZE; ++index) {

This is a bug, isn't it?

s/<=/</

Signed-off-by: Tony Luck
---
 arch/ia64/kernel/unwind.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index 67810b77d99..b6c0e63a0bf 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -2149,7 +2149,7 @@ unw_remove_unwind_table (void *handle)
 
 	/* next, remove hash table entries for this table */
 
-	for (index = 0; index <= UNW_HASH_SIZE; ++index) {
+	for (index = 0; index < UNW_HASH_SIZE; ++index) {
 		tmp = unw.cache + unw.hash[index];
 		if (unw.hash[index] >= UNW_CACHE_SIZE
 		    || tmp->ip < table->start || tmp->ip >= table->end)
-- cgit v1.2.3
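Both this fix and the iosapic fix that follows correct the same off-by-one pattern: a loop written with <= against an array's element count runs one index past the end of the array. A minimal standalone C sketch of the bug class (the UNW_HASH_SIZE value here is a stand-in chosen for illustration, not the kernel's):

#include <stdio.h>

#define UNW_HASH_SIZE 512	/* stand-in value for this sketch */

static unsigned short hash[UNW_HASH_SIZE];

int main(void)
{
	int index;

	/*
	 * Buggy form: "<=" also visits index == UNW_HASH_SIZE, one past
	 * the last element, so hash[index] touches memory beyond the
	 * array on the final iteration.
	 */

	/* Fixed form: "<" stops at the last valid index. */
	for (index = 0; index < UNW_HASH_SIZE; ++index)
		hash[index] = 0;

	printf("valid indices: 0 .. %d\n", UNW_HASH_SIZE - 1);
	return 0;
}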
From 5b5923975f07836fc7a5388f9fa5f459828ae4ee Mon Sep 17 00:00:00 2001
From: Roel Kluin
Date: Sat, 21 Feb 2009 23:40:27 +0100
Subject: [IA64] Don't go beyond iosapic_intr_info's arraysize

vi arch/ia64/kernel/iosapic.c +142

static struct iosapic_intr_info {
	...
} iosapic_intr_info[NR_IRQS];

But at line 510 we have:

	for (i = 0; i <= NR_IRQS; i++) {

s/<=/</

Signed-off-by: Tony Luck
---
 arch/ia64/kernel/iosapic.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 5cfd3d91001..e13125058be 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -507,7 +507,7 @@ static int iosapic_find_sharable_irq(unsigned long trigger, unsigned long pol)
 	if (trigger == IOSAPIC_EDGE)
 		return -EINVAL;
 
-	for (i = 0; i <= NR_IRQS; i++) {
+	for (i = 0; i < NR_IRQS; i++) {
 		info = &iosapic_intr_info[i];
 		if (info->trigger == trigger && info->polarity == pol &&
 		    (info->dmode == IOSAPIC_FIXED ||
-- cgit v1.2.3

From 49f297f8df9adb797334155470ea9ca68bdb041e Mon Sep 17 00:00:00 2001
From: Michael Neuling
Date: Thu, 19 Feb 2009 18:52:20 +0000
Subject: powerpc: Fix load/store float double alignment handler

When we introduced VSX, we changed the way FPRs are stored in the
thread_struct. Unfortunately we missed the load/store float double
alignment handler code when updating how we access FPRs in the
thread_struct.

Below fixes this and merges the little/big endian case.

Signed-off-by: Michael Neuling
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/align.c | 29 +++++++++++++----------------
 1 file changed, 13 insertions(+), 16 deletions(-)

(limited to 'arch')

diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index ada06924a42..73cb6a3229a 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -367,27 +367,24 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
 static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
 			   unsigned int flags)
 {
-	char *ptr = (char *) &current->thread.TS_FPR(reg);
-	int i, ret;
+	char *ptr0 = (char *) &current->thread.TS_FPR(reg);
+	char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
+	int i, ret, sw = 0;
 
 	if (!(flags & F))
 		return 0;
 	if (reg & 1)
 		return 0;	/* invalid form: FRS/FRT must be even */
-	if (!(flags & SW)) {
-		/* not byte-swapped - easy */
-		if (!(flags & ST))
-			ret = __copy_from_user(ptr, addr, 16);
-		else
-			ret = __copy_to_user(addr, ptr, 16);
-	} else {
-		/* each FPR value is byte-swapped separately */
-		ret = 0;
-		for (i = 0; i < 16; ++i) {
-			if (!(flags & ST))
-				ret |= __get_user(ptr[i^7], addr + i);
-			else
-				ret |= __put_user(ptr[i^7], addr + i);
+	if (flags & SW)
+		sw = 7;
+	ret = 0;
+	for (i = 0; i < 8; ++i) {
+		if (!(flags & ST)) {
+			ret |= __get_user(ptr0[i^sw], addr + i);
+			ret |= __get_user(ptr1[i^sw], addr + i + 8);
+		} else {
+			ret |= __put_user(ptr0[i^sw], addr + i);
+			ret |= __put_user(ptr1[i^sw], addr + i + 8);
 		}
 	}
 	if (ret)
-- cgit v1.2.3
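The rewritten emulate_fp_pair() above folds the byte-swapped and straight copies into one loop: XOR-ing a byte index with 7 reverses the byte order within an 8-byte doubleword, while XOR-ing with 0 leaves it alone, so a single sw variable selects the behaviour. A small userspace sketch of that indexing trick (copy_doubleword and its names are illustrative, not kernel code):

#include <stdint.h>
#include <stdio.h>

/*
 * Copy one 8-byte value, optionally reversing its byte order.
 * XOR with 7 maps indices 0..7 to 7..0; XOR with 0 is the identity,
 * so one loop covers both cases, as in emulate_fp_pair() above.
 */
static void copy_doubleword(unsigned char *dst, const unsigned char *src,
			    int byteswap)
{
	int sw = byteswap ? 7 : 0;
	int i;

	for (i = 0; i < 8; ++i)
		dst[i ^ sw] = src[i];
}

int main(void)
{
	uint64_t in = 0x0102030405060708ULL, straight, swapped;

	copy_doubleword((unsigned char *)&straight,
			(const unsigned char *)&in, 0);
	copy_doubleword((unsigned char *)&swapped,
			(const unsigned char *)&in, 1);
	printf("straight: %016llx\n", (unsigned long long)straight);
	printf("swapped:  %016llx\n", (unsigned long long)swapped);
	return 0;
}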
From e423b9ecd6aa434ce9ba72a21fdc61079e620e0a Mon Sep 17 00:00:00 2001
From: Mark Nelson
Date: Wed, 25 Feb 2009 13:26:48 +0000
Subject: powerpc: Fix 64bit memcpy() regression

This fixes a regression introduced by commit
25d6e2d7c58ddc4a3b614fc5381591c0cfe66556 ("powerpc: Update 64bit memcpy()
using CPU_FTR_UNALIGNED_LD_STD").

This commit allowed CPUs that have the CPU_FTR_UNALIGNED_LD_STD CPU
feature bit present to do the memcpy() with unaligned load doubles. But,
along with this came a bug where our final load double would read bytes
beyond a page boundary and into the next (unmapped) page. This was
caught by enabling CONFIG_DEBUG_PAGEALLOC.

The fix was to read only the number of bytes that we need to store
rather than reading a full 8-byte doubleword and storing only a portion
of that.

In order to minimise the amount of existing code touched we use the
original do_tail for the src_unaligned case.

Below is an example of the regression, as reported by Sachin Sant:

Unable to handle kernel paging request for data at address 0xc00000003f380000
Faulting instruction address: 0xc000000000039574
cpu 0x1: Vector: 300 (Data Access) at [c00000003baf3020]
    pc: c000000000039574: .memcpy+0x74/0x244
    lr: d00000000244916c: .ext3_xattr_get+0x288/0x2f4 [ext3]
    sp: c00000003baf32a0
   msr: 8000000000009032
   dar: c00000003f380000
 dsisr: 40000000
  current = 0xc00000003e54b010
  paca    = 0xc000000000a53680
    pid   = 1840, comm = readahead
enter ? for help
[link register   ] d00000000244916c .ext3_xattr_get+0x288/0x2f4 [ext3]
[c00000003baf32a0] d000000002449104 .ext3_xattr_get+0x220/0x2f4 [ext3] (unreliable)
[c00000003baf3390] d00000000244a6e8 .ext3_xattr_security_get+0x40/0x5c [ext3]
[c00000003baf3400] c000000000148154 .generic_getxattr+0x74/0x9c
[c00000003baf34a0] c000000000333400 .inode_doinit_with_dentry+0x1c4/0x678
[c00000003baf3560] c00000000032c6b0 .security_d_instantiate+0x50/0x68
[c00000003baf35e0] c00000000013c818 .d_instantiate+0x78/0x9c
[c00000003baf3680] c00000000013ced0 .d_splice_alias+0xf0/0x120
[c00000003baf3720] d00000000243e05c .ext3_lookup+0xec/0x134 [ext3]
[c00000003baf37c0] c000000000131e74 .do_lookup+0x110/0x260
[c00000003baf3880] c000000000134ed0 .__link_path_walk+0xa98/0x1010
[c00000003baf3970] c0000000001354a0 .path_walk+0x58/0xc4
[c00000003baf3a20] c000000000135720 .do_path_lookup+0x138/0x1e4
[c00000003baf3ad0] c00000000013645c .path_lookup_open+0x6c/0xc8
[c00000003baf3b70] c000000000136780 .do_filp_open+0xcc/0x874
[c00000003baf3d10] c0000000001251e0 .do_sys_open+0x80/0x140
[c00000003baf3dc0] c00000000016aaec .compat_sys_open+0x24/0x38
[c00000003baf3e30] c00000000000855c syscall_exit+0x0/0x40

Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/lib/memcpy_64.S | 26 ++++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)

(limited to 'arch')

diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
index fe2d34e5332..e178922b2c2 100644
--- a/arch/powerpc/lib/memcpy_64.S
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -53,18 +53,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 3:	std	r8,8(r3)
 	beq	3f
 	addi	r3,r3,16
-	ld	r9,8(r4)
 .Ldo_tail:
 	bf	cr7*4+1,1f
-	rotldi	r9,r9,32
+	lwz	r9,8(r4)
+	addi	r4,r4,4
 	stw	r9,0(r3)
 	addi	r3,r3,4
 1:	bf	cr7*4+2,2f
-	rotldi	r9,r9,16
+	lhz	r9,8(r4)
+	addi	r4,r4,2
 	sth	r9,0(r3)
 	addi	r3,r3,2
 2:	bf	cr7*4+3,3f
-	rotldi	r9,r9,8
+	lbz	r9,8(r4)
 	stb	r9,0(r3)
 3:	ld	r3,48(r1)	/* return dest pointer */
 	blr
@@ -133,11 +134,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 	cmpwi	cr1,r5,8
 	addi	r3,r3,32
 	sld	r9,r9,r10
-	ble	cr1,.Ldo_tail
+	ble	cr1,6f
 	ld	r0,8(r4)
 	srd	r7,r0,r11
 	or	r9,r7,r9
-	b	.Ldo_tail
+6:
+	bf	cr7*4+1,1f
+	rotldi	r9,r9,32
+	stw	r9,0(r3)
+	addi	r3,r3,4
+1:	bf	cr7*4+2,2f
+	rotldi	r9,r9,16
+	sth	r9,0(r3)
+	addi	r3,r3,2
+2:	bf	cr7*4+3,3f
+	rotldi	r9,r9,8
+	stb	r9,0(r3)
+3:	ld	r3,48(r1)	/* return dest pointer */
+	blr
 
 .Ldst_unaligned:
 	PPC_MTOCRF	0x01,r6		# put #bytes to 8B bdry into cr7
-- cgit v1.2.3
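The repaired tail tests the low bits of the remaining length (held in cr7) and issues a load exactly as wide as the store it feeds, where the old code did one speculative 8-byte load. A hedged C rendering of that control flow (copy_tail is a sketch of the fixed .Ldo_tail logic, not the assembly itself):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Copy the final n (< 8) bytes the way the fixed .Ldo_tail does:
 * each load is exactly as wide as the store it feeds, so no source
 * byte outside the requested region is ever read.
 */
static void copy_tail(unsigned char *dst, const unsigned char *src, size_t n)
{
	uint32_t w;
	uint16_t h;

	if (n & 4) {			/* cr7*4+1: a word remains */
		memcpy(&w, src, 4);
		memcpy(dst, &w, 4);
		src += 4;
		dst += 4;
	}
	if (n & 2) {			/* cr7*4+2: a halfword remains */
		memcpy(&h, src, 2);
		memcpy(dst, &h, 2);
		src += 2;
		dst += 2;
	}
	if (n & 1)			/* cr7*4+3: a single byte remains */
		*dst = *src;

	/*
	 * The buggy version instead loaded a full 8-byte doubleword here
	 * and stored only part of it, reading past the end of the source
	 * buffer, and past the end of the page when the buffer ended one.
	 */
}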
From f72b728bf100f276628e378e1fe6c6acd5d09401 Mon Sep 17 00:00:00 2001
From: Mark Nelson
Date: Wed, 25 Feb 2009 13:46:24 +0000
Subject: powerpc: Fix 64bit __copy_tofrom_user() regression

This fixes a regression introduced by commit
a4e22f02f5b6518c1484faea1f88d81802b9feac ("powerpc: Update 64bit
__copy_tofrom_user() using CPU_FTR_UNALIGNED_LD_STD").

The same bug that existed in the 64bit memcpy() also exists here so fix
it here too.

The fix is the same as that applied to memcpy() with the addition of
fixes for the exception handling code required for __copy_tofrom_user().

This stops us reading beyond the end of the source region we were told
to copy.

Signed-off-by: Mark Nelson
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/lib/copyuser_64.S | 38 +++++++++++++++++++++++++++++++-------
 1 file changed, 31 insertions(+), 7 deletions(-)

(limited to 'arch')

diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index 70693a5c12a..693b14a778f 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -62,18 +62,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 72:	std	r8,8(r3)
 	beq+	3f
 	addi	r3,r3,16
-23:	ld	r9,8(r4)
 .Ldo_tail:
 	bf	cr7*4+1,1f
-	rotldi	r9,r9,32
+23:	lwz	r9,8(r4)
+	addi	r4,r4,4
 73:	stw	r9,0(r3)
 	addi	r3,r3,4
 1:	bf	cr7*4+2,2f
-	rotldi	r9,r9,16
+44:	lhz	r9,8(r4)
+	addi	r4,r4,2
 74:	sth	r9,0(r3)
 	addi	r3,r3,2
 2:	bf	cr7*4+3,3f
-	rotldi	r9,r9,8
+45:	lbz	r9,8(r4)
 75:	stb	r9,0(r3)
 3:	li	r3,0
 	blr
@@ -141,11 +142,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 6:	cmpwi	cr1,r5,8
 	addi	r3,r3,32
 	sld	r9,r9,r10
-	ble	cr1,.Ldo_tail
+	ble	cr1,7f
 34:	ld	r0,8(r4)
 	srd	r7,r0,r11
 	or	r9,r7,r9
-	b	.Ldo_tail
+7:
+	bf	cr7*4+1,1f
+	rotldi	r9,r9,32
+94:	stw	r9,0(r3)
+	addi	r3,r3,4
+1:	bf	cr7*4+2,2f
+	rotldi	r9,r9,16
+95:	sth	r9,0(r3)
+	addi	r3,r3,2
+2:	bf	cr7*4+3,3f
+	rotldi	r9,r9,8
+96:	stb	r9,0(r3)
+3:	li	r3,0
+	blr
 
 .Ldst_unaligned:
 	PPC_MTOCRF	0x01,r6		/* put #bytes to 8B bdry into cr7 */
@@ -218,7 +232,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 121:
 132:	addi	r3,r3,8
-123:
 134:
 135:
 138:
 139:
@@ -226,6 +239,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 140:
 141:
 142:
+123:
+144:
+145:
 
 /*
  * here we have had a fault on a load and r3 points to the first
@@ -309,6 +325,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 187:
 188:
 189:
+194:
+195:
+196:
 
 1:	ld	r6,-24(r1)
 	ld	r5,-8(r1)
@@ -329,7 +348,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 	.llong	72b,172b
 	.llong	23b,123b
 	.llong	73b,173b
+	.llong	44b,144b
 	.llong	74b,174b
+	.llong	45b,145b
 	.llong	75b,175b
 	.llong	24b,124b
 	.llong	25b,125b
@@ -347,6 +368,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 	.llong	79b,179b
 	.llong	80b,180b
 	.llong	34b,134b
+	.llong	94b,194b
+	.llong	95b,195b
+	.llong	96b,196b
 	.llong	35b,135b
 	.llong	81b,181b
 	.llong	36b,136b
-- cgit v1.2.3
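Both powerpc fixes above guard the same failure mode: a source buffer that ends flush against an unmapped page, which is the layout CONFIG_DEBUG_PAGEALLOC creates to catch stray accesses. A userspace demonstration of why the over-read faults, assuming only POSIX mmap/mprotect (this models the condition; it is not kernel code):

#define _DEFAULT_SOURCE		/* for MAP_ANONYMOUS on glibc */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	/*
	 * Two adjacent pages; the second is made inaccessible, like a
	 * page that CONFIG_DEBUG_PAGEALLOC has unmapped.
	 */
	unsigned char *buf = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	if (mprotect(buf + page, page, PROT_NONE)) {
		perror("mprotect");
		return 1;
	}

	/* Source region: the last 3 bytes of the accessible page. */
	unsigned char *src = buf + page - 3;
	unsigned char dst[8];

	/* Safe: read exactly the 3 bytes we were asked to copy. */
	memcpy(dst, src, 3);
	puts("3-byte copy ok");

	/*
	 * Buggy pattern: loading a full 8-byte doubleword to store only
	 * 3 of its bytes reads 5 bytes into the protected page and
	 * faults. Uncomment to see the crash these patches fix:
	 *
	 *	uint64_t d;
	 *	memcpy(&d, src, 8);
	 */

	munmap(buf, 2 * page);
	return 0;
}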