author     Russell King <rmk+kernel@arm.linux.org.uk>       2013-07-04 11:40:32 +0100
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2013-08-11 18:35:20 -0700
commit     b85796fa1987e014a61c379e3cb753976e993a46 (patch)
tree       f80f85350b34fe91c2b67b31e86e497e4bf16b3f
parent     ddc8d5e58a818e543ac658a572b3155c8ea3e45d (diff)
ARM: move vector stubs
commit 19accfd373847ac3d10623c5d20f948846299741 upstream.

Move the machine vector stubs into the page above the vector page,
which we can prevent from being visible to userspace. Also move the
reset stub, and place the swi vector at a location where the 'ldr'
can get to it.

This hides pointers into the kernel which could give valuable
information to attackers, and reduces the number of exploitable
instructions at a fixed address.

Acked-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
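For orientation, a minimal standalone sketch (not kernel code) of the two-page layout this commit sets up at the ARM high-vectors address, and of why placing the vector_swi pointer as the first word of the stubs page keeps it within reach of the pc-relative 'ldr' in the vector table. The 0x8 slot offset and the +/-4095-byte literal-load range are standard ARM architecture details assumed here, not something shown in the patch.

#include <assert.h>

int main(void)
{
	unsigned long vectors = 0xffff0000UL;   /* first page: vector table + kuser helpers */
	unsigned long stubs   = 0xffff1000UL;   /* second page: vector stubs, kernel-only   */

	unsigned long swi_slot    = vectors + 0x8;   /* the 'ldr pc, ...' entry in the vector table */
	unsigned long swi_pointer = stubs;           /* '.word vector_swi', first word of the stubs */

	/* In ARM mode the pc reads as the instruction address + 8. */
	long displacement = (long)(swi_pointer - (swi_slot + 8));

	/* 0xff0 here, comfortably inside the 12-bit literal offset. */
	assert(displacement >= -4095 && displacement <= 4095);
	return 0;
}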
-rw-r--r--  arch/arm/Kconfig              |  3
-rw-r--r--  arch/arm/kernel/entry-armv.S  | 50
-rw-r--r--  arch/arm/kernel/traps.c       |  4
-rw-r--r--  arch/arm/mm/mmu.c             | 10
4 files changed, 37 insertions(+), 30 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 136f263ed47..d2e1f71cc19 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -213,7 +213,8 @@ config VECTORS_BASE
default DRAM_BASE if REMAP_VECTORS_TO_RAM
default 0x00000000
help
- The base address of exception vectors.
+ The base address of exception vectors. This must be two pages
+ in size.
config ARM_PATCH_PHYS_VIRT
bool "Patch physical to virtual translations at runtime" if EMBEDDED
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index f864f7e3a67..bc2180d3f6e 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -943,9 +943,9 @@ __kuser_helper_end:
/*
* Vector stubs.
*
- * This code is copied to 0xffff0200 so we can use branches in the
- * vectors, rather than ldr's. Note that this code must not
- * exceed 0x300 bytes.
+ * This code is copied to 0xffff1000 so we can use branches in the
+ * vectors, rather than ldr's. Note that this code must not exceed
+ * a page size.
*
* Common stub entry macro:
* Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
@@ -994,6 +994,15 @@ ENDPROC(vector_\name)
.globl __stubs_start
__stubs_start:
+ @ This must be the first word
+ .word vector_swi
+
+vector_rst:
+ ARM( swi SYS_ERROR0 )
+ THUMB( svc #0 )
+ THUMB( nop )
+ b vector_und
+
/*
* Interrupt dispatcher
*/
@@ -1088,6 +1097,16 @@ __stubs_start:
.align 5
/*=============================================================================
+ * Address exception handler
+ *-----------------------------------------------------------------------------
+ * These aren't too critical.
+ * (they're not supposed to happen, and won't happen in 32-bit data mode).
+ */
+
+vector_addrexcptn:
+ b vector_addrexcptn
+
+/*=============================================================================
* Undefined FIQs
*-----------------------------------------------------------------------------
* Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
@@ -1100,35 +1119,14 @@ __stubs_start:
vector_fiq:
subs pc, lr, #4
-/*=============================================================================
- * Address exception handler
- *-----------------------------------------------------------------------------
- * These aren't too critical.
- * (they're not supposed to happen, and won't happen in 32-bit data mode).
- */
-
-vector_addrexcptn:
- b vector_addrexcptn
-
-/*
- * We group all the following data together to optimise
- * for CPUs with separate I & D caches.
- */
- .align 5
-
-.LCvswi:
- .word vector_swi
-
.globl __stubs_end
__stubs_end:
- .equ stubs_offset, __vectors_start + 0x200 - __stubs_start
+ .equ stubs_offset, __vectors_start + 0x1000 - __stubs_start
.globl __vectors_start
__vectors_start:
- ARM( swi SYS_ERROR0 )
- THUMB( svc #0 )
- THUMB( nop )
+ W(b) vector_rst + stubs_offset
W(b) vector_und + stubs_offset
- W(ldr) pc, .LCvswi + stubs_offset
+ W(ldr) pc, __stubs_start + stubs_offset
W(b) vector_pabt + stubs_offset
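The stubs_offset fix-up in the hunk above can be spot-checked with a small standalone C sketch. The link-time addresses below are hypothetical placeholders; only the arithmetic mirrors the '.equ stubs_offset' definition and the copy destinations (0xffff0000 for the vectors, 0xffff1000 for the stubs).

#include <assert.h>

int main(void)
{
	/* Hypothetical link-time (image) addresses, only to exercise the arithmetic. */
	unsigned long vectors_lma = 0xc0100000UL;       /* stands in for __vectors_start */
	unsigned long stubs_lma   = 0xc0100040UL;       /* stands in for __stubs_start   */
	unsigned long vector_irq  = stubs_lma + 0x120;  /* some stub inside the blob     */

	unsigned long stubs_offset = vectors_lma + 0x1000 - stubs_lma;

	/* A branch assembled as "b vector_irq + stubs_offset" inside the vectors
	 * blob, once that blob is running at 0xffff0000, targets ...             */
	unsigned long runtime_target =
		0xffff0000UL + (vector_irq + stubs_offset - vectors_lma);

	/* ... exactly vector_irq's address inside the stubs copy at 0xffff1000. */
	assert(runtime_target == 0xffff1000UL + (vector_irq - stubs_lma));
	return 0;
}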
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 95bdab493f7..a58f64e4c71 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -836,7 +836,7 @@ void __init early_trap_init(void *vectors_base)
* are visible to the instruction stream.
*/
memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
- memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
+ memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
/*
@@ -851,6 +851,6 @@ void __init early_trap_init(void *vectors_base)
memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
sigreturn_codes, sizeof(sigreturn_codes));
- flush_icache_range(vectors, vectors + PAGE_SIZE);
+ flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
}
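As a quick cross-check of the copy destinations in early_trap_init() above, a throwaway sketch of where each blob lands inside the two freshly allocated pages. Only the page offsets come from the hunk; the blob sizes are made-up placeholders and the sigreturn-code copy is left out.

#include <stdio.h>

#define PAGE_SIZE 0x1000UL

int main(void)
{
	unsigned long vectors  = 0;      /* start of the 2-page buffer                       */
	unsigned long vec_sz   = 0x20;   /* placeholder: __vectors_end - __vectors_start     */
	unsigned long kuser_sz = 0x60;   /* placeholder: kuser helper size                   */
	unsigned long stubs_sz = 0x2c0;  /* placeholder: __stubs_end - __stubs_start         */

	printf("vector table : [%#lx, %#lx)\n", vectors, vectors + vec_sz);
	printf("kuser helpers: [%#lx, %#lx)\n",
	       vectors + PAGE_SIZE - kuser_sz, vectors + PAGE_SIZE);
	printf("vector stubs : [%#lx, %#lx)\n",
	       vectors + PAGE_SIZE, vectors + PAGE_SIZE + stubs_sz);
	printf("icache flush : [%#lx, %#lx)\n", vectors, vectors + PAGE_SIZE * 2);
	return 0;
}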
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4d409e6a552..08b37c421a5 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1175,7 +1175,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
/*
* Allocate the vector page early.
*/
- vectors = early_alloc(PAGE_SIZE);
+ vectors = early_alloc(PAGE_SIZE * 2);
early_trap_init(vectors);
@@ -1225,10 +1225,18 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
if (!vectors_high()) {
map.virtual = 0;
+ map.length = PAGE_SIZE * 2;
map.type = MT_LOW_VECTORS;
create_mapping(&map);
}
+ /* Now create a kernel read-only mapping */
+ map.pfn += 1;
+ map.virtual = 0xffff0000 + PAGE_SIZE;
+ map.length = PAGE_SIZE;
+ map.type = MT_LOW_VECTORS;
+ create_mapping(&map);
+
/*
* Ask the machine support to map in the statically mapped devices.
*/
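For reference, a rough standalone summary (not the kernel's struct map_desc) of the vector mappings devicemaps_init() ends up creating after this change. The pre-existing 0xffff0000/MT_HIGH_VECTORS mapping sits just above the hunk shown, and the permission notes reflect the commit's stated intent (stubs hidden from userspace) rather than anything visible in this diff.

#include <stdio.h>

struct vmap {
	unsigned long virt, len;
	const char *type, *note;
};

int main(void)
{
	const struct vmap maps[] = {
		{ 0xffff0000UL, 0x1000UL, "MT_HIGH_VECTORS",
		  "vector table + kuser helpers, readable by userspace" },
		{ 0xffff1000UL, 0x1000UL, "MT_LOW_VECTORS",
		  "vector stubs, kernel-only read-only" },
		{ 0x00000000UL, 0x2000UL, "MT_LOW_VECTORS",
		  "only when !vectors_high(), i.e. low vectors in use" },
	};

	for (unsigned int i = 0; i < sizeof(maps) / sizeof(maps[0]); i++)
		printf("%#010lx +%#06lx %-16s %s\n",
		       maps[i].virt, maps[i].len, maps[i].type, maps[i].note);
	return 0;
}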