author | Thomas Gleixner <tglx@linutronix.de> | 2008-02-15 17:29:12 +0100
---|---|---
committer | Thomas Gleixner <tglx@linutronix.de> | 2008-02-18 20:54:14 +0100
commit | 31eedd823c1bf3650c450346a0d0c39431034eb9 (patch) |
tree | af65f4c77b88f5d907aa54ff1e67b1bdf1488d67 |
parent | c31c7d4844ea4817692ae16bf70f9c96c05a50eb (diff) |
x86: zap invalid and unused pmds in early boot
The early boot code maps KERNEL_TEXT_SIZE (currently 40MB) starting
from __START_KERNEL_map. The kernel itself only needs _text to _end
mapped in the high alias. On relocatable kernels the ASM setup code
adjusts the high mappings created at compile time to match the
relocation. This creates invalid pmd entries for negative offsets:
0xffffffff80000000 -> pmd entry: ffffffffff2001e3
It points outside the physical address space and is marked present.
These invalid entries start at the virtual address __START_KERNEL_map
and extend up to the point where the first valid physical address (0x0)
is mapped.
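
For illustration only (not part of the patch), here is a minimal user-space sketch of how the head_64.S fixup, which simply adds the load offset to every present compile-time pmd in level2_kernel_pgt, can produce exactly such an entry. The physical addresses (built for 16MB, loaded at 2MB, i.e. a -14MB offset) are assumptions chosen to reproduce the entry quoted above.

```c
/*
 * Illustration only, not kernel code. Assumed scenario: kernel built
 * for physical 16MB but loaded at physical 2MB, i.e. a -14MB offset.
 */
#include <stdio.h>
#include <stdint.h>

#define PMD_FLAGS 0x1e3ULL  /* PRESENT|RW|ACCESSED|DIRTY|PSE|GLOBAL */

int main(void)
{
        uint64_t compile_phys = 0x1000000; /* assumed compile-time address */
        uint64_t load_phys    = 0x0200000; /* assumed actual load address  */
        uint64_t delta        = load_phys - compile_phys; /* wraps: -14MB  */

        /* compile-time pmd 0 maps __START_KERNEL_map to physical 0 */
        uint64_t pmd0 = 0x0ULL | PMD_FLAGS;

        /* the ASM fixup adds the offset to every present pmd, blindly */
        pmd0 += delta;

        /* prints ffffffffff2001e3: outside RAM, yet still marked present */
        printf("%016llx\n", (unsigned long long)pmd0);
        return 0;
}
```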
Zap the mappings before _text and after _end right away in early
boot. This also removes the invalid entries.
Furthermore, it simplifies the range check for high aliases.
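
To make that last point concrete, here is a hedged sketch of what the simplified check can look like once the over-mapped pmds are zapped: a high-alias address only has to be compared against the _text .. round_up(_end, 2MB) window. The symbol addresses below are placeholders, not values taken from the patch.

```c
/* Sketch of the simplified high-alias range check; placeholder addresses. */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define PMD_SIZE        (1ULL << 21)                    /* 2MB */
#define ROUND_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

/* placeholder values standing in for the _text and _end symbols */
static const uint64_t text_start = 0xffffffff80200000ULL;
static const uint64_t kernel_end = 0xffffffff8071b000ULL;

/* after cleanup_highmap() only this window of the high alias stays mapped */
static bool high_alias_mapped(uint64_t vaddr)
{
        return vaddr >= text_start && vaddr < ROUND_UP(kernel_end, PMD_SIZE);
}

int main(void)
{
        /* __START_KERNEL_map itself is no longer mapped ... */
        printf("%d\n", high_alias_mapped(0xffffffff80000000ULL)); /* 0 */
        /* ... while addresses inside the kernel image still are */
        printf("%d\n", high_alias_mapped(text_start + 0x1000));   /* 1 */
        return 0;
}
```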
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r-- | arch/x86/kernel/head64.c | 3
-rw-r--r-- | arch/x86/kernel/head_64.S | 7
-rw-r--r-- | arch/x86/mm/init_64.c | 27
-rw-r--r-- | include/asm-x86/pgtable_64.h | 1
4 files changed, 37 insertions, 1 deletions
```diff
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 24dbf56928d..ad2440832de 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -88,6 +88,9 @@ void __init x86_64_start_kernel(char * real_mode_data)
         /* Make NULL pointers segfault */
         zap_identity_mappings();
 
+        /* Cleanup the over mapped high alias */
+        cleanup_highmap();
+
         for (i = 0; i < IDT_ENTRIES; i++) {
 #ifdef CONFIG_EARLY_PRINTK
                 set_intr_gate(i, &early_idt_handlers[i]);
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 09b38d539b0..53e5820d605 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -107,8 +107,13 @@ startup_64:
         movq    %rdx, 0(%rbx, %rax, 8)
 ident_complete:
 
-        /* Fixup the kernel text+data virtual addresses
+        /*
+         * Fixup the kernel text+data virtual addresses. Note that
+         * we might write invalid pmds, when the kernel is relocated
+         * cleanup_highmap() fixes this up along with the mappings
+         * beyond _end.
          */
+
         leaq    level2_kernel_pgt(%rip), %rdi
         leaq    4096(%rdi), %r8
         /* See if it is a valid page table entry */
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a4a9cccdd4f..bb652f5a93f 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -171,6 +171,33 @@ set_pte_phys(unsigned long vaddr, unsigned long phys, pgprot_t prot)
         __flush_tlb_one(vaddr);
 }
 
+/*
+ * The head.S code sets up the kernel high mapping from:
+ * __START_KERNEL_map to __START_KERNEL_map + KERNEL_TEXT_SIZE
+ *
+ * phys_addr holds the negative offset to the kernel, which is added
+ * to the compile time generated pmds. This results in invalid pmds up
+ * to the point where we hit the physaddr 0 mapping.
+ *
+ * We limit the mappings to the region from _text to _end. _end is
+ * rounded up to the 2MB boundary. This catches the invalid pmds as
+ * well, as they are located before _text:
+ */
+void __init cleanup_highmap(void)
+{
+        unsigned long vaddr = __START_KERNEL_map;
+        unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1;
+        pmd_t *pmd = level2_kernel_pgt;
+        pmd_t *last_pmd = pmd + PTRS_PER_PMD;
+
+        for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
+                if (!pmd_present(*pmd))
+                        continue;
+                if (vaddr < (unsigned long) _text || vaddr > end)
+                        set_pmd(pmd, __pmd(0));
+        }
+}
+
 /* NOTE: this is meant to be run only at boot */
 void __init
 __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index bd4740a60f2..7fd5e0e2361 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -246,6 +246,7 @@ static inline int pud_large(pud_t pte)
 #define __swp_entry_to_pte(x)           ((pte_t) { .pte = (x).val })
 
 extern int kern_addr_valid(unsigned long addr);
+extern void cleanup_highmap(void);
 
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
         remap_pfn_range(vma, vaddr, pfn, size, prot)
```