diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-14 10:08:40 -0800 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-14 10:08:40 -0800 |
commit | d42b3a2906a10b732ea7d7f849d49be79d242ef0 (patch) | |
tree | 1f4f2387bf53f8015aa87eb9c05ba8316cb5ed50 /arch | |
parent | 18dd0bf22b6f0c1bd5e4e813a42245ed86ec57b6 (diff) | |
parent | e83af1f18c78c7b6aa720beecc927ecc8afd3647 (diff) | |
download | linux-3.10-d42b3a2906a10b732ea7d7f849d49be79d242ef0.tar.gz linux-3.10-d42b3a2906a10b732ea7d7f849d49be79d242ef0.tar.bz2 linux-3.10-d42b3a2906a10b732ea7d7f849d49be79d242ef0.zip |
Merge branch 'core-efi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 EFI update from Peter Anvin:
"EFI tree, from Matt Fleming. Most of the patches are the new efivarfs
filesystem by Matt Garrett & co. The balance is support for EFI
wallclock in the absence of a hardware-specific driver, and various
fixes and cleanups."
* 'core-efi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
efivarfs: Make efivarfs_fill_super() static
x86, efi: Check table header length in efi_bgrt_init()
efivarfs: Use query_variable_info() to limit kmalloc()
efivarfs: Fix return value of efivarfs_file_write()
efivarfs: Return a consistent error when efivarfs_get_inode() fails
efivarfs: Make 'datasize' unsigned long
efivarfs: Add unique magic number
efivarfs: Replace magic number with sizeof(attributes)
efivarfs: Return an error if we fail to read a variable
efi: Clarify GUID length calculations
efivarfs: Implement exclusive access for {get,set}_variable
efivarfs: efivarfs_fill_super() ensure we clean up correctly on error
efivarfs: efivarfs_fill_super() ensure we free our temporary name
efivarfs: efivarfs_fill_super() fix inode reference counts
efivarfs: efivarfs_create() ensure we drop our reference on inode on error
efivarfs: efivarfs_file_read ensure we free data in error paths
x86-64/efi: Use EFI to deal with platform wall clock (again)
x86/kernel: remove tboot 1:1 page table creation code
x86, efi: 1:1 pagetable mapping for virtual EFI calls
x86, mm: Include the entire kernel memory map in trampoline_pgd
...
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/include/asm/efi.h | 28 | ||||
-rw-r--r-- | arch/x86/kernel/tboot.c | 78 | ||||
-rw-r--r-- | arch/x86/mm/init_64.c | 9 | ||||
-rw-r--r-- | arch/x86/mm/ioremap.c | 105 | ||||
-rw-r--r-- | arch/x86/mm/pageattr.c | 10 | ||||
-rw-r--r-- | arch/x86/platform/efi/efi-bgrt.c | 2 | ||||
-rw-r--r-- | arch/x86/platform/efi/efi.c | 30 | ||||
-rw-r--r-- | arch/x86/platform/efi/efi_64.c | 15 | ||||
-rw-r--r-- | arch/x86/realmode/init.c | 17 |
9 files changed, 181 insertions, 113 deletions
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 6e8fdf5ad11..fd13815fe85 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -69,23 +69,37 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3, efi_call6((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \ (u64)(a4), (u64)(a5), (u64)(a6)) +extern unsigned long efi_call_virt_prelog(void); +extern void efi_call_virt_epilog(unsigned long); + +#define efi_callx(x, func, ...) \ + ({ \ + efi_status_t __status; \ + unsigned long __pgd; \ + \ + __pgd = efi_call_virt_prelog(); \ + __status = efi_call##x(func, __VA_ARGS__); \ + efi_call_virt_epilog(__pgd); \ + __status; \ + }) + #define efi_call_virt0(f) \ - efi_call0((void *)(efi.systab->runtime->f)) + efi_callx(0, (void *)(efi.systab->runtime->f)) #define efi_call_virt1(f, a1) \ - efi_call1((void *)(efi.systab->runtime->f), (u64)(a1)) + efi_callx(1, (void *)(efi.systab->runtime->f), (u64)(a1)) #define efi_call_virt2(f, a1, a2) \ - efi_call2((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2)) + efi_callx(2, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2)) #define efi_call_virt3(f, a1, a2, a3) \ - efi_call3((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ + efi_callx(3, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ (u64)(a3)) #define efi_call_virt4(f, a1, a2, a3, a4) \ - efi_call4((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ + efi_callx(4, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ (u64)(a3), (u64)(a4)) #define efi_call_virt5(f, a1, a2, a3, a4, a5) \ - efi_call5((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ + efi_callx(5, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ (u64)(a3), (u64)(a4), (u64)(a5)) #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ - efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ + efi_callx(6, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) 
extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index f84fe00fad4..d4f460f962e 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c @@ -103,71 +103,13 @@ void __init tboot_probe(void) pr_debug("tboot_size: 0x%x\n", tboot->tboot_size); } -static pgd_t *tboot_pg_dir; -static struct mm_struct tboot_mm = { - .mm_rb = RB_ROOT, - .pgd = swapper_pg_dir, - .mm_users = ATOMIC_INIT(2), - .mm_count = ATOMIC_INIT(1), - .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem), - .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock), - .mmlist = LIST_HEAD_INIT(init_mm.mmlist), -}; - static inline void switch_to_tboot_pt(void) { - write_cr3(virt_to_phys(tboot_pg_dir)); -} - -static int map_tboot_page(unsigned long vaddr, unsigned long pfn, - pgprot_t prot) -{ - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - - pgd = pgd_offset(&tboot_mm, vaddr); - pud = pud_alloc(&tboot_mm, pgd, vaddr); - if (!pud) - return -1; - pmd = pmd_alloc(&tboot_mm, pud, vaddr); - if (!pmd) - return -1; - pte = pte_alloc_map(&tboot_mm, NULL, pmd, vaddr); - if (!pte) - return -1; - set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot)); - pte_unmap(pte); - return 0; -} - -static int map_tboot_pages(unsigned long vaddr, unsigned long start_pfn, - unsigned long nr) -{ - /* Reuse the original kernel mapping */ - tboot_pg_dir = pgd_alloc(&tboot_mm); - if (!tboot_pg_dir) - return -1; - - for (; nr > 0; nr--, vaddr += PAGE_SIZE, start_pfn++) { - if (map_tboot_page(vaddr, start_pfn, PAGE_KERNEL_EXEC)) - return -1; - } - - return 0; -} - -static void tboot_create_trampoline(void) -{ - u32 map_base, map_size; - - /* Create identity map for tboot shutdown code. 
*/ - map_base = PFN_DOWN(tboot->tboot_base); - map_size = PFN_UP(tboot->tboot_size); - if (map_tboot_pages(map_base << PAGE_SHIFT, map_base, map_size)) - panic("tboot: Error mapping tboot pages (mfns) @ 0x%x, 0x%x\n", - map_base, map_size); +#ifdef CONFIG_X86_32 + load_cr3(initial_page_table); +#else + write_cr3(real_mode_header->trampoline_pgd); +#endif } #ifdef CONFIG_ACPI_SLEEP @@ -225,14 +167,6 @@ void tboot_shutdown(u32 shutdown_type) if (!tboot_enabled()) return; - /* - * if we're being called before the 1:1 mapping is set up then just - * return and let the normal shutdown happen; this should only be - * due to very early panic() - */ - if (!tboot_pg_dir) - return; - /* if this is S3 then set regions to MAC */ if (shutdown_type == TB_SHUTDOWN_S3) if (tboot_setup_sleep()) @@ -343,8 +277,6 @@ static __init int tboot_late_init(void) if (!tboot_enabled()) return 0; - tboot_create_trampoline(); - atomic_set(&ap_wfs_count, 0); register_hotcpu_notifier(&tboot_cpu_notifier); diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 2ead3c8a4c8..07519a12044 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -108,13 +108,13 @@ void sync_global_pgds(unsigned long start, unsigned long end) for (address = start; address <= end; address += PGDIR_SIZE) { const pgd_t *pgd_ref = pgd_offset_k(address); struct page *page; + pgd_t *pgd; if (pgd_none(*pgd_ref)) continue; spin_lock(&pgd_lock); list_for_each_entry(page, &pgd_list, lru) { - pgd_t *pgd; spinlock_t *pgt_lock; pgd = (pgd_t *)page_address(page) + pgd_index(address); @@ -130,6 +130,13 @@ void sync_global_pgds(unsigned long start, unsigned long end) spin_unlock(pgt_lock); } + + pgd = __va(real_mode_header->trampoline_pgd); + pgd += pgd_index(address); + + if (pgd_none(*pgd)) + set_pgd(pgd, *pgd_ref); + spin_unlock(&pgd_lock); } } diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 78fe3f1ac49..e190f7b5665 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -50,6 +50,107 @@ 
int ioremap_change_attr(unsigned long vaddr, unsigned long size, return err; } +#ifdef CONFIG_X86_64 +static void ident_pte_range(unsigned long paddr, unsigned long vaddr, + pmd_t *ppmd, pmd_t *vpmd, unsigned long end) +{ + pte_t *ppte = pte_offset_kernel(ppmd, paddr); + pte_t *vpte = pte_offset_kernel(vpmd, vaddr); + + do { + set_pte(ppte, *vpte); + } while (ppte++, vpte++, vaddr += PAGE_SIZE, vaddr != end); +} + +static int ident_pmd_range(unsigned long paddr, unsigned long vaddr, + pud_t *ppud, pud_t *vpud, unsigned long end) +{ + pmd_t *ppmd = pmd_offset(ppud, paddr); + pmd_t *vpmd = pmd_offset(vpud, vaddr); + unsigned long next; + + do { + next = pmd_addr_end(vaddr, end); + + if (!pmd_present(*ppmd)) { + pte_t *ppte = (pte_t *)get_zeroed_page(GFP_KERNEL); + if (!ppte) + return 1; + + set_pmd(ppmd, __pmd(_KERNPG_TABLE | __pa(ppte))); + } + + ident_pte_range(paddr, vaddr, ppmd, vpmd, next); + } while (ppmd++, vpmd++, vaddr = next, vaddr != end); + + return 0; +} + +static int ident_pud_range(unsigned long paddr, unsigned long vaddr, + pgd_t *ppgd, pgd_t *vpgd, unsigned long end) +{ + pud_t *ppud = pud_offset(ppgd, paddr); + pud_t *vpud = pud_offset(vpgd, vaddr); + unsigned long next; + + do { + next = pud_addr_end(vaddr, end); + + if (!pud_present(*ppud)) { + pmd_t *ppmd = (pmd_t *)get_zeroed_page(GFP_KERNEL); + if (!ppmd) + return 1; + + set_pud(ppud, __pud(_KERNPG_TABLE | __pa(ppmd))); + } + + if (ident_pmd_range(paddr, vaddr, ppud, vpud, next)) + return 1; + } while (ppud++, vpud++, vaddr = next, vaddr != end); + + return 0; +} + +static int insert_identity_mapping(resource_size_t paddr, unsigned long vaddr, + unsigned long size) +{ + unsigned long end = vaddr + size; + unsigned long next; + pgd_t *vpgd, *ppgd; + + /* Don't map over the guard hole. 
*/ + if (paddr >= 0x800000000000 || paddr + size > 0x800000000000) + return 1; + + ppgd = __va(real_mode_header->trampoline_pgd) + pgd_index(paddr); + + vpgd = pgd_offset_k(vaddr); + do { + next = pgd_addr_end(vaddr, end); + + if (!pgd_present(*ppgd)) { + pud_t *ppud = (pud_t *)get_zeroed_page(GFP_KERNEL); + if (!ppud) + return 1; + + set_pgd(ppgd, __pgd(_KERNPG_TABLE | __pa(ppud))); + } + + if (ident_pud_range(paddr, vaddr, ppgd, vpgd, next)) + return 1; + } while (ppgd++, vpgd++, vaddr = next, vaddr != end); + + return 0; +} +#else +static inline int insert_identity_mapping(resource_size_t paddr, + unsigned long vaddr, + unsigned long size) +{ + return 0; +} +#endif /* CONFIG_X86_64 */ + /* * Remap an arbitrary physical address space into the kernel virtual * address space. Needed when the kernel wants to access high addresses @@ -163,6 +264,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, ret_addr = (void __iomem *) (vaddr + offset); mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr); + if (insert_identity_mapping(phys_addr, vaddr, size)) + printk(KERN_WARNING "ioremap: unable to map 0x%llx in identity pagetable\n", + (unsigned long long)phys_addr); + /* * Check if the request spans more than any BAR in the iomem resource * tree. diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index a718e0d2350..931930a9616 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -919,11 +919,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, /* * On success we use clflush, when the CPU supports it to - * avoid the wbindv. If the CPU does not support it and in the - * error case we fall back to cpa_flush_all (which uses - * wbindv): + * avoid the wbindv. 
If the CPU does not support it, in the + * error case, and during early boot (for EFI) we fall back + * to cpa_flush_all (which uses wbinvd): */ - if (!ret && cpu_has_clflush) { + if (early_boot_irqs_disabled) + __cpa_flush_all((void *)(long)cache); + else if (!ret && cpu_has_clflush) { if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) { cpa_flush_array(addr, numpages, cache, cpa.flags, pages); diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c index f6a0c1b8e51..d9c1b95af17 100644 --- a/arch/x86/platform/efi/efi-bgrt.c +++ b/arch/x86/platform/efi/efi-bgrt.c @@ -39,6 +39,8 @@ void efi_bgrt_init(void) if (ACPI_FAILURE(status)) return; + if (bgrt_tab->header.length < sizeof(*bgrt_tab)) + return; if (bgrt_tab->version != 1) return; if (bgrt_tab->image_type != 0 || !bgrt_tab->image_address) diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index ad4439145f8..0a34d9e9c26 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -239,22 +239,7 @@ static efi_status_t __init phys_efi_set_virtual_address_map( return status; } -static efi_status_t __init phys_efi_get_time(efi_time_t *tm, - efi_time_cap_t *tc) -{ - unsigned long flags; - efi_status_t status; - - spin_lock_irqsave(&rtc_lock, flags); - efi_call_phys_prelog(); - status = efi_call_phys2(efi_phys.get_time, virt_to_phys(tm), - virt_to_phys(tc)); - efi_call_phys_epilog(); - spin_unlock_irqrestore(&rtc_lock, flags); - return status; -} - -int efi_set_rtc_mmss(unsigned long nowtime) +static int efi_set_rtc_mmss(unsigned long nowtime) { int real_seconds, real_minutes; efi_status_t status; @@ -283,7 +268,7 @@ int efi_set_rtc_mmss(unsigned long nowtime) return 0; } -unsigned long efi_get_time(void) +static unsigned long efi_get_time(void) { efi_status_t status; efi_time_t eft; @@ -639,18 +624,13 @@ static int __init efi_runtime_init(void) } /* * We will only need *early* access to the following - * two EFI runtime services before set_virtual_address_map 
+ * EFI runtime service before set_virtual_address_map * is invoked. */ - efi_phys.get_time = (efi_get_time_t *)runtime->get_time; efi_phys.set_virtual_address_map = (efi_set_virtual_address_map_t *) runtime->set_virtual_address_map; - /* - * Make efi_get_time can be called before entering - * virtual mode. - */ - efi.get_time = phys_efi_get_time; + early_iounmap(runtime, sizeof(efi_runtime_services_t)); return 0; @@ -736,12 +716,10 @@ void __init efi_init(void) efi_enabled = 0; return; } -#ifdef CONFIG_X86_32 if (efi_is_native()) { x86_platform.get_wallclock = efi_get_time; x86_platform.set_wallclock = efi_set_rtc_mmss; } -#endif #if EFI_DEBUG print_efi_memmap(); diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 95fd505dfeb..06c8b2e662a 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -58,6 +58,21 @@ static void __init early_code_mapping_set_exec(int executable) } } +unsigned long efi_call_virt_prelog(void) +{ + unsigned long saved; + + saved = read_cr3(); + write_cr3(real_mode_header->trampoline_pgd); + + return saved; +} + +void efi_call_virt_epilog(unsigned long saved) +{ + write_cr3(saved); +} + void __init efi_call_phys_prelog(void) { unsigned long vaddress; diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index cbca565af5b..8e6ab613785 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -78,8 +78,21 @@ void __init setup_real_mode(void) *trampoline_cr4_features = read_cr4(); trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd); - trampoline_pgd[0] = __pa(level3_ident_pgt) + _KERNPG_TABLE; - trampoline_pgd[511] = __pa(level3_kernel_pgt) + _KERNPG_TABLE; + + /* + * Create an identity mapping for all of physical memory. + */ + for (i = 0; i <= pgd_index(max_pfn << PAGE_SHIFT); i++) { + int index = pgd_index(PAGE_OFFSET) + i; + + trampoline_pgd[i] = (u64)pgd_val(swapper_pg_dir[index]); + } + + /* + * Copy the upper-half of the kernel pages tables. 
+ */ + for (i = pgd_index(PAGE_OFFSET); i < PTRS_PER_PGD; i++) + trampoline_pgd[i] = (u64)pgd_val(swapper_pg_dir[i]); #endif } |