From 80c517b0ff71a4c874fed9196fd990d2d9e911f3 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 8 Aug 2014 12:51:39 +0100 Subject: arm64: add helper functions to read I-cache attributes This adds helper functions and #defines to to read the line size and the number of sets from the level 1 instruction cache. Signed-off-by: Ard Biesheuvel Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/cachetype.h | 20 ++++++++++++++++++++ arch/arm64/kernel/cpuinfo.c | 14 ++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h index 7a2e0762cb40..4c631a0a3609 100644 --- a/arch/arm64/include/asm/cachetype.h +++ b/arch/arm64/include/asm/cachetype.h @@ -39,6 +39,26 @@ extern unsigned long __icache_flags; +#define CCSIDR_EL1_LINESIZE_MASK 0x7 +#define CCSIDR_EL1_LINESIZE(x) ((x) & CCSIDR_EL1_LINESIZE_MASK) + +#define CCSIDR_EL1_NUMSETS_SHIFT 13 +#define CCSIDR_EL1_NUMSETS_MASK (0x7fff << CCSIDR_EL1_NUMSETS_SHIFT) +#define CCSIDR_EL1_NUMSETS(x) \ + (((x) & CCSIDR_EL1_NUMSETS_MASK) >> CCSIDR_EL1_NUMSETS_SHIFT) + +extern u64 __attribute_const__ icache_get_ccsidr(void); + +static inline int icache_get_linesize(void) +{ + return 16 << CCSIDR_EL1_LINESIZE(icache_get_ccsidr()); +} + +static inline int icache_get_numsets(void) +{ + return 1 + CCSIDR_EL1_NUMSETS(icache_get_ccsidr()); +} + /* * Whilst the D-side always behaves as PIPT on AArch64, aliasing is * permitted in the I-cache. diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 177169623026..d8c5a59a5687 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -20,8 +20,10 @@ #include #include +#include #include #include +#include #include #include @@ -190,3 +192,15 @@ void __init cpuinfo_store_boot_cpu(void) boot_cpu_data = *info; } + +u64 __attribute_const__ icache_get_ccsidr(void) +{ + u64 ccsidr; + + WARN_ON(preemptible()); + + /* Select L1 I-cache and read its size ID register */ + asm("msr csselr_el1, %1; isb; mrs %0, ccsidr_el1" + : "=r"(ccsidr) : "r"(1L)); + return ccsidr; +} -- cgit v1.2.3 From 169c018de7b6d376f821f9fae0ab23dc5c7bb549 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 8 Aug 2014 12:51:40 +0100 Subject: arm64: don't flag non-aliasing VIPT I-caches as aliasing VIPT caches are non-aliasing if the index is derived from address bits that are always equal between VA and PA. Classifying these as aliasing results in unnecessary flushing which may hurt performance. Signed-off-by: Ard Biesheuvel Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/kernel/cpuinfo.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index d8c5a59a5687..504fdaa8367e 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -49,8 +49,18 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info) unsigned int cpu = smp_processor_id(); u32 l1ip = CTR_L1IP(info->reg_ctr); - if (l1ip != ICACHE_POLICY_PIPT) - set_bit(ICACHEF_ALIASING, &__icache_flags); + if (l1ip != ICACHE_POLICY_PIPT) { + /* + * VIPT caches are non-aliasing if the VA always equals the PA + * in all bit positions that are covered by the index. This is + * the case if the size of a way (# of sets * line size) does + * not exceed PAGE_SIZE. 
+ */ + u32 waysize = icache_get_numsets() * icache_get_linesize(); + + if (l1ip != ICACHE_POLICY_VIPT || waysize > PAGE_SIZE) + set_bit(ICACHEF_ALIASING, &__icache_flags); + } if (l1ip == ICACHE_POLICY_AIVIVT) set_bit(ICACHEF_AIVIVT, &__icache_flags); -- cgit v1.2.3 From 113954c6463d1d80a206e91627ae49711f8b47cd Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 30 Jul 2014 11:59:02 +0100 Subject: arm64: spin-table: handle unmapped cpu-release-addrs In certain cases the cpu-release-addr of a CPU may not fall in the linear mapping (e.g. when the kernel is loaded above this address due to the presence of other images in memory). This is problematic for the spin-table code as it assumes that it can trivially convert a cpu-release-addr to a valid VA in the linear map. This patch modifies the spin-table code to use a temporary cached mapping to write to a given cpu-release-addr, enabling us to support addresses regardless of whether they are covered by the linear mapping. Acked-by: Leif Lindholm Tested-by: Leif Lindholm Tested-by: Mark Salter Signed-off-by: Mark Rutland [ardb: added (__force void *) cast] Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/kernel/smp_spin_table.c | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c index 0347d38eea29..4f93c67e63de 100644 --- a/arch/arm64/kernel/smp_spin_table.c +++ b/arch/arm64/kernel/smp_spin_table.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -65,12 +66,21 @@ static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu) static int smp_spin_table_cpu_prepare(unsigned int cpu) { - void **release_addr; + __le64 __iomem *release_addr; if (!cpu_release_addr[cpu]) return -ENODEV; - release_addr = __va(cpu_release_addr[cpu]); + /* + * The cpu-release-addr may or may not be inside the linear mapping. + * As ioremap_cache will either give us a new mapping or reuse the + * existing linear mapping, we can use it to cover both cases. In + * either case the memory will be MT_NORMAL. + */ + release_addr = ioremap_cache(cpu_release_addr[cpu], + sizeof(*release_addr)); + if (!release_addr) + return -ENOMEM; /* * We write the release address as LE regardless of the native @@ -79,15 +89,17 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu) * boot-loader's endianess before jumping. This is mandated by * the boot protocol. */ - release_addr[0] = (void *) cpu_to_le64(__pa(secondary_holding_pen)); - - __flush_dcache_area(release_addr, sizeof(release_addr[0])); + writeq_relaxed(__pa(secondary_holding_pen), release_addr); + __flush_dcache_area((__force void *)release_addr, + sizeof(*release_addr)); /* * Send an event to wake up the secondary CPU. */ sev(); + iounmap(release_addr); + return 0; } -- cgit v1.2.3 From c16173fa568582113145daee70fc317b10bc51e0 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 30 Jul 2014 11:59:03 +0100 Subject: arm64/efi: efistub: cover entire static mem footprint in PE/COFF .text The static memory footprint of a kernel Image at boot is larger than the Image file itself. Things like .bss data and initial page tables are allocated statically but populated dynamically so their content is not contained in the Image file. However, if EFI (or GRUB) has loaded the Image at precisely the desired offset of base of DRAM + TEXT_OFFSET, the Image will be booted in place, and we have to make sure that the allocation done by the PE/COFF loader is large enough. 
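As a rough sketch of the sizes involved (using the linker symbols that appear in the EFI stub later in this series; illustrative only, not part of this patch):

        kernel_size    = _edata - _text;                /* bytes actually present in the Image file */
        kernel_memsize = kernel_size + (_end - _edata); /* adds .bss, initial page tables, etc.     */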
Fix this by growing the PE/COFF .text section to cover the entire static memory footprint. The part of the section that is not covered by the payload will be zero initialised by the PE/COFF loader. Acked-by: Mark Salter Acked-by: Mark Rutland Acked-by: Leif Lindholm Tested-by: Leif Lindholm Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/kernel/head.S | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 873069056229..0a6e4f924df8 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -151,7 +151,7 @@ optional_header: .short 0x20b // PE32+ format .byte 0x02 // MajorLinkerVersion .byte 0x14 // MinorLinkerVersion - .long _edata - stext // SizeOfCode + .long _end - stext // SizeOfCode .long 0 // SizeOfInitializedData .long 0 // SizeOfUninitializedData .long efi_stub_entry - efi_head // AddressOfEntryPoint @@ -169,7 +169,7 @@ extra_header_fields: .short 0 // MinorSubsystemVersion .long 0 // Win32VersionValue - .long _edata - efi_head // SizeOfImage + .long _end - efi_head // SizeOfImage // Everything before the kernel image is considered part of the header .long stext - efi_head // SizeOfHeaders @@ -216,7 +216,7 @@ section_table: .byte 0 .byte 0 .byte 0 // end of 0 padding of section name - .long _edata - stext // VirtualSize + .long _end - stext // VirtualSize .long stext - efi_head // VirtualAddress .long _edata - stext // SizeOfRawData .long stext - efi_head // PointerToRawData -- cgit v1.2.3 From 58015ec6b8e13c980c20d9fff3f986838c004348 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 30 Jul 2014 11:59:04 +0100 Subject: arm64/efi: efistub: don't abort if base of DRAM is occupied If we cannot relocate the kernel Image to its preferred offset of base of DRAM plus TEXT_OFFSET, instead relocate it to the lowest available 2 MB boundary plus TEXT_OFFSET. We may lose a bit of memory at the low end, but we can still proceed normally otherwise. 
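In outline, the fallback path does the following when the preferred address is taken (see the efi-stub.c hunk below for the full version):

        status = efi_low_alloc(sys_table, kernel_memsize + TEXT_OFFSET,
                               SZ_2M, reserve_addr);    /* lowest free 2 MB boundary */
        memcpy((void *)*reserve_addr + TEXT_OFFSET, (void *)*image_addr,
               kernel_size);                            /* copy the Image into place */
        *image_addr = *reserve_addr + TEXT_OFFSET;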
Acked-by: Mark Salter Acked-by: Mark Rutland Acked-by: Leif Lindholm Tested-by: Leif Lindholm Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/kernel/efi-stub.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c index 1317fef8dde9..d27dd982ff26 100644 --- a/arch/arm64/kernel/efi-stub.c +++ b/arch/arm64/kernel/efi-stub.c @@ -28,20 +28,16 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table, kernel_size = _edata - _text; if (*image_addr != (dram_base + TEXT_OFFSET)) { kernel_memsize = kernel_size + (_end - _edata); - status = efi_relocate_kernel(sys_table, image_addr, - kernel_size, kernel_memsize, - dram_base + TEXT_OFFSET, - PAGE_SIZE); + status = efi_low_alloc(sys_table, kernel_memsize + TEXT_OFFSET, + SZ_2M, reserve_addr); if (status != EFI_SUCCESS) { pr_efi_err(sys_table, "Failed to relocate kernel\n"); return status; } - if (*image_addr != (dram_base + TEXT_OFFSET)) { - pr_efi_err(sys_table, "Failed to alloc kernel memory\n"); - efi_free(sys_table, kernel_memsize, *image_addr); - return EFI_LOAD_ERROR; - } - *image_size = kernel_memsize; + memcpy((void *)*reserve_addr + TEXT_OFFSET, (void *)*image_addr, + kernel_size); + *image_addr = *reserve_addr + TEXT_OFFSET; + *reserve_size = kernel_memsize + TEXT_OFFSET; } -- cgit v1.2.3 From 5e051531447259e5df95c44bccb69979537c19e4 Mon Sep 17 00:00:00 2001 From: Arun Chandran Date: Mon, 18 Aug 2014 10:06:58 +0100 Subject: arm64: convert part of soft_restart() to assembly The current soft_restart() and setup_restart implementations incorrectly assume that compiler will not spill/fill values to/from stack. However this assumption seems to be wrong, revealed by the disassembly of the currently existing code (v3.16) built with Linaro GCC 4.9-2014.05. ffffffc000085224 : ffffffc000085224: a9be7bfd stp x29, x30, [sp,#-32]! ffffffc000085228: 910003fd mov x29, sp ffffffc00008522c: f9000fa0 str x0, [x29,#24] ffffffc000085230: 94003d21 bl ffffffc0000946b4 ffffffc000085234: 94003b33 bl ffffffc000093f00 ffffffc000085238: 94003dfa bl ffffffc000094a20 ffffffc00008523c: 94003b31 bl ffffffc000093f00 ffffffc000085240: b0003321 adrp x1, ffffffc0006ea000 ffffffc000085244: f9400fa0 ldr x0, [x29,#24] ----> spilled addr ffffffc000085248: f942fc22 ldr x2, [x1,#1528] ----> global memstart_addr ffffffc00008524c: f0000061 adrp x1, ffffffc000094000 <__inval_cache_range+0x40> ffffffc000085250: 91290021 add x1, x1, #0xa40 ffffffc000085254: 8b010041 add x1, x2, x1 ffffffc000085258: d2c00802 mov x2, #0x4000000000 // #274877906944 ffffffc00008525c: 8b020021 add x1, x1, x2 ffffffc000085260: d63f0020 blr x1 ... Here the compiler generates memory accesses after the cache is disabled, loading stale values for the spilled value and global variable. As we cannot control when the compiler will access memory we must rewrite the functions in assembly to stash values we need in registers prior to disabling the cache, avoiding the use of memory. 
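With the cache maintenance and the final jump moved into assembly, the C side reduces to the following (taken from the process.c hunk below):

        void soft_restart(unsigned long addr)
        {
                setup_mm_for_reboot();
                cpu_soft_restart(virt_to_phys(cpu_reset), addr);
                /* Should never get here */
                BUG();
        }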
Reviewed-by: Mark Rutland Signed-off-by: Arun Chandran Signed-off-by: Will Deacon --- arch/arm64/include/asm/proc-fns.h | 2 ++ arch/arm64/kernel/process.c | 30 ++---------------------------- arch/arm64/mm/proc.S | 15 +++++++++++++++ 3 files changed, 19 insertions(+), 28 deletions(-) diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index 0c657bb54597..9a8fd84f8fb2 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h @@ -32,6 +32,8 @@ extern void cpu_cache_off(void); extern void cpu_do_idle(void); extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); +void cpu_soft_restart(phys_addr_t cpu_reset, + unsigned long addr) __attribute__((noreturn)); extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr); extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 1309d64aa926..bf669228a59f 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -57,36 +57,10 @@ unsigned long __stack_chk_guard __read_mostly; EXPORT_SYMBOL(__stack_chk_guard); #endif -static void setup_restart(void) -{ - /* - * Tell the mm system that we are going to reboot - - * we may need it to insert some 1:1 mappings so that - * soft boot works. - */ - setup_mm_for_reboot(); - - /* Clean and invalidate caches */ - flush_cache_all(); - - /* Turn D-cache off */ - cpu_cache_off(); - - /* Push out any further dirty data, and ensure cache is empty */ - flush_cache_all(); -} - void soft_restart(unsigned long addr) { - typedef void (*phys_reset_t)(unsigned long); - phys_reset_t phys_reset; - - setup_restart(); - - /* Switch to the identity mapping */ - phys_reset = (phys_reset_t)virt_to_phys(cpu_reset); - phys_reset(addr); - + setup_mm_for_reboot(); + cpu_soft_restart(virt_to_phys(cpu_reset), addr); /* Should never get here */ BUG(); } diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 7736779c9809..4e778b13291b 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -76,6 +76,21 @@ ENTRY(cpu_reset) ret x0 ENDPROC(cpu_reset) +ENTRY(cpu_soft_restart) + /* Save address of cpu_reset() and reset address */ + mov x19, x0 + mov x20, x1 + + /* Turn D-cache off */ + bl cpu_cache_off + + /* Push out all dirty data, and ensure cache is empty */ + bl flush_cache_all + + mov x0, x20 + ret x19 +ENDPROC(cpu_soft_restart) + /* * cpu_do_idle() * -- cgit v1.2.3 From b6d4f2800b7bad654caf00654f4bff21594ef838 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Tue, 19 Aug 2014 20:41:42 +0100 Subject: arm64: Introduce {set,clear}_pte_bit It's useful to be able to change individual bits in ptes at times. Introduce functions for this and update existing pte_mk* functions to use these primatives. 
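As an example of the intended usage, the DEBUG_SET_MODULE_RONX patch later in this series applies a clear mask and a set mask to a pte with the new primitives:

        pte = clear_pte_bit(pte, cdata->clear_mask);
        pte = set_pte_bit(pte, cdata->set_mask);
        set_pte(ptep, pte);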
Acked-by: Will Deacon Signed-off-by: Laura Abbott [will: added missing inline keyword for new header functions] Signed-off-by: Will Deacon --- arch/arm64/include/asm/pgtable.h | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index ffe1ba0506d1..d58e40cde88e 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -149,46 +149,51 @@ extern struct page *empty_zero_page; #define pte_valid_not_user(pte) \ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID) -static inline pte_t pte_wrprotect(pte_t pte) +static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot) { - pte_val(pte) &= ~PTE_WRITE; + pte_val(pte) &= ~pgprot_val(prot); return pte; } -static inline pte_t pte_mkwrite(pte_t pte) +static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot) { - pte_val(pte) |= PTE_WRITE; + pte_val(pte) |= pgprot_val(prot); return pte; } +static inline pte_t pte_wrprotect(pte_t pte) +{ + return clear_pte_bit(pte, __pgprot(PTE_WRITE)); +} + +static inline pte_t pte_mkwrite(pte_t pte) +{ + return set_pte_bit(pte, __pgprot(PTE_WRITE)); +} + static inline pte_t pte_mkclean(pte_t pte) { - pte_val(pte) &= ~PTE_DIRTY; - return pte; + return clear_pte_bit(pte, __pgprot(PTE_DIRTY)); } static inline pte_t pte_mkdirty(pte_t pte) { - pte_val(pte) |= PTE_DIRTY; - return pte; + return set_pte_bit(pte, __pgprot(PTE_DIRTY)); } static inline pte_t pte_mkold(pte_t pte) { - pte_val(pte) &= ~PTE_AF; - return pte; + return clear_pte_bit(pte, __pgprot(PTE_AF)); } static inline pte_t pte_mkyoung(pte_t pte) { - pte_val(pte) |= PTE_AF; - return pte; + return set_pte_bit(pte, __pgprot(PTE_AF)); } static inline pte_t pte_mkspecial(pte_t pte) { - pte_val(pte) |= PTE_SPECIAL; - return pte; + return set_pte_bit(pte, __pgprot(PTE_SPECIAL)); } static inline void set_pte(pte_t *ptep, pte_t pte) -- cgit v1.2.3 From 11d91a770f1fff44dafdf88d6089a3451f99c9b6 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Tue, 19 Aug 2014 20:41:43 +0100 Subject: arm64: Add CONFIG_DEBUG_SET_MODULE_RONX support In a similar fashion to other architecture, add the infrastructure and Kconfig to enable DEBUG_SET_MODULE_RONX support. When enabled, module ranges will be marked read-only/no-execute as appropriate. Signed-off-by: Laura Abbott [will: fixed off-by-one in module end check] Signed-off-by: Will Deacon --- arch/arm64/Kconfig.debug | 11 +++++ arch/arm64/include/asm/cacheflush.h | 4 ++ arch/arm64/mm/Makefile | 2 +- arch/arm64/mm/pageattr.c | 96 +++++++++++++++++++++++++++++++++++++ 4 files changed, 112 insertions(+), 1 deletion(-) create mode 100644 arch/arm64/mm/pageattr.c diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug index 4ee8e90b7a45..0a12933e50ed 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug @@ -43,4 +43,15 @@ config ARM64_RANDOMIZE_TEXT_OFFSET of TEXT_OFFSET and platforms must not require a specific value. +config DEBUG_SET_MODULE_RONX + bool "Set loadable kernel module data as NX and text as RO" + depends on MODULES + help + This option helps catch unintended modifications to loadable + kernel module's text and read-only data. It also prevents execution + of module data. Such protection may interfere with run-time code + patching and dynamic kernel tracing - and they might also protect + against certain classes of kernel exploits. + If in doubt, say "N". 
+ endmenu diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index f2defe1c380c..689b6379188c 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -148,4 +148,8 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end) { } +int set_memory_ro(unsigned long addr, int numpages); +int set_memory_rw(unsigned long addr, int numpages); +int set_memory_x(unsigned long addr, int numpages); +int set_memory_nx(unsigned long addr, int numpages); #endif diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile index 3ecb56c624d3..c56179ed2c09 100644 --- a/arch/arm64/mm/Makefile +++ b/arch/arm64/mm/Makefile @@ -1,5 +1,5 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ cache.o copypage.o flush.o \ ioremap.o mmap.o pgd.o mmu.o \ - context.o proc.o + context.o proc.o pageattr.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c new file mode 100644 index 000000000000..75e744e4cec5 --- /dev/null +++ b/arch/arm64/mm/pageattr.c @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include + +#include +#include + +struct page_change_data { + pgprot_t set_mask; + pgprot_t clear_mask; +}; + +static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr, + void *data) +{ + struct page_change_data *cdata = data; + pte_t pte = *ptep; + + pte = clear_pte_bit(pte, cdata->clear_mask); + pte = set_pte_bit(pte, cdata->set_mask); + + set_pte(ptep, pte); + return 0; +} + +static int change_memory_common(unsigned long addr, int numpages, + pgprot_t set_mask, pgprot_t clear_mask) +{ + unsigned long start = addr; + unsigned long size = PAGE_SIZE*numpages; + unsigned long end = start + size; + int ret; + struct page_change_data data; + + if (!IS_ALIGNED(addr, PAGE_SIZE)) { + addr &= PAGE_MASK; + WARN_ON_ONCE(1); + } + + if (!is_module_address(start) || !is_module_address(end - 1)) + return -EINVAL; + + data.set_mask = set_mask; + data.clear_mask = clear_mask; + + ret = apply_to_page_range(&init_mm, start, size, change_page_range, + &data); + + flush_tlb_kernel_range(start, end); + return ret; +} + +int set_memory_ro(unsigned long addr, int numpages) +{ + return change_memory_common(addr, numpages, + __pgprot(PTE_RDONLY), + __pgprot(PTE_WRITE)); +} +EXPORT_SYMBOL_GPL(set_memory_ro); + +int set_memory_rw(unsigned long addr, int numpages) +{ + return change_memory_common(addr, numpages, + __pgprot(PTE_WRITE), + __pgprot(PTE_RDONLY)); +} +EXPORT_SYMBOL_GPL(set_memory_rw); + +int set_memory_nx(unsigned long addr, int numpages) +{ + return change_memory_common(addr, numpages, + __pgprot(PTE_PXN), + __pgprot(0)); +} +EXPORT_SYMBOL_GPL(set_memory_nx); + +int set_memory_x(unsigned long addr, int numpages) +{ + return change_memory_common(addr, numpages, + __pgprot(0), + __pgprot(PTE_PXN)); +} +EXPORT_SYMBOL_GPL(set_memory_x); -- cgit v1.2.3 From 3337a10e0d0cbc9225cefc23aa7a604b698367ed Mon Sep 17 00:00:00 2001 From: Behan Webster 
Date: Wed, 27 Aug 2014 05:29:29 +0100 Subject: arm64: LLVMLinux: Add current_stack_pointer() for arm64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Define a global named register for current_stack_pointer. The use of this new variable guarantees that both gcc and clang can access this register in C code. Signed-off-by: Behan Webster Reviewed-by: Jan-Simon Möller Reviewed-by: Mark Charlebois Reviewed-by: Olof Johansson Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/thread_info.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 45108d802f5e..356e03774df4 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -68,6 +68,11 @@ struct thread_info { #define init_thread_info (init_thread_union.thread_info) #define init_stack (init_thread_union.stack) +/* + * how to get the current stack pointer from C + */ +register unsigned long current_stack_pointer asm ("sp"); + /* * how to get the thread information struct from C */ -- cgit v1.2.3 From bb28cec4ea2f5151c08e061c6de825a8c853bbd6 Mon Sep 17 00:00:00 2001 From: Behan Webster Date: Wed, 27 Aug 2014 05:29:30 +0100 Subject: arm64: LLVMLinux: Use current_stack_pointer in save_stack_trace_tsk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the global current_stack_pointer to get the value of the stack pointer. This change supports being able to compile the kernel with both gcc and clang. Signed-off-by: Behan Webster Signed-off-by: Mark Charlebois Reviewed-by: Jan-Simon Möller Reviewed-by: Olof Johansson Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/kernel/stacktrace.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 55437ba1f5a4..407991bf79f5 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -111,10 +111,9 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) frame.sp = thread_saved_sp(tsk); frame.pc = thread_saved_pc(tsk); } else { - register unsigned long current_sp asm("sp"); data.no_sched_functions = 0; frame.fp = (unsigned long)__builtin_frame_address(0); - frame.sp = current_sp; + frame.sp = current_stack_pointer; frame.pc = (unsigned long)save_stack_trace_tsk; } -- cgit v1.2.3 From 786248705ecf5290f26534e8eef62ba6dd63b806 Mon Sep 17 00:00:00 2001 From: Behan Webster Date: Wed, 27 Aug 2014 05:29:31 +0100 Subject: arm64: LLVMLinux: Calculate current_thread_info from current_stack_pointer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the global current_stack_pointer to get the value of the stack pointer. This change supports being able to compile the kernel with both gcc and clang. 
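The resulting helper (from the thread_info.h hunk below) masks the stack pointer down to the base of the current stack, where struct thread_info lives:

        static inline struct thread_info *current_thread_info(void)
        {
                return (struct thread_info *)
                        (current_stack_pointer & ~(THREAD_SIZE - 1));
        }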
Signed-off-by: Behan Webster Signed-off-by: Mark Charlebois Reviewed-by: Jan-Simon Möller Reviewed-by: Olof Johansson Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/thread_info.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 356e03774df4..459bf8e53208 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -80,8 +80,8 @@ static inline struct thread_info *current_thread_info(void) __attribute_const__; static inline struct thread_info *current_thread_info(void) { - register unsigned long sp asm ("sp"); - return (struct thread_info *)(sp & ~(THREAD_SIZE - 1)); + return (struct thread_info *) + (current_stack_pointer & ~(THREAD_SIZE - 1)); } #define thread_saved_pc(tsk) \ -- cgit v1.2.3 From 2128df143d840a20e12818290eb6e40b95cc4ac0 Mon Sep 17 00:00:00 2001 From: Behan Webster Date: Wed, 27 Aug 2014 05:29:32 +0100 Subject: arm64: LLVMLinux: Use current_stack_pointer in kernel/traps.c Use the global current_stack_pointer to get the value of the stack pointer. This change supports being able to compile the kernel with both gcc and clang. Signed-off-by: Behan Webster Signed-off-by: Mark Charlebois Reviewed-by: Olof Johansson Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/kernel/traps.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 02cd3f023e9a..de1b085e7963 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -132,7 +132,6 @@ static void dump_instr(const char *lvl, struct pt_regs *regs) static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) { struct stackframe frame; - const register unsigned long current_sp asm ("sp"); pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); @@ -145,7 +144,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) frame.pc = regs->pc; } else if (tsk == current) { frame.fp = (unsigned long)__builtin_frame_address(0); - frame.sp = current_sp; + frame.sp = current_stack_pointer; frame.pc = (unsigned long)dump_backtrace; } else { /* -- cgit v1.2.3 From 34ccf8f455f1ae7761810a74308f82daca67ced1 Mon Sep 17 00:00:00 2001 From: Mark Charlebois Date: Wed, 27 Aug 2014 05:29:33 +0100 Subject: arm64: LLVMLinux: Use global stack register variable for aarch64 To support both Clang and GCC, use the global stack register variable vs a local register variable. Author: Mark Charlebois Signed-off-by: Mark Charlebois Signed-off-by: Behan Webster Signed-off-by: Will Deacon --- arch/arm64/include/asm/percpu.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 453a179469a3..5279e5733386 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h @@ -26,13 +26,13 @@ static inline void set_my_cpu_offset(unsigned long off) static inline unsigned long __my_cpu_offset(void) { unsigned long off; - register unsigned long *sp asm ("sp"); /* * We want to allow caching the value, so avoid using volatile and * instead use a fake stack read to hazard against barrier(). 
*/ - asm("mrs %0, tpidr_el1" : "=r" (off) : "Q" (*sp)); + asm("mrs %0, tpidr_el1" : "=r" (off) : + "Q" (*(const unsigned long *)current_stack_pointer)); return off; } -- cgit v1.2.3 From a4ceab1adbe960c781e9e2f659d7f7840eefd786 Mon Sep 17 00:00:00 2001 From: Behan Webster Date: Wed, 27 Aug 2014 05:29:34 +0100 Subject: arm64: LLVMLinux: Use global stack pointer in return_address() The global register current_stack_pointer holds the current stack pointer. This change supports being able to compile the kernel with both gcc and clang. Author: Mark Charlebois Signed-off-by: Mark Charlebois Signed-off-by: Behan Webster Signed-off-by: Will Deacon --- arch/arm64/kernel/return_address.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c index 89102a6ffad5..6c4fd2810ecb 100644 --- a/arch/arm64/kernel/return_address.c +++ b/arch/arm64/kernel/return_address.c @@ -36,13 +36,12 @@ void *return_address(unsigned int level) { struct return_address_data data; struct stackframe frame; - register unsigned long current_sp asm ("sp"); data.level = level + 2; data.addr = NULL; frame.fp = (unsigned long)__builtin_frame_address(0); - frame.sp = current_sp; + frame.sp = current_stack_pointer; frame.pc = (unsigned long)return_address; /* dummy */ walk_stackframe(&frame, save_return_addr, &data); -- cgit v1.2.3 From 617d2fbc45233bed182accd3507d0df4d213492c Mon Sep 17 00:00:00 2001 From: Zi Shen Lim Date: Wed, 27 Aug 2014 05:15:17 +0100 Subject: arm64: introduce aarch64_insn_gen_comp_branch_imm() Introduce function to generate compare & branch (immediate) instructions. Signed-off-by: Zi Shen Lim Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/insn.h | 57 ++++++++++++++++++++++++++++ arch/arm64/kernel/insn.c | 88 ++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 140 insertions(+), 5 deletions(-) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index dc1f73b13e74..a98c4954b380 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -2,6 +2,8 @@ * Copyright (C) 2013 Huawei Ltd. * Author: Jiang Liu * + * Copyright (C) 2014 Zi Shen Lim + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
@@ -67,9 +69,58 @@ enum aarch64_insn_imm_type { AARCH64_INSN_IMM_MAX }; +enum aarch64_insn_register_type { + AARCH64_INSN_REGTYPE_RT, +}; + +enum aarch64_insn_register { + AARCH64_INSN_REG_0 = 0, + AARCH64_INSN_REG_1 = 1, + AARCH64_INSN_REG_2 = 2, + AARCH64_INSN_REG_3 = 3, + AARCH64_INSN_REG_4 = 4, + AARCH64_INSN_REG_5 = 5, + AARCH64_INSN_REG_6 = 6, + AARCH64_INSN_REG_7 = 7, + AARCH64_INSN_REG_8 = 8, + AARCH64_INSN_REG_9 = 9, + AARCH64_INSN_REG_10 = 10, + AARCH64_INSN_REG_11 = 11, + AARCH64_INSN_REG_12 = 12, + AARCH64_INSN_REG_13 = 13, + AARCH64_INSN_REG_14 = 14, + AARCH64_INSN_REG_15 = 15, + AARCH64_INSN_REG_16 = 16, + AARCH64_INSN_REG_17 = 17, + AARCH64_INSN_REG_18 = 18, + AARCH64_INSN_REG_19 = 19, + AARCH64_INSN_REG_20 = 20, + AARCH64_INSN_REG_21 = 21, + AARCH64_INSN_REG_22 = 22, + AARCH64_INSN_REG_23 = 23, + AARCH64_INSN_REG_24 = 24, + AARCH64_INSN_REG_25 = 25, + AARCH64_INSN_REG_26 = 26, + AARCH64_INSN_REG_27 = 27, + AARCH64_INSN_REG_28 = 28, + AARCH64_INSN_REG_29 = 29, + AARCH64_INSN_REG_FP = 29, /* Frame pointer */ + AARCH64_INSN_REG_30 = 30, + AARCH64_INSN_REG_LR = 30, /* Link register */ + AARCH64_INSN_REG_ZR = 31, /* Zero: as source register */ + AARCH64_INSN_REG_SP = 31 /* Stack pointer: as load/store base reg */ +}; + +enum aarch64_insn_variant { + AARCH64_INSN_VARIANT_32BIT, + AARCH64_INSN_VARIANT_64BIT +}; + enum aarch64_insn_branch_type { AARCH64_INSN_BRANCH_NOLINK, AARCH64_INSN_BRANCH_LINK, + AARCH64_INSN_BRANCH_COMP_ZERO, + AARCH64_INSN_BRANCH_COMP_NONZERO, }; #define __AARCH64_INSN_FUNCS(abbr, mask, val) \ @@ -80,6 +131,8 @@ static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \ __AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) __AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) +__AARCH64_INSN_FUNCS(cbz, 0xFE000000, 0x34000000) +__AARCH64_INSN_FUNCS(cbnz, 0xFE000000, 0x35000000) __AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001) __AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002) __AARCH64_INSN_FUNCS(smc, 0xFFE0001F, 0xD4000003) @@ -97,6 +150,10 @@ u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, u32 insn, u64 imm); u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, enum aarch64_insn_branch_type type); +u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr, + enum aarch64_insn_register reg, + enum aarch64_insn_variant variant, + enum aarch64_insn_branch_type type); u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op); u32 aarch64_insn_gen_nop(void); diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 92f36835486b..d9f7827c5058 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -2,6 +2,8 @@ * Copyright (C) 2013 Huawei Ltd. * Author: Jiang Liu * + * Copyright (C) 2014 Zi Shen Lim + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
@@ -23,6 +25,8 @@ #include #include +#define AARCH64_INSN_SF_BIT BIT(31) + static int aarch64_insn_encoding_class[] = { AARCH64_INSN_CLS_UNKNOWN, AARCH64_INSN_CLS_UNKNOWN, @@ -264,10 +268,36 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, return insn; } -u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, - enum aarch64_insn_branch_type type) +static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type, + u32 insn, + enum aarch64_insn_register reg) +{ + int shift; + + if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) { + pr_err("%s: unknown register encoding %d\n", __func__, reg); + return 0; + } + + switch (type) { + case AARCH64_INSN_REGTYPE_RT: + shift = 0; + break; + default: + pr_err("%s: unknown register type encoding %d\n", __func__, + type); + return 0; + } + + insn &= ~(GENMASK(4, 0) << shift); + insn |= reg << shift; + + return insn; +} + +static inline long branch_imm_common(unsigned long pc, unsigned long addr, + long range) { - u32 insn; long offset; /* @@ -276,13 +306,24 @@ u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, */ BUG_ON((pc & 0x3) || (addr & 0x3)); + offset = ((long)addr - (long)pc); + BUG_ON(offset < -range || offset >= range); + + return offset; +} + +u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, + enum aarch64_insn_branch_type type) +{ + u32 insn; + long offset; + /* * B/BL support [-128M, 128M) offset * ARM64 virtual address arrangement guarantees all kernel and module * texts are within +/-128M. */ - offset = ((long)addr - (long)pc); - BUG_ON(offset < -SZ_128M || offset >= SZ_128M); + offset = branch_imm_common(pc, addr, SZ_128M); if (type == AARCH64_INSN_BRANCH_LINK) insn = aarch64_insn_get_bl_value(); @@ -293,6 +334,43 @@ u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, offset >> 2); } +u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr, + enum aarch64_insn_register reg, + enum aarch64_insn_variant variant, + enum aarch64_insn_branch_type type) +{ + u32 insn; + long offset; + + offset = branch_imm_common(pc, addr, SZ_1M); + + switch (type) { + case AARCH64_INSN_BRANCH_COMP_ZERO: + insn = aarch64_insn_get_cbz_value(); + break; + case AARCH64_INSN_BRANCH_COMP_NONZERO: + insn = aarch64_insn_get_cbnz_value(); + break; + default: + BUG_ON(1); + } + + switch (variant) { + case AARCH64_INSN_VARIANT_32BIT: + break; + case AARCH64_INSN_VARIANT_64BIT: + insn |= AARCH64_INSN_SF_BIT; + break; + default: + BUG_ON(1); + } + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg); + + return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn, + offset >> 2); +} + u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op) { return aarch64_insn_get_hint_value() | op; -- cgit v1.2.3 From c0cafbae20d2878883ec3c06d6ea30ff38a6bf92 Mon Sep 17 00:00:00 2001 From: Zi Shen Lim Date: Wed, 27 Aug 2014 05:15:18 +0100 Subject: arm64: introduce aarch64_insn_gen_branch_reg() Introduce function to generate unconditional branch (register) instructions. 
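For illustration, a hypothetical caller (not part of this patch) could encode a "blr x9" like so, using the register and branch-type enums introduced by this series:

        u32 insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_9,
                                               AARCH64_INSN_BRANCH_LINK);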
Signed-off-by: Zi Shen Lim Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/insn.h | 7 +++++++ arch/arm64/kernel/insn.c | 35 +++++++++++++++++++++++++++++++++-- 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index a98c4954b380..5080962058b0 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -71,6 +71,7 @@ enum aarch64_insn_imm_type { enum aarch64_insn_register_type { AARCH64_INSN_REGTYPE_RT, + AARCH64_INSN_REGTYPE_RN, }; enum aarch64_insn_register { @@ -119,6 +120,7 @@ enum aarch64_insn_variant { enum aarch64_insn_branch_type { AARCH64_INSN_BRANCH_NOLINK, AARCH64_INSN_BRANCH_LINK, + AARCH64_INSN_BRANCH_RETURN, AARCH64_INSN_BRANCH_COMP_ZERO, AARCH64_INSN_BRANCH_COMP_NONZERO, }; @@ -138,6 +140,9 @@ __AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002) __AARCH64_INSN_FUNCS(smc, 0xFFE0001F, 0xD4000003) __AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000) __AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F) +__AARCH64_INSN_FUNCS(br, 0xFFFFFC1F, 0xD61F0000) +__AARCH64_INSN_FUNCS(blr, 0xFFFFFC1F, 0xD63F0000) +__AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000) #undef __AARCH64_INSN_FUNCS @@ -156,6 +161,8 @@ u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr, enum aarch64_insn_branch_type type); u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op); u32 aarch64_insn_gen_nop(void); +u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg, + enum aarch64_insn_branch_type type); bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index d9f7827c5058..67979364daf6 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -283,6 +283,9 @@ static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type, case AARCH64_INSN_REGTYPE_RT: shift = 0; break; + case AARCH64_INSN_REGTYPE_RN: + shift = 5; + break; default: pr_err("%s: unknown register type encoding %d\n", __func__, type); @@ -325,10 +328,16 @@ u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, */ offset = branch_imm_common(pc, addr, SZ_128M); - if (type == AARCH64_INSN_BRANCH_LINK) + switch (type) { + case AARCH64_INSN_BRANCH_LINK: insn = aarch64_insn_get_bl_value(); - else + break; + case AARCH64_INSN_BRANCH_NOLINK: insn = aarch64_insn_get_b_value(); + break; + default: + BUG_ON(1); + } return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn, offset >> 2); @@ -380,3 +389,25 @@ u32 __kprobes aarch64_insn_gen_nop(void) { return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP); } + +u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg, + enum aarch64_insn_branch_type type) +{ + u32 insn; + + switch (type) { + case AARCH64_INSN_BRANCH_NOLINK: + insn = aarch64_insn_get_br_value(); + break; + case AARCH64_INSN_BRANCH_LINK: + insn = aarch64_insn_get_blr_value(); + break; + case AARCH64_INSN_BRANCH_RETURN: + insn = aarch64_insn_get_ret_value(); + break; + default: + BUG_ON(1); + } + + return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg); +} -- cgit v1.2.3 From 345e0d35ecdd7aff31881462a6f7786fda3241d9 Mon Sep 17 00:00:00 2001 From: Zi Shen Lim Date: Wed, 27 Aug 2014 05:15:19 +0100 Subject: arm64: introduce aarch64_insn_gen_cond_branch_imm() Introduce function to generate conditional branch (immediate) instructions. 
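A hypothetical use (not part of this patch): encoding a "b.eq" from pc to addr, where both must be 4-byte aligned and within roughly ±1 MB of each other, as enforced via branch_imm_common():

        u32 insn = aarch64_insn_gen_cond_branch_imm(pc, addr,
                                                    AARCH64_INSN_COND_EQ);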
Signed-off-by: Zi Shen Lim Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/insn.h | 21 +++++++++++++++++++++ arch/arm64/kernel/insn.c | 17 +++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 5080962058b0..86a8a9cc9ec6 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -117,6 +117,24 @@ enum aarch64_insn_variant { AARCH64_INSN_VARIANT_64BIT }; +enum aarch64_insn_condition { + AARCH64_INSN_COND_EQ = 0x0, /* == */ + AARCH64_INSN_COND_NE = 0x1, /* != */ + AARCH64_INSN_COND_CS = 0x2, /* unsigned >= */ + AARCH64_INSN_COND_CC = 0x3, /* unsigned < */ + AARCH64_INSN_COND_MI = 0x4, /* < 0 */ + AARCH64_INSN_COND_PL = 0x5, /* >= 0 */ + AARCH64_INSN_COND_VS = 0x6, /* overflow */ + AARCH64_INSN_COND_VC = 0x7, /* no overflow */ + AARCH64_INSN_COND_HI = 0x8, /* unsigned > */ + AARCH64_INSN_COND_LS = 0x9, /* unsigned <= */ + AARCH64_INSN_COND_GE = 0xa, /* signed >= */ + AARCH64_INSN_COND_LT = 0xb, /* signed < */ + AARCH64_INSN_COND_GT = 0xc, /* signed > */ + AARCH64_INSN_COND_LE = 0xd, /* signed <= */ + AARCH64_INSN_COND_AL = 0xe, /* always */ +}; + enum aarch64_insn_branch_type { AARCH64_INSN_BRANCH_NOLINK, AARCH64_INSN_BRANCH_LINK, @@ -135,6 +153,7 @@ __AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) __AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) __AARCH64_INSN_FUNCS(cbz, 0xFE000000, 0x34000000) __AARCH64_INSN_FUNCS(cbnz, 0xFE000000, 0x35000000) +__AARCH64_INSN_FUNCS(bcond, 0xFF000010, 0x54000000) __AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001) __AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002) __AARCH64_INSN_FUNCS(smc, 0xFFE0001F, 0xD4000003) @@ -159,6 +178,8 @@ u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr, enum aarch64_insn_register reg, enum aarch64_insn_variant variant, enum aarch64_insn_branch_type type); +u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr, + enum aarch64_insn_condition cond); u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op); u32 aarch64_insn_gen_nop(void); u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg, diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 67979364daf6..b65edc02cf81 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -380,6 +380,23 @@ u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr, offset >> 2); } +u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr, + enum aarch64_insn_condition cond) +{ + u32 insn; + long offset; + + offset = branch_imm_common(pc, addr, SZ_1M); + + insn = aarch64_insn_get_bcond_value(); + + BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL); + insn |= cond; + + return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn, + offset >> 2); +} + u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op) { return aarch64_insn_get_hint_value() | op; -- cgit v1.2.3 From 17cac179888166a4e8e252d00ad511e999859293 Mon Sep 17 00:00:00 2001 From: Zi Shen Lim Date: Wed, 27 Aug 2014 05:15:20 +0100 Subject: arm64: introduce aarch64_insn_gen_load_store_reg() Introduce function to generate load/store (register offset) instructions. 
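A hypothetical use (not part of this patch), encoding "ldr x0, [x1, x2]", i.e. a 64-bit load with a register offset:

        u32 insn = aarch64_insn_gen_load_store_reg(AARCH64_INSN_REG_0,
                                                   AARCH64_INSN_REG_1,
                                                   AARCH64_INSN_REG_2,
                                                   AARCH64_INSN_SIZE_64,
                                                   AARCH64_INSN_LDST_LOAD_REG_OFFSET);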
Signed-off-by: Zi Shen Lim Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/insn.h | 20 ++++++++++++++ arch/arm64/kernel/insn.c | 62 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 86a8a9cc9ec6..5bc1cc391985 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -72,6 +72,7 @@ enum aarch64_insn_imm_type { enum aarch64_insn_register_type { AARCH64_INSN_REGTYPE_RT, AARCH64_INSN_REGTYPE_RN, + AARCH64_INSN_REGTYPE_RM, }; enum aarch64_insn_register { @@ -143,12 +144,26 @@ enum aarch64_insn_branch_type { AARCH64_INSN_BRANCH_COMP_NONZERO, }; +enum aarch64_insn_size_type { + AARCH64_INSN_SIZE_8, + AARCH64_INSN_SIZE_16, + AARCH64_INSN_SIZE_32, + AARCH64_INSN_SIZE_64, +}; + +enum aarch64_insn_ldst_type { + AARCH64_INSN_LDST_LOAD_REG_OFFSET, + AARCH64_INSN_LDST_STORE_REG_OFFSET, +}; + #define __AARCH64_INSN_FUNCS(abbr, mask, val) \ static __always_inline bool aarch64_insn_is_##abbr(u32 code) \ { return (code & (mask)) == (val); } \ static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \ { return (val); } +__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800) +__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800) __AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) __AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) __AARCH64_INSN_FUNCS(cbz, 0xFE000000, 0x34000000) @@ -184,6 +199,11 @@ u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op); u32 aarch64_insn_gen_nop(void); u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg, enum aarch64_insn_branch_type type); +u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg, + enum aarch64_insn_register base, + enum aarch64_insn_register offset, + enum aarch64_insn_size_type size, + enum aarch64_insn_ldst_type type); bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index b65edc02cf81..b882c85527dc 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -286,6 +286,9 @@ static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type, case AARCH64_INSN_REGTYPE_RN: shift = 5; break; + case AARCH64_INSN_REGTYPE_RM: + shift = 16; + break; default: pr_err("%s: unknown register type encoding %d\n", __func__, type); @@ -298,6 +301,35 @@ static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type, return insn; } +static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type, + u32 insn) +{ + u32 size; + + switch (type) { + case AARCH64_INSN_SIZE_8: + size = 0; + break; + case AARCH64_INSN_SIZE_16: + size = 1; + break; + case AARCH64_INSN_SIZE_32: + size = 2; + break; + case AARCH64_INSN_SIZE_64: + size = 3; + break; + default: + pr_err("%s: unknown size encoding %d\n", __func__, type); + return 0; + } + + insn &= ~GENMASK(31, 30); + insn |= size << 30; + + return insn; +} + static inline long branch_imm_common(unsigned long pc, unsigned long addr, long range) { @@ -428,3 +460,33 @@ u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg, return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg); } + +u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg, + enum aarch64_insn_register base, + enum aarch64_insn_register offset, + enum aarch64_insn_size_type size, + enum aarch64_insn_ldst_type type) +{ + u32 insn; + + switch (type) { + case AARCH64_INSN_LDST_LOAD_REG_OFFSET: + insn = aarch64_insn_get_ldr_reg_value(); + 
break; + case AARCH64_INSN_LDST_STORE_REG_OFFSET: + insn = aarch64_insn_get_str_reg_value(); + break; + default: + BUG_ON(1); + } + + insn = aarch64_insn_encode_ldst_size(size, insn); + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg); + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, + base); + + return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, + offset); +} -- cgit v1.2.3 From 1bba567d0f3050e33b4dd1404fdcbceaf5a73034 Mon Sep 17 00:00:00 2001 From: Zi Shen Lim Date: Wed, 27 Aug 2014 05:15:21 +0100 Subject: arm64: introduce aarch64_insn_gen_load_store_pair() Introduce function to generate load/store pair instructions. Signed-off-by: Zi Shen Lim Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/insn.h | 16 +++++++++++ arch/arm64/kernel/insn.c | 65 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 81 insertions(+) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 5bc1cc391985..eef8f1ef6736 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -66,12 +66,14 @@ enum aarch64_insn_imm_type { AARCH64_INSN_IMM_14, AARCH64_INSN_IMM_12, AARCH64_INSN_IMM_9, + AARCH64_INSN_IMM_7, AARCH64_INSN_IMM_MAX }; enum aarch64_insn_register_type { AARCH64_INSN_REGTYPE_RT, AARCH64_INSN_REGTYPE_RN, + AARCH64_INSN_REGTYPE_RT2, AARCH64_INSN_REGTYPE_RM, }; @@ -154,6 +156,10 @@ enum aarch64_insn_size_type { enum aarch64_insn_ldst_type { AARCH64_INSN_LDST_LOAD_REG_OFFSET, AARCH64_INSN_LDST_STORE_REG_OFFSET, + AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX, + AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX, + AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX, + AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX, }; #define __AARCH64_INSN_FUNCS(abbr, mask, val) \ @@ -164,6 +170,10 @@ static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \ __AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800) __AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800) +__AARCH64_INSN_FUNCS(stp_post, 0x7FC00000, 0x28800000) +__AARCH64_INSN_FUNCS(ldp_post, 0x7FC00000, 0x28C00000) +__AARCH64_INSN_FUNCS(stp_pre, 0x7FC00000, 0x29800000) +__AARCH64_INSN_FUNCS(ldp_pre, 0x7FC00000, 0x29C00000) __AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) __AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) __AARCH64_INSN_FUNCS(cbz, 0xFE000000, 0x34000000) @@ -204,6 +214,12 @@ u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg, enum aarch64_insn_register offset, enum aarch64_insn_size_type size, enum aarch64_insn_ldst_type type); +u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1, + enum aarch64_insn_register reg2, + enum aarch64_insn_register base, + int offset, + enum aarch64_insn_variant variant, + enum aarch64_insn_ldst_type type); bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index b882c85527dc..7880c060f684 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -255,6 +255,10 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, mask = BIT(9) - 1; shift = 12; break; + case AARCH64_INSN_IMM_7: + mask = BIT(7) - 1; + shift = 15; + break; default: pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n", type); @@ -286,6 +290,9 @@ static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type, case AARCH64_INSN_REGTYPE_RN: shift = 5; break; + case AARCH64_INSN_REGTYPE_RT2: + shift = 10; + break; case AARCH64_INSN_REGTYPE_RM: shift = 16; break; @@ -490,3 
+497,61 @@ u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg, return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, offset); } + +u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1, + enum aarch64_insn_register reg2, + enum aarch64_insn_register base, + int offset, + enum aarch64_insn_variant variant, + enum aarch64_insn_ldst_type type) +{ + u32 insn; + int shift; + + switch (type) { + case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX: + insn = aarch64_insn_get_ldp_pre_value(); + break; + case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX: + insn = aarch64_insn_get_stp_pre_value(); + break; + case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX: + insn = aarch64_insn_get_ldp_post_value(); + break; + case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX: + insn = aarch64_insn_get_stp_post_value(); + break; + default: + BUG_ON(1); + } + + switch (variant) { + case AARCH64_INSN_VARIANT_32BIT: + /* offset must be multiples of 4 in the range [-256, 252] */ + BUG_ON(offset & 0x3); + BUG_ON(offset < -256 || offset > 252); + shift = 2; + break; + case AARCH64_INSN_VARIANT_64BIT: + /* offset must be multiples of 8 in the range [-512, 504] */ + BUG_ON(offset & 0x7); + BUG_ON(offset < -512 || offset > 504); + shift = 3; + insn |= AARCH64_INSN_SF_BIT; + break; + default: + BUG_ON(1); + } + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, + reg1); + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn, + reg2); + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, + base); + + return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn, + offset >> shift); +} -- cgit v1.2.3 From 9951a157fa678db0ec92e5fc4c6320c038ffb67e Mon Sep 17 00:00:00 2001 From: Zi Shen Lim Date: Wed, 27 Aug 2014 05:15:22 +0100 Subject: arm64: introduce aarch64_insn_gen_add_sub_imm() Introduce function to generate add/subtract (immediate) instructions. 
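A hypothetical use (not part of this patch), encoding "add x0, x1, #16"; the immediate must fit in 12 bits:

        u32 insn = aarch64_insn_gen_add_sub_imm(AARCH64_INSN_REG_0,
                                                AARCH64_INSN_REG_1,
                                                16,
                                                AARCH64_INSN_VARIANT_64BIT,
                                                AARCH64_INSN_ADSB_ADD);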
Signed-off-by: Zi Shen Lim Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/insn.h | 16 ++++++++++++++++ arch/arm64/kernel/insn.c | 44 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index eef8f1ef6736..29386aaf2e85 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -75,6 +75,7 @@ enum aarch64_insn_register_type { AARCH64_INSN_REGTYPE_RN, AARCH64_INSN_REGTYPE_RT2, AARCH64_INSN_REGTYPE_RM, + AARCH64_INSN_REGTYPE_RD, }; enum aarch64_insn_register { @@ -162,6 +163,13 @@ enum aarch64_insn_ldst_type { AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX, }; +enum aarch64_insn_adsb_type { + AARCH64_INSN_ADSB_ADD, + AARCH64_INSN_ADSB_SUB, + AARCH64_INSN_ADSB_ADD_SETFLAGS, + AARCH64_INSN_ADSB_SUB_SETFLAGS +}; + #define __AARCH64_INSN_FUNCS(abbr, mask, val) \ static __always_inline bool aarch64_insn_is_##abbr(u32 code) \ { return (code & (mask)) == (val); } \ @@ -174,6 +182,10 @@ __AARCH64_INSN_FUNCS(stp_post, 0x7FC00000, 0x28800000) __AARCH64_INSN_FUNCS(ldp_post, 0x7FC00000, 0x28C00000) __AARCH64_INSN_FUNCS(stp_pre, 0x7FC00000, 0x29800000) __AARCH64_INSN_FUNCS(ldp_pre, 0x7FC00000, 0x29C00000) +__AARCH64_INSN_FUNCS(add_imm, 0x7F000000, 0x11000000) +__AARCH64_INSN_FUNCS(adds_imm, 0x7F000000, 0x31000000) +__AARCH64_INSN_FUNCS(sub_imm, 0x7F000000, 0x51000000) +__AARCH64_INSN_FUNCS(subs_imm, 0x7F000000, 0x71000000) __AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) __AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) __AARCH64_INSN_FUNCS(cbz, 0xFE000000, 0x34000000) @@ -220,6 +232,10 @@ u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1, int offset, enum aarch64_insn_variant variant, enum aarch64_insn_ldst_type type); +u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst, + enum aarch64_insn_register src, + int imm, enum aarch64_insn_variant variant, + enum aarch64_insn_adsb_type type); bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 7880c060f684..ec3a90256ff9 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -285,6 +285,7 @@ static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type, switch (type) { case AARCH64_INSN_REGTYPE_RT: + case AARCH64_INSN_REGTYPE_RD: shift = 0; break; case AARCH64_INSN_REGTYPE_RN: @@ -555,3 +556,46 @@ u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1, return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn, offset >> shift); } + +u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst, + enum aarch64_insn_register src, + int imm, enum aarch64_insn_variant variant, + enum aarch64_insn_adsb_type type) +{ + u32 insn; + + switch (type) { + case AARCH64_INSN_ADSB_ADD: + insn = aarch64_insn_get_add_imm_value(); + break; + case AARCH64_INSN_ADSB_SUB: + insn = aarch64_insn_get_sub_imm_value(); + break; + case AARCH64_INSN_ADSB_ADD_SETFLAGS: + insn = aarch64_insn_get_adds_imm_value(); + break; + case AARCH64_INSN_ADSB_SUB_SETFLAGS: + insn = aarch64_insn_get_subs_imm_value(); + break; + default: + BUG_ON(1); + } + + switch (variant) { + case AARCH64_INSN_VARIANT_32BIT: + break; + case AARCH64_INSN_VARIANT_64BIT: + insn |= AARCH64_INSN_SF_BIT; + break; + default: + BUG_ON(1); + } + + BUG_ON(imm & ~(SZ_4K - 1)); + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); 
+ + return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm); +} -- cgit v1.2.3 From 4a89d2c98e1efadd135015668c499ae1bbd2131f Mon Sep 17 00:00:00 2001 From: Zi Shen Lim Date: Wed, 27 Aug 2014 05:15:23 +0100 Subject: arm64: introduce aarch64_insn_gen_bitfield() Introduce function to generate bitfield instructions. Signed-off-by: Zi Shen Lim Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/insn.h | 16 +++++++++++++ arch/arm64/kernel/insn.c | 56 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 29386aaf2e85..8fd31fc29c47 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -67,6 +67,8 @@ enum aarch64_insn_imm_type { AARCH64_INSN_IMM_12, AARCH64_INSN_IMM_9, AARCH64_INSN_IMM_7, + AARCH64_INSN_IMM_S, + AARCH64_INSN_IMM_R, AARCH64_INSN_IMM_MAX }; @@ -170,6 +172,12 @@ enum aarch64_insn_adsb_type { AARCH64_INSN_ADSB_SUB_SETFLAGS }; +enum aarch64_insn_bitfield_type { + AARCH64_INSN_BITFIELD_MOVE, + AARCH64_INSN_BITFIELD_MOVE_UNSIGNED, + AARCH64_INSN_BITFIELD_MOVE_SIGNED +}; + #define __AARCH64_INSN_FUNCS(abbr, mask, val) \ static __always_inline bool aarch64_insn_is_##abbr(u32 code) \ { return (code & (mask)) == (val); } \ @@ -186,6 +194,9 @@ __AARCH64_INSN_FUNCS(add_imm, 0x7F000000, 0x11000000) __AARCH64_INSN_FUNCS(adds_imm, 0x7F000000, 0x31000000) __AARCH64_INSN_FUNCS(sub_imm, 0x7F000000, 0x51000000) __AARCH64_INSN_FUNCS(subs_imm, 0x7F000000, 0x71000000) +__AARCH64_INSN_FUNCS(sbfm, 0x7F800000, 0x13000000) +__AARCH64_INSN_FUNCS(bfm, 0x7F800000, 0x33000000) +__AARCH64_INSN_FUNCS(ubfm, 0x7F800000, 0x53000000) __AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) __AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) __AARCH64_INSN_FUNCS(cbz, 0xFE000000, 0x34000000) @@ -236,6 +247,11 @@ u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst, enum aarch64_insn_register src, int imm, enum aarch64_insn_variant variant, enum aarch64_insn_adsb_type type); +u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst, + enum aarch64_insn_register src, + int immr, int imms, + enum aarch64_insn_variant variant, + enum aarch64_insn_bitfield_type type); bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index ec3a90256ff9..e07d026e75d8 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -26,6 +26,7 @@ #include #define AARCH64_INSN_SF_BIT BIT(31) +#define AARCH64_INSN_N_BIT BIT(22) static int aarch64_insn_encoding_class[] = { AARCH64_INSN_CLS_UNKNOWN, @@ -259,6 +260,14 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, mask = BIT(7) - 1; shift = 15; break; + case AARCH64_INSN_IMM_S: + mask = BIT(6) - 1; + shift = 10; + break; + case AARCH64_INSN_IMM_R: + mask = BIT(6) - 1; + shift = 16; + break; default: pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n", type); @@ -599,3 +608,50 @@ u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst, return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm); } + +u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst, + enum aarch64_insn_register src, + int immr, int imms, + enum aarch64_insn_variant variant, + enum aarch64_insn_bitfield_type type) +{ + u32 insn; + u32 mask; + + switch (type) { + case AARCH64_INSN_BITFIELD_MOVE: + insn = aarch64_insn_get_bfm_value(); + break; + case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED: + insn = 
aarch64_insn_get_ubfm_value(); + break; + case AARCH64_INSN_BITFIELD_MOVE_SIGNED: + insn = aarch64_insn_get_sbfm_value(); + break; + default: + BUG_ON(1); + } + + switch (variant) { + case AARCH64_INSN_VARIANT_32BIT: + mask = GENMASK(4, 0); + break; + case AARCH64_INSN_VARIANT_64BIT: + insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT; + mask = GENMASK(5, 0); + break; + default: + BUG_ON(1); + } + + BUG_ON(immr & ~mask); + BUG_ON(imms & ~mask); + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); + + insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr); + + return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms); +} -- cgit v1.2.3 From 6098f2d5c7a349d388499503bb129d0088870dd6 Mon Sep 17 00:00:00 2001 From: Zi Shen Lim Date: Wed, 27 Aug 2014 05:15:24 +0100 Subject: arm64: introduce aarch64_insn_gen_movewide() Introduce function to generate move wide (immediate) instructions. Signed-off-by: Zi Shen Lim Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/insn.h | 13 +++++++++++++ arch/arm64/kernel/insn.c | 43 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 8fd31fc29c47..49dec288f5ac 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -172,6 +172,12 @@ enum aarch64_insn_adsb_type { AARCH64_INSN_ADSB_SUB_SETFLAGS }; +enum aarch64_insn_movewide_type { + AARCH64_INSN_MOVEWIDE_ZERO, + AARCH64_INSN_MOVEWIDE_KEEP, + AARCH64_INSN_MOVEWIDE_INVERSE +}; + enum aarch64_insn_bitfield_type { AARCH64_INSN_BITFIELD_MOVE, AARCH64_INSN_BITFIELD_MOVE_UNSIGNED, @@ -194,9 +200,12 @@ __AARCH64_INSN_FUNCS(add_imm, 0x7F000000, 0x11000000) __AARCH64_INSN_FUNCS(adds_imm, 0x7F000000, 0x31000000) __AARCH64_INSN_FUNCS(sub_imm, 0x7F000000, 0x51000000) __AARCH64_INSN_FUNCS(subs_imm, 0x7F000000, 0x71000000) +__AARCH64_INSN_FUNCS(movn, 0x7F800000, 0x12800000) __AARCH64_INSN_FUNCS(sbfm, 0x7F800000, 0x13000000) __AARCH64_INSN_FUNCS(bfm, 0x7F800000, 0x33000000) +__AARCH64_INSN_FUNCS(movz, 0x7F800000, 0x52800000) __AARCH64_INSN_FUNCS(ubfm, 0x7F800000, 0x53000000) +__AARCH64_INSN_FUNCS(movk, 0x7F800000, 0x72800000) __AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) __AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) __AARCH64_INSN_FUNCS(cbz, 0xFE000000, 0x34000000) @@ -252,6 +261,10 @@ u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst, int immr, int imms, enum aarch64_insn_variant variant, enum aarch64_insn_bitfield_type type); +u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst, + int imm, int shift, + enum aarch64_insn_variant variant, + enum aarch64_insn_movewide_type type); bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index e07d026e75d8..7aa278404b80 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -655,3 +655,46 @@ u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst, return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms); } + +u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst, + int imm, int shift, + enum aarch64_insn_variant variant, + enum aarch64_insn_movewide_type type) +{ + u32 insn; + + switch (type) { + case AARCH64_INSN_MOVEWIDE_ZERO: + insn = aarch64_insn_get_movz_value(); + break; + case AARCH64_INSN_MOVEWIDE_KEEP: + insn = aarch64_insn_get_movk_value(); + break; + case 
AARCH64_INSN_MOVEWIDE_INVERSE: + insn = aarch64_insn_get_movn_value(); + break; + default: + BUG_ON(1); + } + + BUG_ON(imm & ~(SZ_64K - 1)); + + switch (variant) { + case AARCH64_INSN_VARIANT_32BIT: + BUG_ON(shift != 0 && shift != 16); + break; + case AARCH64_INSN_VARIANT_64BIT: + insn |= AARCH64_INSN_SF_BIT; + BUG_ON(shift != 0 && shift != 16 && shift != 32 && + shift != 48); + break; + default: + BUG_ON(1); + } + + insn |= (shift >> 4) << 21; + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); + + return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm); +} -- cgit v1.2.3 From 5fdc639a7a5b187f75b7408ee7ae9f9c06771218 Mon Sep 17 00:00:00 2001 From: Zi Shen Lim Date: Wed, 27 Aug 2014 05:15:25 +0100 Subject: arm64: introduce aarch64_insn_gen_add_sub_shifted_reg() Introduce function to generate add/subtract (shifted register) instructions. Signed-off-by: Zi Shen Lim Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/insn.h | 11 ++++++++++ arch/arm64/kernel/insn.c | 49 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 49dec288f5ac..c0a765dae1f0 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -67,6 +67,7 @@ enum aarch64_insn_imm_type { AARCH64_INSN_IMM_12, AARCH64_INSN_IMM_9, AARCH64_INSN_IMM_7, + AARCH64_INSN_IMM_6, AARCH64_INSN_IMM_S, AARCH64_INSN_IMM_R, AARCH64_INSN_IMM_MAX @@ -206,6 +207,10 @@ __AARCH64_INSN_FUNCS(bfm, 0x7F800000, 0x33000000) __AARCH64_INSN_FUNCS(movz, 0x7F800000, 0x52800000) __AARCH64_INSN_FUNCS(ubfm, 0x7F800000, 0x53000000) __AARCH64_INSN_FUNCS(movk, 0x7F800000, 0x72800000) +__AARCH64_INSN_FUNCS(add, 0x7F200000, 0x0B000000) +__AARCH64_INSN_FUNCS(adds, 0x7F200000, 0x2B000000) +__AARCH64_INSN_FUNCS(sub, 0x7F200000, 0x4B000000) +__AARCH64_INSN_FUNCS(subs, 0x7F200000, 0x6B000000) __AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) __AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) __AARCH64_INSN_FUNCS(cbz, 0xFE000000, 0x34000000) @@ -265,6 +270,12 @@ u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst, int imm, int shift, enum aarch64_insn_variant variant, enum aarch64_insn_movewide_type type); +u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst, + enum aarch64_insn_register src, + enum aarch64_insn_register reg, + int shift, + enum aarch64_insn_variant variant, + enum aarch64_insn_adsb_type type); bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 7aa278404b80..d7a4dd48e959 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -260,6 +260,7 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, mask = BIT(7) - 1; shift = 15; break; + case AARCH64_INSN_IMM_6: case AARCH64_INSN_IMM_S: mask = BIT(6) - 1; shift = 10; @@ -698,3 +699,51 @@ u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst, return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm); } + +u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst, + enum aarch64_insn_register src, + enum aarch64_insn_register reg, + int shift, + enum aarch64_insn_variant variant, + enum aarch64_insn_adsb_type type) +{ + u32 insn; + + switch (type) { + case AARCH64_INSN_ADSB_ADD: + insn = aarch64_insn_get_add_value(); + break; + case AARCH64_INSN_ADSB_SUB: + insn = aarch64_insn_get_sub_value(); + break; + case AARCH64_INSN_ADSB_ADD_SETFLAGS: + insn = 
aarch64_insn_get_adds_value(); + break; + case AARCH64_INSN_ADSB_SUB_SETFLAGS: + insn = aarch64_insn_get_subs_value(); + break; + default: + BUG_ON(1); + } + + switch (variant) { + case AARCH64_INSN_VARIANT_32BIT: + BUG_ON(shift & ~(SZ_32 - 1)); + break; + case AARCH64_INSN_VARIANT_64BIT: + insn |= AARCH64_INSN_SF_BIT; + BUG_ON(shift & ~(SZ_64 - 1)); + break; + default: + BUG_ON(1); + } + + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg); + + return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); +} -- cgit v1.2.3 From 546dd36b44613c770655531ee3ada6a9e9907d71 Mon Sep 17 00:00:00 2001 From: Zi Shen Lim Date: Wed, 27 Aug 2014 05:15:26 +0100 Subject: arm64: introduce aarch64_insn_gen_data1() Introduce function to generate data-processing (1 source) instructions. Signed-off-by: Zi Shen Lim Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/insn.h | 13 +++++++++++++ arch/arm64/kernel/insn.c | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index c0a765dae1f0..246d214e596a 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -185,6 +185,12 @@ enum aarch64_insn_bitfield_type { AARCH64_INSN_BITFIELD_MOVE_SIGNED }; +enum aarch64_insn_data1_type { + AARCH64_INSN_DATA1_REVERSE_16, + AARCH64_INSN_DATA1_REVERSE_32, + AARCH64_INSN_DATA1_REVERSE_64, +}; + #define __AARCH64_INSN_FUNCS(abbr, mask, val) \ static __always_inline bool aarch64_insn_is_##abbr(u32 code) \ { return (code & (mask)) == (val); } \ @@ -211,6 +217,9 @@ __AARCH64_INSN_FUNCS(add, 0x7F200000, 0x0B000000) __AARCH64_INSN_FUNCS(adds, 0x7F200000, 0x2B000000) __AARCH64_INSN_FUNCS(sub, 0x7F200000, 0x4B000000) __AARCH64_INSN_FUNCS(subs, 0x7F200000, 0x6B000000) +__AARCH64_INSN_FUNCS(rev16, 0x7FFFFC00, 0x5AC00400) +__AARCH64_INSN_FUNCS(rev32, 0x7FFFFC00, 0x5AC00800) +__AARCH64_INSN_FUNCS(rev64, 0x7FFFFC00, 0x5AC00C00) __AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) __AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) __AARCH64_INSN_FUNCS(cbz, 0xFE000000, 0x34000000) @@ -276,6 +285,10 @@ u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst, int shift, enum aarch64_insn_variant variant, enum aarch64_insn_adsb_type type); +u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst, + enum aarch64_insn_register src, + enum aarch64_insn_variant variant, + enum aarch64_insn_data1_type type); bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index d7a4dd48e959..81ef3b59348b 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -747,3 +747,40 @@ u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst, return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); } + +u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst, + enum aarch64_insn_register src, + enum aarch64_insn_variant variant, + enum aarch64_insn_data1_type type) +{ + u32 insn; + + switch (type) { + case AARCH64_INSN_DATA1_REVERSE_16: + insn = aarch64_insn_get_rev16_value(); + break; + case AARCH64_INSN_DATA1_REVERSE_32: + insn = aarch64_insn_get_rev32_value(); + break; + case AARCH64_INSN_DATA1_REVERSE_64: + BUG_ON(variant != AARCH64_INSN_VARIANT_64BIT); + insn = aarch64_insn_get_rev64_value(); + break; + 
default: + BUG_ON(1); + } + + switch (variant) { + case AARCH64_INSN_VARIANT_32BIT: + break; + case AARCH64_INSN_VARIANT_64BIT: + insn |= AARCH64_INSN_SF_BIT; + break; + default: + BUG_ON(1); + } + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); + + return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); +} -- cgit v1.2.3 From 6481063989283f7cbeb0b6c38506ba4dd319f93a Mon Sep 17 00:00:00 2001 From: Zi Shen Lim Date: Wed, 27 Aug 2014 05:15:27 +0100 Subject: arm64: introduce aarch64_insn_gen_data2() Introduce function to generate data-processing (2 source) instructions. Signed-off-by: Zi Shen Lim Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/insn.h | 20 ++++++++++++++++++ arch/arm64/kernel/insn.c | 48 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 246d214e596a..367245fab9b7 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -191,6 +191,15 @@ enum aarch64_insn_data1_type { AARCH64_INSN_DATA1_REVERSE_64, }; +enum aarch64_insn_data2_type { + AARCH64_INSN_DATA2_UDIV, + AARCH64_INSN_DATA2_SDIV, + AARCH64_INSN_DATA2_LSLV, + AARCH64_INSN_DATA2_LSRV, + AARCH64_INSN_DATA2_ASRV, + AARCH64_INSN_DATA2_RORV, +}; + #define __AARCH64_INSN_FUNCS(abbr, mask, val) \ static __always_inline bool aarch64_insn_is_##abbr(u32 code) \ { return (code & (mask)) == (val); } \ @@ -217,6 +226,12 @@ __AARCH64_INSN_FUNCS(add, 0x7F200000, 0x0B000000) __AARCH64_INSN_FUNCS(adds, 0x7F200000, 0x2B000000) __AARCH64_INSN_FUNCS(sub, 0x7F200000, 0x4B000000) __AARCH64_INSN_FUNCS(subs, 0x7F200000, 0x6B000000) +__AARCH64_INSN_FUNCS(udiv, 0x7FE0FC00, 0x1AC00800) +__AARCH64_INSN_FUNCS(sdiv, 0x7FE0FC00, 0x1AC00C00) +__AARCH64_INSN_FUNCS(lslv, 0x7FE0FC00, 0x1AC02000) +__AARCH64_INSN_FUNCS(lsrv, 0x7FE0FC00, 0x1AC02400) +__AARCH64_INSN_FUNCS(asrv, 0x7FE0FC00, 0x1AC02800) +__AARCH64_INSN_FUNCS(rorv, 0x7FE0FC00, 0x1AC02C00) __AARCH64_INSN_FUNCS(rev16, 0x7FFFFC00, 0x5AC00400) __AARCH64_INSN_FUNCS(rev32, 0x7FFFFC00, 0x5AC00800) __AARCH64_INSN_FUNCS(rev64, 0x7FFFFC00, 0x5AC00C00) @@ -289,6 +304,11 @@ u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst, enum aarch64_insn_register src, enum aarch64_insn_variant variant, enum aarch64_insn_data1_type type); +u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst, + enum aarch64_insn_register src, + enum aarch64_insn_register reg, + enum aarch64_insn_variant variant, + enum aarch64_insn_data2_type type); bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 81ef3b59348b..c054164c677b 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -784,3 +784,51 @@ u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst, return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); } + +u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst, + enum aarch64_insn_register src, + enum aarch64_insn_register reg, + enum aarch64_insn_variant variant, + enum aarch64_insn_data2_type type) +{ + u32 insn; + + switch (type) { + case AARCH64_INSN_DATA2_UDIV: + insn = aarch64_insn_get_udiv_value(); + break; + case AARCH64_INSN_DATA2_SDIV: + insn = aarch64_insn_get_sdiv_value(); + break; + case AARCH64_INSN_DATA2_LSLV: + insn = aarch64_insn_get_lslv_value(); + break; + case AARCH64_INSN_DATA2_LSRV: + insn = aarch64_insn_get_lsrv_value(); + break; + case AARCH64_INSN_DATA2_ASRV: + insn = 
aarch64_insn_get_asrv_value(); + break; + case AARCH64_INSN_DATA2_RORV: + insn = aarch64_insn_get_rorv_value(); + break; + default: + BUG_ON(1); + } + + switch (variant) { + case AARCH64_INSN_VARIANT_32BIT: + break; + case AARCH64_INSN_VARIANT_64BIT: + insn |= AARCH64_INSN_SF_BIT; + break; + default: + BUG_ON(1); + } + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); + + return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg); +} -- cgit v1.2.3 From 27f95ba59b34509dc8afa2f89ad51c044df9d7c7 Mon Sep 17 00:00:00 2001 From: Zi Shen Lim Date: Wed, 27 Aug 2014 05:15:28 +0100 Subject: arm64: introduce aarch64_insn_gen_data3() Introduce function to generate data-processing (3 source) instructions. Signed-off-by: Zi Shen Lim Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/insn.h | 14 ++++++++++++++ arch/arm64/kernel/insn.c | 42 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 367245fab9b7..36e8465cdf71 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -79,6 +79,7 @@ enum aarch64_insn_register_type { AARCH64_INSN_REGTYPE_RT2, AARCH64_INSN_REGTYPE_RM, AARCH64_INSN_REGTYPE_RD, + AARCH64_INSN_REGTYPE_RA, }; enum aarch64_insn_register { @@ -200,6 +201,11 @@ enum aarch64_insn_data2_type { AARCH64_INSN_DATA2_RORV, }; +enum aarch64_insn_data3_type { + AARCH64_INSN_DATA3_MADD, + AARCH64_INSN_DATA3_MSUB, +}; + #define __AARCH64_INSN_FUNCS(abbr, mask, val) \ static __always_inline bool aarch64_insn_is_##abbr(u32 code) \ { return (code & (mask)) == (val); } \ @@ -226,6 +232,8 @@ __AARCH64_INSN_FUNCS(add, 0x7F200000, 0x0B000000) __AARCH64_INSN_FUNCS(adds, 0x7F200000, 0x2B000000) __AARCH64_INSN_FUNCS(sub, 0x7F200000, 0x4B000000) __AARCH64_INSN_FUNCS(subs, 0x7F200000, 0x6B000000) +__AARCH64_INSN_FUNCS(madd, 0x7FE08000, 0x1B000000) +__AARCH64_INSN_FUNCS(msub, 0x7FE08000, 0x1B008000) __AARCH64_INSN_FUNCS(udiv, 0x7FE0FC00, 0x1AC00800) __AARCH64_INSN_FUNCS(sdiv, 0x7FE0FC00, 0x1AC00C00) __AARCH64_INSN_FUNCS(lslv, 0x7FE0FC00, 0x1AC02000) @@ -309,6 +317,12 @@ u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst, enum aarch64_insn_register reg, enum aarch64_insn_variant variant, enum aarch64_insn_data2_type type); +u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst, + enum aarch64_insn_register src, + enum aarch64_insn_register reg1, + enum aarch64_insn_register reg2, + enum aarch64_insn_variant variant, + enum aarch64_insn_data3_type type); bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index c054164c677b..f73a4bfbb946 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -302,6 +302,7 @@ static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type, shift = 5; break; case AARCH64_INSN_REGTYPE_RT2: + case AARCH64_INSN_REGTYPE_RA: shift = 10; break; case AARCH64_INSN_REGTYPE_RM: @@ -832,3 +833,44 @@ u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst, return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg); } + +u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst, + enum aarch64_insn_register src, + enum aarch64_insn_register reg1, + enum aarch64_insn_register reg2, + enum aarch64_insn_variant variant, + enum aarch64_insn_data3_type type) +{ + u32 insn; + + switch (type) { + case 
AARCH64_INSN_DATA3_MADD: + insn = aarch64_insn_get_madd_value(); + break; + case AARCH64_INSN_DATA3_MSUB: + insn = aarch64_insn_get_msub_value(); + break; + default: + BUG_ON(1); + } + + switch (variant) { + case AARCH64_INSN_VARIANT_32BIT: + break; + case AARCH64_INSN_VARIANT_64BIT: + insn |= AARCH64_INSN_SF_BIT; + break; + default: + BUG_ON(1); + } + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src); + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, + reg1); + + return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, + reg2); +} -- cgit v1.2.3 From 5e6e15a2c4b529fd3cbf367b734842c4d8f6b0fa Mon Sep 17 00:00:00 2001 From: Zi Shen Lim Date: Wed, 27 Aug 2014 05:15:29 +0100 Subject: arm64: introduce aarch64_insn_gen_logical_shifted_reg() Introduce function to generate logical (shifted register) instructions. Signed-off-by: Zi Shen Lim Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/insn.h | 25 ++++++++++++++++++ arch/arm64/kernel/insn.c | 60 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 85 insertions(+) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 36e8465cdf71..56a9e63b6c33 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -206,6 +206,17 @@ enum aarch64_insn_data3_type { AARCH64_INSN_DATA3_MSUB, }; +enum aarch64_insn_logic_type { + AARCH64_INSN_LOGIC_AND, + AARCH64_INSN_LOGIC_BIC, + AARCH64_INSN_LOGIC_ORR, + AARCH64_INSN_LOGIC_ORN, + AARCH64_INSN_LOGIC_EOR, + AARCH64_INSN_LOGIC_EON, + AARCH64_INSN_LOGIC_AND_SETFLAGS, + AARCH64_INSN_LOGIC_BIC_SETFLAGS +}; + #define __AARCH64_INSN_FUNCS(abbr, mask, val) \ static __always_inline bool aarch64_insn_is_##abbr(u32 code) \ { return (code & (mask)) == (val); } \ @@ -243,6 +254,14 @@ __AARCH64_INSN_FUNCS(rorv, 0x7FE0FC00, 0x1AC02C00) __AARCH64_INSN_FUNCS(rev16, 0x7FFFFC00, 0x5AC00400) __AARCH64_INSN_FUNCS(rev32, 0x7FFFFC00, 0x5AC00800) __AARCH64_INSN_FUNCS(rev64, 0x7FFFFC00, 0x5AC00C00) +__AARCH64_INSN_FUNCS(and, 0x7F200000, 0x0A000000) +__AARCH64_INSN_FUNCS(bic, 0x7F200000, 0x0A200000) +__AARCH64_INSN_FUNCS(orr, 0x7F200000, 0x2A000000) +__AARCH64_INSN_FUNCS(orn, 0x7F200000, 0x2A200000) +__AARCH64_INSN_FUNCS(eor, 0x7F200000, 0x4A000000) +__AARCH64_INSN_FUNCS(eon, 0x7F200000, 0x4A200000) +__AARCH64_INSN_FUNCS(ands, 0x7F200000, 0x6A000000) +__AARCH64_INSN_FUNCS(bics, 0x7F200000, 0x6A200000) __AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) __AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) __AARCH64_INSN_FUNCS(cbz, 0xFE000000, 0x34000000) @@ -323,6 +342,12 @@ u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst, enum aarch64_insn_register reg2, enum aarch64_insn_variant variant, enum aarch64_insn_data3_type type); +u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst, + enum aarch64_insn_register src, + enum aarch64_insn_register reg, + int shift, + enum aarch64_insn_variant variant, + enum aarch64_insn_logic_type type); bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index f73a4bfbb946..0668ee5c5bf9 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -874,3 +874,63 @@ u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst, return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg2); } + +u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst, + enum 
aarch64_insn_register src, + enum aarch64_insn_register reg, + int shift, + enum aarch64_insn_variant variant, + enum aarch64_insn_logic_type type) +{ + u32 insn; + + switch (type) { + case AARCH64_INSN_LOGIC_AND: + insn = aarch64_insn_get_and_value(); + break; + case AARCH64_INSN_LOGIC_BIC: + insn = aarch64_insn_get_bic_value(); + break; + case AARCH64_INSN_LOGIC_ORR: + insn = aarch64_insn_get_orr_value(); + break; + case AARCH64_INSN_LOGIC_ORN: + insn = aarch64_insn_get_orn_value(); + break; + case AARCH64_INSN_LOGIC_EOR: + insn = aarch64_insn_get_eor_value(); + break; + case AARCH64_INSN_LOGIC_EON: + insn = aarch64_insn_get_eon_value(); + break; + case AARCH64_INSN_LOGIC_AND_SETFLAGS: + insn = aarch64_insn_get_ands_value(); + break; + case AARCH64_INSN_LOGIC_BIC_SETFLAGS: + insn = aarch64_insn_get_bics_value(); + break; + default: + BUG_ON(1); + } + + switch (variant) { + case AARCH64_INSN_VARIANT_32BIT: + BUG_ON(shift & ~(SZ_32 - 1)); + break; + case AARCH64_INSN_VARIANT_64BIT: + insn |= AARCH64_INSN_SF_BIT; + BUG_ON(shift & ~(SZ_64 - 1)); + break; + default: + BUG_ON(1); + } + + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg); + + return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); +} -- cgit v1.2.3 From e54bcde3d69d40023ae77727213d14f920eb264a Mon Sep 17 00:00:00 2001 From: Zi Shen Lim Date: Tue, 26 Aug 2014 21:15:30 -0700 Subject: arm64: eBPF JIT compiler The JIT compiler emits A64 instructions. It supports eBPF only. Legacy BPF is supported thanks to conversion by BPF core. JIT is enabled in the same way as for other architectures: echo 1 > /proc/sys/net/core/bpf_jit_enable Or for additional compiler output: echo 2 > /proc/sys/net/core/bpf_jit_enable See Documentation/networking/filter.txt for more information. The implementation passes all 57 tests in lib/test_bpf.c on ARMv8 Foundation Model :) Also tested by Will on Juno platform. Signed-off-by: Zi Shen Lim Acked-by: Alexei Starovoitov Acked-by: Will Deacon Signed-off-by: Will Deacon --- Documentation/networking/filter.txt | 6 +- arch/arm64/Kconfig | 1 + arch/arm64/Makefile | 1 + arch/arm64/net/Makefile | 4 + arch/arm64/net/bpf_jit.h | 169 +++++++++ arch/arm64/net/bpf_jit_comp.c | 677 ++++++++++++++++++++++++++++++++++++ 6 files changed, 855 insertions(+), 3 deletions(-) create mode 100644 arch/arm64/net/Makefile create mode 100644 arch/arm64/net/bpf_jit.h create mode 100644 arch/arm64/net/bpf_jit_comp.c diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt index c48a9704bda8..1842d4f8ae13 100644 --- a/Documentation/networking/filter.txt +++ b/Documentation/networking/filter.txt @@ -462,9 +462,9 @@ JIT compiler ------------ The Linux kernel has a built-in BPF JIT compiler for x86_64, SPARC, PowerPC, -ARM and s390 and can be enabled through CONFIG_BPF_JIT. The JIT compiler is -transparently invoked for each attached filter from user space or for internal -kernel users if it has been previously enabled by root: +ARM, ARM64 and s390 and can be enabled through CONFIG_BPF_JIT. 
The JIT compiler +is transparently invoked for each attached filter from user space or for +internal kernel users if it has been previously enabled by root: echo 1 > /proc/sys/net/core/bpf_jit_enable diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index fd4e81a4e1ce..cfea6237a9e0 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -35,6 +35,7 @@ config ARM64 select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_KGDB select HAVE_ARCH_TRACEHOOK + select HAVE_BPF_JIT select HAVE_C_RECORDMCOUNT select HAVE_CC_STACKPROTECTOR select HAVE_DEBUG_BUGVERBOSE diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 2df5e5daeebe..59c86b6b3052 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -47,6 +47,7 @@ endif export TEXT_OFFSET GZFLAGS core-y += arch/arm64/kernel/ arch/arm64/mm/ +core-$(CONFIG_NET) += arch/arm64/net/ core-$(CONFIG_KVM) += arch/arm64/kvm/ core-$(CONFIG_XEN) += arch/arm64/xen/ core-$(CONFIG_CRYPTO) += arch/arm64/crypto/ diff --git a/arch/arm64/net/Makefile b/arch/arm64/net/Makefile new file mode 100644 index 000000000000..da9763378284 --- /dev/null +++ b/arch/arm64/net/Makefile @@ -0,0 +1,4 @@ +# +# ARM64 networking code +# +obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h new file mode 100644 index 000000000000..2134f7e6c288 --- /dev/null +++ b/arch/arm64/net/bpf_jit.h @@ -0,0 +1,169 @@ +/* + * BPF JIT compiler for ARM64 + * + * Copyright (C) 2014 Zi Shen Lim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef _BPF_JIT_H +#define _BPF_JIT_H + +#include + +/* 5-bit Register Operand */ +#define A64_R(x) AARCH64_INSN_REG_##x +#define A64_FP AARCH64_INSN_REG_FP +#define A64_LR AARCH64_INSN_REG_LR +#define A64_ZR AARCH64_INSN_REG_ZR +#define A64_SP AARCH64_INSN_REG_SP + +#define A64_VARIANT(sf) \ + ((sf) ? 
AARCH64_INSN_VARIANT_64BIT : AARCH64_INSN_VARIANT_32BIT) + +/* Compare & branch (immediate) */ +#define A64_COMP_BRANCH(sf, Rt, offset, type) \ + aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \ + AARCH64_INSN_BRANCH_COMP_##type) +#define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO) + +/* Conditional branch (immediate) */ +#define A64_COND_BRANCH(cond, offset) \ + aarch64_insn_gen_cond_branch_imm(0, offset, cond) +#define A64_COND_EQ AARCH64_INSN_COND_EQ /* == */ +#define A64_COND_NE AARCH64_INSN_COND_NE /* != */ +#define A64_COND_CS AARCH64_INSN_COND_CS /* unsigned >= */ +#define A64_COND_HI AARCH64_INSN_COND_HI /* unsigned > */ +#define A64_COND_GE AARCH64_INSN_COND_GE /* signed >= */ +#define A64_COND_GT AARCH64_INSN_COND_GT /* signed > */ +#define A64_B_(cond, imm19) A64_COND_BRANCH(cond, (imm19) << 2) + +/* Unconditional branch (immediate) */ +#define A64_BRANCH(offset, type) aarch64_insn_gen_branch_imm(0, offset, \ + AARCH64_INSN_BRANCH_##type) +#define A64_B(imm26) A64_BRANCH((imm26) << 2, NOLINK) +#define A64_BL(imm26) A64_BRANCH((imm26) << 2, LINK) + +/* Unconditional branch (register) */ +#define A64_BLR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_LINK) +#define A64_RET(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_RETURN) + +/* Load/store register (register offset) */ +#define A64_LS_REG(Rt, Rn, Rm, size, type) \ + aarch64_insn_gen_load_store_reg(Rt, Rn, Rm, \ + AARCH64_INSN_SIZE_##size, \ + AARCH64_INSN_LDST_##type##_REG_OFFSET) +#define A64_STRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, STORE) +#define A64_LDRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, LOAD) +#define A64_STRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, STORE) +#define A64_LDRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, LOAD) +#define A64_STR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, STORE) +#define A64_LDR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, LOAD) +#define A64_STR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, STORE) +#define A64_LDR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, LOAD) + +/* Load/store register pair */ +#define A64_LS_PAIR(Rt, Rt2, Rn, offset, ls, type) \ + aarch64_insn_gen_load_store_pair(Rt, Rt2, Rn, offset, \ + AARCH64_INSN_VARIANT_64BIT, \ + AARCH64_INSN_LDST_##ls##_PAIR_##type) +/* Rn -= 16; Rn[0] = Rt; Rn[8] = Rt2; */ +#define A64_PUSH(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, -16, STORE, PRE_INDEX) +/* Rt = Rn[0]; Rt2 = Rn[8]; Rn += 16; */ +#define A64_POP(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, 16, LOAD, POST_INDEX) + +/* Add/subtract (immediate) */ +#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \ + aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \ + A64_VARIANT(sf), AARCH64_INSN_ADSB_##type) +/* Rd = Rn OP imm12 */ +#define A64_ADD_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD) +#define A64_SUB_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB) +/* Rd = Rn */ +#define A64_MOV(sf, Rd, Rn) A64_ADD_I(sf, Rd, Rn, 0) + +/* Bitfield move */ +#define A64_BITFIELD(sf, Rd, Rn, immr, imms, type) \ + aarch64_insn_gen_bitfield(Rd, Rn, immr, imms, \ + A64_VARIANT(sf), AARCH64_INSN_BITFIELD_MOVE_##type) +/* Signed, with sign replication to left and zeros to right */ +#define A64_SBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, SIGNED) +/* Unsigned, with zeros to left and right */ +#define A64_UBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, UNSIGNED) + +/* Rd = Rn << shift */ +#define A64_LSL(sf, Rd, Rn, shift) ({ \ + int sz = (sf) ? 
64 : 32; \ + A64_UBFM(sf, Rd, Rn, (unsigned)-(shift) % sz, sz - 1 - (shift)); \ +}) +/* Rd = Rn >> shift */ +#define A64_LSR(sf, Rd, Rn, shift) A64_UBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31) +/* Rd = Rn >> shift; signed */ +#define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31) + +/* Move wide (immediate) */ +#define A64_MOVEW(sf, Rd, imm16, shift, type) \ + aarch64_insn_gen_movewide(Rd, imm16, shift, \ + A64_VARIANT(sf), AARCH64_INSN_MOVEWIDE_##type) +/* Rd = Zeros (for MOVZ); + * Rd |= imm16 << shift (where shift is {0, 16, 32, 48}); + * Rd = ~Rd; (for MOVN); */ +#define A64_MOVN(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, INVERSE) +#define A64_MOVZ(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, ZERO) +#define A64_MOVK(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, KEEP) + +/* Add/subtract (shifted register) */ +#define A64_ADDSUB_SREG(sf, Rd, Rn, Rm, type) \ + aarch64_insn_gen_add_sub_shifted_reg(Rd, Rn, Rm, 0, \ + A64_VARIANT(sf), AARCH64_INSN_ADSB_##type) +/* Rd = Rn OP Rm */ +#define A64_ADD(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, ADD) +#define A64_SUB(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB) +#define A64_SUBS(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB_SETFLAGS) +/* Rd = -Rm */ +#define A64_NEG(sf, Rd, Rm) A64_SUB(sf, Rd, A64_ZR, Rm) +/* Rn - Rm; set condition flags */ +#define A64_CMP(sf, Rn, Rm) A64_SUBS(sf, A64_ZR, Rn, Rm) + +/* Data-processing (1 source) */ +#define A64_DATA1(sf, Rd, Rn, type) aarch64_insn_gen_data1(Rd, Rn, \ + A64_VARIANT(sf), AARCH64_INSN_DATA1_##type) +/* Rd = BSWAPx(Rn) */ +#define A64_REV16(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_16) +#define A64_REV32(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_32) +#define A64_REV64(Rd, Rn) A64_DATA1(1, Rd, Rn, REVERSE_64) + +/* Data-processing (2 source) */ +/* Rd = Rn OP Rm */ +#define A64_UDIV(sf, Rd, Rn, Rm) aarch64_insn_gen_data2(Rd, Rn, Rm, \ + A64_VARIANT(sf), AARCH64_INSN_DATA2_UDIV) + +/* Data-processing (3 source) */ +/* Rd = Ra + Rn * Rm */ +#define A64_MADD(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \ + A64_VARIANT(sf), AARCH64_INSN_DATA3_MADD) +/* Rd = Rn * Rm */ +#define A64_MUL(sf, Rd, Rn, Rm) A64_MADD(sf, Rd, A64_ZR, Rn, Rm) + +/* Logical (shifted register) */ +#define A64_LOGIC_SREG(sf, Rd, Rn, Rm, type) \ + aarch64_insn_gen_logical_shifted_reg(Rd, Rn, Rm, 0, \ + A64_VARIANT(sf), AARCH64_INSN_LOGIC_##type) +/* Rd = Rn OP Rm */ +#define A64_AND(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND) +#define A64_ORR(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, ORR) +#define A64_EOR(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, EOR) +#define A64_ANDS(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND_SETFLAGS) +/* Rn & Rm; set condition flags */ +#define A64_TST(sf, Rn, Rm) A64_ANDS(sf, A64_ZR, Rn, Rm) + +#endif /* _BPF_JIT_H */ diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c new file mode 100644 index 000000000000..38c42965382d --- /dev/null +++ b/arch/arm64/net/bpf_jit_comp.c @@ -0,0 +1,677 @@ +/* + * BPF JIT compiler for ARM64 + * + * Copyright (C) 2014 Zi Shen Lim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#define pr_fmt(fmt) "bpf_jit: " fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "bpf_jit.h" + +int bpf_jit_enable __read_mostly; + +#define TMP_REG_1 (MAX_BPF_REG + 0) +#define TMP_REG_2 (MAX_BPF_REG + 1) + +/* Map BPF registers to A64 registers */ +static const int bpf2a64[] = { + /* return value from in-kernel function, and exit value from eBPF */ + [BPF_REG_0] = A64_R(7), + /* arguments from eBPF program to in-kernel function */ + [BPF_REG_1] = A64_R(0), + [BPF_REG_2] = A64_R(1), + [BPF_REG_3] = A64_R(2), + [BPF_REG_4] = A64_R(3), + [BPF_REG_5] = A64_R(4), + /* callee saved registers that in-kernel function will preserve */ + [BPF_REG_6] = A64_R(19), + [BPF_REG_7] = A64_R(20), + [BPF_REG_8] = A64_R(21), + [BPF_REG_9] = A64_R(22), + /* read-only frame pointer to access stack */ + [BPF_REG_FP] = A64_FP, + /* temporary register for internal BPF JIT */ + [TMP_REG_1] = A64_R(23), + [TMP_REG_2] = A64_R(24), +}; + +struct jit_ctx { + const struct bpf_prog *prog; + int idx; + int tmp_used; + int body_offset; + int *offset; + u32 *image; +}; + +static inline void emit(const u32 insn, struct jit_ctx *ctx) +{ + if (ctx->image != NULL) + ctx->image[ctx->idx] = cpu_to_le32(insn); + + ctx->idx++; +} + +static inline void emit_a64_mov_i64(const int reg, const u64 val, + struct jit_ctx *ctx) +{ + u64 tmp = val; + int shift = 0; + + emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx); + tmp >>= 16; + shift += 16; + while (tmp) { + if (tmp & 0xffff) + emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx); + tmp >>= 16; + shift += 16; + } +} + +static inline void emit_a64_mov_i(const int is64, const int reg, + const s32 val, struct jit_ctx *ctx) +{ + u16 hi = val >> 16; + u16 lo = val & 0xffff; + + if (hi & 0x8000) { + if (hi == 0xffff) { + emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx); + } else { + emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx); + emit(A64_MOVK(is64, reg, lo, 0), ctx); + } + } else { + emit(A64_MOVZ(is64, reg, lo, 0), ctx); + if (hi) + emit(A64_MOVK(is64, reg, hi, 16), ctx); + } +} + +static inline int bpf2a64_offset(int bpf_to, int bpf_from, + const struct jit_ctx *ctx) +{ + int to = ctx->offset[bpf_to + 1]; + /* -1 to account for the Branch instruction */ + int from = ctx->offset[bpf_from + 1] - 1; + + return to - from; +} + +static inline int epilogue_offset(const struct jit_ctx *ctx) +{ + int to = ctx->offset[ctx->prog->len - 1]; + int from = ctx->idx - ctx->body_offset; + + return to - from; +} + +/* Stack must be multiples of 16B */ +#define STACK_ALIGN(sz) (((sz) + 15) & ~15) + +static void build_prologue(struct jit_ctx *ctx) +{ + const u8 r6 = bpf2a64[BPF_REG_6]; + const u8 r7 = bpf2a64[BPF_REG_7]; + const u8 r8 = bpf2a64[BPF_REG_8]; + const u8 r9 = bpf2a64[BPF_REG_9]; + const u8 fp = bpf2a64[BPF_REG_FP]; + const u8 ra = bpf2a64[BPF_REG_A]; + const u8 rx = bpf2a64[BPF_REG_X]; + const u8 tmp1 = bpf2a64[TMP_REG_1]; + const u8 tmp2 = bpf2a64[TMP_REG_2]; + int stack_size = MAX_BPF_STACK; + + stack_size += 4; /* extra for skb_copy_bits buffer */ + stack_size = STACK_ALIGN(stack_size); + + /* Save callee-saved register */ + emit(A64_PUSH(r6, r7, A64_SP), ctx); + emit(A64_PUSH(r8, r9, A64_SP), ctx); + if (ctx->tmp_used) + emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx); + + /* Set up BPF stack */ + emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx); + + /* Set up frame pointer */ + 
emit(A64_MOV(1, fp, A64_SP), ctx); + + /* Clear registers A and X */ + emit_a64_mov_i64(ra, 0, ctx); + emit_a64_mov_i64(rx, 0, ctx); +} + +static void build_epilogue(struct jit_ctx *ctx) +{ + const u8 r0 = bpf2a64[BPF_REG_0]; + const u8 r6 = bpf2a64[BPF_REG_6]; + const u8 r7 = bpf2a64[BPF_REG_7]; + const u8 r8 = bpf2a64[BPF_REG_8]; + const u8 r9 = bpf2a64[BPF_REG_9]; + const u8 fp = bpf2a64[BPF_REG_FP]; + const u8 tmp1 = bpf2a64[TMP_REG_1]; + const u8 tmp2 = bpf2a64[TMP_REG_2]; + int stack_size = MAX_BPF_STACK; + + stack_size += 4; /* extra for skb_copy_bits buffer */ + stack_size = STACK_ALIGN(stack_size); + + /* We're done with BPF stack */ + emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx); + + /* Restore callee-saved register */ + if (ctx->tmp_used) + emit(A64_POP(tmp1, tmp2, A64_SP), ctx); + emit(A64_POP(r8, r9, A64_SP), ctx); + emit(A64_POP(r6, r7, A64_SP), ctx); + + /* Restore frame pointer */ + emit(A64_MOV(1, fp, A64_SP), ctx); + + /* Set return value */ + emit(A64_MOV(1, A64_R(0), r0), ctx); + + emit(A64_RET(A64_LR), ctx); +} + +static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) +{ + const u8 code = insn->code; + const u8 dst = bpf2a64[insn->dst_reg]; + const u8 src = bpf2a64[insn->src_reg]; + const u8 tmp = bpf2a64[TMP_REG_1]; + const u8 tmp2 = bpf2a64[TMP_REG_2]; + const s16 off = insn->off; + const s32 imm = insn->imm; + const int i = insn - ctx->prog->insnsi; + const bool is64 = BPF_CLASS(code) == BPF_ALU64; + u8 jmp_cond; + s32 jmp_offset; + + switch (code) { + /* dst = src */ + case BPF_ALU | BPF_MOV | BPF_X: + case BPF_ALU64 | BPF_MOV | BPF_X: + emit(A64_MOV(is64, dst, src), ctx); + break; + /* dst = dst OP src */ + case BPF_ALU | BPF_ADD | BPF_X: + case BPF_ALU64 | BPF_ADD | BPF_X: + emit(A64_ADD(is64, dst, dst, src), ctx); + break; + case BPF_ALU | BPF_SUB | BPF_X: + case BPF_ALU64 | BPF_SUB | BPF_X: + emit(A64_SUB(is64, dst, dst, src), ctx); + break; + case BPF_ALU | BPF_AND | BPF_X: + case BPF_ALU64 | BPF_AND | BPF_X: + emit(A64_AND(is64, dst, dst, src), ctx); + break; + case BPF_ALU | BPF_OR | BPF_X: + case BPF_ALU64 | BPF_OR | BPF_X: + emit(A64_ORR(is64, dst, dst, src), ctx); + break; + case BPF_ALU | BPF_XOR | BPF_X: + case BPF_ALU64 | BPF_XOR | BPF_X: + emit(A64_EOR(is64, dst, dst, src), ctx); + break; + case BPF_ALU | BPF_MUL | BPF_X: + case BPF_ALU64 | BPF_MUL | BPF_X: + emit(A64_MUL(is64, dst, dst, src), ctx); + break; + case BPF_ALU | BPF_DIV | BPF_X: + case BPF_ALU64 | BPF_DIV | BPF_X: + emit(A64_UDIV(is64, dst, dst, src), ctx); + break; + case BPF_ALU | BPF_MOD | BPF_X: + case BPF_ALU64 | BPF_MOD | BPF_X: + ctx->tmp_used = 1; + emit(A64_UDIV(is64, tmp, dst, src), ctx); + emit(A64_MUL(is64, tmp, tmp, src), ctx); + emit(A64_SUB(is64, dst, dst, tmp), ctx); + break; + /* dst = -dst */ + case BPF_ALU | BPF_NEG: + case BPF_ALU64 | BPF_NEG: + emit(A64_NEG(is64, dst, dst), ctx); + break; + /* dst = BSWAP##imm(dst) */ + case BPF_ALU | BPF_END | BPF_FROM_LE: + case BPF_ALU | BPF_END | BPF_FROM_BE: +#ifdef CONFIG_CPU_BIG_ENDIAN + if (BPF_SRC(code) == BPF_FROM_BE) + break; +#else /* !CONFIG_CPU_BIG_ENDIAN */ + if (BPF_SRC(code) == BPF_FROM_LE) + break; +#endif + switch (imm) { + case 16: + emit(A64_REV16(is64, dst, dst), ctx); + break; + case 32: + emit(A64_REV32(is64, dst, dst), ctx); + break; + case 64: + emit(A64_REV64(dst, dst), ctx); + break; + } + break; + /* dst = imm */ + case BPF_ALU | BPF_MOV | BPF_K: + case BPF_ALU64 | BPF_MOV | BPF_K: + emit_a64_mov_i(is64, dst, imm, ctx); + break; + /* dst = dst OP imm */ + case BPF_ALU | BPF_ADD | 
BPF_K: + case BPF_ALU64 | BPF_ADD | BPF_K: + ctx->tmp_used = 1; + emit_a64_mov_i(is64, tmp, imm, ctx); + emit(A64_ADD(is64, dst, dst, tmp), ctx); + break; + case BPF_ALU | BPF_SUB | BPF_K: + case BPF_ALU64 | BPF_SUB | BPF_K: + ctx->tmp_used = 1; + emit_a64_mov_i(is64, tmp, imm, ctx); + emit(A64_SUB(is64, dst, dst, tmp), ctx); + break; + case BPF_ALU | BPF_AND | BPF_K: + case BPF_ALU64 | BPF_AND | BPF_K: + ctx->tmp_used = 1; + emit_a64_mov_i(is64, tmp, imm, ctx); + emit(A64_AND(is64, dst, dst, tmp), ctx); + break; + case BPF_ALU | BPF_OR | BPF_K: + case BPF_ALU64 | BPF_OR | BPF_K: + ctx->tmp_used = 1; + emit_a64_mov_i(is64, tmp, imm, ctx); + emit(A64_ORR(is64, dst, dst, tmp), ctx); + break; + case BPF_ALU | BPF_XOR | BPF_K: + case BPF_ALU64 | BPF_XOR | BPF_K: + ctx->tmp_used = 1; + emit_a64_mov_i(is64, tmp, imm, ctx); + emit(A64_EOR(is64, dst, dst, tmp), ctx); + break; + case BPF_ALU | BPF_MUL | BPF_K: + case BPF_ALU64 | BPF_MUL | BPF_K: + ctx->tmp_used = 1; + emit_a64_mov_i(is64, tmp, imm, ctx); + emit(A64_MUL(is64, dst, dst, tmp), ctx); + break; + case BPF_ALU | BPF_DIV | BPF_K: + case BPF_ALU64 | BPF_DIV | BPF_K: + ctx->tmp_used = 1; + emit_a64_mov_i(is64, tmp, imm, ctx); + emit(A64_UDIV(is64, dst, dst, tmp), ctx); + break; + case BPF_ALU | BPF_MOD | BPF_K: + case BPF_ALU64 | BPF_MOD | BPF_K: + ctx->tmp_used = 1; + emit_a64_mov_i(is64, tmp2, imm, ctx); + emit(A64_UDIV(is64, tmp, dst, tmp2), ctx); + emit(A64_MUL(is64, tmp, tmp, tmp2), ctx); + emit(A64_SUB(is64, dst, dst, tmp), ctx); + break; + case BPF_ALU | BPF_LSH | BPF_K: + case BPF_ALU64 | BPF_LSH | BPF_K: + emit(A64_LSL(is64, dst, dst, imm), ctx); + break; + case BPF_ALU | BPF_RSH | BPF_K: + case BPF_ALU64 | BPF_RSH | BPF_K: + emit(A64_LSR(is64, dst, dst, imm), ctx); + break; + case BPF_ALU | BPF_ARSH | BPF_K: + case BPF_ALU64 | BPF_ARSH | BPF_K: + emit(A64_ASR(is64, dst, dst, imm), ctx); + break; + +#define check_imm(bits, imm) do { \ + if ((((imm) > 0) && ((imm) >> (bits))) || \ + (((imm) < 0) && (~(imm) >> (bits)))) { \ + pr_info("[%2d] imm=%d(0x%x) out of range\n", \ + i, imm, imm); \ + return -EINVAL; \ + } \ +} while (0) +#define check_imm19(imm) check_imm(19, imm) +#define check_imm26(imm) check_imm(26, imm) + + /* JUMP off */ + case BPF_JMP | BPF_JA: + jmp_offset = bpf2a64_offset(i + off, i, ctx); + check_imm26(jmp_offset); + emit(A64_B(jmp_offset), ctx); + break; + /* IF (dst COND src) JUMP off */ + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_X: + emit(A64_CMP(1, dst, src), ctx); +emit_cond_jmp: + jmp_offset = bpf2a64_offset(i + off, i, ctx); + check_imm19(jmp_offset); + switch (BPF_OP(code)) { + case BPF_JEQ: + jmp_cond = A64_COND_EQ; + break; + case BPF_JGT: + jmp_cond = A64_COND_HI; + break; + case BPF_JGE: + jmp_cond = A64_COND_CS; + break; + case BPF_JNE: + jmp_cond = A64_COND_NE; + break; + case BPF_JSGT: + jmp_cond = A64_COND_GT; + break; + case BPF_JSGE: + jmp_cond = A64_COND_GE; + break; + default: + return -EFAULT; + } + emit(A64_B_(jmp_cond, jmp_offset), ctx); + break; + case BPF_JMP | BPF_JSET | BPF_X: + emit(A64_TST(1, dst, src), ctx); + goto emit_cond_jmp; + /* IF (dst COND imm) JUMP off */ + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_K: + ctx->tmp_used = 1; + emit_a64_mov_i(1, tmp, imm, ctx); + 
emit(A64_CMP(1, dst, tmp), ctx); + goto emit_cond_jmp; + case BPF_JMP | BPF_JSET | BPF_K: + ctx->tmp_used = 1; + emit_a64_mov_i(1, tmp, imm, ctx); + emit(A64_TST(1, dst, tmp), ctx); + goto emit_cond_jmp; + /* function call */ + case BPF_JMP | BPF_CALL: + { + const u8 r0 = bpf2a64[BPF_REG_0]; + const u64 func = (u64)__bpf_call_base + imm; + + ctx->tmp_used = 1; + emit_a64_mov_i64(tmp, func, ctx); + emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); + emit(A64_MOV(1, A64_FP, A64_SP), ctx); + emit(A64_BLR(tmp), ctx); + emit(A64_MOV(1, r0, A64_R(0)), ctx); + emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); + break; + } + /* function return */ + case BPF_JMP | BPF_EXIT: + if (i == ctx->prog->len - 1) + break; + jmp_offset = epilogue_offset(ctx); + check_imm26(jmp_offset); + emit(A64_B(jmp_offset), ctx); + break; + + /* LDX: dst = *(size *)(src + off) */ + case BPF_LDX | BPF_MEM | BPF_W: + case BPF_LDX | BPF_MEM | BPF_H: + case BPF_LDX | BPF_MEM | BPF_B: + case BPF_LDX | BPF_MEM | BPF_DW: + ctx->tmp_used = 1; + emit_a64_mov_i(1, tmp, off, ctx); + switch (BPF_SIZE(code)) { + case BPF_W: + emit(A64_LDR32(dst, src, tmp), ctx); + break; + case BPF_H: + emit(A64_LDRH(dst, src, tmp), ctx); + break; + case BPF_B: + emit(A64_LDRB(dst, src, tmp), ctx); + break; + case BPF_DW: + emit(A64_LDR64(dst, src, tmp), ctx); + break; + } + break; + + /* ST: *(size *)(dst + off) = imm */ + case BPF_ST | BPF_MEM | BPF_W: + case BPF_ST | BPF_MEM | BPF_H: + case BPF_ST | BPF_MEM | BPF_B: + case BPF_ST | BPF_MEM | BPF_DW: + goto notyet; + + /* STX: *(size *)(dst + off) = src */ + case BPF_STX | BPF_MEM | BPF_W: + case BPF_STX | BPF_MEM | BPF_H: + case BPF_STX | BPF_MEM | BPF_B: + case BPF_STX | BPF_MEM | BPF_DW: + ctx->tmp_used = 1; + emit_a64_mov_i(1, tmp, off, ctx); + switch (BPF_SIZE(code)) { + case BPF_W: + emit(A64_STR32(src, dst, tmp), ctx); + break; + case BPF_H: + emit(A64_STRH(src, dst, tmp), ctx); + break; + case BPF_B: + emit(A64_STRB(src, dst, tmp), ctx); + break; + case BPF_DW: + emit(A64_STR64(src, dst, tmp), ctx); + break; + } + break; + /* STX XADD: lock *(u32 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_W: + /* STX XADD: lock *(u64 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_DW: + goto notyet; + + /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */ + case BPF_LD | BPF_ABS | BPF_W: + case BPF_LD | BPF_ABS | BPF_H: + case BPF_LD | BPF_ABS | BPF_B: + /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */ + case BPF_LD | BPF_IND | BPF_W: + case BPF_LD | BPF_IND | BPF_H: + case BPF_LD | BPF_IND | BPF_B: + { + const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */ + const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */ + const u8 fp = bpf2a64[BPF_REG_FP]; + const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */ + const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */ + const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */ + const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */ + const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) 
*/ + int size; + + emit(A64_MOV(1, r1, r6), ctx); + emit_a64_mov_i(0, r2, imm, ctx); + if (BPF_MODE(code) == BPF_IND) + emit(A64_ADD(0, r2, r2, src), ctx); + switch (BPF_SIZE(code)) { + case BPF_W: + size = 4; + break; + case BPF_H: + size = 2; + break; + case BPF_B: + size = 1; + break; + default: + return -EINVAL; + } + emit_a64_mov_i64(r3, size, ctx); + emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx); + emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx); + emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); + emit(A64_MOV(1, A64_FP, A64_SP), ctx); + emit(A64_BLR(r5), ctx); + emit(A64_MOV(1, r0, A64_R(0)), ctx); + emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); + + jmp_offset = epilogue_offset(ctx); + check_imm19(jmp_offset); + emit(A64_CBZ(1, r0, jmp_offset), ctx); + emit(A64_MOV(1, r5, r0), ctx); + switch (BPF_SIZE(code)) { + case BPF_W: + emit(A64_LDR32(r0, r5, A64_ZR), ctx); +#ifndef CONFIG_CPU_BIG_ENDIAN + emit(A64_REV32(0, r0, r0), ctx); +#endif + break; + case BPF_H: + emit(A64_LDRH(r0, r5, A64_ZR), ctx); +#ifndef CONFIG_CPU_BIG_ENDIAN + emit(A64_REV16(0, r0, r0), ctx); +#endif + break; + case BPF_B: + emit(A64_LDRB(r0, r5, A64_ZR), ctx); + break; + } + break; + } +notyet: + pr_info_once("*** NOT YET: opcode %02x ***\n", code); + return -EFAULT; + + default: + pr_err_once("unknown opcode %02x\n", code); + return -EINVAL; + } + + return 0; +} + +static int build_body(struct jit_ctx *ctx) +{ + const struct bpf_prog *prog = ctx->prog; + int i; + + for (i = 0; i < prog->len; i++) { + const struct bpf_insn *insn = &prog->insnsi[i]; + int ret; + + if (ctx->image == NULL) + ctx->offset[i] = ctx->idx; + + ret = build_insn(insn, ctx); + if (ret) + return ret; + } + + return 0; +} + +static inline void bpf_flush_icache(void *start, void *end) +{ + flush_icache_range((unsigned long)start, (unsigned long)end); +} + +void bpf_jit_compile(struct bpf_prog *prog) +{ + /* Nothing to do here. We support Internal BPF. */ +} + +void bpf_int_jit_compile(struct bpf_prog *prog) +{ + struct jit_ctx ctx; + int image_size; + + if (!bpf_jit_enable) + return; + + if (!prog || !prog->len) + return; + + memset(&ctx, 0, sizeof(ctx)); + ctx.prog = prog; + + ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL); + if (ctx.offset == NULL) + return; + + /* 1. Initial fake pass to compute ctx->idx. */ + + /* Fake pass to fill in ctx->offset. */ + if (build_body(&ctx)) + goto out; + + build_prologue(&ctx); + + build_epilogue(&ctx); + + /* Now we know the actual image size. */ + image_size = sizeof(u32) * ctx.idx; + ctx.image = module_alloc(image_size); + if (unlikely(ctx.image == NULL)) + goto out; + + /* 2. Now, the actual pass. */ + + ctx.idx = 0; + build_prologue(&ctx); + + ctx.body_offset = ctx.idx; + if (build_body(&ctx)) + goto out; + + build_epilogue(&ctx); + + /* And we're done. */ + if (bpf_jit_enable > 1) + bpf_jit_dump(prog->len, image_size, 2, ctx.image); + + bpf_flush_icache(ctx.image, ctx.image + ctx.idx); + prog->bpf_func = (void *)ctx.image; + prog->jited = 1; + +out: + kfree(ctx.offset); +} + +void bpf_jit_free(struct bpf_prog *prog) +{ + if (prog->jited) + module_free(NULL, prog->bpf_func); + + kfree(prog); +} -- cgit v1.2.3 From e3672649faae400e8a598938766a63f395a27ae6 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Mon, 8 Sep 2014 12:44:48 +0100 Subject: arm64: defconfig: increase NR_CPUS default to 64 Raising the current maximum limit to 64. This is needed for Cavium's Thunder systems that will have at least 48 cores per die. 
The change keeps the current memory footprint in cpu mask structures. It does not break existing code. Setting the maximum to 64 cpus still boots systems with less cpus. Mark's Juno happily booted with a NR_CPUS=64 kernel. Tested on our Thunder system with 48 cores. We could see interrupts to all cores. Cc: Radha Mohan Chintakuntla Cc: Mark Rutland Acked-by: Arnd Bergmann Signed-off-by: Robert Richter Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index cfea6237a9e0..f0d3a2d85a5b 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -253,11 +253,11 @@ config SCHED_SMT places. If unsure say N here. config NR_CPUS - int "Maximum number of CPUs (2-32)" - range 2 32 + int "Maximum number of CPUs (2-64)" + range 2 64 depends on SMP # These have to remain sorted largest to smallest - default "8" + default "64" config HOTPLUG_CPU bool "Support for hot-pluggable CPUs" -- cgit v1.2.3 From 3f8161b260cb9232bb926a5d6c1cc2672fea07c7 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Wed, 27 Nov 2013 16:22:55 +0000 Subject: Documentation: arm: define DT idle states bindings ARM based platforms implement a variety of power management schemes that allow processors to enter idle states at run-time. The parameters defining these idle states vary on a per-platform basis forcing the OS to hardcode the state parameters in platform specific static tables whose size grows as the number of platforms supported in the kernel increases and hampers device drivers standardization. Therefore, this patch aims at standardizing idle state device tree bindings for ARM platforms. Bindings define idle state parameters inclusive of entry methods and state latencies, to allow operating systems to retrieve the configuration entries from the device tree and initialize the related power management drivers, paving the way for common code in the kernel to deal with idle states and removing the need for static data in current and previous kernel versions. ARM64 platforms require the DT to define an entry-method property for idle states. On system implementing PSCI as an enable-method to enter low-power states the PSCI CPU suspend method requires the power_state parameter to be passed to the PSCI CPU suspend function. This parameter is specific to a power state and platform specific, therefore must be provided by firmware to the OS in order to enable proper call sequence. Thus, this patch also adds a property in the PSCI bindings that describes how the PSCI CPU suspend power_state parameter should be defined in DT in all device nodes that rely on PSCI CPU suspend method usage. Acked-by: Catalin Marinas Acked-by: Daniel Lezcano Acked-by: Nicolas Pitre Reviewed-by: Rob Herring Reviewed-by: Sebastian Capella Signed-off-by: Lorenzo Pieralisi Signed-off-by: Catalin Marinas --- Documentation/devicetree/bindings/arm/cpus.txt | 8 + .../devicetree/bindings/arm/idle-states.txt | 679 +++++++++++++++++++++ Documentation/devicetree/bindings/arm/psci.txt | 14 +- 3 files changed, 700 insertions(+), 1 deletion(-) create mode 100644 Documentation/devicetree/bindings/arm/idle-states.txt diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt index 298e2f6b33c6..6fd0f15e899a 100644 --- a/Documentation/devicetree/bindings/arm/cpus.txt +++ b/Documentation/devicetree/bindings/arm/cpus.txt @@ -219,6 +219,12 @@ nodes to be present and contain the properties described below. 
Value type: Definition: Specifies the ACC[2] node associated with this CPU. + - cpu-idle-states + Usage: Optional + Value type: + Definition: + # List of phandles to idle state nodes supported + by this cpu [3]. Example 1 (dual-cluster big.LITTLE system 32-bit): @@ -415,3 +421,5 @@ cpus { -- [1] arm/msm/qcom,saw2.txt [2] arm/msm/qcom,kpss-acc.txt +[3] ARM Linux kernel documentation - idle states bindings + Documentation/devicetree/bindings/arm/idle-states.txt diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt new file mode 100644 index 000000000000..37375c7f3ccc --- /dev/null +++ b/Documentation/devicetree/bindings/arm/idle-states.txt @@ -0,0 +1,679 @@ +========================================== +ARM idle states binding description +========================================== + +========================================== +1 - Introduction +========================================== + +ARM systems contain HW capable of managing power consumption dynamically, +where cores can be put in different low-power states (ranging from simple +wfi to power gating) according to OS PM policies. The CPU states representing +the range of dynamic idle states that a processor can enter at run-time, can be +specified through device tree bindings representing the parameters required +to enter/exit specific idle states on a given processor. + +According to the Server Base System Architecture document (SBSA, [3]), the +power states an ARM CPU can be put into are identified by the following list: + +- Running +- Idle_standby +- Idle_retention +- Sleep +- Off + +The power states described in the SBSA document define the basic CPU states on +top of which ARM platforms implement power management schemes that allow an OS +PM implementation to put the processor in different idle states (which include +states listed above; "off" state is not an idle state since it does not have +wake-up capabilities, hence it is not considered in this document). + +Idle state parameters (eg entry latency) are platform specific and need to be +characterized with bindings that provide the required information to OS PM +code so that it can build the required tables and use them at runtime. + +The device tree binding definition for ARM idle states is the subject of this +document. + +=========================================== +2 - idle-states definitions +=========================================== + +Idle states are characterized for a specific system through a set of +timing and energy related properties, that underline the HW behaviour +triggered upon idle states entry and exit. + +The following diagram depicts the CPU execution phases and related timing +properties required to enter and exit an idle state: + +..__[EXEC]__|__[PREP]__|__[ENTRY]__|__[IDLE]__|__[EXIT]__|__[EXEC]__.. + | | | | | + + |<------ entry ------->| + | latency | + |<- exit ->| + | latency | + |<-------- min-residency -------->| + |<------- wakeup-latency ------->| + + Diagram 1: CPU idle state execution phases + +EXEC: Normal CPU execution. + +PREP: Preparation phase before committing the hardware to idle mode + like cache flushing. This is abortable on pending wake-up + event conditions. The abort latency is assumed to be negligible + (i.e. less than the ENTRY + EXIT duration). If aborted, CPU + goes back to EXEC. This phase is optional. If not abortable, + this should be included in the ENTRY phase instead. + +ENTRY: The hardware is committed to idle mode. 
This period must run + to completion up to IDLE before anything else can happen. + +IDLE: This is the actual energy-saving idle period. This may last + between 0 and infinite time, until a wake-up event occurs. + +EXIT: Period during which the CPU is brought back to operational + mode (EXEC). + +entry-latency: Worst case latency required to enter the idle state. The +exit-latency may be guaranteed only after entry-latency has passed. + +min-residency: Minimum period, including preparation and entry, for a given +idle state to be worthwhile energywise. + +wakeup-latency: Maximum delay between the signaling of a wake-up event and the +CPU being able to execute normal code again. If not specified, this is assumed +to be entry-latency + exit-latency. + +These timing parameters can be used by an OS in different circumstances. + +An idle CPU requires the expected min-residency time to select the most +appropriate idle state based on the expected expiry time of the next IRQ +(ie wake-up) that causes the CPU to return to the EXEC phase. + +An operating system scheduler may need to compute the shortest wake-up delay +for CPUs in the system by detecting how long will it take to get a CPU out +of an idle state, eg: + +wakeup-delay = exit-latency + max(entry-latency - (now - entry-timestamp), 0) + +In other words, the scheduler can make its scheduling decision by selecting +(eg waking-up) the CPU with the shortest wake-up latency. +The wake-up latency must take into account the entry latency if that period +has not expired. The abortable nature of the PREP period can be ignored +if it cannot be relied upon (e.g. the PREP deadline may occur much sooner than +the worst case since it depends on the CPU operating conditions, ie caches +state). + +An OS has to reliably probe the wakeup-latency since some devices can enforce +latency constraints guarantees to work properly, so the OS has to detect the +worst case wake-up latency it can incur if a CPU is allowed to enter an +idle state, and possibly to prevent that to guarantee reliable device +functioning. + +The min-residency time parameter deserves further explanation since it is +expressed in time units but must factor in energy consumption coefficients. + +The energy consumption of a cpu when it enters a power state can be roughly +characterised by the following graph: + + | + | + | + e | + n | /--- + e | /------ + r | /------ + g | /----- + y | /------ + | ---- + | /| + | / | + | / | + | / | + | / | + | / | + |/ | + -----|-------+---------------------------------- + 0| 1 time(ms) + + Graph 1: Energy vs time example + +The graph is split in two parts delimited by time 1ms on the X-axis. +The graph curve with X-axis values = { x | 0 < x < 1ms } has a steep slope +and denotes the energy costs incurred whilst entering and leaving the idle +state. +The graph curve in the area delimited by X-axis values = {x | x > 1ms } has +shallower slope and essentially represents the energy consumption of the idle +state. + +min-residency is defined for a given idle state as the minimum expected +residency time for a state (inclusive of preparation and entry) after +which choosing that state become the most energy efficient option. A good +way to visualise this, is by taking the same graph above and comparing some +states energy consumptions plots. 
+ +For sake of simplicity, let's consider a system with two idle states IDLE1, +and IDLE2: + + | + | + | + | /-- IDLE1 + e | /--- + n | /---- + e | /--- + r | /-----/--------- IDLE2 + g | /-------/--------- + y | ------------ /---| + | / /---- | + | / /--- | + | / /---- | + | / /--- | + | --- | + | / | + | / | + |/ | time + ---/----------------------------+------------------------ + |IDLE1-energy < IDLE2-energy | IDLE2-energy < IDLE1-energy + | + IDLE2-min-residency + + Graph 2: idle states min-residency example + +In graph 2 above, that takes into account idle states entry/exit energy +costs, it is clear that if the idle state residency time (ie time till next +wake-up IRQ) is less than IDLE2-min-residency, IDLE1 is the better idle state +choice energywise. + +This is mainly down to the fact that IDLE1 entry/exit energy costs are lower +than IDLE2. + +However, the lower power consumption (ie shallower energy curve slope) of idle +state IDLE2 implies that after a suitable time, IDLE2 becomes more energy +efficient. + +The time at which IDLE2 becomes more energy efficient than IDLE1 (and other +shallower states in a system with multiple idle states) is defined +IDLE2-min-residency and corresponds to the time when energy consumption of +IDLE1 and IDLE2 states breaks even. + +The definitions provided in this section underpin the idle states +properties specification that is the subject of the following sections. + +=========================================== +3 - idle-states node +=========================================== + +ARM processor idle states are defined within the idle-states node, which is +a direct child of the cpus node [1] and provides a container where the +processor idle states, defined as device tree nodes, are listed. + +- idle-states node + + Usage: Optional - On ARM systems, it is a container of processor idle + states nodes. If the system does not provide CPU + power management capabilities or the processor just + supports idle_standby an idle-states node is not + required. + + Description: idle-states node is a container node, where its + subnodes describe the CPU idle states. + + Node name must be "idle-states". + + The idle-states node's parent node must be the cpus node. + + The idle-states node's child nodes can be: + + - one or more state nodes + + Any other configuration is considered invalid. + + An idle-states node defines the following properties: + + - entry-method + Value type: + Usage and definition depend on ARM architecture version. + # On ARM v8 64-bit this property is required and must + be one of: + - "psci" (see bindings in [2]) + # On ARM 32-bit systems this property is optional + +The nodes describing the idle states (state) can only be defined within the +idle-states node, any other configuration is considered invalid and therefore +must be ignored. + +=========================================== +4 - state node +=========================================== + +A state node represents an idle state description and must be defined as +follows: + +- state node + + Description: must be child of the idle-states node + + The state node name shall follow standard device tree naming + rules ([5], 2.2.1 "Node names"), in particular state nodes which + are siblings within a single common parent must be given a unique name. + + The idle state entered by executing the wfi instruction (idle_standby + SBSA,[3][4]) is considered standard on all ARM platforms and therefore + must not be listed. 
+ + With the definitions provided above, the following list represents + the valid properties for a state node: + + - compatible + Usage: Required + Value type: + Definition: Must be "arm,idle-state". + + - local-timer-stop + Usage: See definition + Value type: + Definition: if present the CPU local timer control logic is + lost on state entry, otherwise it is retained. + + - entry-latency-us + Usage: Required + Value type: + Definition: u32 value representing worst case latency in + microseconds required to enter the idle state. + The exit-latency-us duration may be guaranteed + only after entry-latency-us has passed. + + - exit-latency-us + Usage: Required + Value type: + Definition: u32 value representing worst case latency + in microseconds required to exit the idle state. + + - min-residency-us + Usage: Required + Value type: + Definition: u32 value representing minimum residency duration + in microseconds, inclusive of preparation and + entry, for this idle state to be considered + worthwhile energy wise (refer to section 2 of + this document for a complete description). + + - wakeup-latency-us: + Usage: Optional + Value type: + Definition: u32 value representing maximum delay between the + signaling of a wake-up event and the CPU being + able to execute normal code again. If omitted, + this is assumed to be equal to: + + entry-latency-us + exit-latency-us + + It is important to supply this value on systems + where the duration of PREP phase (see diagram 1, + section 2) is non-neglibigle. + In such systems entry-latency-us + exit-latency-us + will exceed wakeup-latency-us by this duration. + + In addition to the properties listed above, a state node may require + additional properties specifics to the entry-method defined in the + idle-states node, please refer to the entry-method bindings + documentation for properties definitions. 
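
For illustration only, the sketch below shows one way operating system code
could retrieve the timing properties listed above from a state node. The
structure and helper names are assumptions made for this example and are not
defined by this binding; only the property names and the wakeup-latency-us
default come from this document.

	#include <linux/of.h>
	#include <linux/types.h>

	struct example_idle_state {
		u32 entry_latency_us;
		u32 exit_latency_us;
		u32 min_residency_us;
		u32 wakeup_latency_us;
	};

	/* Parse the timing properties of a single state node. */
	static int example_parse_state(struct device_node *state_node,
				       struct example_idle_state *s)
	{
		int ret;

		ret = of_property_read_u32(state_node, "entry-latency-us",
					   &s->entry_latency_us);
		if (ret)
			return ret;

		ret = of_property_read_u32(state_node, "exit-latency-us",
					   &s->exit_latency_us);
		if (ret)
			return ret;

		ret = of_property_read_u32(state_node, "min-residency-us",
					   &s->min_residency_us);
		if (ret)
			return ret;

		/* wakeup-latency-us is optional; default to entry + exit. */
		ret = of_property_read_u32(state_node, "wakeup-latency-us",
					   &s->wakeup_latency_us);
		if (ret)
			s->wakeup_latency_us = s->entry_latency_us +
					       s->exit_latency_us;

		return 0;
	}

Such a parser is only a consumer-side sketch; the normative content of this
binding is the set of properties and defaults described above.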
+ +=========================================== +4 - Examples +=========================================== + +Example 1 (ARM 64-bit, 16-cpu system, PSCI enable-method): + +cpus { + #size-cells = <0>; + #address-cells = <2>; + + CPU0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x0 0x0>; + enable-method = "psci"; + cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0 + &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>; + }; + + CPU1: cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x0 0x1>; + enable-method = "psci"; + cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0 + &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>; + }; + + CPU2: cpu@100 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x0 0x100>; + enable-method = "psci"; + cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0 + &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>; + }; + + CPU3: cpu@101 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x0 0x101>; + enable-method = "psci"; + cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0 + &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>; + }; + + CPU4: cpu@10000 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x0 0x10000>; + enable-method = "psci"; + cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0 + &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>; + }; + + CPU5: cpu@10001 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x0 0x10001>; + enable-method = "psci"; + cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0 + &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>; + }; + + CPU6: cpu@10100 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x0 0x10100>; + enable-method = "psci"; + cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0 + &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>; + }; + + CPU7: cpu@10101 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x0 0x10101>; + enable-method = "psci"; + cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0 + &CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>; + }; + + CPU8: cpu@100000000 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x1 0x0>; + enable-method = "psci"; + cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0 + &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>; + }; + + CPU9: cpu@100000001 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x1 0x1>; + enable-method = "psci"; + cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0 + &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>; + }; + + CPU10: cpu@100000100 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x1 0x100>; + enable-method = "psci"; + cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0 + &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>; + }; + + CPU11: cpu@100000101 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x1 0x101>; + enable-method = "psci"; + cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0 + &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>; + }; + + CPU12: cpu@100010000 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x1 0x10000>; + enable-method = "psci"; + cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0 + &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>; + }; + + CPU13: cpu@100010001 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x1 0x10001>; + enable-method = "psci"; + cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0 + &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>; + }; + + CPU14: cpu@100010100 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x1 0x10100>; + enable-method = "psci"; + 
cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0 + &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>; + }; + + CPU15: cpu@100010101 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x1 0x10101>; + enable-method = "psci"; + cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0 + &CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>; + }; + + idle-states { + entry-method = "arm,psci"; + + CPU_RETENTION_0_0: cpu-retention-0-0 { + compatible = "arm,idle-state"; + arm,psci-suspend-param = <0x0010000>; + entry-latency-us = <20>; + exit-latency-us = <40>; + min-residency-us = <80>; + }; + + CLUSTER_RETENTION_0: cluster-retention-0 { + compatible = "arm,idle-state"; + local-timer-stop; + arm,psci-suspend-param = <0x1010000>; + entry-latency-us = <50>; + exit-latency-us = <100>; + min-residency-us = <250>; + wakeup-latency-us = <130>; + }; + + CPU_SLEEP_0_0: cpu-sleep-0-0 { + compatible = "arm,idle-state"; + local-timer-stop; + arm,psci-suspend-param = <0x0010000>; + entry-latency-us = <250>; + exit-latency-us = <500>; + min-residency-us = <950>; + }; + + CLUSTER_SLEEP_0: cluster-sleep-0 { + compatible = "arm,idle-state"; + local-timer-stop; + arm,psci-suspend-param = <0x1010000>; + entry-latency-us = <600>; + exit-latency-us = <1100>; + min-residency-us = <2700>; + wakeup-latency-us = <1500>; + }; + + CPU_RETENTION_1_0: cpu-retention-1-0 { + compatible = "arm,idle-state"; + arm,psci-suspend-param = <0x0010000>; + entry-latency-us = <20>; + exit-latency-us = <40>; + min-residency-us = <90>; + }; + + CLUSTER_RETENTION_1: cluster-retention-1 { + compatible = "arm,idle-state"; + local-timer-stop; + arm,psci-suspend-param = <0x1010000>; + entry-latency-us = <50>; + exit-latency-us = <100>; + min-residency-us = <270>; + wakeup-latency-us = <100>; + }; + + CPU_SLEEP_1_0: cpu-sleep-1-0 { + compatible = "arm,idle-state"; + local-timer-stop; + arm,psci-suspend-param = <0x0010000>; + entry-latency-us = <70>; + exit-latency-us = <100>; + min-residency-us = <300>; + wakeup-latency-us = <150>; + }; + + CLUSTER_SLEEP_1: cluster-sleep-1 { + compatible = "arm,idle-state"; + local-timer-stop; + arm,psci-suspend-param = <0x1010000>; + entry-latency-us = <500>; + exit-latency-us = <1200>; + min-residency-us = <3500>; + wakeup-latency-us = <1300>; + }; + }; + +}; + +Example 2 (ARM 32-bit, 8-cpu system, two clusters): + +cpus { + #size-cells = <0>; + #address-cells = <1>; + + CPU0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a15"; + reg = <0x0>; + cpu-idle-states = <&CPU_SLEEP_0_0 &CLUSTER_SLEEP_0>; + }; + + CPU1: cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a15"; + reg = <0x1>; + cpu-idle-states = <&CPU_SLEEP_0_0 &CLUSTER_SLEEP_0>; + }; + + CPU2: cpu@2 { + device_type = "cpu"; + compatible = "arm,cortex-a15"; + reg = <0x2>; + cpu-idle-states = <&CPU_SLEEP_0_0 &CLUSTER_SLEEP_0>; + }; + + CPU3: cpu@3 { + device_type = "cpu"; + compatible = "arm,cortex-a15"; + reg = <0x3>; + cpu-idle-states = <&CPU_SLEEP_0_0 &CLUSTER_SLEEP_0>; + }; + + CPU4: cpu@100 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; + reg = <0x100>; + cpu-idle-states = <&CPU_SLEEP_1_0 &CLUSTER_SLEEP_1>; + }; + + CPU5: cpu@101 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; + reg = <0x101>; + cpu-idle-states = <&CPU_SLEEP_1_0 &CLUSTER_SLEEP_1>; + }; + + CPU6: cpu@102 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; + reg = <0x102>; + cpu-idle-states = <&CPU_SLEEP_1_0 &CLUSTER_SLEEP_1>; + }; + + CPU7: cpu@103 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; + reg = <0x103>; + cpu-idle-states = 
<&CPU_SLEEP_1_0 &CLUSTER_SLEEP_1>; + }; + + idle-states { + CPU_SLEEP_0_0: cpu-sleep-0-0 { + compatible = "arm,idle-state"; + local-timer-stop; + entry-latency-us = <200>; + exit-latency-us = <100>; + min-residency-us = <400>; + wakeup-latency-us = <250>; + }; + + CLUSTER_SLEEP_0: cluster-sleep-0 { + compatible = "arm,idle-state"; + local-timer-stop; + entry-latency-us = <500>; + exit-latency-us = <1500>; + min-residency-us = <2500>; + wakeup-latency-us = <1700>; + }; + + CPU_SLEEP_1_0: cpu-sleep-1-0 { + compatible = "arm,idle-state"; + local-timer-stop; + entry-latency-us = <300>; + exit-latency-us = <500>; + min-residency-us = <900>; + wakeup-latency-us = <600>; + }; + + CLUSTER_SLEEP_1: cluster-sleep-1 { + compatible = "arm,idle-state"; + local-timer-stop; + entry-latency-us = <800>; + exit-latency-us = <2000>; + min-residency-us = <6500>; + wakeup-latency-us = <2300>; + }; + }; + +}; + +=========================================== +5 - References +=========================================== + +[1] ARM Linux Kernel documentation - CPUs bindings + Documentation/devicetree/bindings/arm/cpus.txt + +[2] ARM Linux Kernel documentation - PSCI bindings + Documentation/devicetree/bindings/arm/psci.txt + +[3] ARM Server Base System Architecture (SBSA) + http://infocenter.arm.com/help/index.jsp + +[4] ARM Architecture Reference Manuals + http://infocenter.arm.com/help/index.jsp + +[5] ePAPR standard + https://www.power.org/documentation/epapr-version-1-1/ diff --git a/Documentation/devicetree/bindings/arm/psci.txt b/Documentation/devicetree/bindings/arm/psci.txt index b4a58f39223c..5aa40ede0e99 100644 --- a/Documentation/devicetree/bindings/arm/psci.txt +++ b/Documentation/devicetree/bindings/arm/psci.txt @@ -50,6 +50,16 @@ Main node optional properties: - migrate : Function ID for MIGRATE operation +Device tree nodes that require usage of PSCI CPU_SUSPEND function (ie idle +state nodes, as per bindings in [1]) must specify the following properties: + +- arm,psci-suspend-param + Usage: Required for state nodes[1] if the corresponding + idle-states node entry-method property is set + to "psci". + Value type: + Definition: power_state parameter to pass to the PSCI + suspend call. Example: @@ -64,7 +74,6 @@ Case 1: PSCI v0.1 only. migrate = <0x95c10003>; }; - Case 2: PSCI v0.2 only psci { @@ -88,3 +97,6 @@ Case 3: PSCI v0.2 and PSCI v0.1. ... }; + +[1] Kernel documentation - ARM idle states bindings + Documentation/devicetree/bindings/arm/idle-states.txt -- cgit v1.2.3 From 714f59925595b9c2ea9c22b107b340d38e3b3bc9 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 7 Aug 2014 14:54:50 +0100 Subject: arm64: kernel: refactor the CPU suspend API for retention states CPU suspend is the standard kernel interface to be used to enter low-power states on ARM64 systems. Current cpu_suspend implementation by default assumes that all low power states are losing the CPU context, so the CPU registers must be saved and cleaned to DRAM upon state entry. Furthermore, the current cpu_suspend() implementation assumes that if the CPU suspend back-end method returns when called, this has to be considered an error regardless of the return code (which can be successful) since the CPU was not expected to return from a code path that is different from cpu_resume code path - eg returning from the reset vector. 
All in all this means that the current API does not cope well with low-power states that preserve the CPU context when entered (ie retention states), since first of all the context is saved for nothing on state entry for those states and a successful state entry can return as a normal function return, which is considered an error by the current CPU suspend implementation. This patch refactors the cpu_suspend() API so that it can be split in two separate functionalities. The arm64 cpu_suspend API just provides a wrapper around CPU suspend operation hook. A new function is introduced (for architecture code use only) for states that require context saving upon entry: __cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) __cpu_suspend() saves the context on function entry and calls the so called suspend finisher (ie fn) to complete the suspend operation. The finisher is not expected to return, unless it fails in which case the error is propagated back to the __cpu_suspend caller. The API refactoring results in the following pseudo code call sequence for a suspending CPU, when triggered from a kernel subsystem: /* * int cpu_suspend(unsigned long idx) * @idx: idle state index */ { -> cpu_suspend(idx) |---> CPU operations suspend hook called, if present |--> if (retention_state) |--> direct suspend back-end call (eg PSCI suspend) else |--> __cpu_suspend(idx, &back_end_finisher); } By refactoring the cpu_suspend API this way, the CPU operations back-end has a chance to detect whether idle states require state saving or not and can call the required suspend operations accordingly either through simple function call or indirectly through __cpu_suspend() which carries out state saving and suspend finisher dispatching to complete idle state entry. Reviewed-by: Catalin Marinas Reviewed-by: Hanjun Guo Signed-off-by: Lorenzo Pieralisi Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/suspend.h | 1 + arch/arm64/kernel/sleep.S | 47 ++++++++++++++++++++++++++++----------- arch/arm64/kernel/suspend.c | 48 ++++++++++++++++++++++++---------------- 3 files changed, 64 insertions(+), 32 deletions(-) diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h index e9c149c042e0..456d67c1f0fa 100644 --- a/arch/arm64/include/asm/suspend.h +++ b/arch/arm64/include/asm/suspend.h @@ -21,6 +21,7 @@ struct sleep_save_sp { phys_addr_t save_ptr_stash_phys; }; +extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long)); extern void cpu_resume(void); extern int cpu_suspend(unsigned long); diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index b1925729c692..a564b440416a 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -49,28 +49,39 @@ orr \dst, \dst, \mask // dst|=(aff3>>rs3) .endm /* - * Save CPU state for a suspend. This saves callee registers, and allocates - * space on the kernel stack to save the CPU specific registers + some - * other data for resume. + * Save CPU state for a suspend and execute the suspend finisher. + * On success it will return 0 through cpu_resume - ie through a CPU + * soft/hard reboot from the reset vector. + * On failure it returns the suspend finisher return value or force + * -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher + * is not allowed to return, if it does this must be considered failure). + * It saves callee registers, and allocates space on the kernel stack + * to save the CPU specific registers + some other data for resume. 
* * x0 = suspend finisher argument + * x1 = suspend finisher function pointer */ -ENTRY(__cpu_suspend) +ENTRY(__cpu_suspend_enter) stp x29, lr, [sp, #-96]! stp x19, x20, [sp,#16] stp x21, x22, [sp,#32] stp x23, x24, [sp,#48] stp x25, x26, [sp,#64] stp x27, x28, [sp,#80] + /* + * Stash suspend finisher and its argument in x20 and x19 + */ + mov x19, x0 + mov x20, x1 mov x2, sp sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx - mov x1, sp + mov x0, sp /* - * x1 now points to struct cpu_suspend_ctx allocated on the stack + * x0 now points to struct cpu_suspend_ctx allocated on the stack */ - str x2, [x1, #CPU_CTX_SP] - ldr x2, =sleep_save_sp - ldr x2, [x2, #SLEEP_SAVE_SP_VIRT] + str x2, [x0, #CPU_CTX_SP] + ldr x1, =sleep_save_sp + ldr x1, [x1, #SLEEP_SAVE_SP_VIRT] #ifdef CONFIG_SMP mrs x7, mpidr_el1 ldr x9, =mpidr_hash @@ -82,11 +93,21 @@ ENTRY(__cpu_suspend) ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS] ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)] compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10 - add x2, x2, x8, lsl #3 + add x1, x1, x8, lsl #3 #endif - bl __cpu_suspend_finisher + bl __cpu_suspend_save + /* + * Grab suspend finisher in x20 and its argument in x19 + */ + mov x0, x19 + mov x1, x20 + /* + * We are ready for power down, fire off the suspend finisher + * in x1, with argument in x0 + */ + blr x1 /* - * Never gets here, unless suspend fails. + * Never gets here, unless suspend finisher fails. * Successful cpu_suspend should return from cpu_resume, returning * through this code path is considered an error * If the return value is set to 0 force x0 = -EOPNOTSUPP @@ -103,7 +124,7 @@ ENTRY(__cpu_suspend) ldp x27, x28, [sp, #80] ldp x29, lr, [sp], #96 ret -ENDPROC(__cpu_suspend) +ENDPROC(__cpu_suspend_enter) .ltorg /* diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index 55a99b9a97e0..13ad4dbb1615 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -9,22 +9,19 @@ #include #include -extern int __cpu_suspend(unsigned long); +extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long)); /* - * This is called by __cpu_suspend() to save the state, and do whatever + * This is called by __cpu_suspend_enter() to save the state, and do whatever * flushing is required to ensure that when the CPU goes to sleep we have * the necessary data available when the caches are not searched. 
* - * @arg: Argument to pass to suspend operations - * @ptr: CPU context virtual address - * @save_ptr: address of the location where the context physical address - * must be saved + * ptr: CPU context virtual address + * save_ptr: address of the location where the context physical address + * must be saved */ -int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr, - phys_addr_t *save_ptr) +void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr, + phys_addr_t *save_ptr) { - int cpu = smp_processor_id(); - *save_ptr = virt_to_phys(ptr); cpu_do_suspend(ptr); @@ -35,8 +32,6 @@ int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr, */ __flush_dcache_area(ptr, sizeof(*ptr)); __flush_dcache_area(save_ptr, sizeof(*save_ptr)); - - return cpu_ops[cpu]->cpu_suspend(arg); } /* @@ -56,15 +51,15 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) } /** - * cpu_suspend + * cpu_suspend() - function to enter a low-power state + * @arg: argument to pass to CPU suspend operations * - * @arg: argument to pass to the finisher function + * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU + * operations back-end error code otherwise. */ int cpu_suspend(unsigned long arg) { - struct mm_struct *mm = current->active_mm; - int ret, cpu = smp_processor_id(); - unsigned long flags; + int cpu = smp_processor_id(); /* * If cpu_ops have not been registered or suspend @@ -72,6 +67,21 @@ int cpu_suspend(unsigned long arg) */ if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend) return -EOPNOTSUPP; + return cpu_ops[cpu]->cpu_suspend(arg); +} + +/* + * __cpu_suspend + * + * arg: argument to pass to the finisher function + * fn: finisher function pointer + * + */ +int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) +{ + struct mm_struct *mm = current->active_mm; + int ret; + unsigned long flags; /* * From this point debug exceptions are disabled to prevent @@ -86,7 +96,7 @@ int cpu_suspend(unsigned long arg) * page tables, so that the thread address space is properly * set-up on function return. */ - ret = __cpu_suspend(arg); + ret = __cpu_suspend_enter(arg, fn); if (ret == 0) { cpu_switch_mm(mm->pgd, mm); flush_tlb_all(); @@ -95,7 +105,7 @@ int cpu_suspend(unsigned long arg) * Restore per-cpu offset before any kernel * subsystem relying on it has a chance to run. */ - set_my_cpu_offset(per_cpu_offset(cpu)); + set_my_cpu_offset(per_cpu_offset(smp_processor_id())); /* * Restore HW breakpoint registers to sane values -- cgit v1.2.3 From d64f84f696463c58e1908510e45b0f5d450f737a Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 17 Jul 2014 10:30:07 +0100 Subject: arm64: kernel: introduce cpu_init_idle CPU operation The CPUidle subsystem on ARM64 machines requires the idle states implementation back-end to initialize idle states parameter upon boot. This patch adds a hook in the CPU operations structure that should be initialized by the CPU operations back-end in order to provide a function that initializes cpu idle states. This patch also adds the infrastructure to arm64 kernel required to export the CPU operations based initialization interface, so that drivers (ie CPUidle) can use it when they are initialized at probe time. 
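
The sketch below is illustrative only and is not part of this patch: it shows
how a CPUidle driver probe path might invoke the new cpu_init_idle() interface
for each possible CPU. The function name and the error handling policy are
assumptions made for the example.

	#include <linux/cpumask.h>
	#include <linux/printk.h>
	#include <asm/cpuidle.h>

	static int example_arm64_idle_probe(void)
	{
		int cpu, ret;

		for_each_possible_cpu(cpu) {
			/*
			 * Ask the CPU operations back-end to parse and
			 * initialize the idle states of this CPU.
			 */
			ret = cpu_init_idle(cpu);
			if (ret) {
				pr_err("CPU %d failed to init idle states: %d\n",
				       cpu, ret);
				return ret;
			}
		}

		return 0;
	}
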
Reviewed-by: Catalin Marinas Signed-off-by: Lorenzo Pieralisi Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpu_ops.h | 3 +++ arch/arm64/include/asm/cpuidle.h | 13 +++++++++++++ arch/arm64/kernel/Makefile | 1 + arch/arm64/kernel/cpuidle.c | 31 +++++++++++++++++++++++++++++++ 4 files changed, 48 insertions(+) create mode 100644 arch/arm64/include/asm/cpuidle.h create mode 100644 arch/arm64/kernel/cpuidle.c diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h index d7b4b38a8e86..47dfa31ad71a 100644 --- a/arch/arm64/include/asm/cpu_ops.h +++ b/arch/arm64/include/asm/cpu_ops.h @@ -28,6 +28,8 @@ struct device_node; * enable-method property. * @cpu_init: Reads any data necessary for a specific enable-method from the * devicetree, for a given cpu node and proposed logical id. + * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from + * devicetree, for a given cpu node and proposed logical id. * @cpu_prepare: Early one-time preparation step for a cpu. If there is a * mechanism for doing so, tests whether it is possible to boot * the given CPU. @@ -47,6 +49,7 @@ struct device_node; struct cpu_operations { const char *name; int (*cpu_init)(struct device_node *, unsigned int); + int (*cpu_init_idle)(struct device_node *, unsigned int); int (*cpu_prepare)(unsigned int); int (*cpu_boot)(unsigned int); void (*cpu_postboot)(void); diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h new file mode 100644 index 000000000000..b52a9932e2b1 --- /dev/null +++ b/arch/arm64/include/asm/cpuidle.h @@ -0,0 +1,13 @@ +#ifndef __ASM_CPUIDLE_H +#define __ASM_CPUIDLE_H + +#ifdef CONFIG_CPU_IDLE +extern int cpu_init_idle(unsigned int cpu); +#else +static inline int cpu_init_idle(unsigned int cpu) +{ + return -EOPNOTSUPP; +} +#endif + +#endif diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index df7ef8768fc2..6e9538c2d28a 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -26,6 +26,7 @@ arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND) += sleep.o suspend.o +arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o arm64-obj-$(CONFIG_KGDB) += kgdb.o arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c new file mode 100644 index 000000000000..19d17f51db37 --- /dev/null +++ b/arch/arm64/kernel/cpuidle.c @@ -0,0 +1,31 @@ +/* + * ARM64 CPU idle arch support + * + * Copyright (C) 2014 ARM Ltd. + * Author: Lorenzo Pieralisi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include + +#include +#include + +int cpu_init_idle(unsigned int cpu) +{ + int ret = -EOPNOTSUPP; + struct device_node *cpu_node = of_cpu_device_node_get(cpu); + + if (!cpu_node) + return -ENODEV; + + if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_init_idle) + ret = cpu_ops[cpu]->cpu_init_idle(cpu_node, cpu); + + of_node_put(cpu_node); + return ret; +} -- cgit v1.2.3 From 18910ab0d916b1a87016d69efd027714a80521dd Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Fri, 27 Sep 2013 10:25:02 +0100 Subject: arm64: add PSCI CPU_SUSPEND based cpu_suspend support This patch implements the cpu_suspend cpu operations method through the PSCI CPU SUSPEND API. The PSCI implementation translates the idle state index passed by the cpu_suspend core call into a valid PSCI state according to the PSCI states initialized at boot through the cpu_init_idle() CPU operations hook. The PSCI CPU suspend operation hook checks if the PSCI state is a standby state. If it is, it calls the PSCI suspend implementation straight away, without saving any context. If the state is a power down state the kernel calls the __cpu_suspend API (that saves the CPU context) and passed the PSCI suspend finisher as a parameter so that PSCI can be called by the __cpu_suspend implementation after saving and flushing the context as last function before power down. For power down states, entry point is set to cpu_resume physical address, that represents the default kernel execution address following a CPU reset. Reviewed-by: Ashwin Chaugule Reviewed-by: Catalin Marinas Signed-off-by: Lorenzo Pieralisi Signed-off-by: Catalin Marinas --- arch/arm64/kernel/psci.c | 104 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 104 insertions(+) diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index 553954771a67..866c1c821860 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -28,6 +29,7 @@ #include #include #include +#include #include #define PSCI_POWER_STATE_TYPE_STANDBY 0 @@ -65,6 +67,8 @@ enum psci_function { PSCI_FN_MAX, }; +static DEFINE_PER_CPU_READ_MOSTLY(struct psci_power_state *, psci_power_state); + static u32 psci_function_id[PSCI_FN_MAX]; static int psci_to_linux_errno(int errno) @@ -93,6 +97,18 @@ static u32 psci_power_state_pack(struct psci_power_state state) & PSCI_0_2_POWER_STATE_AFFL_MASK); } +static void psci_power_state_unpack(u32 power_state, + struct psci_power_state *state) +{ + state->id = (power_state & PSCI_0_2_POWER_STATE_ID_MASK) >> + PSCI_0_2_POWER_STATE_ID_SHIFT; + state->type = (power_state & PSCI_0_2_POWER_STATE_TYPE_MASK) >> + PSCI_0_2_POWER_STATE_TYPE_SHIFT; + state->affinity_level = + (power_state & PSCI_0_2_POWER_STATE_AFFL_MASK) >> + PSCI_0_2_POWER_STATE_AFFL_SHIFT; +} + /* * The following two functions are invoked via the invoke_psci_fn pointer * and will not be inlined, allowing us to piggyback on the AAPCS. 
@@ -199,6 +215,63 @@ static int psci_migrate_info_type(void) return err; } +static int __maybe_unused cpu_psci_cpu_init_idle(struct device_node *cpu_node, + unsigned int cpu) +{ + int i, ret, count = 0; + struct psci_power_state *psci_states; + struct device_node *state_node; + + /* + * If the PSCI cpu_suspend function hook has not been initialized + * idle states must not be enabled, so bail out + */ + if (!psci_ops.cpu_suspend) + return -EOPNOTSUPP; + + /* Count idle states */ + while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states", + count))) { + count++; + of_node_put(state_node); + } + + if (!count) + return -ENODEV; + + psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL); + if (!psci_states) + return -ENOMEM; + + for (i = 0; i < count; i++) { + u32 psci_power_state; + + state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); + + ret = of_property_read_u32(state_node, + "arm,psci-suspend-param", + &psci_power_state); + if (ret) { + pr_warn(" * %s missing arm,psci-suspend-param property\n", + state_node->full_name); + of_node_put(state_node); + goto free_mem; + } + + of_node_put(state_node); + pr_debug("psci-power-state %#x index %d\n", psci_power_state, + i); + psci_power_state_unpack(psci_power_state, &psci_states[i]); + } + /* Idle states parsed correctly, initialize per-cpu pointer */ + per_cpu(psci_power_state, cpu) = psci_states; + return 0; + +free_mem: + kfree(psci_states); + return ret; +} + static int get_set_conduit_method(struct device_node *np) { const char *method; @@ -436,8 +509,39 @@ static int cpu_psci_cpu_kill(unsigned int cpu) #endif #endif +static int psci_suspend_finisher(unsigned long index) +{ + struct psci_power_state *state = __get_cpu_var(psci_power_state); + + return psci_ops.cpu_suspend(state[index - 1], + virt_to_phys(cpu_resume)); +} + +static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index) +{ + int ret; + struct psci_power_state *state = __get_cpu_var(psci_power_state); + /* + * idle state index 0 corresponds to wfi, should never be called + * from the cpu_suspend operations + */ + if (WARN_ON_ONCE(!index)) + return -EINVAL; + + if (state->type == PSCI_POWER_STATE_TYPE_STANDBY) + ret = psci_ops.cpu_suspend(state[index - 1], 0); + else + ret = __cpu_suspend(index, psci_suspend_finisher); + + return ret; +} + const struct cpu_operations cpu_psci_ops = { .name = "psci", +#ifdef CONFIG_CPU_IDLE + .cpu_init_idle = cpu_psci_cpu_init_idle, + .cpu_suspend = cpu_psci_cpu_suspend, +#endif #ifdef CONFIG_SMP .cpu_init = cpu_psci_cpu_init, .cpu_prepare = cpu_psci_cpu_prepare, -- cgit v1.2.3 From 60ef0494f197d4705b17132ee7d496283b5604b3 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 11 Sep 2014 10:36:48 +0100 Subject: net: bpf: arm64: fix module memory leak when JIT image build fails On ARM64, when the BPF JIT compiler fills the JIT image body with opcodes during translation of eBPF into ARM64 opcodes, we may fail for several reasons during that phase: one being that we jump to the notyet label for not yet supported eBPF instructions such as BPF_ST. In that case we only free offsets, but not the actual allocated target image where opcodes are being stored. Fix it by calling module_free() on dismantle time in case of errors. 
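
As an aside, the simplified sketch below shows the corrected error-handling
shape. The names and allocation details are assumptions for illustration and
do not match the upstream function line for line; the point mirrors the fix,
namely freeing the JIT image as well as the offsets when body generation
fails.

	#include <linux/errno.h>
	#include <linux/moduleloader.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct example_jit_ctx {
		int *offset;	/* per-instruction offset table */
		u32 *image;	/* JIT image being filled with opcodes */
	};

	/* Stand-in for the real translation loop; fails like an
	 * unsupported instruction would. */
	static int example_build_body(struct example_jit_ctx *ctx)
	{
		return -EINVAL;
	}

	static int example_jit_compile(struct example_jit_ctx *ctx,
				       int ninsns, size_t image_size)
	{
		int ret;

		ctx->offset = kcalloc(ninsns, sizeof(*ctx->offset), GFP_KERNEL);
		if (!ctx->offset)
			return -ENOMEM;

		ctx->image = module_alloc(image_size);
		if (!ctx->image) {
			ret = -ENOMEM;
			goto free_offsets;
		}

		ret = example_build_body(ctx);
		if (ret)
			goto free_image;	/* the path that used to leak */

		return 0;

	free_image:
		module_free(NULL, ctx->image);	/* free the image, as the fix does */
	free_offsets:
		kfree(ctx->offset);
		return ret;
	}
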
Signed-off-by: Daniel Borkmann Acked-by: Zi Shen Lim Acked-by: Will Deacon Cc: Alexei Starovoitov Signed-off-by: Catalin Marinas --- arch/arm64/net/bpf_jit_comp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 38c42965382d..7ae33545535b 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -651,8 +651,10 @@ void bpf_int_jit_compile(struct bpf_prog *prog) build_prologue(&ctx); ctx.body_offset = ctx.idx; - if (build_body(&ctx)) + if (build_body(&ctx)) { + module_free(NULL, ctx.image); goto out; + } build_epilogue(&ctx); -- cgit v1.2.3 From b4da1840dc4d92f05419bd2abbde82131d4301d9 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Thu, 11 Sep 2014 23:10:32 +0100 Subject: arm64: pageattr: Correctly adjust unaligned start addresses The start address needs to be actually updated after it is detected to be unaligned. Adjust it and the end address properly. Reported-by: Zi Shen Lim Reviewed-by: Zi Shen Lim Signed-off-by: Laura Abbott Signed-off-by: Catalin Marinas --- arch/arm64/mm/pageattr.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index 75e744e4cec5..bb0ea94c4ba1 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -46,7 +46,8 @@ static int change_memory_common(unsigned long addr, int numpages, struct page_change_data data; if (!IS_ALIGNED(addr, PAGE_SIZE)) { - addr &= PAGE_MASK; + start &= PAGE_MASK; + end = start + size; WARN_ON_ONCE(1); } -- cgit v1.2.3 From fe184066abda9de114a80db24f1496ee6c31d1af Mon Sep 17 00:00:00 2001 From: Mark Charlebois Date: Mon, 15 Sep 2014 06:30:15 +0100 Subject: arm64: LLVMLinux: Fix inline arm64 assembly for use with clang Remove '#' from immediate parameter in AARCH64 inline assembly in mmu. This code now works with both gcc and clang. Signed-off-by: Mark Charlebois Signed-off-by: Behan Webster Acked-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/mm/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index c55567283cde..6894ef3e6234 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -94,7 +94,7 @@ static int __init early_cachepolicy(char *p) */ asm volatile( " mrs %0, mair_el1\n" - " bfi %0, %1, #%2, #8\n" + " bfi %0, %1, %2, #8\n" " msr mair_el1, %0\n" " isb\n" : "=&r" (tmp) -- cgit v1.2.3 From 7a9c43bed891d1f8d639c69893ee194f5700d0b2 Mon Sep 17 00:00:00 2001 From: Jon Masters Date: Tue, 26 Aug 2014 21:23:38 +0100 Subject: setup: Move unmask of async interrupts after possible earlycon setup The kernel wants to enable reporting of asynchronous interrupts (i.e. System Errors) as early as possible. But if this happens too early then any pending System Error on initial entry into the kernel may never be reported where a user can see it. This situation will occur if the kernel is configured with CONFIG_PANIC_ON_OOPS set and (default or command line) enabled, in which case the kernel will panic as intended, however the associated logging messages indicating this failure condition will remain only in the kernel ring buffer and never be flushed out to the (not yet configured) console. Therefore, this patch moves the enabling of asynchronous interrupts during early setup to as early as reasonable, but after parsing any possible earlycon parameters setting up earlycon. 
Signed-off-by: Jon Masters Signed-off-by: Catalin Marinas --- arch/arm64/kernel/setup.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index edb146d01857..2437196cc5d4 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -365,11 +365,6 @@ u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID }; void __init setup_arch(char **cmdline_p) { - /* - * Unmask asynchronous aborts early to catch possible system errors. - */ - local_async_enable(); - setup_processor(); setup_machine_fdt(__fdt_pointer); @@ -385,6 +380,12 @@ void __init setup_arch(char **cmdline_p) parse_early_param(); + /* + * Unmask asynchronous aborts after bringing up possible earlycon. + * (Report possible System Errors once we can report this occurred) + */ + local_async_enable(); + efi_init(); arm64_memblock_init(); -- cgit v1.2.3 From a6583c7c8114c4850b57365e85da85e37d5fc568 Mon Sep 17 00:00:00 2001 From: Ganapatrao Kulkarni Date: Tue, 16 Sep 2014 18:53:54 +0100 Subject: arm64:mm: initialize max_mapnr using function set_max_mapnr Initializing max_mapnr using set_max_mapnr() helper function instead of direct reference. Also not adding PHYS_PFN_OFFSET to max_pfn, since it already contains it. Acked-by: Will Deacon Signed-off-by: Ganapatrao Kulkarni Signed-off-by: Catalin Marinas --- arch/arm64/mm/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 5472c2401876..271e654d852d 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -256,7 +256,7 @@ static void __init free_unused_memmap(void) */ void __init mem_init(void) { - max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; + set_max_mapnr(pfn_to_page(max_pfn) - mem_map); #ifndef CONFIG_SPARSEMEM_VMEMMAP free_unused_memmap(); -- cgit v1.2.3 From 9f1ae7596aad71d18c3e88a3927f3f76b037b8fe Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 19 Sep 2014 12:05:45 +0100 Subject: arm64: Correct ftrace calls to aarch64_insn_gen_branch_imm() The aarch64_insn_gen_branch_imm() function takes an enum as the last argument rather than a bool. It happens to work because AARCH64_INSN_BRANCH_LINK matches 'true' but better to use the actual type. 
Signed-off-by: Catalin Marinas --- arch/arm64/kernel/ftrace.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index 7924d73b6476..cf8556ae09d0 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -58,7 +58,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func) u32 new; pc = (unsigned long)&ftrace_call; - new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, true); + new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, + AARCH64_INSN_BRANCH_LINK); return ftrace_modify_code(pc, 0, new, false); } @@ -72,7 +73,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) u32 old, new; old = aarch64_insn_gen_nop(); - new = aarch64_insn_gen_branch_imm(pc, addr, true); + new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); return ftrace_modify_code(pc, old, new, true); } @@ -86,7 +87,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long pc = rec->ip; u32 old, new; - old = aarch64_insn_gen_branch_imm(pc, addr, true); + old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); new = aarch64_insn_gen_nop(); return ftrace_modify_code(pc, old, new, true); @@ -154,7 +155,8 @@ static int ftrace_modify_graph_caller(bool enable) u32 branch, nop; branch = aarch64_insn_gen_branch_imm(pc, - (unsigned long)ftrace_graph_caller, false); + (unsigned long)ftrace_graph_caller, + AARCH64_INSN_BRANCH_LINK); nop = aarch64_insn_gen_nop(); if (enable) -- cgit v1.2.3 From 668ebd106860f09f43993517f786a2ddfd0f9ebe Mon Sep 17 00:00:00 2001 From: Yi Li Date: Mon, 22 Sep 2014 11:11:18 +0100 Subject: arm64: dmi: Add SMBIOS/DMI support SMBIOS is important for server hardware vendors. It implements a spec for providing descriptive information about the platform. Things like serial numbers, physical layout of the ports, build configuration data, and the like. This has been tested by dmidecode and lshw tools. This patch adds the call to dmi_scan_machine() to arm64_enter_virtual_mode(), as that is the point where the EFI Configuration Tables are registered as being available. It needs to be in an early_initcall anyway as dmi_id_init(), which is an arch_initcall itself, depends on dmi_scan_machine() having been called already. Signed-off-by: Yi Li Signed-off-by: Ard Biesheuvel Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 11 +++++++++++ arch/arm64/include/asm/dmi.h | 31 +++++++++++++++++++++++++++++++ arch/arm64/kernel/efi.c | 8 ++++++++ 3 files changed, 50 insertions(+) create mode 100644 arch/arm64/include/asm/dmi.h diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index f0d3a2d85a5b..6e72fa301a38 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -369,6 +369,17 @@ config EFI allow the kernel to be booted as an EFI application. This is only useful on systems that have UEFI firmware. +config DMI + bool "Enable support for SMBIOS (DMI) tables" + depends on EFI + default y + help + This enables SMBIOS/DMI feature for systems. + + This option is only useful on systems that have UEFI firmware. + However, even with this option, the resultant kernel should + continue to boot on existing non-UEFI platforms. + endmenu menu "Userspace binary formats" diff --git a/arch/arm64/include/asm/dmi.h b/arch/arm64/include/asm/dmi.h new file mode 100644 index 000000000000..69d37d87b159 --- /dev/null +++ b/arch/arm64/include/asm/dmi.h @@ -0,0 +1,31 @@ +/* + * arch/arm64/include/asm/dmi.h + * + * Copyright (C) 2013 Linaro Limited. 
+ * Written by: Yi Li (yi.li@linaro.org) + * + * based on arch/ia64/include/asm/dmi.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef __ASM_DMI_H +#define __ASM_DMI_H + +#include +#include + +/* + * According to section 2.3.6 of the UEFI spec, the firmware should not + * request a virtual mapping for configuration tables such as SMBIOS. + * This means we have to map them before use. + */ +#define dmi_early_remap(x, l) ioremap_cache(x, l) +#define dmi_early_unmap(x, l) iounmap(x) +#define dmi_remap(x, l) ioremap_cache(x, l) +#define dmi_unmap(x) iounmap(x) +#define dmi_alloc(l) kzalloc(l, GFP_KERNEL) + +#endif diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index 03aaa99e1ea0..b71ab0e5780c 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -11,6 +11,7 @@ * */ +#include #include #include #include @@ -435,6 +436,13 @@ static int __init arm64_enter_virtual_mode(void) } set_bit(EFI_SYSTEM_TABLES, &efi.flags); + /* + * DMI depends on EFI on arm64, and dmi_scan_machine() needs to be + * called early because dmi_id_init(), which is an arch_initcall itself, + * depends on dmi_scan_machine() having been called already. + */ + dmi_scan_machine(); + local_irq_save(flags); cpu_switch_mm(idmap_pg_dir, &init_mm); -- cgit v1.2.3 From c9d571bee9628bc925253a21ea5943707aadb5f1 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Wed, 17 Sep 2014 12:56:07 +0100 Subject: of: amba: use of_dma_configure for AMBA devices Commit 591c1e ("of: configure the platform device dma parameters) introduced a common mechanism to configure DMA from DT properties. AMBA devices created from DT can take advantage of this, too. Signed-off-by: Robin Murphy Acked-by: Will Deacon Acked-by: Rob Herring Signed-off-by: Catalin Marinas --- drivers/of/platform.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 0197725e033a..3b64d0bf5bba 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -160,11 +160,10 @@ EXPORT_SYMBOL(of_device_alloc); * can use Platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE event * to fix up DMA configuration. */ -static void of_dma_configure(struct platform_device *pdev) +static void of_dma_configure(struct device *dev) { u64 dma_addr, paddr, size; int ret; - struct device *dev = &pdev->dev; /* * Set default dma-mask to 32 bit. 
Drivers are expected to setup @@ -229,7 +228,7 @@ static struct platform_device *of_platform_device_create_pdata( if (!dev) goto err_clear_flag; - of_dma_configure(dev); + of_dma_configure(&dev->dev); dev->dev.bus = &platform_bus_type; dev->dev.platform_data = platform_data; @@ -291,7 +290,6 @@ static struct amba_device *of_amba_device_create(struct device_node *node, } /* setup generic device info */ - dev->dev.coherent_dma_mask = ~0; dev->dev.of_node = of_node_get(node); dev->dev.parent = parent; dev->dev.platform_data = platform_data; @@ -299,6 +297,7 @@ static struct amba_device *of_amba_device_create(struct device_node *node, dev_set_name(&dev->dev, "%s", bus_id); else of_device_make_bus_id(&dev->dev); + of_dma_configure(&dev->dev); /* Allow the HW Peripheral ID to be overridden */ prop = of_get_property(node, "arm,primecell-periphid", NULL); -- cgit v1.2.3 From 2189064795dc3fb4101e5c34d28c6b62b8a3bfd9 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 22 Sep 2014 11:48:31 +0100 Subject: arm64: Implement set_arch_dma_coherent_ops() to replace bus notifiers Commit 6ecba8eb51b7 (arm64: Use bus notifiers to set per-device coherent DMA ops) introduced bus notifiers to set the coherent dma ops based on the 'dma-coherent' DT property. Since the generic of_dma_configure() handles this property for platform and AMBA devices, replace the notifiers with set_arch_dma_coherent_ops(). Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/dma-mapping.h | 7 +++++++ arch/arm64/mm/dma-mapping.c | 31 ------------------------------- 2 files changed, 7 insertions(+), 31 deletions(-) diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index dc82e52acdb3..adeae3f6f0fc 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -52,6 +52,13 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) dev->archdata.dma_ops = ops; } +static inline int set_arch_dma_coherent_ops(struct device *dev) +{ + set_dma_ops(dev, &coherent_swiotlb_dma_ops); + return 0; +} +#define set_arch_dma_coherent_ops set_arch_dma_coherent_ops + #include static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 4164c5ace9f8..d6ceb3df0047 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -22,11 +22,8 @@ #include #include #include -#include -#include #include #include -#include #include @@ -308,40 +305,12 @@ struct dma_map_ops coherent_swiotlb_dma_ops = { }; EXPORT_SYMBOL(coherent_swiotlb_dma_ops); -static int dma_bus_notifier(struct notifier_block *nb, - unsigned long event, void *_dev) -{ - struct device *dev = _dev; - - if (event != BUS_NOTIFY_ADD_DEVICE) - return NOTIFY_DONE; - - if (of_property_read_bool(dev->of_node, "dma-coherent")) - set_dma_ops(dev, &coherent_swiotlb_dma_ops); - - return NOTIFY_OK; -} - -static struct notifier_block platform_bus_nb = { - .notifier_call = dma_bus_notifier, -}; - -static struct notifier_block amba_bus_nb = { - .notifier_call = dma_bus_notifier, -}; - extern int swiotlb_late_init_with_default_size(size_t default_size); static int __init swiotlb_late_init(void) { size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT); - /* - * These must be registered before of_platform_populate(). 
- */ - bus_register_notifier(&platform_bus_type, &platform_bus_nb); - bus_register_notifier(&amba_bustype, &amba_bus_nb); - dma_ops = &noncoherent_swiotlb_dma_ops; return swiotlb_late_init_with_default_size(swiotlb_size); -- cgit v1.2.3 From 6f325eaa86f167f8c1e1ffa288dfb991248a6ac7 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 22 Sep 2014 18:02:49 +0100 Subject: Revert "arm64: dmi: Add SMBIOS/DMI support" This reverts commit 668ebd106860f09f43993517f786a2ddfd0f9ebe. ... because of lots of warnings during boot if Linux isn't started as an EFI application: WARNING: CPU: 4 PID: 1 at /work/Linux/linux-2.6-aarch64/drivers/firmware/dmi_scan.c:591 dmi_matches+0x10c/0x110() dmi check: not initialized yet. Modules linked in: CPU: 4 PID: 1 Comm: swapper/0 Not tainted 3.17.0-rc4+ #606 Call trace: [] dump_backtrace+0x0/0x124 [] show_stack+0x10/0x1c [] dump_stack+0x74/0xb8 [] warn_slowpath_common+0x8c/0xb4 [] warn_slowpath_fmt+0x4c/0x58 [] dmi_matches+0x108/0x110 [] dmi_check_system+0x24/0x68 [] atkbd_init+0x10/0x34 [] do_one_initcall+0x88/0x1a0 [] kernel_init_freeable+0x148/0x1e8 [] kernel_init+0x10/0xd4 Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 11 ----------- arch/arm64/include/asm/dmi.h | 31 ------------------------------- arch/arm64/kernel/efi.c | 8 -------- 3 files changed, 50 deletions(-) delete mode 100644 arch/arm64/include/asm/dmi.h diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 6e72fa301a38..f0d3a2d85a5b 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -369,17 +369,6 @@ config EFI allow the kernel to be booted as an EFI application. This is only useful on systems that have UEFI firmware. -config DMI - bool "Enable support for SMBIOS (DMI) tables" - depends on EFI - default y - help - This enables SMBIOS/DMI feature for systems. - - This option is only useful on systems that have UEFI firmware. - However, even with this option, the resultant kernel should - continue to boot on existing non-UEFI platforms. - endmenu menu "Userspace binary formats" diff --git a/arch/arm64/include/asm/dmi.h b/arch/arm64/include/asm/dmi.h deleted file mode 100644 index 69d37d87b159..000000000000 --- a/arch/arm64/include/asm/dmi.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * arch/arm64/include/asm/dmi.h - * - * Copyright (C) 2013 Linaro Limited. - * Written by: Yi Li (yi.li@linaro.org) - * - * based on arch/ia64/include/asm/dmi.h - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ - -#ifndef __ASM_DMI_H -#define __ASM_DMI_H - -#include -#include - -/* - * According to section 2.3.6 of the UEFI spec, the firmware should not - * request a virtual mapping for configuration tables such as SMBIOS. - * This means we have to map them before use. 
- */ -#define dmi_early_remap(x, l) ioremap_cache(x, l) -#define dmi_early_unmap(x, l) iounmap(x) -#define dmi_remap(x, l) ioremap_cache(x, l) -#define dmi_unmap(x) iounmap(x) -#define dmi_alloc(l) kzalloc(l, GFP_KERNEL) - -#endif diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index b71ab0e5780c..03aaa99e1ea0 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -11,7 +11,6 @@ * */ -#include #include #include #include @@ -436,13 +435,6 @@ static int __init arm64_enter_virtual_mode(void) } set_bit(EFI_SYSTEM_TABLES, &efi.flags); - /* - * DMI depends on EFI on arm64, and dmi_scan_machine() needs to be - * called early because dmi_id_init(), which is an arch_initcall itself, - * depends on dmi_scan_machine() having been called already. - */ - dmi_scan_machine(); - local_irq_save(flags); cpu_switch_mm(idmap_pg_dir, &init_mm); -- cgit v1.2.3 From 1059c6bf8534acda249e7e65c81e7696fb074dc1 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 22 Sep 2014 11:19:04 +0100 Subject: arm64: debug: don't re-enable debug exceptions on return from el1_dbg When returning from a debug exception taken from EL1, we unmask debug exceptions after handling the exception. This is crucial for debug exceptions taken from EL0, so that any kernel work on the ret_to_user path can be debugged by kgdb. However, when returning back to EL1 the only thing left to do is to restore the original register state before the exception return. If single-step has been enabled by the debug exception handler, we will get stuck in an infinite debug exception loop, since we will take the step exception as soon as we unmask debug exceptions. This patch avoids unmasking debug exceptions on the debug exception return path when the exception was taken from EL1. Fixes: 2a2830703a23 (arm64: debug: avoid accessing mdscr_el1 on fault paths where possible) Cc: #3.16+ Reported-by: David Long Reported-by: AKASHI Takahiro Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry.S | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index f0b5e5120a87..726b910fe6ec 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -324,7 +324,6 @@ el1_dbg: mrs x0, far_el1 mov x2, sp // struct pt_regs bl do_debug_exception - enable_dbg kernel_exit 1 el1_inv: // TODO: add support for undefined instructions in kernel mode -- cgit v1.2.3 From a9ae04c9faeff1ad617e4f4492af3143d8c5ad9b Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 16 Sep 2014 17:42:33 +0100 Subject: arm64: insn: Add return statements after BUG_ON() Following a recent series of enhancements to the insn code the ARMv8 allnoconfig build has been generating a large number of warnings in the form of: arch/arm64/kernel/insn.c:689:8: warning: 'insn' may be used uninitialized in this function [-Wmaybe-uninitialized] This is because BUG() and related macros can be compiled out so we get execution paths which normally result in a panic compiling out to noops instead. I wasn't able to immediately identify a sensible return value to use in these cases so just return AARCH64_BREAK_FAULT - this is all "should never happen" code so hopefully it never has a practical impact. 
Signed-off-by: Mark Brown [catalin.marinas@arm.com: AARCH64_BREAK_FAULT definition contributed by Daniel Borkmann] [catalin.marinas@arm.com: replace return 0 with AARCH64_BREAK_FAULT] Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/debug-monitors.h | 8 ++++++++ arch/arm64/kernel/insn.c | 25 +++++++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index 7fb343779498..230132f9a7d8 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -48,9 +48,11 @@ /* * #imm16 values used for BRK instruction generation * Allowed values for kgbd are 0x400 - 0x7ff + * 0x100: for triggering a fault on purpose (reserved) * 0x400: for dynamic BRK instruction * 0x401: for compile time BRK instruction */ +#define FAULT_BRK_IMM 0x100 #define KGDB_DYN_DGB_BRK_IMM 0x400 #define KDBG_COMPILED_DBG_BRK_IMM 0x401 @@ -60,6 +62,12 @@ */ #define AARCH64_BREAK_MON 0xd4200000 +/* + * BRK instruction for provoking a fault on purpose + * Unlike kgdb, #imm16 value with unallocated handler is used for faulting. + */ +#define AARCH64_BREAK_FAULT (AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5)) + /* * Extract byte from BRK instruction */ diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 0668ee5c5bf9..e007714ded04 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -22,7 +22,9 @@ #include #include #include + #include +#include #include #define AARCH64_INSN_SF_BIT BIT(31) @@ -388,6 +390,7 @@ u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, break; default: BUG_ON(1); + return AARCH64_BREAK_FAULT; } return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn, @@ -413,6 +416,7 @@ u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr, break; default: BUG_ON(1); + return AARCH64_BREAK_FAULT; } switch (variant) { @@ -423,6 +427,7 @@ u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr, break; default: BUG_ON(1); + return AARCH64_BREAK_FAULT; } insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg); @@ -475,6 +480,7 @@ u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg, break; default: BUG_ON(1); + return AARCH64_BREAK_FAULT; } return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg); @@ -497,6 +503,7 @@ u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg, break; default: BUG_ON(1); + return AARCH64_BREAK_FAULT; } insn = aarch64_insn_encode_ldst_size(size, insn); @@ -535,6 +542,7 @@ u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1, break; default: BUG_ON(1); + return AARCH64_BREAK_FAULT; } switch (variant) { @@ -553,6 +561,7 @@ u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1, break; default: BUG_ON(1); + return AARCH64_BREAK_FAULT; } insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, @@ -590,6 +599,7 @@ u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst, break; default: BUG_ON(1); + return AARCH64_BREAK_FAULT; } switch (variant) { @@ -600,6 +610,7 @@ u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst, break; default: BUG_ON(1); + return AARCH64_BREAK_FAULT; } BUG_ON(imm & ~(SZ_4K - 1)); @@ -632,6 +643,7 @@ u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst, break; default: BUG_ON(1); + return AARCH64_BREAK_FAULT; } switch (variant) { @@ -644,6 +656,7 @@ u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst, break; default: 
BUG_ON(1); + return AARCH64_BREAK_FAULT; } BUG_ON(immr & ~mask); @@ -677,6 +690,7 @@ u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst, break; default: BUG_ON(1); + return AARCH64_BREAK_FAULT; } BUG_ON(imm & ~(SZ_64K - 1)); @@ -692,6 +706,7 @@ u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst, break; default: BUG_ON(1); + return AARCH64_BREAK_FAULT; } insn |= (shift >> 4) << 21; @@ -725,6 +740,7 @@ u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst, break; default: BUG_ON(1); + return AARCH64_BREAK_FAULT; } switch (variant) { @@ -737,6 +753,7 @@ u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst, break; default: BUG_ON(1); + return AARCH64_BREAK_FAULT; } @@ -769,6 +786,7 @@ u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst, break; default: BUG_ON(1); + return AARCH64_BREAK_FAULT; } switch (variant) { @@ -779,6 +797,7 @@ u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst, break; default: BUG_ON(1); + return AARCH64_BREAK_FAULT; } insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); @@ -815,6 +834,7 @@ u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst, break; default: BUG_ON(1); + return AARCH64_BREAK_FAULT; } switch (variant) { @@ -825,6 +845,7 @@ u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst, break; default: BUG_ON(1); + return AARCH64_BREAK_FAULT; } insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); @@ -852,6 +873,7 @@ u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst, break; default: BUG_ON(1); + return AARCH64_BREAK_FAULT; } switch (variant) { @@ -862,6 +884,7 @@ u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst, break; default: BUG_ON(1); + return AARCH64_BREAK_FAULT; } insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); @@ -911,6 +934,7 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst, break; default: BUG_ON(1); + return AARCH64_BREAK_FAULT; } switch (variant) { @@ -923,6 +947,7 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst, break; default: BUG_ON(1); + return AARCH64_BREAK_FAULT; } -- cgit v1.2.3 From 7acf71d1a224b6e7a40a244d804cea1780a643ed Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 25 Sep 2014 13:47:47 +0100 Subject: arm64: Fix typos in KGDB macros Some of the KGDB macros used for generating the BRK instructions had the wrong spelling for DBG and KGDB abbreviations. 
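For readers who have not met these macros before, the following host-side check (an illustrative sketch, not part of the patch; the macro names are shortened copies of the KGDB_DYN_DBG_* definitions renamed below) shows what they actually compute: the four little-endian bytes of BRK #0x400, i.e. the 0xd4208000 encoding kgdb writes when it plants a dynamic breakpoint.

#include <stdio.h>
#include <stdint.h>

#define AARCH64_BREAK_MON	0xd4200000u
#define KGDB_DYN_DBG_BRK_IMM	0x400u

/* Shortened copies of the byte-extraction macros renamed by this patch. */
#define BRK_INS_BYTE(x)	((((AARCH64_BREAK_MON) & 0xffe0001f) >> ((x) * 8)) & 0xff)
#define BRK_IMM_BYTE(x)	(((((KGDB_DYN_DBG_BRK_IMM) & 0xffff) << 5) >> ((x) * 8)) & 0xff)
#define BRK_BYTE(x)	(BRK_INS_BYTE(x) | BRK_IMM_BYTE(x))

int main(void)
{
	uint32_t insn = 0;
	int i;

	/* Reassemble the four little-endian bytes used for the dynamic
	 * kgdb breakpoint; the result should be BRK #0x400. */
	for (i = 0; i < 4; i++)
		insn |= (uint32_t)BRK_BYTE(i) << (i * 8);

	printf("dynamic kgdb BRK: 0x%08x (expected 0xd4208000)\n",
	       (unsigned int)insn);
	return 0;
}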
Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/debug-monitors.h | 22 +++++++++++----------- arch/arm64/include/asm/kgdb.h | 2 +- arch/arm64/kernel/kgdb.c | 4 ++-- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index 230132f9a7d8..40ec68aa6870 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -53,8 +53,8 @@ * 0x401: for compile time BRK instruction */ #define FAULT_BRK_IMM 0x100 -#define KGDB_DYN_DGB_BRK_IMM 0x400 -#define KDBG_COMPILED_DBG_BRK_IMM 0x401 +#define KGDB_DYN_DBG_BRK_IMM 0x400 +#define KGDB_COMPILED_DBG_BRK_IMM 0x401 /* * BRK instruction encoding @@ -71,22 +71,22 @@ /* * Extract byte from BRK instruction */ -#define KGDB_DYN_DGB_BRK_INS_BYTE(x) \ +#define KGDB_DYN_DBG_BRK_INS_BYTE(x) \ ((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff) /* * Extract byte from BRK #imm16 */ -#define KGBD_DYN_DGB_BRK_IMM_BYTE(x) \ - (((((KGDB_DYN_DGB_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff) +#define KGBD_DYN_DBG_BRK_IMM_BYTE(x) \ + (((((KGDB_DYN_DBG_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff) -#define KGDB_DYN_DGB_BRK_BYTE(x) \ - (KGDB_DYN_DGB_BRK_INS_BYTE(x) | KGBD_DYN_DGB_BRK_IMM_BYTE(x)) +#define KGDB_DYN_DBG_BRK_BYTE(x) \ + (KGDB_DYN_DBG_BRK_INS_BYTE(x) | KGBD_DYN_DBG_BRK_IMM_BYTE(x)) -#define KGDB_DYN_BRK_INS_BYTE0 KGDB_DYN_DGB_BRK_BYTE(0) -#define KGDB_DYN_BRK_INS_BYTE1 KGDB_DYN_DGB_BRK_BYTE(1) -#define KGDB_DYN_BRK_INS_BYTE2 KGDB_DYN_DGB_BRK_BYTE(2) -#define KGDB_DYN_BRK_INS_BYTE3 KGDB_DYN_DGB_BRK_BYTE(3) +#define KGDB_DYN_BRK_INS_BYTE0 KGDB_DYN_DBG_BRK_BYTE(0) +#define KGDB_DYN_BRK_INS_BYTE1 KGDB_DYN_DBG_BRK_BYTE(1) +#define KGDB_DYN_BRK_INS_BYTE2 KGDB_DYN_DBG_BRK_BYTE(2) +#define KGDB_DYN_BRK_INS_BYTE3 KGDB_DYN_DBG_BRK_BYTE(3) #define CACHE_FLUSH_IS_SAFE 1 diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h index 3c8aafc1082f..f69f69c8120c 100644 --- a/arch/arm64/include/asm/kgdb.h +++ b/arch/arm64/include/asm/kgdb.h @@ -29,7 +29,7 @@ static inline void arch_kgdb_breakpoint(void) { - asm ("brk %0" : : "I" (KDBG_COMPILED_DBG_BRK_IMM)); + asm ("brk %0" : : "I" (KGDB_COMPILED_DBG_BRK_IMM)); } extern void kgdb_handle_bus_error(void); diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c index 75c9cf1aafee..a0d10c55f307 100644 --- a/arch/arm64/kernel/kgdb.c +++ b/arch/arm64/kernel/kgdb.c @@ -235,13 +235,13 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) static struct break_hook kgdb_brkpt_hook = { .esr_mask = 0xffffffff, - .esr_val = DBG_ESR_VAL_BRK(KGDB_DYN_DGB_BRK_IMM), + .esr_val = DBG_ESR_VAL_BRK(KGDB_DYN_DBG_BRK_IMM), .fn = kgdb_brk_fn }; static struct break_hook kgdb_compiled_brkpt_hook = { .esr_mask = 0xffffffff, - .esr_val = DBG_ESR_VAL_BRK(KDBG_COMPILED_DBG_BRK_IMM), + .esr_val = DBG_ESR_VAL_BRK(KGDB_COMPILED_DBG_BRK_IMM), .fn = kgdb_compiled_brk_fn }; -- cgit v1.2.3 From a52ce121918382a4249d621cceb07c30e1c00fa2 Mon Sep 17 00:00:00 2001 From: Sean Paul Date: Wed, 1 Oct 2014 16:31:50 +0100 Subject: arm64: Use DMA_ERROR_CODE to denote failed allocation This patch replaces the static assignment of ~0 to dma_handle with DMA_ERROR_CODE to be consistent with other platforms. 
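For context on why the shared constant is preferable to an open-coded ~0, a hypothetical driver-side sketch follows (the helper names are invented and not part of this patch): callers of the DMA API detect failure through dma_mapping_error() and NULL returns rather than by comparing handles against a magic value, so the arch code should report failure with the same sentinel the rest of the DMA machinery uses.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Streaming mapping: failure is detected with dma_mapping_error(). */
static int example_map_for_device(struct device *dev, void *buf, size_t len,
				  dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;
	return 0;
}

/* Coherent allocation: failure is signalled by a NULL return. */
static void *example_alloc_coherent(struct device *dev, size_t len,
				    dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, len, handle, GFP_KERNEL);
}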
Signed-off-by: Sean Paul Signed-off-by: Catalin Marinas --- arch/arm64/mm/dma-mapping.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index d6ceb3df0047..2c71077cacfd 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -122,7 +122,7 @@ static void *__dma_alloc_noncoherent(struct device *dev, size_t size, no_map: __dma_free_coherent(dev, size, ptr, *dma_handle, attrs); no_mem: - *dma_handle = ~0; + *dma_handle = DMA_ERROR_CODE; return NULL; } -- cgit v1.2.3 From 6c34f1f5424395994c125f8c68bed395920ecc58 Mon Sep 17 00:00:00 2001 From: Kyle McMartin Date: Tue, 16 Sep 2014 22:37:18 +0100 Subject: aarch64: filter $x from kallsyms Similar to ARM, AArch64 is generating $x and $d syms... which isn't terribly helpful when looking at %pF output and the like. Filter those out in kallsyms, modpost and when looking at module symbols. Seems simplest since none of these check EM_ARM anyway, to just add it to the strchr used, rather than trying to make things overly complicated. initcall_debug improves: dmesg_before.txt: initcall $x+0x0/0x154 [sg] returned 0 after 26331 usecs dmesg_after.txt: initcall init_sg+0x0/0x154 [sg] returned 0 after 15461 usecs Signed-off-by: Kyle McMartin Acked-by: Rusty Russell Signed-off-by: Catalin Marinas --- kernel/module.c | 2 +- scripts/kallsyms.c | 2 +- scripts/mod/modpost.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/module.c b/kernel/module.c index 03214bd288e9..3d52936031cc 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -3388,7 +3388,7 @@ static inline int is_arm_mapping_symbol(const char *str) { if (str[0] == '.' && str[1] == 'L') return true; - return str[0] == '$' && strchr("atd", str[1]) + return str[0] == '$' && strchr("axtd", str[1]) && (str[2] == '\0' || str[2] == '.'); } diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index dc7aa45e80ce..c6d33bd15b04 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c @@ -84,7 +84,7 @@ static void usage(void) */ static inline int is_arm_mapping_symbol(const char *str) { - return str[0] == '$' && strchr("atd", str[1]) + return str[0] == '$' && strchr("axtd", str[1]) && (str[2] == '\0' || str[2] == '.'); } diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 091d90573b63..3017ec20e9f8 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -1146,7 +1146,7 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr, static inline int is_arm_mapping_symbol(const char *str) { - return str[0] == '$' && strchr("atd", str[1]) + return str[0] == '$' && strchr("axtd", str[1]) && (str[2] == '\0' || str[2] == '.'); } -- cgit v1.2.3 From 097cbd8d261ccc602c963ddf823d5023be439e04 Mon Sep 17 00:00:00 2001 From: Min-Hua Chen Date: Thu, 2 Oct 2014 15:56:59 +0100 Subject: arm64: Use phys_addr_t type for physical address Change the type of physical address from unsigned long to phys_addr_t, make valid_phys_addr_range more readable. Signed-off-by: Min-Hua Chen Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/io.h | 2 +- arch/arm64/mm/mmap.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index e0ecdcf6632d..f771e8bcad4a 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -243,7 +243,7 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); * (PHYS_OFFSET and PHYS_MASK taken into account). 
*/ #define ARCH_HAS_VALID_PHYS_ADDR_RANGE -extern int valid_phys_addr_range(unsigned long addr, size_t size); +extern int valid_phys_addr_range(phys_addr_t addr, size_t size); extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); extern int devmem_is_allowed(unsigned long pfn); diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index 8ed6cb1a900f..1d73662f00ff 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c @@ -102,7 +102,7 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout); * You really shouldn't be using read() or write() on /dev/mem. This might go * away in the future. */ -int valid_phys_addr_range(unsigned long addr, size_t size) +int valid_phys_addr_range(phys_addr_t addr, size_t size) { if (addr < PHYS_OFFSET) return 0; -- cgit v1.2.3 From c8fdd497a44450e6183cb43b314fd81504d5c15e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Wed, 10 Sep 2014 09:26:19 +0100 Subject: ARM64: make of_device_ids const MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit of_device_ids (i.e. compatible strings and the respective data) are not supposed to change at runtime. All functions working with of_device_ids provided by <linux/of.h> work with const of_device_ids. So mark the only non-const struct in arch/arm64 as const, too. Signed-off-by: Uwe Kleine-König Signed-off-by: Catalin Marinas --- arch/arm64/kernel/perf_event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index baf5afb7e6a0..aa29ecb4f800 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -1276,7 +1276,7 @@ arch_initcall(cpu_pmu_reset); /* * PMU platform driver and devicetree bindings. */ -static struct of_device_id armpmu_of_device_ids[] = { +static const struct of_device_id armpmu_of_device_ids[] = { {.compatible = "arm,armv8-pmuv3"}, {}, }; -- cgit v1.2.3 From 0a6479b0ffad8dd236915e271faaf2cbb4cac287 Mon Sep 17 00:00:00 2001 From: Geoff Levand Date: Fri, 22 Aug 2014 20:49:16 +0100 Subject: arm64: Remove unneeded extern keyword Function prototypes are never definitions, so remove any 'extern' keyword from the function prototypes in cpu_ops.h. Fixes warnings emitted by checkpatch. Signed-off-by: Geoff Levand Acked-by: Mark Rutland Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpu_ops.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h index 47dfa31ad71a..6f8e2ef9094a 100644 --- a/arch/arm64/include/asm/cpu_ops.h +++ b/arch/arm64/include/asm/cpu_ops.h @@ -64,7 +64,7 @@ struct cpu_operations { }; extern const struct cpu_operations *cpu_ops[NR_CPUS]; -extern int __init cpu_read_ops(struct device_node *dn, int cpu); -extern void __init cpu_read_bootcpu_ops(void); +int __init cpu_read_ops(struct device_node *dn, int cpu); +void __init cpu_read_bootcpu_ops(void); #endif /* ifndef __ASM_CPU_OPS_H */ -- cgit v1.2.3
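As a closing illustration of the of_device_id change above, here is the usual shape of such a table in a platform driver (a hypothetical example, not taken from the tree; the names and compatible string are invented). The table is pure data, and the OF matching code only ever takes const struct of_device_id pointers, so declaring it const costs nothing and moves it to read-only memory.

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* Hypothetical match table: plain, never-modified data, hence const. */
static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-device" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_of_match);

static int example_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "probed\n");
	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.driver	= {
		.name		= "example-device",
		.of_match_table	= example_of_match,
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");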