author    Linus Torvalds <torvalds@linux-foundation.org>  2023-04-25 12:39:01 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2023-04-25 12:39:01 -0700
commit    df45da57cbd35715d590a36a12968a94508ccd1f (patch)
tree      f001218ffbd01d42f829be275df5542b1b454f2d /arch
parent    53b5e72b9d89853b7e622239676163ede52acffe (diff)
parent    eeb3557cc188e42ae7f7bef2d6dc5bf0e078412e (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
 "ACPI:

   - Improve error reporting when failing to manage SDEI on AGDI device
     removal

  Assembly routines:

   - Improve register constraints so that the compiler can make use of
     the zero register instead of moving an immediate #0 into a GPR

   - Allow the compiler to allocate the registers used for CAS
     instructions

  CPU features and system registers:

   - Cleanups to the way in which CPU features are identified from the
     ID register fields

   - Extend system register definition generation to handle Enum types
     when defining shared register fields

   - Generate definitions for new _EL2 registers and add new fields for
     ID_AA64PFR1_EL1

   - Allow SVE to be disabled separately from SME on the kernel
     command-line

  Tracing:

   - Support for "direct calls" in ftrace, which enables BPF tracing
     for arm64

  Kdump:

   - Don't bother unmapping the crashkernel from the linear mapping,
     which then allows us to use huge (block) mappings and reduce TLB
     pressure when a crashkernel is loaded.

  Memory management:

   - Try again to remove data cache invalidation from the coherent DMA
     allocation path

   - Simplify the fixmap code by mapping at page granularity

   - Allow the kfence pool to be allocated early, preventing the rest
     of the linear mapping from being forced to page granularity

  Perf and PMU:

   - Move CPU PMU code out to drivers/perf/ where it can be reused by
     the 32-bit ARM architecture when running on ARMv8 CPUs

   - Fix race between CPU PMU probing and pKVM host de-privilege

   - Add support for Apple M2 CPU PMU

   - Adjust the generic PERF_COUNT_HW_BRANCH_INSTRUCTIONS event
     dynamically, depending on what the CPU actually supports

   - Minor fixes and cleanups to system PMU drivers

  Stack tracing:

   - Use the XPACLRI instruction to strip PAC from pointers, rather
     than rolling our own function in C

   - Remove redundant PAC removal for toolchains that handle this in
     their builtins

   - Make backtracing more resilient in the face of instrumentation

  Miscellaneous:

   - Fix single-step with KGDB

   - Remove harmless warning when 'nokaslr' is passed on the kernel
     command-line

   - Minor fixes and cleanups across the board"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (72 commits)
  KVM: arm64: Ensure CPU PMU probes before pKVM host de-privilege
  arm64: kexec: include reboot.h
  arm64: delete dead code in this_cpu_set_vectors()
  arm64/cpufeature: Use helper macro to specify ID register for capabilites
  drivers/perf: hisi: add NULL check for name
  drivers/perf: hisi: Remove redundant initialized of pmu->name
  arm64/cpufeature: Consistently use symbolic constants for min_field_value
  arm64/cpufeature: Pull out helper for CPUID register definitions
  arm64/sysreg: Convert HFGITR_EL2 to automatic generation
  ACPI: AGDI: Improve error reporting for problems during .remove()
  arm64: kernel: Fix kernel warning when nokaslr is passed to commandline
  perf/arm-cmn: Fix port detection for CMN-700
  arm64: kgdb: Set PSTATE.SS to 1 to re-enable single-step
  arm64: move PAC masks to <asm/pointer_auth.h>
  arm64: use XPACLRI to strip PAC
  arm64: avoid redundant PAC stripping in __builtin_return_address()
  arm64/sme: Fix some comments of ARM SME
  arm64/signal: Alloc tpidr2 sigframe after checking system_supports_tpidr2()
  arm64/signal: Use system_supports_tpidr2() to check TPIDR2
  arm64/idreg: Don't disable SME when disabling SVE
  ...
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/arm_pmuv3.h | 247
-rw-r--r--  arch/arm/mm/Kconfig | 2
-rw-r--r--  arch/arm64/Kconfig | 18
-rw-r--r--  arch/arm64/include/asm/arm_pmuv3.h | 155
-rw-r--r--  arch/arm64/include/asm/atomic_lse.h | 17
-rw-r--r--  arch/arm64/include/asm/barrier.h | 10
-rw-r--r--  arch/arm64/include/asm/compat.h | 4
-rw-r--r--  arch/arm64/include/asm/compiler.h | 36
-rw-r--r--  arch/arm64/include/asm/debug-monitors.h | 1
-rw-r--r--  arch/arm64/include/asm/fixmap.h | 22
-rw-r--r--  arch/arm64/include/asm/ftrace.h | 22
-rw-r--r--  arch/arm64/include/asm/kernel-pgtable.h | 5
-rw-r--r--  arch/arm64/include/asm/kexec.h | 6
-rw-r--r--  arch/arm64/include/asm/kfence.h | 10
-rw-r--r--  arch/arm64/include/asm/memory.h | 5
-rw-r--r--  arch/arm64/include/asm/mmu.h | 2
-rw-r--r--  arch/arm64/include/asm/perf_event.h | 249
-rw-r--r--  arch/arm64/include/asm/pointer_auth.h | 13
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 9
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 6
-rw-r--r--  arch/arm64/kernel/Makefile | 1
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c | 4
-rw-r--r--  arch/arm64/kernel/asm-offsets.c | 6
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 272
-rw-r--r--  arch/arm64/kernel/crash_core.c | 1
-rw-r--r--  arch/arm64/kernel/debug-monitors.c | 5
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S | 90
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 4
-rw-r--r--  arch/arm64/kernel/ftrace.c | 46
-rw-r--r--  arch/arm64/kernel/idreg-override.c | 9
-rw-r--r--  arch/arm64/kernel/kgdb.c | 2
-rw-r--r--  arch/arm64/kernel/machine_kexec.c | 23
-rw-r--r--  arch/arm64/kernel/perf_callchain.c | 2
-rw-r--r--  arch/arm64/kernel/perf_event.c | 1467
-rw-r--r--  arch/arm64/kernel/process.c | 2
-rw-r--r--  arch/arm64/kernel/proton-pack.c | 3
-rw-r--r--  arch/arm64/kernel/signal.c | 18
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 144
-rw-r--r--  arch/arm64/kvm/arm.c | 45
-rw-r--r--  arch/arm64/kvm/pkvm.c | 47
-rw-r--r--  arch/arm64/lib/uaccess_flushcache.c | 6
-rw-r--r--  arch/arm64/mm/Makefile | 2
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 17
-rw-r--r--  arch/arm64/mm/fixmap.c | 203
-rw-r--r--  arch/arm64/mm/init.c | 34
-rw-r--r--  arch/arm64/mm/mmu.c | 288
-rw-r--r--  arch/arm64/mm/pageattr.c | 7
-rw-r--r--  arch/arm64/mm/ptdump.c | 2
-rwxr-xr-x  arch/arm64/tools/gen-sysreg.awk | 95
-rw-r--r--  arch/arm64/tools/sysreg | 165
-rw-r--r--  arch/s390/kernel/mcount.S | 5
-rw-r--r--  arch/x86/kernel/ftrace_32.S | 5
-rw-r--r--  arch/x86/kernel/ftrace_64.S | 4
53 files changed, 1356 insertions, 2507 deletions
diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h
new file mode 100644
index 000000000000..78d3d4b82c6c
--- /dev/null
+++ b/arch/arm/include/asm/arm_pmuv3.h
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#ifndef __ASM_PMUV3_H
+#define __ASM_PMUV3_H
+
+#include <asm/cp15.h>
+#include <asm/cputype.h>
+
+#define PMCCNTR __ACCESS_CP15_64(0, c9)
+
+#define PMCR __ACCESS_CP15(c9, 0, c12, 0)
+#define PMCNTENSET __ACCESS_CP15(c9, 0, c12, 1)
+#define PMCNTENCLR __ACCESS_CP15(c9, 0, c12, 2)
+#define PMOVSR __ACCESS_CP15(c9, 0, c12, 3)
+#define PMSELR __ACCESS_CP15(c9, 0, c12, 5)
+#define PMCEID0 __ACCESS_CP15(c9, 0, c12, 6)
+#define PMCEID1 __ACCESS_CP15(c9, 0, c12, 7)
+#define PMXEVTYPER __ACCESS_CP15(c9, 0, c13, 1)
+#define PMXEVCNTR __ACCESS_CP15(c9, 0, c13, 2)
+#define PMUSERENR __ACCESS_CP15(c9, 0, c14, 0)
+#define PMINTENSET __ACCESS_CP15(c9, 0, c14, 1)
+#define PMINTENCLR __ACCESS_CP15(c9, 0, c14, 2)
+#define PMMIR __ACCESS_CP15(c9, 0, c14, 6)
+#define PMCCFILTR __ACCESS_CP15(c14, 0, c15, 7)
+
+#define PMEVCNTR0 __ACCESS_CP15(c14, 0, c8, 0)
+#define PMEVCNTR1 __ACCESS_CP15(c14, 0, c8, 1)
+#define PMEVCNTR2 __ACCESS_CP15(c14, 0, c8, 2)
+#define PMEVCNTR3 __ACCESS_CP15(c14, 0, c8, 3)
+#define PMEVCNTR4 __ACCESS_CP15(c14, 0, c8, 4)
+#define PMEVCNTR5 __ACCESS_CP15(c14, 0, c8, 5)
+#define PMEVCNTR6 __ACCESS_CP15(c14, 0, c8, 6)
+#define PMEVCNTR7 __ACCESS_CP15(c14, 0, c8, 7)
+#define PMEVCNTR8 __ACCESS_CP15(c14, 0, c9, 0)
+#define PMEVCNTR9 __ACCESS_CP15(c14, 0, c9, 1)
+#define PMEVCNTR10 __ACCESS_CP15(c14, 0, c9, 2)
+#define PMEVCNTR11 __ACCESS_CP15(c14, 0, c9, 3)
+#define PMEVCNTR12 __ACCESS_CP15(c14, 0, c9, 4)
+#define PMEVCNTR13 __ACCESS_CP15(c14, 0, c9, 5)
+#define PMEVCNTR14 __ACCESS_CP15(c14, 0, c9, 6)
+#define PMEVCNTR15 __ACCESS_CP15(c14, 0, c9, 7)
+#define PMEVCNTR16 __ACCESS_CP15(c14, 0, c10, 0)
+#define PMEVCNTR17 __ACCESS_CP15(c14, 0, c10, 1)
+#define PMEVCNTR18 __ACCESS_CP15(c14, 0, c10, 2)
+#define PMEVCNTR19 __ACCESS_CP15(c14, 0, c10, 3)
+#define PMEVCNTR20 __ACCESS_CP15(c14, 0, c10, 4)
+#define PMEVCNTR21 __ACCESS_CP15(c14, 0, c10, 5)
+#define PMEVCNTR22 __ACCESS_CP15(c14, 0, c10, 6)
+#define PMEVCNTR23 __ACCESS_CP15(c14, 0, c10, 7)
+#define PMEVCNTR24 __ACCESS_CP15(c14, 0, c11, 0)
+#define PMEVCNTR25 __ACCESS_CP15(c14, 0, c11, 1)
+#define PMEVCNTR26 __ACCESS_CP15(c14, 0, c11, 2)
+#define PMEVCNTR27 __ACCESS_CP15(c14, 0, c11, 3)
+#define PMEVCNTR28 __ACCESS_CP15(c14, 0, c11, 4)
+#define PMEVCNTR29 __ACCESS_CP15(c14, 0, c11, 5)
+#define PMEVCNTR30 __ACCESS_CP15(c14, 0, c11, 6)
+
+#define PMEVTYPER0 __ACCESS_CP15(c14, 0, c12, 0)
+#define PMEVTYPER1 __ACCESS_CP15(c14, 0, c12, 1)
+#define PMEVTYPER2 __ACCESS_CP15(c14, 0, c12, 2)
+#define PMEVTYPER3 __ACCESS_CP15(c14, 0, c12, 3)
+#define PMEVTYPER4 __ACCESS_CP15(c14, 0, c12, 4)
+#define PMEVTYPER5 __ACCESS_CP15(c14, 0, c12, 5)
+#define PMEVTYPER6 __ACCESS_CP15(c14, 0, c12, 6)
+#define PMEVTYPER7 __ACCESS_CP15(c14, 0, c12, 7)
+#define PMEVTYPER8 __ACCESS_CP15(c14, 0, c13, 0)
+#define PMEVTYPER9 __ACCESS_CP15(c14, 0, c13, 1)
+#define PMEVTYPER10 __ACCESS_CP15(c14, 0, c13, 2)
+#define PMEVTYPER11 __ACCESS_CP15(c14, 0, c13, 3)
+#define PMEVTYPER12 __ACCESS_CP15(c14, 0, c13, 4)
+#define PMEVTYPER13 __ACCESS_CP15(c14, 0, c13, 5)
+#define PMEVTYPER14 __ACCESS_CP15(c14, 0, c13, 6)
+#define PMEVTYPER15 __ACCESS_CP15(c14, 0, c13, 7)
+#define PMEVTYPER16 __ACCESS_CP15(c14, 0, c14, 0)
+#define PMEVTYPER17 __ACCESS_CP15(c14, 0, c14, 1)
+#define PMEVTYPER18 __ACCESS_CP15(c14, 0, c14, 2)
+#define PMEVTYPER19 __ACCESS_CP15(c14, 0, c14, 3)
+#define PMEVTYPER20 __ACCESS_CP15(c14, 0, c14, 4)
+#define PMEVTYPER21 __ACCESS_CP15(c14, 0, c14, 5)
+#define PMEVTYPER22 __ACCESS_CP15(c14, 0, c14, 6)
+#define PMEVTYPER23 __ACCESS_CP15(c14, 0, c14, 7)
+#define PMEVTYPER24 __ACCESS_CP15(c14, 0, c15, 0)
+#define PMEVTYPER25 __ACCESS_CP15(c14, 0, c15, 1)
+#define PMEVTYPER26 __ACCESS_CP15(c14, 0, c15, 2)
+#define PMEVTYPER27 __ACCESS_CP15(c14, 0, c15, 3)
+#define PMEVTYPER28 __ACCESS_CP15(c14, 0, c15, 4)
+#define PMEVTYPER29 __ACCESS_CP15(c14, 0, c15, 5)
+#define PMEVTYPER30 __ACCESS_CP15(c14, 0, c15, 6)
+
+#define RETURN_READ_PMEVCNTRN(n) \
+ return read_sysreg(PMEVCNTR##n)
+static unsigned long read_pmevcntrn(int n)
+{
+ PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
+ return 0;
+}
+
+#define WRITE_PMEVCNTRN(n) \
+ write_sysreg(val, PMEVCNTR##n)
+static void write_pmevcntrn(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
+}
+
+#define WRITE_PMEVTYPERN(n) \
+ write_sysreg(val, PMEVTYPER##n)
+static void write_pmevtypern(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
+}
+
+static inline unsigned long read_pmmir(void)
+{
+ return read_sysreg(PMMIR);
+}
+
+static inline u32 read_pmuver(void)
+{
+ /* PMUVers is not a signed field */
+ u32 dfr0 = read_cpuid_ext(CPUID_EXT_DFR0);
+
+ return (dfr0 >> 24) & 0xf;
+}
+
+static inline void write_pmcr(u32 val)
+{
+ write_sysreg(val, PMCR);
+}
+
+static inline u32 read_pmcr(void)
+{
+ return read_sysreg(PMCR);
+}
+
+static inline void write_pmselr(u32 val)
+{
+ write_sysreg(val, PMSELR);
+}
+
+static inline void write_pmccntr(u64 val)
+{
+ write_sysreg(val, PMCCNTR);
+}
+
+static inline u64 read_pmccntr(void)
+{
+ return read_sysreg(PMCCNTR);
+}
+
+static inline void write_pmxevcntr(u32 val)
+{
+ write_sysreg(val, PMXEVCNTR);
+}
+
+static inline u32 read_pmxevcntr(void)
+{
+ return read_sysreg(PMXEVCNTR);
+}
+
+static inline void write_pmxevtyper(u32 val)
+{
+ write_sysreg(val, PMXEVTYPER);
+}
+
+static inline void write_pmcntenset(u32 val)
+{
+ write_sysreg(val, PMCNTENSET);
+}
+
+static inline void write_pmcntenclr(u32 val)
+{
+ write_sysreg(val, PMCNTENCLR);
+}
+
+static inline void write_pmintenset(u32 val)
+{
+ write_sysreg(val, PMINTENSET);
+}
+
+static inline void write_pmintenclr(u32 val)
+{
+ write_sysreg(val, PMINTENCLR);
+}
+
+static inline void write_pmccfiltr(u32 val)
+{
+ write_sysreg(val, PMCCFILTR);
+}
+
+static inline void write_pmovsclr(u32 val)
+{
+ write_sysreg(val, PMOVSR);
+}
+
+static inline u32 read_pmovsclr(void)
+{
+ return read_sysreg(PMOVSR);
+}
+
+static inline void write_pmuserenr(u32 val)
+{
+ write_sysreg(val, PMUSERENR);
+}
+
+static inline u32 read_pmceid0(void)
+{
+ return read_sysreg(PMCEID0);
+}
+
+static inline u32 read_pmceid1(void)
+{
+ return read_sysreg(PMCEID1);
+}
+
+static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
+static inline void kvm_clr_pmu_events(u32 clr) {}
+static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
+{
+ return false;
+}
+
+/* PMU Version in DFR Register */
+#define ARMV8_PMU_DFR_VER_NI 0
+#define ARMV8_PMU_DFR_VER_V3P4 0x5
+#define ARMV8_PMU_DFR_VER_V3P5 0x6
+#define ARMV8_PMU_DFR_VER_IMP_DEF 0xF
+
+static inline bool pmuv3_implemented(int pmuver)
+{
+ return !(pmuver == ARMV8_PMU_DFR_VER_IMP_DEF ||
+ pmuver == ARMV8_PMU_DFR_VER_NI);
+}
+
+static inline bool is_pmuv3p4(int pmuver)
+{
+ return pmuver >= ARMV8_PMU_DFR_VER_V3P4;
+}
+
+static inline bool is_pmuv3p5(int pmuver)
+{
+ return pmuver >= ARMV8_PMU_DFR_VER_V3P5;
+}
+
+#endif
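The accessors above rely on PMEVN_SWITCH(), which is provided by the shared PMUv3 code rather than by this new header. As a rough, hedged sketch (the exact macro is not part of this hunk), it can be thought of as a switch that dispatches one compile-time accessor per counter index, so that read_pmevcntrn(3) collapses to a single PMEVCNTR3 access:

#define PMEVN_CASE(n, case_macro) \
	case n: case_macro(n); break

#define PMEVN_SWITCH(x, case_macro)				\
	do {							\
		switch (x) {					\
		PMEVN_CASE(0, case_macro);			\
		PMEVN_CASE(1, case_macro);			\
		/* ... one case per counter, up to 30 ... */	\
		default:					\
			WARN(1, "Invalid PMEV* index\n");	\
		}						\
	} while (0)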
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c5bbae86f725..be183ed1232d 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -403,7 +403,7 @@ config CPU_V6K
select CPU_THUMB_CAPABLE
select CPU_TLB_V6 if MMU
-# ARMv7
+# ARMv7 and ARMv8 architectures
config CPU_V7
bool
select CPU_32v6K
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5b71e0bc2ffa..3f5bf55050e8 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -186,6 +186,10 @@ config ARM64
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
+ select HAVE_DYNAMIC_FTRACE_WITH_ARGS \
+ if $(cc-option,-fpatchable-function-entry=2)
+ select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS \
+ if DYNAMIC_FTRACE_WITH_ARGS && DYNAMIC_FTRACE_WITH_CALL_OPS
select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS \
if (DYNAMIC_FTRACE_WITH_ARGS && !CFI_CLANG && \
!CC_OPTIMIZE_FOR_SIZE)
@@ -363,6 +367,20 @@ config ARCH_PROC_KCORE_TEXT
config BROKEN_GAS_INST
def_bool !$(as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n)
+config BUILTIN_RETURN_ADDRESS_STRIPS_PAC
+ bool
+ # Clang's __builtin_return_address() strips the PAC since 12.0.0
+ # https://reviews.llvm.org/D75044
+ default y if CC_IS_CLANG && (CLANG_VERSION >= 120000)
+ # GCC's __builtin_return_address() strips the PAC since 11.1.0,
+ # and this was backported to 10.2.0, 9.4.0, 8.5.0, but not earlier
+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94891
+ default y if CC_IS_GCC && (GCC_VERSION >= 110100)
+ default y if CC_IS_GCC && (GCC_VERSION >= 100200) && (GCC_VERSION < 110000)
+ default y if CC_IS_GCC && (GCC_VERSION >= 90400) && (GCC_VERSION < 100000)
+ default y if CC_IS_GCC && (GCC_VERSION >= 80500) && (GCC_VERSION < 90000)
+ default n
+
config KASAN_SHADOW_OFFSET
hex
depends on KASAN_GENERIC || KASAN_SW_TAGS
diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h
new file mode 100644
index 000000000000..d6b51deb7bf0
--- /dev/null
+++ b/arch/arm64/include/asm/arm_pmuv3.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#ifndef __ASM_PMUV3_H
+#define __ASM_PMUV3_H
+
+#include <linux/kvm_host.h>
+
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
+
+#define RETURN_READ_PMEVCNTRN(n) \
+ return read_sysreg(pmevcntr##n##_el0)
+static unsigned long read_pmevcntrn(int n)
+{
+ PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
+ return 0;
+}
+
+#define WRITE_PMEVCNTRN(n) \
+ write_sysreg(val, pmevcntr##n##_el0)
+static void write_pmevcntrn(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
+}
+
+#define WRITE_PMEVTYPERN(n) \
+ write_sysreg(val, pmevtyper##n##_el0)
+static void write_pmevtypern(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
+}
+
+static inline unsigned long read_pmmir(void)
+{
+ return read_cpuid(PMMIR_EL1);
+}
+
+static inline u32 read_pmuver(void)
+{
+ u64 dfr0 = read_sysreg(id_aa64dfr0_el1);
+
+ return cpuid_feature_extract_unsigned_field(dfr0,
+ ID_AA64DFR0_EL1_PMUVer_SHIFT);
+}
+
+static inline void write_pmcr(u32 val)
+{
+ write_sysreg(val, pmcr_el0);
+}
+
+static inline u32 read_pmcr(void)
+{
+ return read_sysreg(pmcr_el0);
+}
+
+static inline void write_pmselr(u32 val)
+{
+ write_sysreg(val, pmselr_el0);
+}
+
+static inline void write_pmccntr(u64 val)
+{
+ write_sysreg(val, pmccntr_el0);
+}
+
+static inline u64 read_pmccntr(void)
+{
+ return read_sysreg(pmccntr_el0);
+}
+
+static inline void write_pmxevcntr(u32 val)
+{
+ write_sysreg(val, pmxevcntr_el0);
+}
+
+static inline u32 read_pmxevcntr(void)
+{
+ return read_sysreg(pmxevcntr_el0);
+}
+
+static inline void write_pmxevtyper(u32 val)
+{
+ write_sysreg(val, pmxevtyper_el0);
+}
+
+static inline void write_pmcntenset(u32 val)
+{
+ write_sysreg(val, pmcntenset_el0);
+}
+
+static inline void write_pmcntenclr(u32 val)
+{
+ write_sysreg(val, pmcntenclr_el0);
+}
+
+static inline void write_pmintenset(u32 val)
+{
+ write_sysreg(val, pmintenset_el1);
+}
+
+static inline void write_pmintenclr(u32 val)
+{
+ write_sysreg(val, pmintenclr_el1);
+}
+
+static inline void write_pmccfiltr(u32 val)
+{
+ write_sysreg(val, pmccfiltr_el0);
+}
+
+static inline void write_pmovsclr(u32 val)
+{
+ write_sysreg(val, pmovsclr_el0);
+}
+
+static inline u32 read_pmovsclr(void)
+{
+ return read_sysreg(pmovsclr_el0);
+}
+
+static inline void write_pmuserenr(u32 val)
+{
+ write_sysreg(val, pmuserenr_el0);
+}
+
+static inline u32 read_pmceid0(void)
+{
+ return read_sysreg(pmceid0_el0);
+}
+
+static inline u32 read_pmceid1(void)
+{
+ return read_sysreg(pmceid1_el0);
+}
+
+static inline bool pmuv3_implemented(int pmuver)
+{
+ return !(pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF ||
+ pmuver == ID_AA64DFR0_EL1_PMUVer_NI);
+}
+
+static inline bool is_pmuv3p4(int pmuver)
+{
+ return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4;
+}
+
+static inline bool is_pmuv3p5(int pmuver)
+{
+ return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5;
+}
+
+#endif
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index a94d6dacc029..319958b95cfd 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -251,22 +251,15 @@ __lse__cmpxchg_case_##name##sz(volatile void *ptr, \
u##sz old, \
u##sz new) \
{ \
- register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
- register u##sz x1 asm ("x1") = old; \
- register u##sz x2 asm ("x2") = new; \
- unsigned long tmp; \
- \
asm volatile( \
__LSE_PREAMBLE \
- " mov %" #w "[tmp], %" #w "[old]\n" \
- " cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n" \
- " mov %" #w "[ret], %" #w "[tmp]" \
- : [ret] "+r" (x0), [v] "+Q" (*(u##sz *)ptr), \
- [tmp] "=&r" (tmp) \
- : [old] "r" (x1), [new] "r" (x2) \
+ " cas" #mb #sfx " %" #w "[old], %" #w "[new], %[v]\n" \
+ : [v] "+Q" (*(u##sz *)ptr), \
+ [old] "+r" (old) \
+ : [new] "rZ" (new) \
: cl); \
\
- return x0; \
+ return old; \
}
__CMPXCHG_CASE(w, b, , 8, )
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 3dd8982a9ce3..cf2987464c18 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -131,25 +131,25 @@ do { \
case 1: \
asm volatile ("stlrb %w1, %0" \
: "=Q" (*__p) \
- : "r" (*(__u8 *)__u.__c) \
+ : "rZ" (*(__u8 *)__u.__c) \
: "memory"); \
break; \
case 2: \
asm volatile ("stlrh %w1, %0" \
: "=Q" (*__p) \
- : "r" (*(__u16 *)__u.__c) \
+ : "rZ" (*(__u16 *)__u.__c) \
: "memory"); \
break; \
case 4: \
asm volatile ("stlr %w1, %0" \
: "=Q" (*__p) \
- : "r" (*(__u32 *)__u.__c) \
+ : "rZ" (*(__u32 *)__u.__c) \
: "memory"); \
break; \
case 8: \
- asm volatile ("stlr %1, %0" \
+ asm volatile ("stlr %x1, %0" \
: "=Q" (*__p) \
- : "r" (*(__u64 *)__u.__c) \
+ : "rZ" (*(__u64 *)__u.__c) \
: "memory"); \
break; \
} \
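For context on the "rZ" constraints above: the "Z" alternative lets the compiler satisfy a constant-zero operand with the zero register, so a release-store of 0 no longer needs a scratch GPR. A minimal, illustrative snippet (not taken from the patch) showing the same idea in isolation:

/* With "rZ", storing a literal 0 can assemble to "stlr xzr, [x0]"
 * instead of "mov x1, #0; stlr x1, [x0]". */
static inline void demo_store_release_zero(unsigned long *p)
{
	asm volatile("stlr %x1, %0"
		     : "=Q" (*p)
		     : "rZ" (0UL)
		     : "memory");
}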
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 9f362274a4f7..74575c3d6987 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -83,10 +83,6 @@ struct compat_statfs {
int f_spare[4];
};
-#define COMPAT_RLIM_INFINITY 0xffffffff
-
-#define COMPAT_OFF_T_MAX 0x7fffffff
-
#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
#define COMPAT_MINSIGSTKSZ 2048
diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
index 6fb2e6bcc392..9bbd7b7097ff 100644
--- a/arch/arm64/include/asm/compiler.h
+++ b/arch/arm64/include/asm/compiler.h
@@ -8,19 +8,33 @@
#define ARM64_ASM_PREAMBLE
#endif
-/*
- * The EL0/EL1 pointer bits used by a pointer authentication code.
- * This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply.
- */
-#define ptrauth_user_pac_mask() GENMASK_ULL(54, vabits_actual)
-#define ptrauth_kernel_pac_mask() GENMASK_ULL(63, vabits_actual)
+#define xpaclri(ptr) \
+({ \
+ register unsigned long __xpaclri_ptr asm("x30") = (ptr); \
+ \
+ asm( \
+ ARM64_ASM_PREAMBLE \
+ " hint #7\n" \
+ : "+r" (__xpaclri_ptr)); \
+ \
+ __xpaclri_ptr; \
+})
-/* Valid for EL0 TTBR0 and EL1 TTBR1 instruction pointers */
-#define ptrauth_clear_pac(ptr) \
- ((ptr & BIT_ULL(55)) ? (ptr | ptrauth_kernel_pac_mask()) : \
- (ptr & ~ptrauth_user_pac_mask()))
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+#define ptrauth_strip_kernel_insn_pac(ptr) xpaclri(ptr)
+#else
+#define ptrauth_strip_kernel_insn_pac(ptr) (ptr)
+#endif
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+#define ptrauth_strip_user_insn_pac(ptr) xpaclri(ptr)
+#else
+#define ptrauth_strip_user_insn_pac(ptr) (ptr)
+#endif
+#if !defined(CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC)
#define __builtin_return_address(val) \
- (void *)(ptrauth_clear_pac((unsigned long)__builtin_return_address(val)))
+ (void *)(ptrauth_strip_kernel_insn_pac((unsigned long)__builtin_return_address(val)))
+#endif
#endif /* __ASM_COMPILER_H */
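A hedged usage example (not part of the patch) of why the override above matters: callers of __builtin_return_address() expect a plain kernel pointer, and on toolchains whose builtin does not already strip the PAC, the macro quietly does it for them, e.g. before a return address is symbolized:

/* Assumes <linux/printk.h>; the PAC bits are removed before %pS
 * resolves the address to a symbol name. */
static noinline void report_caller(void)
{
	pr_info("called from %pS\n", __builtin_return_address(0));
}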
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 7b7e05c02691..13d437bcbf58 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -104,6 +104,7 @@ void user_regs_reset_single_step(struct user_pt_regs *regs,
void kernel_enable_single_step(struct pt_regs *regs);
void kernel_disable_single_step(void);
int kernel_active_single_step(void);
+void kernel_rewind_single_step(struct pt_regs *regs);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int reinstall_suspended_bps(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 71ed5fdf718b..58c294a96676 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -17,6 +17,7 @@
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
+#include <linux/math.h>
#include <linux/sizes.h>
#include <asm/boot.h>
#include <asm/page.h>
@@ -36,17 +37,13 @@ enum fixed_addresses {
FIX_HOLE,
/*
- * Reserve a virtual window for the FDT that is 2 MB larger than the
- * maximum supported size, and put it at the top of the fixmap region.
- * The additional space ensures that any FDT that does not exceed
- * MAX_FDT_SIZE can be mapped regardless of whether it crosses any
- * 2 MB alignment boundaries.
- *
- * Keep this at the top so it remains 2 MB aligned.
+ * Reserve a virtual window for the FDT that is a page bigger than the
+ * maximum supported size. The additional space ensures that any FDT
+ * that does not exceed MAX_FDT_SIZE can be mapped regardless of
+ * whether it crosses any page boundary.
*/
-#define FIX_FDT_SIZE (MAX_FDT_SIZE + SZ_2M)
FIX_FDT_END,
- FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,
+ FIX_FDT = FIX_FDT_END + DIV_ROUND_UP(MAX_FDT_SIZE, PAGE_SIZE) + 1,
FIX_EARLYCON_MEM_BASE,
FIX_TEXT_POKE0,
@@ -95,12 +92,15 @@ enum fixed_addresses {
__end_of_fixed_addresses
};
-#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+#define FIXADDR_TOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_TOT_START (FIXADDR_TOP - FIXADDR_TOT_SIZE)
#define FIXMAP_PAGE_IO __pgprot(PROT_DEVICE_nGnRE)
void __init early_fixmap_init(void);
+void __init fixmap_copy(pgd_t *pgdir);
#define __early_set_fixmap __set_fixmap
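To make the new FIX_FDT sizing concrete, a worked example assuming 4 KiB pages and MAX_FDT_SIZE == SZ_2M:

/* DIV_ROUND_UP(SZ_2M, SZ_4K) == 512 slots cover the FDT itself; the
 * extra "+ 1" slot provides one spare page, so a full 2 MiB FDT still
 * maps even when its start is not page aligned and it therefore spans
 * 513 pages. */
#define DEMO_FDT_SLOTS	(DIV_ROUND_UP(SZ_2M, SZ_4K) + 1)	/* == 513 */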
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 1c2672bbbf37..b87d70b693c6 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -70,10 +70,19 @@ struct ftrace_ops;
#define arch_ftrace_get_regs(regs) NULL
+/*
+ * Note: sizeof(struct ftrace_regs) must be a multiple of 16 to ensure correct
+ * stack alignment
+ */
struct ftrace_regs {
/* x0 - x8 */
unsigned long regs[9];
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ unsigned long direct_tramp;
+#else
unsigned long __unused;
+#endif
unsigned long fp;
unsigned long lr;
@@ -136,6 +145,19 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs);
#define ftrace_graph_func ftrace_graph_func
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
+ unsigned long addr)
+{
+ /*
+ * The ftrace trampoline will return to this address instead of the
+ * instrumented function.
+ */
+ fregs->direct_tramp = addr;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
#endif
#define ftrace_return_address(n) return_address(n)
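A hedged sketch (not from this patch) of how the helper above is consumed: an ftrace_ops callback can ask the trampoline to return into a custom trampoline instead of the traced function. In practice the ftrace core does this on behalf of direct-call users such as BPF trampolines; my_tramp is a purely illustrative, assumed symbol:

extern void my_tramp(void);	/* hypothetical assembly trampoline */

static void demo_redirect(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* Make the arm64 ftrace trampoline "return" into my_tramp. */
	arch_ftrace_set_direct_caller(fregs, (unsigned long)my_tramp);
}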
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index fcd14197756f..186dd7f85b14 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -59,8 +59,11 @@
#define EARLY_KASLR (0)
#endif
+#define SPAN_NR_ENTRIES(vstart, vend, shift) \
+ ((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1)
+
#define EARLY_ENTRIES(vstart, vend, shift, add) \
- ((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + add)
+ (SPAN_NR_ENTRIES(vstart, vend, shift) + (add))
#define EARLY_PGDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT, add))
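A quick worked example of the new helper, using illustrative values with a 4 KiB granule (shift == 12):

/*
 * A span from 0x1000 up to (but not including) 0x3001 touches pages
 * 1, 2 and 3:
 *
 *   SPAN_NR_ENTRIES(0x1000, 0x3001, 12)
 *     == ((0x3000 >> 12) - (0x1000 >> 12) + 1)
 *     == (3 - 1 + 1) == 3
 *
 * even though the raw size (0x2001 bytes) is only just over two pages;
 * counting from the shifted endpoints accounts for the span straddling
 * entry boundaries.
 */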
diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
index 559bfae26715..9ac9572a3bbe 100644
--- a/arch/arm64/include/asm/kexec.h
+++ b/arch/arm64/include/asm/kexec.h
@@ -102,12 +102,6 @@ void cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
int machine_kexec_post_load(struct kimage *image);
#define machine_kexec_post_load machine_kexec_post_load
-
-void arch_kexec_protect_crashkres(void);
-#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres
-
-void arch_kexec_unprotect_crashkres(void);
-#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres
#endif
#define ARCH_HAS_KIMAGE_ARCH
diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
index aa855c6a0ae6..a81937fae9f6 100644
--- a/arch/arm64/include/asm/kfence.h
+++ b/arch/arm64/include/asm/kfence.h
@@ -19,4 +19,14 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
return true;
}
+#ifdef CONFIG_KFENCE
+extern bool kfence_early_init;
+static inline bool arm64_kfence_can_set_direct_map(void)
+{
+ return !kfence_early_init;
+}
+#else /* CONFIG_KFENCE */
+static inline bool arm64_kfence_can_set_direct_map(void) { return false; }
+#endif /* CONFIG_KFENCE */
+
#endif /* __ASM_KFENCE_H */
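The expected consumer of arm64_kfence_can_set_direct_map() lives in arch/arm64/mm/pageattr.c, which this series touches but which is not shown in this section. A hedged sketch of its likely shape: when the KFENCE pool is allocated early it never has to flip linear-map permissions later, so it no longer forces the whole linear map down to page granularity:

bool can_set_direct_map(void)
{
	/*
	 * rodata_full and DEBUG_PAGEALLOC require the linear map to be
	 * page granular so individual pages can be re-protected; KFENCE
	 * only adds that requirement when its pool is allocated late.
	 */
	return rodata_full || debug_pagealloc_enabled() ||
	       arm64_kfence_can_set_direct_map();
}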
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 78e5163836a0..efcd68154a3a 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -374,11 +374,6 @@ static inline void *phys_to_virt(phys_addr_t x)
})
void dump_mem_limit(void);
-
-static inline bool defer_reserve_crashkernel(void)
-{
- return IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32);
-}
#endif /* !ASSEMBLY */
/*
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 48f8466a4be9..4384eaa0aeb7 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -65,6 +65,8 @@ extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
+extern void create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
+ phys_addr_t size, pgprot_t prot);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot, bool page_mappings_only);
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 3eaf462f5752..eb7071c9eb34 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -9,255 +9,6 @@
#include <asm/stack_pointer.h>
#include <asm/ptrace.h>
-#define ARMV8_PMU_MAX_COUNTERS 32
-#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1)
-
-/*
- * Common architectural and microarchitectural event numbers.
- */
-#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x0000
-#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL 0x0001
-#define ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL 0x0002
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x0003
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x0004
-#define ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL 0x0005
-#define ARMV8_PMUV3_PERFCTR_LD_RETIRED 0x0006
-#define ARMV8_PMUV3_PERFCTR_ST_RETIRED 0x0007
-#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x0008
-#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN 0x0009
-#define ARMV8_PMUV3_PERFCTR_EXC_RETURN 0x000A
-#define ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED 0x000B
-#define ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED 0x000C
-#define ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED 0x000D
-#define ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED 0x000E
-#define ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED 0x000F
-#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x0010
-#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x0011
-#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x0012
-#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS 0x0013
-#define ARMV8_PMUV3_PERFCTR_L1I_CACHE 0x0014
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB 0x0015
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE 0x0016
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL 0x0017
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB 0x0018
-#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS 0x0019
-#define ARMV8_PMUV3_PERFCTR_MEMORY_ERROR 0x001A
-#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x001B
-#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED 0x001C
-#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES 0x001D
-#define ARMV8_PMUV3_PERFCTR_CHAIN 0x001E
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE 0x001F
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE 0x0020
-#define ARMV8_PMUV3_PERFCTR_BR_RETIRED 0x0021
-#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED 0x0022
-#define ARMV8_PMUV3_PERFCTR_STALL_FRONTEND 0x0023
-#define ARMV8_PMUV3_PERFCTR_STALL_BACKEND 0x0024
-#define ARMV8_PMUV3_PERFCTR_L1D_TLB 0x0025
-#define ARMV8_PMUV3_PERFCTR_L1I_TLB 0x0026
-#define ARMV8_PMUV3_PERFCTR_L2I_CACHE 0x0027
-#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL 0x0028
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE 0x0029
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL 0x002A
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE 0x002B
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB 0x002C
-#define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL 0x002D
-#define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL 0x002E
-#define ARMV8_PMUV3_PERFCTR_L2D_TLB 0x002F
-#define ARMV8_PMUV3_PERFCTR_L2I_TLB 0x0030
-#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS 0x0031
-#define ARMV8_PMUV3_PERFCTR_LL_CACHE 0x0032
-#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS 0x0033
-#define ARMV8_PMUV3_PERFCTR_DTLB_WALK 0x0034
-#define ARMV8_PMUV3_PERFCTR_ITLB_WALK 0x0035
-#define ARMV8_PMUV3_PERFCTR_LL_CACHE_RD 0x0036
-#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD 0x0037
-#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD 0x0038
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD 0x0039
-#define ARMV8_PMUV3_PERFCTR_OP_RETIRED 0x003A
-#define ARMV8_PMUV3_PERFCTR_OP_SPEC 0x003B
-#define ARMV8_PMUV3_PERFCTR_STALL 0x003C
-#define ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND 0x003D
-#define ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND 0x003E
-#define ARMV8_PMUV3_PERFCTR_STALL_SLOT 0x003F
-
-/* Statistical profiling extension microarchitectural events */
-#define ARMV8_SPE_PERFCTR_SAMPLE_POP 0x4000
-#define ARMV8_SPE_PERFCTR_SAMPLE_FEED 0x4001
-#define ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE 0x4002
-#define ARMV8_SPE_PERFCTR_SAMPLE_COLLISION 0x4003
-
-/* AMUv1 architecture events */
-#define ARMV8_AMU_PERFCTR_CNT_CYCLES 0x4004
-#define ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM 0x4005
-
-/* long-latency read miss events */
-#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS 0x4006
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD 0x4009
-#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS 0x400A
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD 0x400B
-
-/* Trace buffer events */
-#define ARMV8_PMUV3_PERFCTR_TRB_WRAP 0x400C
-#define ARMV8_PMUV3_PERFCTR_TRB_TRIG 0x400E
-
-/* Trace unit events */
-#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT0 0x4010
-#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT1 0x4011
-#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT2 0x4012
-#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT3 0x4013
-#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT4 0x4018
-#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT5 0x4019
-#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT6 0x401A
-#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT7 0x401B
-
-/* additional latency from alignment events */
-#define ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT 0x4020
-#define ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT 0x4021
-#define ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT 0x4022
-
-/* Armv8.5 Memory Tagging Extension events */
-#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED 0x4024
-#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD 0x4025
-#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR 0x4026
-
-/* ARMv8 recommended implementation defined event types */
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD 0x0040
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR 0x0041
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD 0x0042
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR 0x0043
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_INNER 0x0044
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_OUTER 0x0045
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_VICTIM 0x0046
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_CLEAN 0x0047
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_INVAL 0x0048
-
-#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD 0x004C
-#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR 0x004D
-#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD 0x004E
-#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR 0x004F
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_RD 0x0050
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WR 0x0051
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_RD 0x0052
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_WR 0x0053
-
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_VICTIM 0x0056
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_CLEAN 0x0057
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_INVAL 0x0058
-
-#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_RD 0x005C
-#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_WR 0x005D
-#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_RD 0x005E
-#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_WR 0x005F
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD 0x0060
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR 0x0061
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_SHARED 0x0062
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NOT_SHARED 0x0063
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NORMAL 0x0064
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_PERIPH 0x0065
-#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_RD 0x0066
-#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_WR 0x0067
-#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LD_SPEC 0x0068
-#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_ST_SPEC 0x0069
-#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LDST_SPEC 0x006A
-
-#define ARMV8_IMPDEF_PERFCTR_LDREX_SPEC 0x006C
-#define ARMV8_IMPDEF_PERFCTR_STREX_PASS_SPEC 0x006D
-#define ARMV8_IMPDEF_PERFCTR_STREX_FAIL_SPEC 0x006E
-#define ARMV8_IMPDEF_PERFCTR_STREX_SPEC 0x006F
-#define ARMV8_IMPDEF_PERFCTR_LD_SPEC 0x0070
-#define ARMV8_IMPDEF_PERFCTR_ST_SPEC 0x0071
-#define ARMV8_IMPDEF_PERFCTR_LDST_SPEC 0x0072
-#define ARMV8_IMPDEF_PERFCTR_DP_SPEC 0x0073
-#define ARMV8_IMPDEF_PERFCTR_ASE_SPEC 0x0074
-#define ARMV8_IMPDEF_PERFCTR_VFP_SPEC 0x0075
-#define ARMV8_IMPDEF_PERFCTR_PC_WRITE_SPEC 0x0076
-#define ARMV8_IMPDEF_PERFCTR_CRYPTO_SPEC 0x0077
-#define ARMV8_IMPDEF_PERFCTR_BR_IMMED_SPEC 0x0078
-#define ARMV8_IMPDEF_PERFCTR_BR_RETURN_SPEC 0x0079
-#define ARMV8_IMPDEF_PERFCTR_BR_INDIRECT_SPEC 0x007A
-
-#define ARMV8_IMPDEF_PERFCTR_ISB_SPEC 0x007C
-#define ARMV8_IMPDEF_PERFCTR_DSB_SPEC 0x007D
-#define ARMV8_IMPDEF_PERFCTR_DMB_SPEC 0x007E
-
-#define ARMV8_IMPDEF_PERFCTR_EXC_UNDEF 0x0081
-#define ARMV8_IMPDEF_PERFCTR_EXC_SVC 0x0082
-#define ARMV8_IMPDEF_PERFCTR_EXC_PABORT 0x0083
-#define ARMV8_IMPDEF_PERFCTR_EXC_DABORT 0x0084
-
-#define ARMV8_IMPDEF_PERFCTR_EXC_IRQ 0x0086
-#define ARMV8_IMPDEF_PERFCTR_EXC_FIQ 0x0087
-#define ARMV8_IMPDEF_PERFCTR_EXC_SMC 0x0088
-
-#define ARMV8_IMPDEF_PERFCTR_EXC_HVC 0x008A
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_PABORT 0x008B
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_DABORT 0x008C
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_OTHER 0x008D
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_IRQ 0x008E
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_FIQ 0x008F
-#define ARMV8_IMPDEF_PERFCTR_RC_LD_SPEC 0x0090
-#define ARMV8_IMPDEF_PERFCTR_RC_ST_SPEC 0x0091
-
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_RD 0x00A0
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WR 0x00A1
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_RD 0x00A2
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_WR 0x00A3
-
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_VICTIM 0x00A6
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_CLEAN 0x00A7
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_INVAL 0x00A8
-
-/*
- * Per-CPU PMCR: config reg
- */
-#define ARMV8_PMU_PMCR_E (1 << 0) /* Enable all counters */
-#define ARMV8_PMU_PMCR_P (1 << 1) /* Reset all counters */
-#define ARMV8_PMU_PMCR_C (1 << 2) /* Cycle counter reset */
-#define ARMV8_PMU_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
-#define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */
-#define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
-#define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */
-#define ARMV8_PMU_PMCR_LP (1 << 7) /* Long event counter enable */
-#define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */
-#define ARMV8_PMU_PMCR_N_MASK 0x1f
-#define ARMV8_PMU_PMCR_MASK 0xff /* Mask for writable bits */
-
-/*
- * PMOVSR: counters overflow flag status reg
- */
-#define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */
-#define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK
-
-/*
- * PMXEVTYPER: Event selection reg
- */
-#define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
-#define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
-
-/*
- * Event filters for PMUv3
- */
-#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31)
-#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30)
-#define ARMV8_PMU_INCLUDE_EL2 (1U << 27)
-
-/*
- * PMUSERENR: user enable reg
- */
-#define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */
-#define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */
-#define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */
-#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */
-#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */
-
-/* PMMIR_EL1.SLOTS mask */
-#define ARMV8_PMU_SLOTS_MASK 0xff
-
-#define ARMV8_PMU_BUS_SLOTS_SHIFT 8
-#define ARMV8_PMU_BUS_SLOTS_MASK 0xff
-#define ARMV8_PMU_BUS_WIDTH_SHIFT 16
-#define ARMV8_PMU_BUS_WIDTH_MASK 0xf
-
#ifdef CONFIG_PERF_EVENTS
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index efb098de3a84..d2e0306e65d3 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -10,6 +10,13 @@
#include <asm/memory.h>
#include <asm/sysreg.h>
+/*
+ * The EL0/EL1 pointer bits used by a pointer authentication code.
+ * This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply.
+ */
+#define ptrauth_user_pac_mask() GENMASK_ULL(54, vabits_actual)
+#define ptrauth_kernel_pac_mask() GENMASK_ULL(63, vabits_actual)
+
#define PR_PAC_ENABLED_KEYS_MASK \
(PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY)
@@ -97,11 +104,6 @@ extern int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
unsigned long enabled);
extern int ptrauth_get_enabled_keys(struct task_struct *tsk);
-static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
-{
- return ptrauth_clear_pac(ptr);
-}
-
static __always_inline void ptrauth_enable(void)
{
if (!system_supports_address_auth())
@@ -133,7 +135,6 @@ static __always_inline void ptrauth_enable(void)
#define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL)
#define ptrauth_set_enabled_keys(tsk, keys, enabled) (-EINVAL)
#define ptrauth_get_enabled_keys(tsk) (-EINVAL)
-#define ptrauth_strip_insn_pac(lr) (lr)
#define ptrauth_suspend_exit()
#define ptrauth_thread_init_user()
#define ptrauth_thread_switch_user(tsk)
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 9e3ecba3c4e6..c48b41c9b0cc 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -419,9 +419,6 @@
#define SYS_MDCR_EL2 sys_reg(3, 4, 1, 1, 1)
#define SYS_CPTR_EL2 sys_reg(3, 4, 1, 1, 2)
#define SYS_HSTR_EL2 sys_reg(3, 4, 1, 1, 3)
-#define SYS_HFGRTR_EL2 sys_reg(3, 4, 1, 1, 4)
-#define SYS_HFGWTR_EL2 sys_reg(3, 4, 1, 1, 5)
-#define SYS_HFGITR_EL2 sys_reg(3, 4, 1, 1, 6)
#define SYS_HACR_EL2 sys_reg(3, 4, 1, 1, 7)
#define SYS_TTBR0_EL2 sys_reg(3, 4, 2, 0, 0)
@@ -758,12 +755,6 @@
#define ICH_VTR_TDS_SHIFT 19
#define ICH_VTR_TDS_MASK (1 << ICH_VTR_TDS_SHIFT)
-/* HFG[WR]TR_EL2 bit definitions */
-#define HFGxTR_EL2_nTPIDR2_EL0_SHIFT 55
-#define HFGxTR_EL2_nTPIDR2_EL0_MASK BIT_MASK(HFGxTR_EL2_nTPIDR2_EL0_SHIFT)
-#define HFGxTR_EL2_nSMPRI_EL1_SHIFT 54
-#define HFGxTR_EL2_nSMPRI_EL1_MASK BIT_MASK(HFGxTR_EL2_nSMPRI_EL1_SHIFT)
-
#define ARM64_FEATURE_FIELD_BITS 4
/* Defined for compatibility only, do not add new users. */
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 5c7b2f9d5913..8209e6a86989 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -237,7 +237,7 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
"1: " load " " reg "1, [%2]\n" \
"2:\n" \
_ASM_EXTABLE_##type##ACCESS_ERR_ZERO(1b, 2b, %w0, %w1) \
- : "+r" (err), "=&r" (x) \
+ : "+r" (err), "=r" (x) \
: "r" (addr))
#define __raw_get_mem(ldr, x, ptr, err, type) \
@@ -327,7 +327,7 @@ do { \
"2:\n" \
_ASM_EXTABLE_##type##ACCESS_ERR(1b, 2b, %w0) \
: "+r" (err) \
- : "r" (x), "r" (addr))
+ : "rZ" (x), "r" (addr))
#define __raw_put_mem(str, x, ptr, err, type) \
do { \
@@ -449,8 +449,6 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
-struct page;
-void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);
static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index ceba6792f5b3..7c2bb4e72476 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -45,7 +45,6 @@ obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 8a9052cf3013..1febd412b4d2 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -420,14 +420,14 @@ static DEFINE_MUTEX(insn_emulation_mutex);
static void enable_insn_hw_mode(void *data)
{
- struct insn_emulation *insn = (struct insn_emulation *)data;
+ struct insn_emulation *insn = data;
if (insn->set_hw_mode)
insn->set_hw_mode(true);
}
static void disable_insn_hw_mode(void *data)
{
- struct insn_emulation *insn = (struct insn_emulation *)data;
+ struct insn_emulation *insn = data;
if (insn->set_hw_mode)
insn->set_hw_mode(false);
}
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index ae345b06e9f7..0996094b0d22 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -93,6 +93,9 @@ int main(void)
DEFINE(FREGS_LR, offsetof(struct ftrace_regs, lr));
DEFINE(FREGS_SP, offsetof(struct ftrace_regs, sp));
DEFINE(FREGS_PC, offsetof(struct ftrace_regs, pc));
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ DEFINE(FREGS_DIRECT_TRAMP, offsetof(struct ftrace_regs, direct_tramp));
+#endif
DEFINE(FREGS_SIZE, sizeof(struct ftrace_regs));
BLANK();
#endif
@@ -197,6 +200,9 @@ int main(void)
#endif
#ifdef CONFIG_FUNCTION_TRACER
DEFINE(FTRACE_OPS_FUNC, offsetof(struct ftrace_ops, func));
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ DEFINE(FTRACE_OPS_DIRECT_CALL, offsetof(struct ftrace_ops, direct_call));
+#endif
#endif
return 0;
}
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 2e3e55139777..1bdad599e769 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -140,6 +140,13 @@ void dump_cpu_features(void)
pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
}
+#define ARM64_CPUID_FIELDS(reg, field, min_value) \
+ .sys_reg = SYS_##reg, \
+ .field_pos = reg##_##field##_SHIFT, \
+ .field_width = reg##_##field##_WIDTH, \
+ .sign = reg##_##field##_SIGNED, \
+ .min_field_value = reg##_##field##_##min_value,
+
#define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
{ \
.sign = SIGNED, \
@@ -2206,22 +2213,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_GIC_CPUIF_SYSREGS,
.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
.matches = has_useable_gicv3_cpuif,
- .sys_reg = SYS_ID_AA64PFR0_EL1,
- .field_pos = ID_AA64PFR0_EL1_GIC_SHIFT,
- .field_width = 4,
- .sign = FTR_UNSIGNED,
- .min_field_value = 1,
+ ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, GIC, IMP)
},
{
.desc = "Enhanced Counter Virtualization",
.capability = ARM64_HAS_ECV,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
- .sys_reg = SYS_ID_AA64MMFR0_EL1,
- .field_pos = ID_AA64MMFR0_EL1_ECV_SHIFT,
- .field_width = 4,
- .sign = FTR_UNSIGNED,
- .min_field_value = 1,
+ ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, ECV, IMP)
},
#ifdef CONFIG_ARM64_PAN
{
@@ -2229,12 +2228,8 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_PAN,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
- .sys_reg = SYS_ID_AA64MMFR1_EL1,
- .field_pos = ID_AA64MMFR1_EL1_PAN_SHIFT,
- .field_width = 4,
- .sign = FTR_UNSIGNED,
- .min_field_value = 1,
.cpu_enable = cpu_enable_pan,
+ ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, PAN, IMP)
},
#endif /* CONFIG_ARM64_PAN */
#ifdef CONFIG_ARM64_EPAN
@@ -2243,11 +2238,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_EPAN,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
- .sys_reg = SYS_ID_AA64MMFR1_EL1,
- .field_pos = ID_AA64MMFR1_EL1_PAN_SHIFT,
- .field_width = 4,
- .sign = FTR_UNSIGNED,
- .min_field_value = 3,
+ ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, PAN, PAN3)
},
#endif /* CONFIG_ARM64_EPAN */
#ifdef CONFIG_ARM64_LSE_ATOMICS
@@ -2256,11 +2247,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_LSE_ATOMICS,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
- .sys_reg = SYS_ID_AA64ISAR0_EL1,
- .field_pos = ID_AA64ISAR0_EL1_ATOMIC_SHIFT,
- .field_width = 4,
- .sign = FTR_UNSIGNED,
- .min_field_value = 2,
+ ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, ATOMIC, IMP)
},
#endif /* CONFIG_ARM64_LSE_ATOMICS */
{
@@ -2281,21 +2268,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_NESTED_VIRT,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_nested_virt_support,
- .sys_reg = SYS_ID_AA64MMFR2_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64MMFR2_EL1_NV_SHIFT,
- .field_width = 4,
- .min_field_value = ID_AA64MMFR2_EL1_NV_IMP,
+ ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, NV, IMP)
},
{
.capability = ARM64_HAS_32BIT_EL0_DO_NOT_USE,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_32bit_el0,
- .sys_reg = SYS_ID_AA64PFR0_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64PFR0_EL1_EL0_SHIFT,
- .field_width = 4,
- .min_field_value = ID_AA64PFR0_EL1_ELx_32BIT_64BIT,
+ ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, EL0, AARCH32)
},
#ifdef CONFIG_KVM
{
@@ -2303,11 +2282,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_32BIT_EL1,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
- .sys_reg = SYS_ID_AA64PFR0_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64PFR0_EL1_EL1_SHIFT,
- .field_width = 4,
- .min_field_value = ID_AA64PFR0_EL1_ELx_32BIT_64BIT,
+ ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, EL1, AARCH32)
},
{
.desc = "Protected KVM",
@@ -2320,17 +2295,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.desc = "Kernel page table isolation (KPTI)",
.capability = ARM64_UNMAP_KERNEL_AT_EL0,
.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
+ .cpu_enable = kpti_install_ng_mappings,
+ .matches = unmap_kernel_at_el0,
/*
* The ID feature fields below are used to indicate that
* the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
* more details.
*/
- .sys_reg = SYS_ID_AA64PFR0_EL1,
- .field_pos = ID_AA64PFR0_EL1_CSV3_SHIFT,
- .field_width = 4,
- .min_field_value = 1,
- .matches = unmap_kernel_at_el0,
- .cpu_enable = kpti_install_ng_mappings,
+ ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, CSV3, IMP)
},
{
/* FP/SIMD is not implemented */
@@ -2345,21 +2317,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_DCPOP,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
- .sys_reg = SYS_ID_AA64ISAR1_EL1,
- .field_pos = ID_AA64ISAR1_EL1_DPB_SHIFT,
- .field_width = 4,
- .min_field_value = 1,
+ ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, DPB, IMP)
},
{
.desc = "Data cache clean to Point of Deep Persistence",
.capability = ARM64_HAS_DCPODP,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
- .sys_reg = SYS_ID_AA64ISAR1_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64ISAR1_EL1_DPB_SHIFT,
- .field_width = 4,
- .min_field_value = 2,
+ ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, DPB, DPB2)
},
#endif
#ifdef CONFIG_ARM64_SVE
@@ -2367,13 +2332,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.desc = "Scalable Vector Extension",
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.capability = ARM64_SVE,
- .sys_reg = SYS_ID_AA64PFR0_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64PFR0_EL1_SVE_SHIFT,
- .field_width = 4,
- .min_field_value = ID_AA64PFR0_EL1_SVE_IMP,
- .matches = has_cpuid_feature,
.cpu_enable = sve_kernel_enable,
+ .matches = has_cpuid_feature,
+ ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, SVE, IMP)
},
#endif /* CONFIG_ARM64_SVE */
#ifdef CONFIG_ARM64_RAS_EXTN
@@ -2382,12 +2343,8 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_RAS_EXTN,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
- .sys_reg = SYS_ID_AA64PFR0_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64PFR0_EL1_RAS_SHIFT,
- .field_width = 4,
- .min_field_value = ID_AA64PFR0_EL1_RAS_IMP,
.cpu_enable = cpu_clear_disr,
+ ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, RAS, IMP)
},
#endif /* CONFIG_ARM64_RAS_EXTN */
#ifdef CONFIG_ARM64_AMU_EXTN
@@ -2401,12 +2358,8 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_AMU_EXTN,
.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
.matches = has_amu,
- .sys_reg = SYS_ID_AA64PFR0_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64PFR0_EL1_AMU_SHIFT,
- .field_width = 4,
- .min_field_value = ID_AA64PFR0_EL1_AMU_IMP,
.cpu_enable = cpu_amu_enable,
+ ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, AMU, IMP)
},
#endif /* CONFIG_ARM64_AMU_EXTN */
{
@@ -2426,34 +2379,22 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.desc = "Stage-2 Force Write-Back",
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.capability = ARM64_HAS_STAGE2_FWB,
- .sys_reg = SYS_ID_AA64MMFR2_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64MMFR2_EL1_FWB_SHIFT,
- .field_width = 4,
- .min_field_value = 1,
.matches = has_cpuid_feature,
+ ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, FWB, IMP)
},
{
.desc = "ARMv8.4 Translation Table Level",
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.capability = ARM64_HAS_ARMv8_4_TTL,
- .sys_reg = SYS_ID_AA64MMFR2_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64MMFR2_EL1_TTL_SHIFT,
- .field_width = 4,
- .min_field_value = 1,
.matches = has_cpuid_feature,
+ ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, TTL, IMP)
},
{
.desc = "TLB range maintenance instructions",
.capability = ARM64_HAS_TLB_RANGE,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
- .sys_reg = SYS_ID_AA64ISAR0_EL1,
- .field_pos = ID_AA64ISAR0_EL1_TLB_SHIFT,
- .field_width = 4,
- .sign = FTR_UNSIGNED,
- .min_field_value = ID_AA64ISAR0_EL1_TLB_RANGE,
+ ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, TLB, RANGE)
},
#ifdef CONFIG_ARM64_HW_AFDBM
{
@@ -2467,13 +2408,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
*/
.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
.capability = ARM64_HW_DBM,
- .sys_reg = SYS_ID_AA64MMFR1_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64MMFR1_EL1_HAFDBS_SHIFT,
- .field_width = 4,
- .min_field_value = 2,
.matches = has_hw_dbm,
.cpu_enable = cpu_enable_hw_dbm,
+ ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, HAFDBS, DBM)
},
#endif
{
@@ -2481,21 +2418,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_CRC32,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
- .sys_reg = SYS_ID_AA64ISAR0_EL1,
- .field_pos = ID_AA64ISAR0_EL1_CRC32_SHIFT,
- .field_width = 4,
- .min_field_value = 1,
+ ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, CRC32, IMP)
},
{
.desc = "Speculative Store Bypassing Safe (SSBS)",
.capability = ARM64_SSBS,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
- .sys_reg = SYS_ID_AA64PFR1_EL1,
- .field_pos = ID_AA64PFR1_EL1_SSBS_SHIFT,
- .field_width = 4,
- .sign = FTR_UNSIGNED,
- .min_field_value = ID_AA64PFR1_EL1_SSBS_IMP,
+ ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SSBS, IMP)
},
#ifdef CONFIG_ARM64_CNP
{
@@ -2503,12 +2433,8 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_CNP,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_useable_cnp,
- .sys_reg = SYS_ID_AA64MMFR2_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64MMFR2_EL1_CnP_SHIFT,
- .field_width = 4,
- .min_field_value = 1,
.cpu_enable = cpu_enable_cnp,
+ ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, CnP, IMP)
},
#endif
{
@@ -2516,45 +2442,29 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_SB,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
- .sys_reg = SYS_ID_AA64ISAR1_EL1,
- .field_pos = ID_AA64ISAR1_EL1_SB_SHIFT,
- .field_width = 4,
- .sign = FTR_UNSIGNED,
- .min_field_value = 1,
+ ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, SB, IMP)
},
#ifdef CONFIG_ARM64_PTR_AUTH
{
.desc = "Address authentication (architected QARMA5 algorithm)",
.capability = ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA5,
.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
- .sys_reg = SYS_ID_AA64ISAR1_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64ISAR1_EL1_APA_SHIFT,
- .field_width = 4,
- .min_field_value = ID_AA64ISAR1_EL1_APA_PAuth,
.matches = has_address_auth_cpucap,
+ ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, APA, PAuth)
},
{
.desc = "Address authentication (architected QARMA3 algorithm)",
.capability = ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA3,
.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
- .sys_reg = SYS_ID_AA64ISAR2_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64ISAR2_EL1_APA3_SHIFT,
- .field_width = 4,
- .min_field_value = ID_AA64ISAR2_EL1_APA3_PAuth,
.matches = has_address_auth_cpucap,
+ ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, APA3, PAuth)
},
{
.desc = "Address authentication (IMP DEF algorithm)",
.capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
- .sys_reg = SYS_ID_AA64ISAR1_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64ISAR1_EL1_API_SHIFT,
- .field_width = 4,
- .min_field_value = ID_AA64ISAR1_EL1_API_PAuth,
.matches = has_address_auth_cpucap,
+ ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, API, PAuth)
},
{
.capability = ARM64_HAS_ADDRESS_AUTH,
@@ -2565,34 +2475,22 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.desc = "Generic authentication (architected QARMA5 algorithm)",
.capability = ARM64_HAS_GENERIC_AUTH_ARCH_QARMA5,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
- .sys_reg = SYS_ID_AA64ISAR1_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64ISAR1_EL1_GPA_SHIFT,
- .field_width = 4,
- .min_field_value = ID_AA64ISAR1_EL1_GPA_IMP,
.matches = has_cpuid_feature,
+ ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, GPA, IMP)
},
{
.desc = "Generic authentication (architected QARMA3 algorithm)",
.capability = ARM64_HAS_GENERIC_AUTH_ARCH_QARMA3,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
- .sys_reg = SYS_ID_AA64ISAR2_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64ISAR2_EL1_GPA3_SHIFT,
- .field_width = 4,
- .min_field_value = ID_AA64ISAR2_EL1_GPA3_IMP,
.matches = has_cpuid_feature,
+ ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, GPA3, IMP)
},
{
.desc = "Generic authentication (IMP DEF algorithm)",
.capability = ARM64_HAS_GENERIC_AUTH_IMP_DEF,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
- .sys_reg = SYS_ID_AA64ISAR1_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64ISAR1_EL1_GPI_SHIFT,
- .field_width = 4,
- .min_field_value = ID_AA64ISAR1_EL1_GPI_IMP,
.matches = has_cpuid_feature,
+ ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, GPI, IMP)
},
{
.capability = ARM64_HAS_GENERIC_AUTH,
@@ -2624,13 +2522,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.desc = "E0PD",
.capability = ARM64_HAS_E0PD,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
- .sys_reg = SYS_ID_AA64MMFR2_EL1,
- .sign = FTR_UNSIGNED,
- .field_width = 4,
- .field_pos = ID_AA64MMFR2_EL1_E0PD_SHIFT,
- .matches = has_cpuid_feature,
- .min_field_value = 1,
.cpu_enable = cpu_enable_e0pd,
+ .matches = has_cpuid_feature,
+ ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, E0PD, IMP)
},
#endif
{
@@ -2638,11 +2532,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_RNG,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
- .sys_reg = SYS_ID_AA64ISAR0_EL1,
- .field_pos = ID_AA64ISAR0_EL1_RNDR_SHIFT,
- .field_width = 4,
- .sign = FTR_UNSIGNED,
- .min_field_value = 1,
+ ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, RNDR, IMP)
},
#ifdef CONFIG_ARM64_BTI
{
@@ -2655,11 +2545,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
#endif
.matches = has_cpuid_feature,
.cpu_enable = bti_enable,
- .sys_reg = SYS_ID_AA64PFR1_EL1,
- .field_pos = ID_AA64PFR1_EL1_BT_SHIFT,
- .field_width = 4,
- .min_field_value = ID_AA64PFR1_EL1_BT_IMP,
- .sign = FTR_UNSIGNED,
+ ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, BT, IMP)
},
#endif
#ifdef CONFIG_ARM64_MTE
@@ -2668,120 +2554,80 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_MTE,
.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
.matches = has_cpuid_feature,
- .sys_reg = SYS_ID_AA64PFR1_EL1,
- .field_pos = ID_AA64PFR1_EL1_MTE_SHIFT,
- .field_width = 4,
- .min_field_value = ID_AA64PFR1_EL1_MTE_MTE2,
- .sign = FTR_UNSIGNED,
.cpu_enable = cpu_enable_mte,
+ ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, MTE, MTE2)
},
{
.desc = "Asymmetric MTE Tag Check Fault",
.capability = ARM64_MTE_ASYMM,
.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
.matches = has_cpuid_feature,
- .sys_reg = SYS_ID_AA64PFR1_EL1,
- .field_pos = ID_AA64PFR1_EL1_MTE_SHIFT,
- .field_width = 4,
- .min_field_value = ID_AA64PFR1_EL1_MTE_MTE3,
- .sign = FTR_UNSIGNED,
+ ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, MTE, MTE3)
},
#endif /* CONFIG_ARM64_MTE */
{
.desc = "RCpc load-acquire (LDAPR)",
.capability = ARM64_HAS_LDAPR,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
- .sys_reg = SYS_ID_AA64ISAR1_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64ISAR1_EL1_LRCPC_SHIFT,
- .field_width = 4,
.matches = has_cpuid_feature,
- .min_field_value = 1,
+ ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, LRCPC, IMP)
},
#ifdef CONFIG_ARM64_SME
{
.desc = "Scalable Matrix Extension",
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.capability = ARM64_SME,
- .sys_reg = SYS_ID_AA64PFR1_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64PFR1_EL1_SME_SHIFT,
- .field_width = 4,
- .min_field_value = ID_AA64PFR1_EL1_SME_IMP,
.matches = has_cpuid_feature,
.cpu_enable = sme_kernel_enable,
+ ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SME, IMP)
},
/* FA64 should be sorted after the base SME capability */
{
.desc = "FA64",
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.capability = ARM64_SME_FA64,
- .sys_reg = SYS_ID_AA64SMFR0_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64SMFR0_EL1_FA64_SHIFT,
- .field_width = 1,
- .min_field_value = ID_AA64SMFR0_EL1_FA64_IMP,
.matches = has_cpuid_feature,
.cpu_enable = fa64_kernel_enable,
+ ARM64_CPUID_FIELDS(ID_AA64SMFR0_EL1, FA64, IMP)
},
{
.desc = "SME2",
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.capability = ARM64_SME2,
- .sys_reg = SYS_ID_AA64PFR1_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64PFR1_EL1_SME_SHIFT,
- .field_width = ID_AA64PFR1_EL1_SME_WIDTH,
- .min_field_value = ID_AA64PFR1_EL1_SME_SME2,
.matches = has_cpuid_feature,
.cpu_enable = sme2_kernel_enable,
+ ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SME, SME2)
},
#endif /* CONFIG_ARM64_SME */
{
.desc = "WFx with timeout",
.capability = ARM64_HAS_WFXT,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
- .sys_reg = SYS_ID_AA64ISAR2_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64ISAR2_EL1_WFxT_SHIFT,
- .field_width = 4,
.matches = has_cpuid_feature,
- .min_field_value = ID_AA64ISAR2_EL1_WFxT_IMP,
+ ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, WFxT, IMP)
},
{
.desc = "Trap EL0 IMPLEMENTATION DEFINED functionality",
.capability = ARM64_HAS_TIDCP1,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
- .sys_reg = SYS_ID_AA64MMFR1_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64MMFR1_EL1_TIDCP1_SHIFT,
- .field_width = 4,
- .min_field_value = ID_AA64MMFR1_EL1_TIDCP1_IMP,
.matches = has_cpuid_feature,
.cpu_enable = cpu_trap_el0_impdef,
+ ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, TIDCP1, IMP)
},
{
.desc = "Data independent timing control (DIT)",
.capability = ARM64_HAS_DIT,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
- .sys_reg = SYS_ID_AA64PFR0_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64PFR0_EL1_DIT_SHIFT,
- .field_width = 4,
- .min_field_value = ID_AA64PFR0_EL1_DIT_IMP,
.matches = has_cpuid_feature,
.cpu_enable = cpu_enable_dit,
+ ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, DIT, IMP)
},
{},
};
#define HWCAP_CPUID_MATCH(reg, field, min_value) \
- .matches = has_user_cpuid_feature, \
- .sys_reg = SYS_##reg, \
- .field_pos = reg##_##field##_SHIFT, \
- .field_width = reg##_##field##_WIDTH, \
- .sign = reg##_##field##_SIGNED, \
- .min_field_value = reg##_##field##_##min_value,
+ .matches = has_user_cpuid_feature, \
+ ARM64_CPUID_FIELDS(reg, field, min_value)
#define __HWCAP_CAP(name, cap_type, cap) \
.desc = name, \
@@ -2811,26 +2657,26 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
#ifdef CONFIG_ARM64_PTR_AUTH
static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
{
- HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, APA, PAuth)
+ ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, APA, PAuth)
},
{
- HWCAP_CPUID_MATCH(ID_AA64ISAR2_EL1, APA3, PAuth)
+ ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, APA3, PAuth)
},
{
- HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, API, PAuth)
+ ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, API, PAuth)
},
{},
};
static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
{
- HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, GPA, IMP)
+ ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, GPA, IMP)
},
{
- HWCAP_CPUID_MATCH(ID_AA64ISAR2_EL1, GPA3, IMP)
+ ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, GPA3, IMP)
},
{
- HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, GPI, IMP)
+ ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, GPI, IMP)
},
{},
};
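
[Editor's note] The cpufeature.c hunks above collapse the five open-coded match fields (.sys_reg, .sign, .field_pos, .field_width, .min_field_value) into a single ARM64_CPUID_FIELDS() invocation, and HWCAP_CPUID_MATCH() is rebuilt on top of it. The macro definition itself is not part of this portion of the diff; a sketch of what it plausibly expands to, inferred from the fields it replaces, is:

    /* Sketch only; the real definition lives in the arm64 cpufeature header. */
    #define ARM64_CPUID_FIELDS(reg, field, min_value)		\
    	.sys_reg = SYS_##reg,					\
    	.field_pos = reg##_##field##_SHIFT,			\
    	.field_width = reg##_##field##_WIDTH,			\
    	.sign = reg##_##field##_SIGNED,				\
    	.min_field_value = reg##_##field##_##min_value,

Under that reading, ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, TTL, IMP) reproduces the explicit initialisers deleted from the ARMv8.4-TTL entry, with the width and sign now taken from the generated sysreg constants instead of being hard-coded per capability.
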
diff --git a/arch/arm64/kernel/crash_core.c b/arch/arm64/kernel/crash_core.c
index 2b65aae332ce..66cde752cd74 100644
--- a/arch/arm64/kernel/crash_core.c
+++ b/arch/arm64/kernel/crash_core.c
@@ -8,6 +8,7 @@
#include <asm/cpufeature.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
+#include <asm/pointer_auth.h>
static inline u64 get_tcr_el1_t1sz(void);
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 3da09778267e..64f2ecbdfe5c 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -438,6 +438,11 @@ int kernel_active_single_step(void)
}
NOKPROBE_SYMBOL(kernel_active_single_step);
+void kernel_rewind_single_step(struct pt_regs *regs)
+{
+ set_regs_spsr_ss(regs);
+}
+
/* ptrace API */
void user_enable_single_step(struct task_struct *task)
{
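
[Editor's note] kernel_rewind_single_step() gives KGDB a way to re-arm a pending hardware single-step: after a single-step exception PSTATE.SS has already dropped to 0, so it must be set back to 1 in the saved regs before resuming, which is what set_regs_spsr_ss() does. That pre-existing helper is not visible in this hunk; roughly, it amounts to:

    /* Rough sketch of the existing helper used above (not part of this patch). */
    static void set_regs_spsr_ss(struct pt_regs *regs)
    {
    	regs->pstate |= DBG_SPSR_SS;	/* execute exactly one more instruction */
    }

The kgdb.c hunk further down calls the new function when a step is requested while single-step is already active, so the next 's' command actually takes effect.
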
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 350ed81324ac..1c38a60575aa 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -36,6 +36,31 @@
SYM_CODE_START(ftrace_caller)
bti c
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+ /*
+ * The literal pointer to the ops is at an 8-byte aligned boundary
+ * which is either 12 or 16 bytes before the BL instruction in the call
+ * site. See ftrace_call_adjust() for details.
+ *
+ * Therefore here the LR points at `literal + 16` or `literal + 20`,
+ * and we can find the address of the literal in either case by
+ * aligning to an 8-byte boundary and subtracting 16. We do the
+ * alignment first as this allows us to fold the subtraction into the
+ * LDR.
+ */
+ bic x11, x30, 0x7
+ ldr x11, [x11, #-(4 * AARCH64_INSN_SIZE)] // op
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ /*
+ * If the op has a direct call, handle it immediately without
+ * saving/restoring registers.
+ */
+ ldr x17, [x11, #FTRACE_OPS_DIRECT_CALL] // op->direct_call
+ cbnz x17, ftrace_caller_direct
+#endif
+#endif
+
/* Save original SP */
mov x10, sp
@@ -49,6 +74,10 @@ SYM_CODE_START(ftrace_caller)
stp x6, x7, [sp, #FREGS_X6]
str x8, [sp, #FREGS_X8]
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ str xzr, [sp, #FREGS_DIRECT_TRAMP]
+#endif
+
/* Save the callsite's FP, LR, SP */
str x29, [sp, #FREGS_FP]
str x9, [sp, #FREGS_LR]
@@ -71,20 +100,7 @@ SYM_CODE_START(ftrace_caller)
mov x3, sp // regs
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
- /*
- * The literal pointer to the ops is at an 8-byte aligned boundary
- * which is either 12 or 16 bytes before the BL instruction in the call
- * site. See ftrace_call_adjust() for details.
- *
- * Therefore here the LR points at `literal + 16` or `literal + 20`,
- * and we can find the address of the literal in either case by
- * aligning to an 8-byte boundary and subtracting 16. We do the
- * alignment first as this allows us to fold the subtraction into the
- * LDR.
- */
- bic x2, x30, 0x7
- ldr x2, [x2, #-16] // op
-
+ mov x2, x11 // op
ldr x4, [x2, #FTRACE_OPS_FUNC] // op->func
blr x4 // op->func(ip, parent_ip, op, regs)
@@ -107,8 +123,15 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
ldp x6, x7, [sp, #FREGS_X6]
ldr x8, [sp, #FREGS_X8]
- /* Restore the callsite's FP, LR, PC */
+ /* Restore the callsite's FP */
ldr x29, [sp, #FREGS_FP]
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ ldr x17, [sp, #FREGS_DIRECT_TRAMP]
+ cbnz x17, ftrace_caller_direct_late
+#endif
+
+ /* Restore the callsite's LR and PC */
ldr x30, [sp, #FREGS_LR]
ldr x9, [sp, #FREGS_PC]
@@ -116,8 +139,45 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
add sp, sp, #FREGS_SIZE + 32
ret x9
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+SYM_INNER_LABEL(ftrace_caller_direct_late, SYM_L_LOCAL)
+ /*
+ * Head to a direct trampoline in x17 after having run other tracers.
+ * The ftrace_regs are live, and x0-x8 and FP have been restored. The
+ * LR, PC, and SP have not been restored.
+ */
+
+ /*
+ * Restore the callsite's LR and PC matching the trampoline calling
+ * convention.
+ */
+ ldr x9, [sp, #FREGS_LR]
+ ldr x30, [sp, #FREGS_PC]
+
+ /* Restore the callsite's SP */
+ add sp, sp, #FREGS_SIZE + 32
+
+SYM_INNER_LABEL(ftrace_caller_direct, SYM_L_LOCAL)
+ /*
+ * Head to a direct trampoline in x17.
+ *
+ * We use `BR X17` as this can safely land on a `BTI C` or `PACIASP` in
+ * the trampoline, and will not unbalance any return stack.
+ */
+ br x17
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
SYM_CODE_END(ftrace_caller)
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+SYM_CODE_START(ftrace_stub_direct_tramp)
+ bti c
+ mov x10, x30
+ mov x30, x9
+ ret x10
+SYM_CODE_END(ftrace_stub_direct_tramp)
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
#else /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
/*
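
[Editor's note] The entry-ftrace.S changes hoist the ops-literal load to the top of ftrace_caller so that a populated ops->direct_call can be branched to before any register saving, and ftrace_stub_direct_tramp provides a no-op trampoline that restores the original LR from x9 and returns to the call site. The literal lookup keeps the old arithmetic: the LR points 16 or 20 bytes past an 8-byte-aligned literal, so aligning the LR down and stepping back four instructions lands on it. In C terms, the BIC/LDR pair computes (helper name below is illustrative only):

    #define AARCH64_INSN_SIZE	4

    /* Address math performed by "bic x11, x30, 0x7" plus the LDR's -16 offset. */
    static unsigned long ftrace_ops_literal_addr(unsigned long lr)
    {
    	/* lr == literal + 16 or literal + 20: align down, then back 16 bytes */
    	return (lr & ~0x7UL) - 4 * AARCH64_INSN_SIZE;
    }
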
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 9e7e50a0fd76..2fbafa5cc7ac 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -299,7 +299,7 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
/*
* TIF_SME controls whether a task can use SME without trapping while
* in userspace, when TIF_SME is set then we must have storage
- * alocated in sve_state and sme_state to store the contents of both ZA
+ * allocated in sve_state and sme_state to store the contents of both ZA
* and the SVE registers for both streaming and non-streaming modes.
*
* If both SVCR.ZA and SVCR.SM are disabled then at any point we
@@ -1477,7 +1477,7 @@ void do_sve_acc(unsigned long esr, struct pt_regs *regs)
*
* TIF_SME should be clear on entry: otherwise, fpsimd_restore_current_state()
* would have disabled the SME access trap for userspace during
- * ret_to_user, making an SVE access trap impossible in that case.
+ * ret_to_user, making an SME access trap impossible in that case.
*/
void do_sme_acc(unsigned long esr, struct pt_regs *regs)
{
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 5545fe1a9012..432626c866a8 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -195,15 +195,22 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
return ftrace_modify_code(pc, 0, new, false);
}
-static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
+static struct plt_entry *get_ftrace_plt(struct module *mod)
{
#ifdef CONFIG_ARM64_MODULE_PLTS
struct plt_entry *plt = mod->arch.ftrace_trampolines;
- if (addr == FTRACE_ADDR)
- return &plt[FTRACE_PLT_IDX];
-#endif
+ return &plt[FTRACE_PLT_IDX];
+#else
return NULL;
+#endif
+}
+
+static bool reachable_by_bl(unsigned long addr, unsigned long pc)
+{
+ long offset = (long)addr - (long)pc;
+
+ return offset >= -SZ_128M && offset < SZ_128M;
}
/*
@@ -220,14 +227,21 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
unsigned long *addr)
{
unsigned long pc = rec->ip;
- long offset = (long)*addr - (long)pc;
struct plt_entry *plt;
/*
+ * If a custom trampoline is unreachable, rely on the ftrace_caller
+ * trampoline which knows how to indirectly reach that trampoline
+ * through ops->direct_call.
+ */
+ if (*addr != FTRACE_ADDR && !reachable_by_bl(*addr, pc))
+ *addr = FTRACE_ADDR;
+
+ /*
* When the target is within range of the 'BL' instruction, use 'addr'
* as-is and branch to that directly.
*/
- if (offset >= -SZ_128M && offset < SZ_128M)
+ if (reachable_by_bl(*addr, pc))
return true;
/*
@@ -256,7 +270,7 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
if (WARN_ON(!mod))
return false;
- plt = get_ftrace_plt(mod, *addr);
+ plt = get_ftrace_plt(mod);
if (!plt) {
pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
return false;
@@ -330,12 +344,24 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
- if (WARN_ON_ONCE(old_addr != (unsigned long)ftrace_caller))
+ unsigned long pc = rec->ip;
+ u32 old, new;
+ int ret;
+
+ ret = ftrace_rec_set_ops(rec, arm64_rec_get_ops(rec));
+ if (ret)
+ return ret;
+
+ if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
return -EINVAL;
- if (WARN_ON_ONCE(addr != (unsigned long)ftrace_caller))
+ if (!ftrace_find_callable_addr(rec, NULL, &addr))
return -EINVAL;
- return ftrace_rec_update_ops(rec);
+ old = aarch64_insn_gen_branch_imm(pc, old_addr,
+ AARCH64_INSN_BRANCH_LINK);
+ new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
+
+ return ftrace_modify_code(pc, old, new, true);
}
#endif
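
[Editor's note] In ftrace.c, a custom trampoline that cannot be reached directly is now redirected through the common ftrace_caller (which reaches it indirectly via ops->direct_call), and ftrace_modify_call() re-encodes the branch at the call site instead of assuming both the old and new targets are ftrace_caller. The reachability test matches the encoding of an AArch64 BL, whose signed 26-bit word immediate covers 2^27 bytes either way, i.e. 128 MiB. A standalone version of the same check:

    #include <stdbool.h>
    #include <stdint.h>

    #define SZ_128M	(128L * 1024 * 1024)

    /* Same test as reachable_by_bl(): BL's 26-bit word offset spans +/- 128 MiB. */
    static bool in_bl_range(uintptr_t dst, uintptr_t pc)
    {
    	long offset = (long)dst - (long)pc;

    	return offset >= -SZ_128M && offset < SZ_128M;
    }
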
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
index d833d78a7f31..370ab84fd06e 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/idreg-override.c
@@ -167,7 +167,7 @@ static const struct {
} aliases[] __initconst = {
{ "kvm-arm.mode=nvhe", "id_aa64mmfr1.vh=0" },
{ "kvm-arm.mode=protected", "id_aa64mmfr1.vh=0" },
- { "arm64.nosve", "id_aa64pfr0.sve=0 id_aa64pfr1.sme=0" },
+ { "arm64.nosve", "id_aa64pfr0.sve=0" },
{ "arm64.nosme", "id_aa64pfr1.sme=0" },
{ "arm64.nobti", "id_aa64pfr1.bt=0" },
{ "arm64.nopauth",
@@ -178,6 +178,13 @@ static const struct {
{ "nokaslr", "kaslr.disabled=1" },
};
+static int __init parse_nokaslr(char *unused)
+{
+ /* nokaslr param handling is done by early cpufeature code */
+ return 0;
+}
+early_param("nokaslr", parse_nokaslr);
+
static int __init find_field(const char *cmdline,
const struct ftr_set_desc *reg, int f, u64 *v)
{
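
[Editor's note] Two idreg-override.c changes: the "arm64.nosve" alias no longer forces id_aa64pfr1.sme=0, so SVE and SME can be disabled independently on the command line, and a no-op early_param handler is registered for "nokaslr" so the generic parameter code treats it as known while the real work stays in the early cpufeature handling driven by aliases[]. As a rough, self-contained illustration of how such an alias row gets consumed (the helper below is an assumption, not this file's actual parser):

    #include <stddef.h>
    #include <string.h>

    struct alias { const char *alias; const char *feature; };

    /* Illustration only: map a legacy option onto its ID-register override. */
    static const char *lookup_alias(const struct alias *table, size_t n,
    				const char *cmdline)
    {
    	for (size_t i = 0; i < n; i++)
    		if (strstr(cmdline, table[i].alias))
    			return table[i].feature;  /* e.g. "id_aa64pfr0.sve=0" */
    	return NULL;
    }

With the patched table, matching "arm64.nosve" yields only "id_aa64pfr0.sve=0"; SME stays enabled unless "arm64.nosme" is also passed.
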
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index cda9c1e9864f..4e1f983df3d1 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -224,6 +224,8 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
*/
if (!kernel_active_single_step())
kernel_enable_single_step(linux_regs);
+ else
+ kernel_rewind_single_step(linux_regs);
err = 0;
break;
default:
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index ce3d40120f72..078910db77a4 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/page-flags.h>
+#include <linux/reboot.h>
#include <linux/set_memory.h>
#include <linux/smp.h>
@@ -102,7 +103,7 @@ static void kexec_segment_flush(const struct kimage *kimage)
/* Allocates pages for kexec page table */
static void *kexec_page_alloc(void *arg)
{
- struct kimage *kimage = (struct kimage *)arg;
+ struct kimage *kimage = arg;
struct page *page = kimage_alloc_control_pages(kimage, 0);
void *vaddr = NULL;
@@ -268,26 +269,6 @@ void machine_crash_shutdown(struct pt_regs *regs)
pr_info("Starting crashdump kernel...\n");
}
-void arch_kexec_protect_crashkres(void)
-{
- int i;
-
- for (i = 0; i < kexec_crash_image->nr_segments; i++)
- set_memory_valid(
- __phys_to_virt(kexec_crash_image->segment[i].mem),
- kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 0);
-}
-
-void arch_kexec_unprotect_crashkres(void)
-{
- int i;
-
- for (i = 0; i < kexec_crash_image->nr_segments; i++)
- set_memory_valid(
- __phys_to_virt(kexec_crash_image->segment[i].mem),
- kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 1);
-}
-
#ifdef CONFIG_HIBERNATION
/*
* To preserve the crash dump kernel image, the relevant memory segments
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 65b196e3ca6c..6d157f32187b 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -38,7 +38,7 @@ user_backtrace(struct frame_tail __user *tail,
if (err)
return NULL;
- lr = ptrauth_strip_insn_pac(buftail.lr);
+ lr = ptrauth_strip_user_insn_pac(buftail.lr);
perf_callchain_store(entry, lr);
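
[Editor's note] perf_callchain.c now strips the PAC from the sampled user LR with the user-specific helper. Conceptually, stripping a user pointer's PAC clears the signature bits above the user VA range; regardless of how the in-kernel helper implements it, a mask-based illustration with the VA width as an assumed parameter looks like:

    #include <stdint.h>

    /* Illustration only: clear PAC bits above an assumed user VA width. */
    static inline uint64_t strip_user_pac(uint64_t lr, unsigned int va_bits)
    {
    	uint64_t va_mask = (va_bits >= 64) ? ~0ULL : ((1ULL << va_bits) - 1);

    	return lr & va_mask;
    }
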
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
deleted file mode 100644
index dde06c0f97f3..000000000000
--- a/arch/arm64/kernel/perf_event.c
+++ /dev/null
@@ -1,1467 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * ARMv8 PMUv3 Performance Events handling code.
- *
- * Copyright (C) 2012 ARM Limited
- * Author: Will Deacon <will.deacon@arm.com>
- *
- * This code is based heavily on the ARMv7 perf event code.
- */
-
-#include <asm/irq_regs.h>
-#include <asm/perf_event.h>
-#include <asm/sysreg.h>
-#include <asm/virt.h>
-
-#include <clocksource/arm_arch_timer.h>
-
-#include <linux/acpi.h>
-#include <linux/clocksource.h>
-#include <linux/kvm_host.h>
-#include <linux/of.h>
-#include <linux/perf/arm_pmu.h>
-#include <linux/platform_device.h>
-#include <linux/sched_clock.h>
-#include <linux/smp.h>
-
-/* ARMv8 Cortex-A53 specific event types. */
-#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
-
-/* ARMv8 Cavium ThunderX specific event types. */
-#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST 0xE9
-#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS 0xEA
-#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS 0xEB
-#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS 0xEC
-#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS 0xED
-
-/*
- * ARMv8 Architectural defined events, not all of these may
- * be supported on any given implementation. Unsupported events will
- * be disabled at run-time based on the PMCEID registers.
- */
-static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
- PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INST_RETIRED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
-};
-
-static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- PERF_CACHE_MAP_ALL_UNSUPPORTED,
-
- [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
- [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
-
- [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE,
- [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,
-
- [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
- [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB,
-
- [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
- [C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB,
-
- [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD,
- [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_LL_CACHE_RD,
-
- [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
- [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
-};
-
-static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- PERF_CACHE_MAP_ALL_UNSUPPORTED,
-
- [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,
-
- [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
- [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
-};
-
-static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- PERF_CACHE_MAP_ALL_UNSUPPORTED,
-
- [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
- [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
- [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
- [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,
-
- [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
- [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
-
- [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
- [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
-};
-
-static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- PERF_CACHE_MAP_ALL_UNSUPPORTED,
-
- [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
- [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
-};
-
-static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- PERF_CACHE_MAP_ALL_UNSUPPORTED,
-
- [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
- [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
- [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
- [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST,
- [C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
- [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,
-
- [C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
- [C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,
-
- [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
- [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
- [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
- [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
-};
-
-static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- PERF_CACHE_MAP_ALL_UNSUPPORTED,
-
- [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
- [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
- [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
- [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,
-
- [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
- [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
- [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
- [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
-
- [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
- [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
-};
-
-static ssize_t
-armv8pmu_events_sysfs_show(struct device *dev,
- struct device_attribute *attr, char *page)
-{
- struct perf_pmu_events_attr *pmu_attr;
-
- pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
-
- return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
-}
-
-#define ARMV8_EVENT_ATTR(name, config) \
- PMU_EVENT_ATTR_ID(name, armv8pmu_events_sysfs_show, config)
-
-static struct attribute *armv8_pmuv3_event_attrs[] = {
- ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR),
- ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL),
- ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL),
- ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL),
- ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE),
- ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL),
- ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED),
- ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED),
- ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED),
- ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN),
- ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN),
- ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED),
- ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED),
- ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED),
- ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED),
- ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED),
- ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED),
- ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES),
- ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED),
- ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS),
- ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE),
- ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB),
- ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE),
- ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL),
- ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB),
- ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS),
- ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR),
- ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC),
- ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED),
- ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES),
- /* Don't expose the chain event in /sys, since it's useless in isolation */
- ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE),
- ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE),
- ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED),
- ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED),
- ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND),
- ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND),
- ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB),
- ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB),
- ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE),
- ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL),
- ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE),
- ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL),
- ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE),
- ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB),
- ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL),
- ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL),
- ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB),
- ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB),
- ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS),
- ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE),
- ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS),
- ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK),
- ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK),
- ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD),
- ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD),
- ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD),
- ARMV8_EVENT_ATTR(l1d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD),
- ARMV8_EVENT_ATTR(op_retired, ARMV8_PMUV3_PERFCTR_OP_RETIRED),
- ARMV8_EVENT_ATTR(op_spec, ARMV8_PMUV3_PERFCTR_OP_SPEC),
- ARMV8_EVENT_ATTR(stall, ARMV8_PMUV3_PERFCTR_STALL),
- ARMV8_EVENT_ATTR(stall_slot_backend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND),
- ARMV8_EVENT_ATTR(stall_slot_frontend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND),
- ARMV8_EVENT_ATTR(stall_slot, ARMV8_PMUV3_PERFCTR_STALL_SLOT),
- ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP),
- ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED),
- ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE),
- ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION),
- ARMV8_EVENT_ATTR(cnt_cycles, ARMV8_AMU_PERFCTR_CNT_CYCLES),
- ARMV8_EVENT_ATTR(stall_backend_mem, ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM),
- ARMV8_EVENT_ATTR(l1i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS),
- ARMV8_EVENT_ATTR(l2d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD),
- ARMV8_EVENT_ATTR(l2i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS),
- ARMV8_EVENT_ATTR(l3d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD),
- ARMV8_EVENT_ATTR(trb_wrap, ARMV8_PMUV3_PERFCTR_TRB_WRAP),
- ARMV8_EVENT_ATTR(trb_trig, ARMV8_PMUV3_PERFCTR_TRB_TRIG),
- ARMV8_EVENT_ATTR(trcextout0, ARMV8_PMUV3_PERFCTR_TRCEXTOUT0),
- ARMV8_EVENT_ATTR(trcextout1, ARMV8_PMUV3_PERFCTR_TRCEXTOUT1),
- ARMV8_EVENT_ATTR(trcextout2, ARMV8_PMUV3_PERFCTR_TRCEXTOUT2),
- ARMV8_EVENT_ATTR(trcextout3, ARMV8_PMUV3_PERFCTR_TRCEXTOUT3),
- ARMV8_EVENT_ATTR(cti_trigout4, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT4),
- ARMV8_EVENT_ATTR(cti_trigout5, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT5),
- ARMV8_EVENT_ATTR(cti_trigout6, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT6),
- ARMV8_EVENT_ATTR(cti_trigout7, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT7),
- ARMV8_EVENT_ATTR(ldst_align_lat, ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT),
- ARMV8_EVENT_ATTR(ld_align_lat, ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT),
- ARMV8_EVENT_ATTR(st_align_lat, ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT),
- ARMV8_EVENT_ATTR(mem_access_checked, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED),
- ARMV8_EVENT_ATTR(mem_access_checked_rd, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD),
- ARMV8_EVENT_ATTR(mem_access_checked_wr, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR),
- NULL,
-};
-
-static umode_t
-armv8pmu_event_attr_is_visible(struct kobject *kobj,
- struct attribute *attr, int unused)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct pmu *pmu = dev_get_drvdata(dev);
- struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
- struct perf_pmu_events_attr *pmu_attr;
-
- pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
-
- if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
- test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
- return attr->mode;
-
- if (pmu_attr->id >= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE) {
- u64 id = pmu_attr->id - ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;
-
- if (id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
- test_bit(id, cpu_pmu->pmceid_ext_bitmap))
- return attr->mode;
- }
-
- return 0;
-}
-
-static const struct attribute_group armv8_pmuv3_events_attr_group = {
- .name = "events",
- .attrs = armv8_pmuv3_event_attrs,
- .is_visible = armv8pmu_event_attr_is_visible,
-};
-
-PMU_FORMAT_ATTR(event, "config:0-15");
-PMU_FORMAT_ATTR(long, "config1:0");
-PMU_FORMAT_ATTR(rdpmc, "config1:1");
-
-static int sysctl_perf_user_access __read_mostly;
-
-static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
-{
- return event->attr.config1 & 0x1;
-}
-
-static inline bool armv8pmu_event_want_user_access(struct perf_event *event)
-{
- return event->attr.config1 & 0x2;
-}
-
-static struct attribute *armv8_pmuv3_format_attrs[] = {
- &format_attr_event.attr,
- &format_attr_long.attr,
- &format_attr_rdpmc.attr,
- NULL,
-};
-
-static const struct attribute_group armv8_pmuv3_format_attr_group = {
- .name = "format",
- .attrs = armv8_pmuv3_format_attrs,
-};
-
-static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
- char *page)
-{
- struct pmu *pmu = dev_get_drvdata(dev);
- struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
- u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK;
-
- return sysfs_emit(page, "0x%08x\n", slots);
-}
-
-static DEVICE_ATTR_RO(slots);
-
-static ssize_t bus_slots_show(struct device *dev, struct device_attribute *attr,
- char *page)
-{
- struct pmu *pmu = dev_get_drvdata(dev);
- struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
- u32 bus_slots = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_SLOTS_SHIFT)
- & ARMV8_PMU_BUS_SLOTS_MASK;
-
- return sysfs_emit(page, "0x%08x\n", bus_slots);
-}
-
-static DEVICE_ATTR_RO(bus_slots);
-
-static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr,
- char *page)
-{
- struct pmu *pmu = dev_get_drvdata(dev);
- struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
- u32 bus_width = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_WIDTH_SHIFT)
- & ARMV8_PMU_BUS_WIDTH_MASK;
- u32 val = 0;
-
- /* Encoded as Log2(number of bytes), plus one */
- if (bus_width > 2 && bus_width < 13)
- val = 1 << (bus_width - 1);
-
- return sysfs_emit(page, "0x%08x\n", val);
-}
-
-static DEVICE_ATTR_RO(bus_width);
-
-static struct attribute *armv8_pmuv3_caps_attrs[] = {
- &dev_attr_slots.attr,
- &dev_attr_bus_slots.attr,
- &dev_attr_bus_width.attr,
- NULL,
-};
-
-static const struct attribute_group armv8_pmuv3_caps_attr_group = {
- .name = "caps",
- .attrs = armv8_pmuv3_caps_attrs,
-};
-
-/*
- * Perf Events' indices
- */
-#define ARMV8_IDX_CYCLE_COUNTER 0
-#define ARMV8_IDX_COUNTER0 1
-#define ARMV8_IDX_CYCLE_COUNTER_USER 32
-
-/*
- * We unconditionally enable ARMv8.5-PMU long event counter support
- * (64-bit events) where supported. Indicate if this arm_pmu has long
- * event counter support.
- */
-static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
-{
- return (cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5);
-}
-
-static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
-{
- return event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT;
-}
-
-/*
- * We must chain two programmable counters for 64 bit events,
- * except when we have allocated the 64bit cycle counter (for CPU
- * cycles event) or when user space counter access is enabled.
- */
-static inline bool armv8pmu_event_is_chained(struct perf_event *event)
-{
- int idx = event->hw.idx;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
-
- return !armv8pmu_event_has_user_read(event) &&
- armv8pmu_event_is_64bit(event) &&
- !armv8pmu_has_long_event(cpu_pmu) &&
- (idx != ARMV8_IDX_CYCLE_COUNTER);
-}
-
-/*
- * ARMv8 low level PMU access
- */
-
-/*
- * Perf Event to low level counters mapping
- */
-#define ARMV8_IDX_TO_COUNTER(x) \
- (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
-
-/*
- * This code is really good
- */
-
-#define PMEVN_CASE(n, case_macro) \
- case n: case_macro(n); break
-
-#define PMEVN_SWITCH(x, case_macro) \
- do { \
- switch (x) { \
- PMEVN_CASE(0, case_macro); \
- PMEVN_CASE(1, case_macro); \
- PMEVN_CASE(2, case_macro); \
- PMEVN_CASE(3, case_macro); \
- PMEVN_CASE(4, case_macro); \
- PMEVN_CASE(5, case_macro); \
- PMEVN_CASE(6, case_macro); \
- PMEVN_CASE(7, case_macro); \
- PMEVN_CASE(8, case_macro); \
- PMEVN_CASE(9, case_macro); \
- PMEVN_CASE(10, case_macro); \
- PMEVN_CASE(11, case_macro); \
- PMEVN_CASE(12, case_macro); \
- PMEVN_CASE(13, case_macro); \
- PMEVN_CASE(14, case_macro); \
- PMEVN_CASE(15, case_macro); \
- PMEVN_CASE(16, case_macro); \
- PMEVN_CASE(17, case_macro); \
- PMEVN_CASE(18, case_macro); \
- PMEVN_CASE(19, case_macro); \
- PMEVN_CASE(20, case_macro); \
- PMEVN_CASE(21, case_macro); \
- PMEVN_CASE(22, case_macro); \
- PMEVN_CASE(23, case_macro); \
- PMEVN_CASE(24, case_macro); \
- PMEVN_CASE(25, case_macro); \
- PMEVN_CASE(26, case_macro); \
- PMEVN_CASE(27, case_macro); \
- PMEVN_CASE(28, case_macro); \
- PMEVN_CASE(29, case_macro); \
- PMEVN_CASE(30, case_macro); \
- default: WARN(1, "Invalid PMEV* index\n"); \
- } \
- } while (0)
-
-#define RETURN_READ_PMEVCNTRN(n) \
- return read_sysreg(pmevcntr##n##_el0)
-static unsigned long read_pmevcntrn(int n)
-{
- PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
- return 0;
-}
-
-#define WRITE_PMEVCNTRN(n) \
- write_sysreg(val, pmevcntr##n##_el0)
-static void write_pmevcntrn(int n, unsigned long val)
-{
- PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
-}
-
-#define WRITE_PMEVTYPERN(n) \
- write_sysreg(val, pmevtyper##n##_el0)
-static void write_pmevtypern(int n, unsigned long val)
-{
- PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
-}
-
-static inline u32 armv8pmu_pmcr_read(void)
-{
- return read_sysreg(pmcr_el0);
-}
-
-static inline void armv8pmu_pmcr_write(u32 val)
-{
- val &= ARMV8_PMU_PMCR_MASK;
- isb();
- write_sysreg(val, pmcr_el0);
-}
-
-static inline int armv8pmu_has_overflowed(u32 pmovsr)
-{
- return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
-}
-
-static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
-{
- return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
-}
-
-static inline u64 armv8pmu_read_evcntr(int idx)
-{
- u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-
- return read_pmevcntrn(counter);
-}
-
-static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
-{
- int idx = event->hw.idx;
- u64 val = armv8pmu_read_evcntr(idx);
-
- if (armv8pmu_event_is_chained(event))
- val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
- return val;
-}
-
-/*
- * The cycle counter is always a 64-bit counter. When ARMV8_PMU_PMCR_LP
- * is set the event counters also become 64-bit counters. Unless the
- * user has requested a long counter (attr.config1) then we want to
- * interrupt upon 32-bit overflow - we achieve this by applying a bias.
- */
-static bool armv8pmu_event_needs_bias(struct perf_event *event)
-{
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
-
- if (armv8pmu_event_is_64bit(event))
- return false;
-
- if (armv8pmu_has_long_event(cpu_pmu) ||
- idx == ARMV8_IDX_CYCLE_COUNTER)
- return true;
-
- return false;
-}
-
-static u64 armv8pmu_bias_long_counter(struct perf_event *event, u64 value)
-{
- if (armv8pmu_event_needs_bias(event))
- value |= GENMASK(63, 32);
-
- return value;
-}
-
-static u64 armv8pmu_unbias_long_counter(struct perf_event *event, u64 value)
-{
- if (armv8pmu_event_needs_bias(event))
- value &= ~GENMASK(63, 32);
-
- return value;
-}
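
[Editor's note] An aside on the bias logic in the deleted armv8pmu code above: when a counter is architecturally 64-bit but the event did not request 64-bit behaviour, the upper half is preloaded with ones so the overflow interrupt still fires after 2^32 increments. A small worked example of the bias/unbias round trip:

    #include <assert.h>
    #include <stdint.h>

    #define BIAS	(~0ULL << 32)	/* GENMASK(63, 32) */

    int main(void)
    {
    	uint64_t programmed = 0;
    	uint64_t stored = programmed | BIAS;	/* bias_long_counter()   */

    	/* 0 is stored as 0xffffffff00000000, overflowing after 2^32 events */
    	assert(stored == 0xffffffff00000000ULL);
    	assert((stored & ~BIAS) == programmed);	/* unbias_long_counter() */
    	return 0;
    }
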
-
-static u64 armv8pmu_read_counter(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
- u64 value;
-
- if (idx == ARMV8_IDX_CYCLE_COUNTER)
- value = read_sysreg(pmccntr_el0);
- else
- value = armv8pmu_read_hw_counter(event);
-
- return armv8pmu_unbias_long_counter(event, value);
-}
-
-static inline void armv8pmu_write_evcntr(int idx, u64 value)
-{
- u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-
- write_pmevcntrn(counter, value);
-}
-
-static inline void armv8pmu_write_hw_counter(struct perf_event *event,
- u64 value)
-{
- int idx = event->hw.idx;
-
- if (armv8pmu_event_is_chained(event)) {
- armv8pmu_write_evcntr(idx, upper_32_bits(value));
- armv8pmu_write_evcntr(idx - 1, lower_32_bits(value));
- } else {
- armv8pmu_write_evcntr(idx, value);
- }
-}
-
-static void armv8pmu_write_counter(struct perf_event *event, u64 value)
-{
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
-
- value = armv8pmu_bias_long_counter(event, value);
-
- if (idx == ARMV8_IDX_CYCLE_COUNTER)
- write_sysreg(value, pmccntr_el0);
- else
- armv8pmu_write_hw_counter(event, value);
-}
-
-static inline void armv8pmu_write_evtype(int idx, u32 val)
-{
- u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-
- val &= ARMV8_PMU_EVTYPE_MASK;
- write_pmevtypern(counter, val);
-}
-
-static inline void armv8pmu_write_event_type(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
-
- /*
- * For chained events, the low counter is programmed to count
- * the event of interest and the high counter is programmed
- * with CHAIN event code with filters set to count at all ELs.
- */
- if (armv8pmu_event_is_chained(event)) {
- u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN |
- ARMV8_PMU_INCLUDE_EL2;
-
- armv8pmu_write_evtype(idx - 1, hwc->config_base);
- armv8pmu_write_evtype(idx, chain_evt);
- } else {
- if (idx == ARMV8_IDX_CYCLE_COUNTER)
- write_sysreg(hwc->config_base, pmccfiltr_el0);
- else
- armv8pmu_write_evtype(idx, hwc->config_base);
- }
-}
-
-static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
-{
- int counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
- u32 mask = BIT(counter);
-
- if (armv8pmu_event_is_chained(event))
- mask |= BIT(counter - 1);
- return mask;
-}
-
-static inline void armv8pmu_enable_counter(u32 mask)
-{
- /*
- * Make sure event configuration register writes are visible before we
- * enable the counter.
- * */
- isb();
- write_sysreg(mask, pmcntenset_el0);
-}
-
-static inline void armv8pmu_enable_event_counter(struct perf_event *event)
-{
- struct perf_event_attr *attr = &event->attr;
- u32 mask = armv8pmu_event_cnten_mask(event);
-
- kvm_set_pmu_events(mask, attr);
-
- /* We rely on the hypervisor switch code to enable guest counters */
- if (!kvm_pmu_counter_deferred(attr))
- armv8pmu_enable_counter(mask);
-}
-
-static inline void armv8pmu_disable_counter(u32 mask)
-{
- write_sysreg(mask, pmcntenclr_el0);
- /*
- * Make sure the effects of disabling the counter are visible before we
- * start configuring the event.
- */
- isb();
-}
-
-static inline void armv8pmu_disable_event_counter(struct perf_event *event)
-{
- struct perf_event_attr *attr = &event->attr;
- u32 mask = armv8pmu_event_cnten_mask(event);
-
- kvm_clr_pmu_events(mask);
-
- /* We rely on the hypervisor switch code to disable guest counters */
- if (!kvm_pmu_counter_deferred(attr))
- armv8pmu_disable_counter(mask);
-}
-
-static inline void armv8pmu_enable_intens(u32 mask)
-{
- write_sysreg(mask, pmintenset_el1);
-}
-
-static inline void armv8pmu_enable_event_irq(struct perf_event *event)
-{
- u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
- armv8pmu_enable_intens(BIT(counter));
-}
-
-static inline void armv8pmu_disable_intens(u32 mask)
-{
- write_sysreg(mask, pmintenclr_el1);
- isb();
- /* Clear the overflow flag in case an interrupt is pending. */
- write_sysreg(mask, pmovsclr_el0);
- isb();
-}
-
-static inline void armv8pmu_disable_event_irq(struct perf_event *event)
-{
- u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
- armv8pmu_disable_intens(BIT(counter));
-}
-
-static inline u32 armv8pmu_getreset_flags(void)
-{
- u32 value;
-
- /* Read */
- value = read_sysreg(pmovsclr_el0);
-
- /* Write to clear flags */
- value &= ARMV8_PMU_OVSR_MASK;
- write_sysreg(value, pmovsclr_el0);
-
- return value;
-}
-
-static void armv8pmu_disable_user_access(void)
-{
- write_sysreg(0, pmuserenr_el0);
-}
-
-static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
-{
- int i;
- struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
-
- /* Clear any unused counters to avoid leaking their contents */
- for_each_clear_bit(i, cpuc->used_mask, cpu_pmu->num_events) {
- if (i == ARMV8_IDX_CYCLE_COUNTER)
- write_sysreg(0, pmccntr_el0);
- else
- armv8pmu_write_evcntr(i, 0);
- }
-
- write_sysreg(0, pmuserenr_el0);
- write_sysreg(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR, pmuserenr_el0);
-}
-
-static void armv8pmu_enable_event(struct perf_event *event)
-{
- /*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
-
- /*
- * Disable counter
- */
- armv8pmu_disable_event_counter(event);
-
- /*
- * Set event.
- */
- armv8pmu_write_event_type(event);
-
- /*
- * Enable interrupt for this counter
- */
- armv8pmu_enable_event_irq(event);
-
- /*
- * Enable counter
- */
- armv8pmu_enable_event_counter(event);
-}
-
-static void armv8pmu_disable_event(struct perf_event *event)
-{
- /*
- * Disable counter
- */
- armv8pmu_disable_event_counter(event);
-
- /*
- * Disable interrupt for this counter
- */
- armv8pmu_disable_event_irq(event);
-}
-
-static void armv8pmu_start(struct arm_pmu *cpu_pmu)
-{
- struct perf_event_context *ctx;
- int nr_user = 0;
-
- ctx = perf_cpu_task_ctx();
- if (ctx)
- nr_user = ctx->nr_user;
-
- if (sysctl_perf_user_access && nr_user)
- armv8pmu_enable_user_access(cpu_pmu);
- else
- armv8pmu_disable_user_access();
-
- /* Enable all counters */
- armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
-}
-
-static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
-{
- /* Disable all counters */
- armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
-}
-
-static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
-{
- u32 pmovsr;
- struct perf_sample_data data;
- struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
- struct pt_regs *regs;
- int idx;
-
- /*
- * Get and reset the IRQ flags
- */
- pmovsr = armv8pmu_getreset_flags();
-
- /*
- * Did an overflow occur?
- */
- if (!armv8pmu_has_overflowed(pmovsr))
- return IRQ_NONE;
-
- /*
- * Handle the counter(s) overflow(s)
- */
- regs = get_irq_regs();
-
- /*
- * Stop the PMU while processing the counter overflows
- * to prevent skews in group events.
- */
- armv8pmu_stop(cpu_pmu);
- for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
- struct perf_event *event = cpuc->events[idx];
- struct hw_perf_event *hwc;
-
- /* Ignore if we don't have an event. */
- if (!event)
- continue;
-
- /*
- * We have a single interrupt for all counters. Check that
- * each counter has overflowed before we process it.
- */
- if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
- continue;
-
- hwc = &event->hw;
- armpmu_event_update(event);
- perf_sample_data_init(&data, 0, hwc->last_period);
- if (!armpmu_event_set_period(event))
- continue;
-
- /*
- * Perf event overflow will queue the processing of the event as
- * an irq_work which will be taken care of in the handling of
- * IPI_IRQ_WORK.
- */
- if (perf_event_overflow(event, &data, regs))
- cpu_pmu->disable(event);
- }
- armv8pmu_start(cpu_pmu);
-
- return IRQ_HANDLED;
-}
-
-static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
- struct arm_pmu *cpu_pmu)
-{
- int idx;
-
- for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) {
- if (!test_and_set_bit(idx, cpuc->used_mask))
- return idx;
- }
- return -EAGAIN;
-}
-
-static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
- struct arm_pmu *cpu_pmu)
-{
- int idx;
-
- /*
- * Chaining requires two consecutive event counters, where
- * the lower idx must be even.
- */
- for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
- if (!test_and_set_bit(idx, cpuc->used_mask)) {
- /* Check if the preceding even counter is available */
- if (!test_and_set_bit(idx - 1, cpuc->used_mask))
- return idx;
- /* Release the Odd counter */
- clear_bit(idx, cpuc->used_mask);
- }
- }
- return -EAGAIN;
-}
-
-static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
- struct perf_event *event)
-{
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct hw_perf_event *hwc = &event->hw;
- unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
-
- /* Always prefer to place a cycle counter into the cycle counter. */
- if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
- if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
- return ARMV8_IDX_CYCLE_COUNTER;
- else if (armv8pmu_event_is_64bit(event) &&
- armv8pmu_event_want_user_access(event) &&
- !armv8pmu_has_long_event(cpu_pmu))
- return -EAGAIN;
- }
-
- /*
- * Otherwise use events counters
- */
- if (armv8pmu_event_is_chained(event))
- return armv8pmu_get_chain_idx(cpuc, cpu_pmu);
- else
- return armv8pmu_get_single_idx(cpuc, cpu_pmu);
-}
-
-static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
- struct perf_event *event)
-{
- int idx = event->hw.idx;
-
- clear_bit(idx, cpuc->used_mask);
- if (armv8pmu_event_is_chained(event))
- clear_bit(idx - 1, cpuc->used_mask);
-}
-
-static int armv8pmu_user_event_idx(struct perf_event *event)
-{
- if (!sysctl_perf_user_access || !armv8pmu_event_has_user_read(event))
- return 0;
-
- /*
- * We remap the cycle counter index to 32 to
- * match the offset applied to the rest of
- * the counter indices.
- */
- if (event->hw.idx == ARMV8_IDX_CYCLE_COUNTER)
- return ARMV8_IDX_CYCLE_COUNTER_USER;
-
- return event->hw.idx;
-}
-
-/*
- * Add an event filter to a given event.
- */
-static int armv8pmu_set_event_filter(struct hw_perf_event *event,
- struct perf_event_attr *attr)
-{
- unsigned long config_base = 0;
-
- if (attr->exclude_idle)
- return -EPERM;
-
- /*
- * If we're running in hyp mode, then we *are* the hypervisor.
- * Therefore we ignore exclude_hv in this configuration, since
- * there's no hypervisor to sample anyway. This is consistent
- * with other architectures (x86 and Power).
- */
- if (is_kernel_in_hyp_mode()) {
- if (!attr->exclude_kernel && !attr->exclude_host)
- config_base |= ARMV8_PMU_INCLUDE_EL2;
- if (attr->exclude_guest)
- config_base |= ARMV8_PMU_EXCLUDE_EL1;
- if (attr->exclude_host)
- config_base |= ARMV8_PMU_EXCLUDE_EL0;
- } else {
- if (!attr->exclude_hv && !attr->exclude_host)
- config_base |= ARMV8_PMU_INCLUDE_EL2;
- }
-
- /*
- * Filter out !VHE kernels and guest kernels
- */
- if (attr->exclude_kernel)
- config_base |= ARMV8_PMU_EXCLUDE_EL1;
-
- if (attr->exclude_user)
- config_base |= ARMV8_PMU_EXCLUDE_EL0;
-
- /*
- * Install the filter into config_base as this is used to
- * construct the event type.
- */
- event->config_base = config_base;
-
- return 0;
-}
-
-static void armv8pmu_reset(void *info)
-{
- struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
- u32 pmcr;
-
- /* The counter and interrupt enable registers are unknown at reset. */
- armv8pmu_disable_counter(U32_MAX);
- armv8pmu_disable_intens(U32_MAX);
-
- /* Clear the counters we flip at guest entry/exit */
- kvm_clr_pmu_events(U32_MAX);
-
- /*
- * Initialize & Reset PMNC. Request overflow interrupt for
- * 64 bit cycle counter but cheat in armv8pmu_write_counter().
- */
- pmcr = ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_LC;
-
- /* Enable long event counter support where available */
- if (armv8pmu_has_long_event(cpu_pmu))
- pmcr |= ARMV8_PMU_PMCR_LP;
-
- armv8pmu_pmcr_write(pmcr);
-}
-
-static int __armv8_pmuv3_map_event(struct perf_event *event,
- const unsigned (*extra_event_map)
- [PERF_COUNT_HW_MAX],
- const unsigned (*extra_cache_map)
- [PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX])
-{
- int hw_event_id;
- struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-
- hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
- &armv8_pmuv3_perf_cache_map,
- ARMV8_PMU_EVTYPE_EVENT);
-
- /*
- * CHAIN events only work when paired with an adjacent counter, and it
- * never makes sense for a user to open one in isolation, as they'll be
- * rotated arbitrarily.
- */
- if (hw_event_id == ARMV8_PMUV3_PERFCTR_CHAIN)
- return -EINVAL;
-
- if (armv8pmu_event_is_64bit(event))
- event->hw.flags |= ARMPMU_EVT_64BIT;
-
- /*
- * User events must be allocated into a single counter, and so
- * must not be chained.
- *
- * Most 64-bit events require long counter support, but 64-bit
- * CPU_CYCLES events can be placed into the dedicated cycle
- * counter when this is free.
- */
- if (armv8pmu_event_want_user_access(event)) {
- if (!(event->attach_state & PERF_ATTACH_TASK))
- return -EINVAL;
- if (armv8pmu_event_is_64bit(event) &&
- (hw_event_id != ARMV8_PMUV3_PERFCTR_CPU_CYCLES) &&
- !armv8pmu_has_long_event(armpmu))
- return -EOPNOTSUPP;
-
- event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;
- }
-
- /* Only expose micro/arch events supported by this PMU */
- if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
- && test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
- return hw_event_id;
- }
-
- return armpmu_map_event(event, extra_event_map, extra_cache_map,
- ARMV8_PMU_EVTYPE_EVENT);
-}
-
-static int armv8_pmuv3_map_event(struct perf_event *event)
-{
- return __armv8_pmuv3_map_event(event, NULL, NULL);
-}
-
-static int armv8_a53_map_event(struct perf_event *event)
-{
- return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map);
-}
-
-static int armv8_a57_map_event(struct perf_event *event)
-{
- return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map);
-}
-
-static int armv8_a73_map_event(struct perf_event *event)
-{
- return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map);
-}
-
-static int armv8_thunder_map_event(struct perf_event *event)
-{
- return __armv8_pmuv3_map_event(event, NULL,
- &armv8_thunder_perf_cache_map);
-}
-
-static int armv8_vulcan_map_event(struct perf_event *event)
-{
- return __armv8_pmuv3_map_event(event, NULL,
- &armv8_vulcan_perf_cache_map);
-}
-
-struct armv8pmu_probe_info {
- struct arm_pmu *pmu;
- bool present;
-};
-
-static void __armv8pmu_probe_pmu(void *info)
-{
- struct armv8pmu_probe_info *probe = info;
- struct arm_pmu *cpu_pmu = probe->pmu;
- u64 dfr0;
- u64 pmceid_raw[2];
- u32 pmceid[2];
- int pmuver;
-
- dfr0 = read_sysreg(id_aa64dfr0_el1);
- pmuver = cpuid_feature_extract_unsigned_field(dfr0,
- ID_AA64DFR0_EL1_PMUVer_SHIFT);
- if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF ||
- pmuver == ID_AA64DFR0_EL1_PMUVer_NI)
- return;
-
- cpu_pmu->pmuver = pmuver;
- probe->present = true;
-
- /* Read the nb of CNTx counters supported from PMNC */
- cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
- & ARMV8_PMU_PMCR_N_MASK;
-
- /* Add the CPU cycles counter */
- cpu_pmu->num_events += 1;
-
- pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0);
- pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0);
-
- bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
- pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
-
- pmceid[0] = pmceid_raw[0] >> 32;
- pmceid[1] = pmceid_raw[1] >> 32;
-
- bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
- pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
-
- /* store PMMIR_EL1 register for sysfs */
- if (pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4 && (pmceid_raw[1] & BIT(31)))
- cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1);
- else
- cpu_pmu->reg_pmmir = 0;
-}
-
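cpuid_feature_extract_unsigned_field() above boils down to pulling one 4-bit ID register field out of a 64-bit value. A rough standalone illustration of that arithmetic follows; the shift constant and the sample register value are stand-ins for illustration, not the generated ID_AA64DFR0_EL1_PMUVer_SHIFT definition.

#include <stdint.h>
#include <stdio.h>

#define FIELD_WIDTH	4	/* ID register fields are 4 bits wide */
#define PMUVER_SHIFT	8	/* stand-in for ID_AA64DFR0_EL1_PMUVer_SHIFT */

static unsigned int extract_unsigned_field(uint64_t reg, unsigned int shift)
{
	return (reg >> shift) & ((1u << FIELD_WIDTH) - 1);
}

int main(void)
{
	uint64_t dfr0 = 0x610;	/* made-up ID_AA64DFR0_EL1 value */

	printf("PMUVer = %u\n", extract_unsigned_field(dfr0, PMUVER_SHIFT));
	return 0;
}
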
-static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
-{
- struct armv8pmu_probe_info probe = {
- .pmu = cpu_pmu,
- .present = false,
- };
- int ret;
-
- ret = smp_call_function_any(&cpu_pmu->supported_cpus,
- __armv8pmu_probe_pmu,
- &probe, 1);
- if (ret)
- return ret;
-
- return probe.present ? 0 : -ENODEV;
-}
-
-static void armv8pmu_disable_user_access_ipi(void *unused)
-{
- armv8pmu_disable_user_access();
-}
-
-static int armv8pmu_proc_user_access_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
-{
- int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
- if (ret || !write || sysctl_perf_user_access)
- return ret;
-
- on_each_cpu(armv8pmu_disable_user_access_ipi, NULL, 1);
- return 0;
-}
-
-static struct ctl_table armv8_pmu_sysctl_table[] = {
- {
- .procname = "perf_user_access",
- .data = &sysctl_perf_user_access,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = armv8pmu_proc_user_access_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
- { }
-};
-
-static void armv8_pmu_register_sysctl_table(void)
-{
- static u32 tbl_registered = 0;
-
- if (!cmpxchg_relaxed(&tbl_registered, 0, 1))
- register_sysctl("kernel", armv8_pmu_sysctl_table);
-}
-
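The cmpxchg_relaxed() guard in armv8_pmu_register_sysctl_table() makes the sysctl registration a one-shot operation even when several PMUs initialise. The same "first caller wins" idiom is sketched below in portable C, using a compiler atomic builtin that returns a success flag where cmpxchg_relaxed() returns the previous value.

#include <stdbool.h>
#include <stdio.h>

static unsigned int tbl_registered;

/* Stand-in for register_sysctl("kernel", armv8_pmu_sysctl_table). */
static void register_table(void)
{
	puts("sysctl table registered");
}

static void register_table_once(void)
{
	unsigned int expected = 0;

	/* Only the caller that flips 0 -> 1 performs the registration. */
	if (__atomic_compare_exchange_n(&tbl_registered, &expected, 1, false,
					__ATOMIC_RELAXED, __ATOMIC_RELAXED))
		register_table();
}

int main(void)
{
	register_table_once();
	register_table_once();	/* no-op: already registered */
	return 0;
}
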
-static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
- int (*map_event)(struct perf_event *event),
- const struct attribute_group *events,
- const struct attribute_group *format,
- const struct attribute_group *caps)
-{
- int ret = armv8pmu_probe_pmu(cpu_pmu);
- if (ret)
- return ret;
-
- cpu_pmu->handle_irq = armv8pmu_handle_irq;
- cpu_pmu->enable = armv8pmu_enable_event;
- cpu_pmu->disable = armv8pmu_disable_event;
- cpu_pmu->read_counter = armv8pmu_read_counter;
- cpu_pmu->write_counter = armv8pmu_write_counter;
- cpu_pmu->get_event_idx = armv8pmu_get_event_idx;
- cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx;
- cpu_pmu->start = armv8pmu_start;
- cpu_pmu->stop = armv8pmu_stop;
- cpu_pmu->reset = armv8pmu_reset;
- cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
-
- cpu_pmu->pmu.event_idx = armv8pmu_user_event_idx;
-
- cpu_pmu->name = name;
- cpu_pmu->map_event = map_event;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = events ?
- events : &armv8_pmuv3_events_attr_group;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = format ?
- format : &armv8_pmuv3_format_attr_group;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_CAPS] = caps ?
- caps : &armv8_pmuv3_caps_attr_group;
-
- armv8_pmu_register_sysctl_table();
- return 0;
-}
-
-static int armv8_pmu_init_nogroups(struct arm_pmu *cpu_pmu, char *name,
- int (*map_event)(struct perf_event *event))
-{
- return armv8_pmu_init(cpu_pmu, name, map_event, NULL, NULL, NULL);
-}
-
-#define PMUV3_INIT_SIMPLE(name) \
-static int name##_pmu_init(struct arm_pmu *cpu_pmu) \
-{ \
- return armv8_pmu_init_nogroups(cpu_pmu, #name, armv8_pmuv3_map_event);\
-}
-
-PMUV3_INIT_SIMPLE(armv8_pmuv3)
-
-PMUV3_INIT_SIMPLE(armv8_cortex_a34)
-PMUV3_INIT_SIMPLE(armv8_cortex_a55)
-PMUV3_INIT_SIMPLE(armv8_cortex_a65)
-PMUV3_INIT_SIMPLE(armv8_cortex_a75)
-PMUV3_INIT_SIMPLE(armv8_cortex_a76)
-PMUV3_INIT_SIMPLE(armv8_cortex_a77)
-PMUV3_INIT_SIMPLE(armv8_cortex_a78)
-PMUV3_INIT_SIMPLE(armv9_cortex_a510)
-PMUV3_INIT_SIMPLE(armv9_cortex_a710)
-PMUV3_INIT_SIMPLE(armv8_cortex_x1)
-PMUV3_INIT_SIMPLE(armv9_cortex_x2)
-PMUV3_INIT_SIMPLE(armv8_neoverse_e1)
-PMUV3_INIT_SIMPLE(armv8_neoverse_n1)
-PMUV3_INIT_SIMPLE(armv9_neoverse_n2)
-PMUV3_INIT_SIMPLE(armv8_neoverse_v1)
-
-PMUV3_INIT_SIMPLE(armv8_nvidia_carmel)
-PMUV3_INIT_SIMPLE(armv8_nvidia_denver)
-
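For reference, each PMUV3_INIT_SIMPLE() line above expands to a small wrapper; PMUV3_INIT_SIMPLE(armv8_pmuv3), for instance, produces roughly:

static int armv8_pmuv3_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_pmuv3",
				       armv8_pmuv3_map_event);
}

which is why the of_device_id table further down can reference armv8_pmuv3_pmu_init and friends directly.
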
-static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a35",
- armv8_a53_map_event);
-}
-
-static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a53",
- armv8_a53_map_event);
-}
-
-static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a57",
- armv8_a57_map_event);
-}
-
-static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a72",
- armv8_a57_map_event);
-}
-
-static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a73",
- armv8_a73_map_event);
-}
-
-static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cavium_thunder",
- armv8_thunder_map_event);
-}
-
-static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_brcm_vulcan",
- armv8_vulcan_map_event);
-}
-
-static const struct of_device_id armv8_pmu_of_device_ids[] = {
- {.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_pmu_init},
- {.compatible = "arm,cortex-a34-pmu", .data = armv8_cortex_a34_pmu_init},
- {.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init},
- {.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init},
- {.compatible = "arm,cortex-a55-pmu", .data = armv8_cortex_a55_pmu_init},
- {.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init},
- {.compatible = "arm,cortex-a65-pmu", .data = armv8_cortex_a65_pmu_init},
- {.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init},
- {.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init},
- {.compatible = "arm,cortex-a75-pmu", .data = armv8_cortex_a75_pmu_init},
- {.compatible = "arm,cortex-a76-pmu", .data = armv8_cortex_a76_pmu_init},
- {.compatible = "arm,cortex-a77-pmu", .data = armv8_cortex_a77_pmu_init},
- {.compatible = "arm,cortex-a78-pmu", .data = armv8_cortex_a78_pmu_init},
- {.compatible = "arm,cortex-a510-pmu", .data = armv9_cortex_a510_pmu_init},
- {.compatible = "arm,cortex-a710-pmu", .data = armv9_cortex_a710_pmu_init},
- {.compatible = "arm,cortex-x1-pmu", .data = armv8_cortex_x1_pmu_init},
- {.compatible = "arm,cortex-x2-pmu", .data = armv9_cortex_x2_pmu_init},
- {.compatible = "arm,neoverse-e1-pmu", .data = armv8_neoverse_e1_pmu_init},
- {.compatible = "arm,neoverse-n1-pmu", .data = armv8_neoverse_n1_pmu_init},
- {.compatible = "arm,neoverse-n2-pmu", .data = armv9_neoverse_n2_pmu_init},
- {.compatible = "arm,neoverse-v1-pmu", .data = armv8_neoverse_v1_pmu_init},
- {.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init},
- {.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init},
- {.compatible = "nvidia,carmel-pmu", .data = armv8_nvidia_carmel_pmu_init},
- {.compatible = "nvidia,denver-pmu", .data = armv8_nvidia_denver_pmu_init},
- {},
-};
-
-static int armv8_pmu_device_probe(struct platform_device *pdev)
-{
- return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
-}
-
-static struct platform_driver armv8_pmu_driver = {
- .driver = {
- .name = ARMV8_PMU_PDEV_NAME,
- .of_match_table = armv8_pmu_of_device_ids,
- .suppress_bind_attrs = true,
- },
- .probe = armv8_pmu_device_probe,
-};
-
-static int __init armv8_pmu_driver_init(void)
-{
- if (acpi_disabled)
- return platform_driver_register(&armv8_pmu_driver);
- else
- return arm_pmu_acpi_probe(armv8_pmuv3_pmu_init);
-}
-device_initcall(armv8_pmu_driver_init)
-
-void arch_perf_update_userpage(struct perf_event *event,
- struct perf_event_mmap_page *userpg, u64 now)
-{
- struct clock_read_data *rd;
- unsigned int seq;
- u64 ns;
-
- userpg->cap_user_time = 0;
- userpg->cap_user_time_zero = 0;
- userpg->cap_user_time_short = 0;
- userpg->cap_user_rdpmc = armv8pmu_event_has_user_read(event);
-
- if (userpg->cap_user_rdpmc) {
- if (event->hw.flags & ARMPMU_EVT_64BIT)
- userpg->pmc_width = 64;
- else
- userpg->pmc_width = 32;
- }
-
- do {
- rd = sched_clock_read_begin(&seq);
-
- if (rd->read_sched_clock != arch_timer_read_counter)
- return;
-
- userpg->time_mult = rd->mult;
- userpg->time_shift = rd->shift;
- userpg->time_zero = rd->epoch_ns;
- userpg->time_cycles = rd->epoch_cyc;
- userpg->time_mask = rd->sched_clock_mask;
-
- /*
- * Subtract the cycle base, such that software that
- * doesn't know about cap_user_time_short still 'works'
- * assuming no wraps.
- */
- ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
- userpg->time_zero -= ns;
-
- } while (sched_clock_read_retry(seq));
-
- userpg->time_offset = userpg->time_zero - now;
-
- /*
- * time_shift is not expected to be greater than 31 due to
- * the original published conversion algorithm shifting a
- * 32-bit value (now specifies a 64-bit value) - refer to the
- * perf_event_mmap_page documentation in perf_event.h.
- */
- if (userpg->time_shift == 32) {
- userpg->time_shift = 31;
- userpg->time_mult >>= 1;
- }
-
- /*
- * Internal timekeeping for enabled/running/stopped times
- * is always computed with the sched_clock.
- */
- userpg->cap_user_time = 1;
- userpg->cap_user_time_zero = 1;
- userpg->cap_user_time_short = 1;
-}
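The mult/shift/zero values exported by arch_perf_update_userpage() let userspace turn raw cycle counts into nanoseconds without entering the kernel. Below is a self-contained sketch of the conversion described in the perf_event_mmap_page comments (include/uapi/linux/perf_event.h); the sample parameters model a 100 MHz counter and are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* time = zero + quot * mult + ((rem * mult) >> shift), split to limit overflow. */
static uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint16_t shift,
			  uint64_t zero)
{
	uint64_t quot = cyc >> shift;
	uint64_t rem = cyc & (((uint64_t)1 << shift) - 1);

	return zero + quot * mult + ((rem * mult) >> shift);
}

int main(void)
{
	/* Made-up stand-ins for userpg->time_mult/time_shift/time_zero. */
	uint32_t mult = 20971520;	/* 10 ns per cycle, scaled by 2^21 */
	uint16_t shift = 21;
	uint64_t zero = 0;

	/* 100,000,000 cycles at 100 MHz -> 1,000,000,000 ns */
	printf("%llu ns\n",
	       (unsigned long long)cyc_to_ns(100000000ULL, mult, shift, zero));
	return 0;
}
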
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 71d59b5abede..b5bed62483cb 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -217,7 +217,7 @@ void __show_regs(struct pt_regs *regs)
if (!user_mode(regs)) {
printk("pc : %pS\n", (void *)regs->pc);
- printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
+ printk("lr : %pS\n", (void *)ptrauth_strip_kernel_insn_pac(lr));
} else {
printk("pc : %016llx\n", regs->pc);
printk("lr : %016llx\n", lr);
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index fca9cc6f5581..05f40c4e18fd 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -966,9 +966,6 @@ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
const char *v = arm64_get_bp_hardening_vector(slot);
- if (slot < 0)
- return;
-
__this_cpu_write(this_cpu_vector, v);
/*
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 06a02707f488..2cfc810d0a5b 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -651,7 +651,7 @@ static int parse_user_sigframe(struct user_ctxs *user,
break;
case TPIDR2_MAGIC:
- if (!system_supports_sme())
+ if (!system_supports_tpidr2())
goto invalid;
if (user->tpidr2)
@@ -802,7 +802,7 @@ static int restore_sigframe(struct pt_regs *regs,
err = restore_fpsimd_context(&user);
}
- if (err == 0 && system_supports_sme() && user.tpidr2)
+ if (err == 0 && system_supports_tpidr2() && user.tpidr2)
err = restore_tpidr2_context(&user);
if (err == 0 && system_supports_sme() && user.za)
@@ -893,6 +893,13 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
return err;
}
+ if (system_supports_tpidr2()) {
+ err = sigframe_alloc(user, &user->tpidr2_offset,
+ sizeof(struct tpidr2_context));
+ if (err)
+ return err;
+ }
+
if (system_supports_sme()) {
unsigned int vl;
unsigned int vq = 0;
@@ -902,11 +909,6 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
else
vl = task_get_sme_vl(current);
- err = sigframe_alloc(user, &user->tpidr2_offset,
- sizeof(struct tpidr2_context));
- if (err)
- return err;
-
if (thread_za_enabled(&current->thread))
vq = sve_vq_from_vl(vl);
@@ -974,7 +976,7 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user,
}
/* TPIDR2 if supported */
- if (system_supports_sme() && err == 0) {
+ if (system_supports_tpidr2() && err == 0) {
struct tpidr2_context __user *tpidr2_ctx =
apply_user_offset(user, user->tpidr2_offset);
err |= preserve_tpidr2_context(tpidr2_ctx);
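The signal.c hunks above swap system_supports_sme() for system_supports_tpidr2() everywhere TPIDR2 state is handled. As far as I can tell the new helper is, at this point, just a thin wrapper over the SME check, so behaviour is unchanged today while leaving room for the two capabilities to diverge; treat the exact body below as an assumption rather than a quotation from cpufeature.h.

/* Sketch of the helper these hunks rely on (arch/arm64/include/asm/cpufeature.h). */
static __always_inline bool system_supports_tpidr2(void)
{
	return system_supports_sme();
}
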
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 83154303e682..17f66a74c745 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -25,8 +25,9 @@
*
* The regs must be on a stack currently owned by the calling task.
*/
-static __always_inline void unwind_init_from_regs(struct unwind_state *state,
- struct pt_regs *regs)
+static __always_inline void
+unwind_init_from_regs(struct unwind_state *state,
+ struct pt_regs *regs)
{
unwind_init_common(state, current);
@@ -42,7 +43,8 @@ static __always_inline void unwind_init_from_regs(struct unwind_state *state,
*
* The function which invokes this must be noinline.
*/
-static __always_inline void unwind_init_from_caller(struct unwind_state *state)
+static __always_inline void
+unwind_init_from_caller(struct unwind_state *state)
{
unwind_init_common(state, current);
@@ -60,8 +62,9 @@ static __always_inline void unwind_init_from_caller(struct unwind_state *state)
* duration of the unwind, or the unwind will be bogus. It is never valid to
* call this for the current task.
*/
-static __always_inline void unwind_init_from_task(struct unwind_state *state,
- struct task_struct *task)
+static __always_inline void
+unwind_init_from_task(struct unwind_state *state,
+ struct task_struct *task)
{
unwind_init_common(state, task);
@@ -69,6 +72,32 @@ static __always_inline void unwind_init_from_task(struct unwind_state *state,
state->pc = thread_saved_pc(task);
}
+static __always_inline int
+unwind_recover_return_address(struct unwind_state *state)
+{
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if (state->task->ret_stack &&
+ (state->pc == (unsigned long)return_to_handler)) {
+ unsigned long orig_pc;
+ orig_pc = ftrace_graph_ret_addr(state->task, NULL, state->pc,
+ (void *)state->fp);
+ if (WARN_ON_ONCE(state->pc == orig_pc))
+ return -EINVAL;
+ state->pc = orig_pc;
+ }
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_KRETPROBES
+ if (is_kretprobe_trampoline(state->pc)) {
+ state->pc = kretprobe_find_ret_addr(state->task,
+ (void *)state->fp,
+ &state->kr_cur);
+ }
+#endif /* CONFIG_KRETPROBES */
+
+ return 0;
+}
+
/*
* Unwind from one frame record (A) to the next frame record (B).
*
@@ -76,7 +105,8 @@ static __always_inline void unwind_init_from_task(struct unwind_state *state,
* records (e.g. a cycle), determined based on the location and fp value of A
* and the location (but not the fp value) of B.
*/
-static int notrace unwind_next(struct unwind_state *state)
+static __always_inline int
+unwind_next(struct unwind_state *state)
{
struct task_struct *tsk = state->task;
unsigned long fp = state->fp;
@@ -90,37 +120,18 @@ static int notrace unwind_next(struct unwind_state *state)
if (err)
return err;
- state->pc = ptrauth_strip_insn_pac(state->pc);
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- if (tsk->ret_stack &&
- (state->pc == (unsigned long)return_to_handler)) {
- unsigned long orig_pc;
- /*
- * This is a case where function graph tracer has
- * modified a return address (LR) in a stack frame
- * to hook a function return.
- * So replace it to an original value.
- */
- orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
- (void *)state->fp);
- if (WARN_ON_ONCE(state->pc == orig_pc))
- return -EINVAL;
- state->pc = orig_pc;
- }
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-#ifdef CONFIG_KRETPROBES
- if (is_kretprobe_trampoline(state->pc))
- state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur);
-#endif
+ state->pc = ptrauth_strip_kernel_insn_pac(state->pc);
- return 0;
+ return unwind_recover_return_address(state);
}
-NOKPROBE_SYMBOL(unwind_next);
-static void notrace unwind(struct unwind_state *state,
- stack_trace_consume_fn consume_entry, void *cookie)
+static __always_inline void
+unwind(struct unwind_state *state, stack_trace_consume_fn consume_entry,
+ void *cookie)
{
+ if (unwind_recover_return_address(state))
+ return;
+
while (1) {
int ret;
@@ -131,40 +142,6 @@ static void notrace unwind(struct unwind_state *state,
break;
}
}
-NOKPROBE_SYMBOL(unwind);
-
-static bool dump_backtrace_entry(void *arg, unsigned long where)
-{
- char *loglvl = arg;
- printk("%s %pSb\n", loglvl, (void *)where);
- return true;
-}
-
-void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
- const char *loglvl)
-{
- pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
-
- if (regs && user_mode(regs))
- return;
-
- if (!tsk)
- tsk = current;
-
- if (!try_get_task_stack(tsk))
- return;
-
- printk("%sCall trace:\n", loglvl);
- arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);
-
- put_task_stack(tsk);
-}
-
-void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
-{
- dump_backtrace(NULL, tsk, loglvl);
- barrier();
-}
/*
* Per-cpu stacks are only accessible when unwinding the current task in a
@@ -230,3 +207,36 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
unwind(&state, consume_entry, cookie);
}
+
+static bool dump_backtrace_entry(void *arg, unsigned long where)
+{
+ char *loglvl = arg;
+ printk("%s %pSb\n", loglvl, (void *)where);
+ return true;
+}
+
+void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl)
+{
+ pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
+
+ if (regs && user_mode(regs))
+ return;
+
+ if (!tsk)
+ tsk = current;
+
+ if (!try_get_task_stack(tsk))
+ return;
+
+ printk("%sCall trace:\n", loglvl);
+ arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);
+
+ put_task_stack(tsk);
+}
+
+void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
+{
+ dump_backtrace(NULL, tsk, loglvl);
+ barrier();
+}
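dump_backtrace() above doubles as the reference consumer for arch_stack_walk(): the callback is invoked once per recovered return address and keeps the walk going by returning true. A minimal alternative consumer that merely counts frames, built only on the interfaces visible in this hunk:

static bool count_entry(void *arg, unsigned long where)
{
	unsigned int *nr = arg;

	(*nr)++;
	return true;	/* keep unwinding */
}

static unsigned int count_stack_depth(struct task_struct *tsk)
{
	unsigned int nr = 0;

	arch_stack_walk(count_entry, &nr, tsk, NULL);
	return nr;
}
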
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 4b2e16e696a8..6673c7b4f1a8 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -16,7 +16,6 @@
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
-#include <linux/kmemleak.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
@@ -46,7 +45,6 @@
#include <kvm/arm_psci.h>
static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;
-DEFINE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);
DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
@@ -2130,41 +2128,6 @@ out_err:
return err;
}
-static void __init _kvm_host_prot_finalize(void *arg)
-{
- int *err = arg;
-
- if (WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize)))
- WRITE_ONCE(*err, -EINVAL);
-}
-
-static int __init pkvm_drop_host_privileges(void)
-{
- int ret = 0;
-
- /*
- * Flip the static key upfront as that may no longer be possible
- * once the host stage 2 is installed.
- */
- static_branch_enable(&kvm_protected_mode_initialized);
- on_each_cpu(_kvm_host_prot_finalize, &ret, 1);
- return ret;
-}
-
-static int __init finalize_hyp_mode(void)
-{
- if (!is_protected_kvm_enabled())
- return 0;
-
- /*
- * Exclude HYP sections from kmemleak so that they don't get peeked
- * at, which would end badly once inaccessible.
- */
- kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
- kmemleak_free_part_phys(hyp_mem_base, hyp_mem_size);
- return pkvm_drop_host_privileges();
-}
-
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
struct kvm_vcpu *vcpu;
@@ -2282,14 +2245,6 @@ static __init int kvm_arm_init(void)
if (err)
goto out_hyp;
- if (!in_hyp_mode) {
- err = finalize_hyp_mode();
- if (err) {
- kvm_err("Failed to finalize Hyp protection\n");
- goto out_subs;
- }
- }
-
if (is_protected_kvm_enabled()) {
kvm_info("Protected nVHE mode initialized successfully\n");
} else if (in_hyp_mode) {
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index cf56958b1492..6e9ece1ebbe7 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -4,6 +4,8 @@
* Author: Quentin Perret <qperret@google.com>
*/
+#include <linux/init.h>
+#include <linux/kmemleak.h>
#include <linux/kvm_host.h>
#include <linux/memblock.h>
#include <linux/mutex.h>
@@ -13,6 +15,8 @@
#include "hyp_constants.h"
+DEFINE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);
+
static struct memblock_region *hyp_memory = kvm_nvhe_sym(hyp_memory);
static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr);
@@ -213,3 +217,46 @@ int pkvm_init_host_vm(struct kvm *host_kvm)
mutex_init(&host_kvm->lock);
return 0;
}
+
+static void __init _kvm_host_prot_finalize(void *arg)
+{
+ int *err = arg;
+
+ if (WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize)))
+ WRITE_ONCE(*err, -EINVAL);
+}
+
+static int __init pkvm_drop_host_privileges(void)
+{
+ int ret = 0;
+
+ /*
+ * Flip the static key upfront as that may no longer be possible
+ * once the host stage 2 is installed.
+ */
+ static_branch_enable(&kvm_protected_mode_initialized);
+ on_each_cpu(_kvm_host_prot_finalize, &ret, 1);
+ return ret;
+}
+
+static int __init finalize_pkvm(void)
+{
+ int ret;
+
+ if (!is_protected_kvm_enabled())
+ return 0;
+
+ /*
+ * Exclude HYP sections from kmemleak so that they don't get peeked
+ * at, which would end badly once inaccessible.
+ */
+ kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
+ kmemleak_free_part_phys(hyp_mem_base, hyp_mem_size);
+
+ ret = pkvm_drop_host_privileges();
+ if (ret)
+ pr_err("Failed to finalize Hyp protection: %d\n", ret);
+
+ return ret;
+}
+device_initcall_sync(finalize_pkvm);
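Note the initcall level: finalize_pkvm() is registered with device_initcall_sync(), which runs only after every plain device_initcall() at the same level has completed, including the armv8_pmu_driver_init() registration earlier in this diff. A stripped-down view of that ordering, with placeholder function names and the bodies reduced to comments:

/* Runs first, at device_initcall level (cf. armv8_pmu_driver_init above). */
static int __init pmu_probe_sketch(void)
{
	/* CPU PMU probing happens here, while the host is still privileged. */
	return 0;
}
device_initcall(pmu_probe_sketch);

/* Runs afterwards, at device_initcall_sync level (cf. finalize_pkvm above). */
static int __init finalize_pkvm_sketch(void)
{
	/* Host privileges are dropped only once the plain initcall batch is done. */
	return 0;
}
device_initcall_sync(finalize_pkvm_sketch);
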
diff --git a/arch/arm64/lib/uaccess_flushcache.c b/arch/arm64/lib/uaccess_flushcache.c
index baee22961bdb..7510d1a23124 100644
--- a/arch/arm64/lib/uaccess_flushcache.c
+++ b/arch/arm64/lib/uaccess_flushcache.c
@@ -19,12 +19,6 @@ void memcpy_flushcache(void *dst, const void *src, size_t cnt)
}
EXPORT_SYMBOL_GPL(memcpy_flushcache);
-void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
- size_t len)
-{
- memcpy_flushcache(to, page_address(page) + offset, len);
-}
-
unsigned long __copy_user_flushcache(void *to, const void __user *from,
unsigned long n)
{
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index ff1e800ba7a1..dbd1bc95967d 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -2,7 +2,7 @@
obj-y := dma-mapping.o extable.o fault.o init.o \
cache.o copypage.o flush.o \
ioremap.o mmap.o pgd.o mmu.o \
- context.o proc.o pageattr.o
+ context.o proc.o pageattr.o fixmap.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
obj-$(CONFIG_PTDUMP_DEBUGFS) += ptdump_debugfs.o
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 5240f6acad64..3cb101e8cb29 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -36,22 +36,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
{
unsigned long start = (unsigned long)page_address(page);
- /*
- * The architecture only requires a clean to the PoC here in order to
- * meet the requirements of the DMA API. However, some vendors (i.e.
- * Qualcomm) abuse the DMA API for transferring buffers from the
- * non-secure to the secure world, resetting the system if a non-secure
- * access shows up after the buffer has been transferred:
- *
- * https://lore.kernel.org/r/20221114110329.68413-1-manivannan.sadhasivam@linaro.org
- *
- * Using clean+invalidate appears to make this issue less likely, but
- * the drivers themselves still need fixing as the CPU could issue a
- * speculative read from the buffer via the linear mapping irrespective
- * of the cache maintenance we use. Once the drivers are fixed, we can
- * relax this to a clean operation.
- */
- dcache_clean_inval_poc(start, start + size);
+ dcache_clean_poc(start, start + size);
}
#ifdef CONFIG_IOMMU_DMA
diff --git a/arch/arm64/mm/fixmap.c b/arch/arm64/mm/fixmap.c
new file mode 100644
index 000000000000..c0a3301203bd
--- /dev/null
+++ b/arch/arm64/mm/fixmap.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Fixmap manipulation code
+ */
+
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/libfdt.h>
+#include <linux/memory.h>
+#include <linux/mm.h>
+#include <linux/sizes.h>
+
+#include <asm/fixmap.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+#define NR_BM_PTE_TABLES \
+ SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PMD_SHIFT)
+#define NR_BM_PMD_TABLES \
+ SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PUD_SHIFT)
+
+static_assert(NR_BM_PMD_TABLES == 1);
+
+#define __BM_TABLE_IDX(addr, shift) \
+ (((addr) >> (shift)) - (FIXADDR_TOT_START >> (shift)))
+
+#define BM_PTE_TABLE_IDX(addr) __BM_TABLE_IDX(addr, PMD_SHIFT)
+
+static pte_t bm_pte[NR_BM_PTE_TABLES][PTRS_PER_PTE] __page_aligned_bss;
+static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
+static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
+
+static inline pte_t *fixmap_pte(unsigned long addr)
+{
+ return &bm_pte[BM_PTE_TABLE_IDX(addr)][pte_index(addr)];
+}
+
+static void __init early_fixmap_init_pte(pmd_t *pmdp, unsigned long addr)
+{
+ pmd_t pmd = READ_ONCE(*pmdp);
+ pte_t *ptep;
+
+ if (pmd_none(pmd)) {
+ ptep = bm_pte[BM_PTE_TABLE_IDX(addr)];
+ __pmd_populate(pmdp, __pa_symbol(ptep), PMD_TYPE_TABLE);
+ }
+}
+
+static void __init early_fixmap_init_pmd(pud_t *pudp, unsigned long addr,
+ unsigned long end)
+{
+ unsigned long next;
+ pud_t pud = READ_ONCE(*pudp);
+ pmd_t *pmdp;
+
+ if (pud_none(pud))
+ __pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE);
+
+ pmdp = pmd_offset_kimg(pudp, addr);
+ do {
+ next = pmd_addr_end(addr, end);
+ early_fixmap_init_pte(pmdp, addr);
+ } while (pmdp++, addr = next, addr != end);
+}
+
+
+static void __init early_fixmap_init_pud(p4d_t *p4dp, unsigned long addr,
+ unsigned long end)
+{
+ p4d_t p4d = READ_ONCE(*p4dp);
+ pud_t *pudp;
+
+ if (CONFIG_PGTABLE_LEVELS > 3 && !p4d_none(p4d) &&
+ p4d_page_paddr(p4d) != __pa_symbol(bm_pud)) {
+ /*
+ * We only end up here if the kernel mapping and the fixmap
+ * share the top level pgd entry, which should only happen on
+ * 16k/4 levels configurations.
+ */
+ BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+ }
+
+ if (p4d_none(p4d))
+ __p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE);
+
+ pudp = pud_offset_kimg(p4dp, addr);
+ early_fixmap_init_pmd(pudp, addr, end);
+}
+
+/*
+ * The p*d_populate functions call virt_to_phys implicitly so they can't be used
+ * directly on kernel symbols (bm_p*d). This function is called too early to use
+ * lm_alias so __p*d_populate functions must be used to populate with the
+ * physical address from __pa_symbol.
+ */
+void __init early_fixmap_init(void)
+{
+ unsigned long addr = FIXADDR_TOT_START;
+ unsigned long end = FIXADDR_TOP;
+
+ pgd_t *pgdp = pgd_offset_k(addr);
+ p4d_t *p4dp = p4d_offset(pgdp, addr);
+
+ early_fixmap_init_pud(p4dp, addr, end);
+}
+
+/*
+ * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
+ * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
+ */
+void __set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t flags)
+{
+ unsigned long addr = __fix_to_virt(idx);
+ pte_t *ptep;
+
+ BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
+
+ ptep = fixmap_pte(addr);
+
+ if (pgprot_val(flags)) {
+ set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
+ } else {
+ pte_clear(&init_mm, addr, ptep);
+ flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
+ }
+}
+
+void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
+{
+ const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
+ phys_addr_t dt_phys_base;
+ int offset;
+ void *dt_virt;
+
+ /*
+ * Check whether the physical FDT address is set and meets the minimum
+ * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
+ * at least 8 bytes so that we can always access the magic and size
+ * fields of the FDT header after mapping the first chunk, double check
+ * here if that is indeed the case.
+ */
+ BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
+ if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
+ return NULL;
+
+ dt_phys_base = round_down(dt_phys, PAGE_SIZE);
+ offset = dt_phys % PAGE_SIZE;
+ dt_virt = (void *)dt_virt_base + offset;
+
+ /* map the first chunk so we can read the size from the header */
+ create_mapping_noalloc(dt_phys_base, dt_virt_base, PAGE_SIZE, prot);
+
+ if (fdt_magic(dt_virt) != FDT_MAGIC)
+ return NULL;
+
+ *size = fdt_totalsize(dt_virt);
+ if (*size > MAX_FDT_SIZE)
+ return NULL;
+
+ if (offset + *size > PAGE_SIZE) {
+ create_mapping_noalloc(dt_phys_base, dt_virt_base,
+ offset + *size, prot);
+ }
+
+ return dt_virt;
+}
+
+/*
+ * Copy the fixmap region into a new pgdir.
+ */
+void __init fixmap_copy(pgd_t *pgdir)
+{
+ if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdir, FIXADDR_TOT_START)))) {
+ /*
+ * The fixmap falls in a separate pgd to the kernel, and doesn't
+ * live in the carveout for the swapper_pg_dir. We can simply
+ * re-use the existing dir for the fixmap.
+ */
+ set_pgd(pgd_offset_pgd(pgdir, FIXADDR_TOT_START),
+ READ_ONCE(*pgd_offset_k(FIXADDR_TOT_START)));
+ } else if (CONFIG_PGTABLE_LEVELS > 3) {
+ pgd_t *bm_pgdp;
+ p4d_t *bm_p4dp;
+ pud_t *bm_pudp;
+ /*
+ * The fixmap shares its top level pgd entry with the kernel
+ * mapping. This can really only occur when we are running
+ * with 16k/4 levels, so we can simply reuse the pud level
+ * entry instead.
+ */
+ BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+ bm_pgdp = pgd_offset_pgd(pgdir, FIXADDR_TOT_START);
+ bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_TOT_START);
+ bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_TOT_START);
+ pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
+ pud_clear_fixmap();
+ } else {
+ BUG();
+ }
+}
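The new fixmap.c keeps fixmap_remap_fdt() with the same prototype it had in mmu.c, so its caller does not change; roughly, the flow on the setup_machine_fdt() side looks like the sketch below, with the error handling and pgprot choice treated as illustrative rather than quoted.

static void __init map_fdt_sketch(phys_addr_t dt_phys)
{
	int size;
	void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);

	if (!dt_virt)
		panic("invalid device tree blob\n");

	/* The FDT is now readable through the FIX_FDT fixmap slot. */
	memblock_reserve(dt_phys, size);
}
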
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 58a0bb2c17f1..66e70ca47680 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -61,34 +61,8 @@ EXPORT_SYMBOL(memstart_addr);
* unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
* In such case, ZONE_DMA32 covers the rest of the 32-bit addressable memory,
* otherwise it is empty.
- *
- * Memory reservation for crash kernel either done early or deferred
- * depending on DMA memory zones configs (ZONE_DMA) --
- *
- * In absence of ZONE_DMA configs arm64_dma_phys_limit initialized
- * here instead of max_zone_phys(). This lets early reservation of
- * crash kernel memory which has a dependency on arm64_dma_phys_limit.
- * Reserving memory early for crash kernel allows linear creation of block
- * mappings (greater than page-granularity) for all the memory bank ranges.
- * In this scheme a comparatively quicker boot is observed.
- *
- * If ZONE_DMA configs are defined, crash kernel memory reservation
- * is delayed until DMA zone memory range size initialization performed in
- * zone_sizes_init(). The defer is necessary to steer clear of DMA zone
- * memory range to avoid overlap allocation. So crash kernel memory boundaries
- * are not known when mapping all bank memory ranges, which otherwise means
- * not possible to exclude crash kernel range from creating block mappings
- * so page-granularity mappings are created for the entire memory range.
- * Hence a slightly slower boot is observed.
- *
- * Note: Page-granularity mappings are necessary for crash kernel memory
- * range for shrinking its size via /sys/kernel/kexec_crash_size interface.
*/
-#if IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32)
phys_addr_t __ro_after_init arm64_dma_phys_limit;
-#else
-phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
-#endif
/* Current arm64 boot protocol requires 2MB alignment */
#define CRASH_ALIGN SZ_2M
@@ -248,6 +222,8 @@ static void __init zone_sizes_init(void)
if (!arm64_dma_phys_limit)
arm64_dma_phys_limit = dma32_phys_limit;
#endif
+ if (!arm64_dma_phys_limit)
+ arm64_dma_phys_limit = PHYS_MASK + 1;
max_zone_pfns[ZONE_NORMAL] = max_pfn;
free_area_init(max_zone_pfns);
@@ -408,9 +384,6 @@ void __init arm64_memblock_init(void)
early_init_fdt_scan_reserved_mem();
- if (!defer_reserve_crashkernel())
- reserve_crashkernel();
-
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
}
@@ -457,8 +430,7 @@ void __init bootmem_init(void)
* request_standard_resources() depends on crashkernel's memory being
* reserved, so do it here.
*/
- if (defer_reserve_crashkernel())
- reserve_crashkernel();
+ reserve_crashkernel();
memblock_dump_all();
}
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6f9d8898a025..af6bc8403ee4 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -24,6 +24,7 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
+#include <linux/kfence.h>
#include <asm/barrier.h>
#include <asm/cputype.h>
@@ -38,6 +39,7 @@
#include <asm/ptdump.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
+#include <asm/kfence.h>
#define NO_BLOCK_MAPPINGS BIT(0)
#define NO_CONT_MAPPINGS BIT(1)
@@ -71,10 +73,6 @@ long __section(".mmuoff.data.write") __early_cpu_boot_status;
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
-static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
-static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
-static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
-
static DEFINE_SPINLOCK(swapper_pgdir_lock);
static DEFINE_MUTEX(fixmap_lock);
@@ -450,8 +448,8 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
* without allocating new levels of table. Note that this permits the
* creation of new section or page entries.
*/
-static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
- phys_addr_t size, pgprot_t prot)
+void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
+ phys_addr_t size, pgprot_t prot)
{
if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
@@ -510,20 +508,59 @@ void __init mark_linear_text_alias_ro(void)
PAGE_KERNEL_RO);
}
-static bool crash_mem_map __initdata;
+#ifdef CONFIG_KFENCE
+
+bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
-static int __init enable_crash_mem_map(char *arg)
+/* early_param() will be parsed before map_mem() below. */
+static int __init parse_kfence_early_init(char *arg)
{
- /*
- * Proper parameter parsing is done by reserve_crashkernel(). We only
- * need to know if the linear map has to avoid block mappings so that
- * the crashkernel reservations can be unmapped later.
- */
- crash_mem_map = true;
+ int val;
+ if (get_option(&arg, &val))
+ kfence_early_init = !!val;
return 0;
}
-early_param("crashkernel", enable_crash_mem_map);
+early_param("kfence.sample_interval", parse_kfence_early_init);
+
+static phys_addr_t __init arm64_kfence_alloc_pool(void)
+{
+ phys_addr_t kfence_pool;
+
+ if (!kfence_early_init)
+ return 0;
+
+ kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
+ if (!kfence_pool) {
+ pr_err("failed to allocate kfence pool\n");
+ kfence_early_init = false;
+ return 0;
+ }
+
+ /* Temporarily mark as NOMAP. */
+ memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
+
+ return kfence_pool;
+}
+
+static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp)
+{
+ if (!kfence_pool)
+ return;
+
+ /* KFENCE pool needs page-level mapping. */
+ __map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE,
+ pgprot_tagged(PAGE_KERNEL),
+ NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
+ memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
+ __kfence_pool = phys_to_virt(kfence_pool);
+}
+#else /* CONFIG_KFENCE */
+
+static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; }
+static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { }
+
+#endif /* CONFIG_KFENCE */
static void __init map_mem(pgd_t *pgdp)
{
@@ -531,6 +568,7 @@ static void __init map_mem(pgd_t *pgdp)
phys_addr_t kernel_start = __pa_symbol(_stext);
phys_addr_t kernel_end = __pa_symbol(__init_begin);
phys_addr_t start, end;
+ phys_addr_t early_kfence_pool;
int flags = NO_EXEC_MAPPINGS;
u64 i;
@@ -543,6 +581,8 @@ static void __init map_mem(pgd_t *pgdp)
*/
BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
+ early_kfence_pool = arm64_kfence_alloc_pool();
+
if (can_set_direct_map())
flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
@@ -554,16 +594,6 @@ static void __init map_mem(pgd_t *pgdp)
*/
memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
-#ifdef CONFIG_KEXEC_CORE
- if (crash_mem_map) {
- if (defer_reserve_crashkernel())
- flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
- else if (crashk_res.end)
- memblock_mark_nomap(crashk_res.start,
- resource_size(&crashk_res));
- }
-#endif
-
/* map all the memory banks */
for_each_mem_range(i, &start, &end) {
if (start >= end)
@@ -590,24 +620,7 @@ static void __init map_mem(pgd_t *pgdp)
__map_memblock(pgdp, kernel_start, kernel_end,
PAGE_KERNEL, NO_CONT_MAPPINGS);
memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
-
- /*
- * Use page-level mappings here so that we can shrink the region
- * in page granularity and put back unused memory to buddy system
- * through /sys/kernel/kexec_crash_size interface.
- */
-#ifdef CONFIG_KEXEC_CORE
- if (crash_mem_map && !defer_reserve_crashkernel()) {
- if (crashk_res.end) {
- __map_memblock(pgdp, crashk_res.start,
- crashk_res.end + 1,
- PAGE_KERNEL,
- NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
- memblock_clear_nomap(crashk_res.start,
- resource_size(&crashk_res));
- }
- }
-#endif
+ arm64_kfence_map_pool(early_kfence_pool, pgdp);
}
void mark_rodata_ro(void)
@@ -734,34 +747,7 @@ static void __init map_kernel(pgd_t *pgdp)
&vmlinux_initdata, 0, VM_NO_GUARD);
map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
- if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdp, FIXADDR_START)))) {
- /*
- * The fixmap falls in a separate pgd to the kernel, and doesn't
- * live in the carveout for the swapper_pg_dir. We can simply
- * re-use the existing dir for the fixmap.
- */
- set_pgd(pgd_offset_pgd(pgdp, FIXADDR_START),
- READ_ONCE(*pgd_offset_k(FIXADDR_START)));
- } else if (CONFIG_PGTABLE_LEVELS > 3) {
- pgd_t *bm_pgdp;
- p4d_t *bm_p4dp;
- pud_t *bm_pudp;
- /*
- * The fixmap shares its top level pgd entry with the kernel
- * mapping. This can really only occur when we are running
- * with 16k/4 levels, so we can simply reuse the pud level
- * entry instead.
- */
- BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
- bm_pgdp = pgd_offset_pgd(pgdp, FIXADDR_START);
- bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_START);
- bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_START);
- pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
- pud_clear_fixmap();
- } else {
- BUG();
- }
-
+ fixmap_copy(pgdp);
kasan_copy_shadow(pgdp);
}
@@ -1176,166 +1162,6 @@ void vmemmap_free(unsigned long start, unsigned long end,
}
#endif /* CONFIG_MEMORY_HOTPLUG */
-static inline pud_t *fixmap_pud(unsigned long addr)
-{
- pgd_t *pgdp = pgd_offset_k(addr);
- p4d_t *p4dp = p4d_offset(pgdp, addr);
- p4d_t p4d = READ_ONCE(*p4dp);
-
- BUG_ON(p4d_none(p4d) || p4d_bad(p4d));
-
- return pud_offset_kimg(p4dp, addr);
-}
-
-static inline pmd_t *fixmap_pmd(unsigned long addr)
-{
- pud_t *pudp = fixmap_pud(addr);
- pud_t pud = READ_ONCE(*pudp);
-
- BUG_ON(pud_none(pud) || pud_bad(pud));
-
- return pmd_offset_kimg(pudp, addr);
-}
-
-static inline pte_t *fixmap_pte(unsigned long addr)
-{
- return &bm_pte[pte_index(addr)];
-}
-
-/*
- * The p*d_populate functions call virt_to_phys implicitly so they can't be used
- * directly on kernel symbols (bm_p*d). This function is called too early to use
- * lm_alias so __p*d_populate functions must be used to populate with the
- * physical address from __pa_symbol.
- */
-void __init early_fixmap_init(void)
-{
- pgd_t *pgdp;
- p4d_t *p4dp, p4d;
- pud_t *pudp;
- pmd_t *pmdp;
- unsigned long addr = FIXADDR_START;
-
- pgdp = pgd_offset_k(addr);
- p4dp = p4d_offset(pgdp, addr);
- p4d = READ_ONCE(*p4dp);
- if (CONFIG_PGTABLE_LEVELS > 3 &&
- !(p4d_none(p4d) || p4d_page_paddr(p4d) == __pa_symbol(bm_pud))) {
- /*
- * We only end up here if the kernel mapping and the fixmap
- * share the top level pgd entry, which should only happen on
- * 16k/4 levels configurations.
- */
- BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
- pudp = pud_offset_kimg(p4dp, addr);
- } else {
- if (p4d_none(p4d))
- __p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE);
- pudp = fixmap_pud(addr);
- }
- if (pud_none(READ_ONCE(*pudp)))
- __pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE);
- pmdp = fixmap_pmd(addr);
- __pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
-
- /*
- * The boot-ioremap range spans multiple pmds, for which
- * we are not prepared:
- */
- BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
- != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
-
- if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
- || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
- WARN_ON(1);
- pr_warn("pmdp %p != %p, %p\n",
- pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
- fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
- pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
- fix_to_virt(FIX_BTMAP_BEGIN));
- pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
- fix_to_virt(FIX_BTMAP_END));
-
- pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
- pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
- }
-}
-
-/*
- * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
- * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
- */
-void __set_fixmap(enum fixed_addresses idx,
- phys_addr_t phys, pgprot_t flags)
-{
- unsigned long addr = __fix_to_virt(idx);
- pte_t *ptep;
-
- BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
-
- ptep = fixmap_pte(addr);
-
- if (pgprot_val(flags)) {
- set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
- } else {
- pte_clear(&init_mm, addr, ptep);
- flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
- }
-}
-
-void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
-{
- const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
- int offset;
- void *dt_virt;
-
- /*
- * Check whether the physical FDT address is set and meets the minimum
- * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
- * at least 8 bytes so that we can always access the magic and size
- * fields of the FDT header after mapping the first chunk, double check
- * here if that is indeed the case.
- */
- BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
- if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
- return NULL;
-
- /*
- * Make sure that the FDT region can be mapped without the need to
- * allocate additional translation table pages, so that it is safe
- * to call create_mapping_noalloc() this early.
- *
- * On 64k pages, the FDT will be mapped using PTEs, so we need to
- * be in the same PMD as the rest of the fixmap.
- * On 4k pages, we'll use section mappings for the FDT so we only
- * have to be in the same PUD.
- */
- BUILD_BUG_ON(dt_virt_base % SZ_2M);
-
- BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
- __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);
-
- offset = dt_phys % SWAPPER_BLOCK_SIZE;
- dt_virt = (void *)dt_virt_base + offset;
-
- /* map the first chunk so we can read the size from the header */
- create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
- dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
-
- if (fdt_magic(dt_virt) != FDT_MAGIC)
- return NULL;
-
- *size = fdt_totalsize(dt_virt);
- if (*size > MAX_FDT_SIZE)
- return NULL;
-
- if (offset + *size > SWAPPER_BLOCK_SIZE)
- create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
- round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);
-
- return dt_virt;
-}
-
int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 79dd201c59d8..8e2017ba5f1b 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -11,6 +11,7 @@
#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
+#include <asm/kfence.h>
struct page_change_data {
pgprot_t set_mask;
@@ -22,12 +23,14 @@ bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED
bool can_set_direct_map(void)
{
/*
- * rodata_full, DEBUG_PAGEALLOC and KFENCE require linear map to be
+ * rodata_full and DEBUG_PAGEALLOC require linear map to be
* mapped at page granularity, so that it is possible to
* protect/unprotect single pages.
+ *
+ * KFENCE pool requires page-granular mapping if initialized late.
*/
return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
- IS_ENABLED(CONFIG_KFENCE);
+ arm64_kfence_can_set_direct_map();
}
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
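can_set_direct_map() above now asks arm64_kfence_can_set_direct_map() (from the newly included <asm/kfence.h>) instead of unconditionally checking CONFIG_KFENCE: only a late-initialised KFENCE pool still forces the linear map to page granularity. The header itself is not part of this hunk, so the body below is an assumption about what such a helper amounts to.

#ifdef CONFIG_KFENCE
extern bool kfence_early_init;

static inline bool arm64_kfence_can_set_direct_map(void)
{
	/* A late-initialised pool needs the linear map at page granularity. */
	return !kfence_early_init;
}
#else
static inline bool arm64_kfence_can_set_direct_map(void)
{
	return false;
}
#endif
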
diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c
index 9bc4066c5bf3..e305b6593c4e 100644
--- a/arch/arm64/mm/ptdump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -45,7 +45,7 @@ static struct addr_marker address_markers[] = {
{ MODULES_END, "Modules end" },
{ VMALLOC_START, "vmalloc() area" },
{ VMALLOC_END, "vmalloc() end" },
- { FIXADDR_START, "Fixmap start" },
+ { FIXADDR_TOT_START, "Fixmap start" },
{ FIXADDR_TOP, "Fixmap end" },
{ PCI_IO_START, "PCI I/O start" },
{ PCI_IO_END, "PCI I/O end" },
diff --git a/arch/arm64/tools/gen-sysreg.awk b/arch/arm64/tools/gen-sysreg.awk
index 6fa0468caa00..d1254a056114 100755
--- a/arch/arm64/tools/gen-sysreg.awk
+++ b/arch/arm64/tools/gen-sysreg.awk
@@ -4,23 +4,35 @@
#
# Usage: awk -f gen-sysreg.awk sysregs.txt
+function block_current() {
+ return __current_block[__current_block_depth];
+}
+
# Log an error and terminate
function fatal(msg) {
print "Error at " NR ": " msg > "/dev/stderr"
+
+ printf "Current block nesting:"
+
+ for (i = 0; i <= __current_block_depth; i++) {
+ printf " " __current_block[i]
+ }
+ printf "\n"
+
exit 1
}
-# Sanity check that the start or end of a block makes sense at this point in
-# the file. If not, produce an error and terminate.
-#
-# @this - the $Block or $EndBlock
-# @prev - the only valid block to already be in (value of @block)
-# @new - the new value of @block
-function change_block(this, prev, new) {
- if (block != prev)
- fatal("unexpected " this " (inside " block ")")
-
- block = new
+# Enter a new block, setting the active block to @block
+function block_push(block) {
+ __current_block[++__current_block_depth] = block
+}
+
+# Exit a block, setting the active block to the parent block
+function block_pop() {
+ if (__current_block_depth == 0)
+ fatal("error: block_pop() in root block")
+
+ __current_block_depth--;
}
# Sanity check the number of records for a field makes sense. If not, produce
@@ -84,10 +96,14 @@ BEGIN {
print "/* Generated file - do not edit */"
print ""
- block = "None"
+ __current_block_depth = 0
+ __current_block[__current_block_depth] = "Root"
}
END {
+ if (__current_block_depth != 0)
+ fatal("Missing terminator for " block_current() " block")
+
print "#endif /* __ASM_SYSREG_DEFS_H */"
}
@@ -95,8 +111,9 @@ END {
/^$/ { next }
/^[\t ]*#/ { next }
-/^SysregFields/ {
- change_block("SysregFields", "None", "SysregFields")
+/^SysregFields/ && block_current() == "Root" {
+ block_push("SysregFields")
+
expect_fields(2)
reg = $2
@@ -110,12 +127,10 @@ END {
next
}
-/^EndSysregFields/ {
+/^EndSysregFields/ && block_current() == "SysregFields" {
if (next_bit > 0)
fatal("Unspecified bits in " reg)
- change_block("EndSysregFields", "SysregFields", "None")
-
define(reg "_RES0", "(" res0 ")")
define(reg "_RES1", "(" res1 ")")
define(reg "_UNKN", "(" unkn ")")
@@ -126,11 +141,13 @@ END {
res1 = null
unkn = null
+ block_pop()
next
}
-/^Sysreg/ {
- change_block("Sysreg", "None", "Sysreg")
+/^Sysreg/ && block_current() == "Root" {
+ block_push("Sysreg")
+
expect_fields(7)
reg = $2
@@ -160,12 +177,10 @@ END {
next
}
-/^EndSysreg/ {
+/^EndSysreg/ && block_current() == "Sysreg" {
if (next_bit > 0)
fatal("Unspecified bits in " reg)
- change_block("EndSysreg", "Sysreg", "None")
-
if (res0 != null)
define(reg "_RES0", "(" res0 ")")
if (res1 != null)
@@ -185,12 +200,13 @@ END {
res1 = null
unkn = null
+ block_pop()
next
}
# Currently this is effectively a comment, in future we may want to emit
# defines for the fields.
-/^Fields/ && (block == "Sysreg") {
+/^Fields/ && block_current() == "Sysreg" {
expect_fields(2)
if (next_bit != 63)
@@ -208,7 +224,7 @@ END {
}
-/^Res0/ && (block == "Sysreg" || block == "SysregFields") {
+/^Res0/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
expect_fields(2)
parse_bitdef(reg, "RES0", $2)
field = "RES0_" msb "_" lsb
@@ -218,7 +234,7 @@ END {
next
}
-/^Res1/ && (block == "Sysreg" || block == "SysregFields") {
+/^Res1/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
expect_fields(2)
parse_bitdef(reg, "RES1", $2)
field = "RES1_" msb "_" lsb
@@ -228,7 +244,7 @@ END {
next
}
-/^Unkn/ && (block == "Sysreg" || block == "SysregFields") {
+/^Unkn/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
expect_fields(2)
parse_bitdef(reg, "UNKN", $2)
field = "UNKN_" msb "_" lsb
@@ -238,7 +254,7 @@ END {
next
}
-/^Field/ && (block == "Sysreg" || block == "SysregFields") {
+/^Field/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
expect_fields(3)
field = $3
parse_bitdef(reg, field, $2)
@@ -249,15 +265,16 @@ END {
next
}
-/^Raz/ && (block == "Sysreg" || block == "SysregFields") {
+/^Raz/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
expect_fields(2)
parse_bitdef(reg, field, $2)
next
}
-/^SignedEnum/ {
- change_block("Enum<", "Sysreg", "Enum")
+/^SignedEnum/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
+ block_push("Enum")
+
expect_fields(3)
field = $3
parse_bitdef(reg, field, $2)
@@ -268,8 +285,9 @@ END {
next
}
-/^UnsignedEnum/ {
- change_block("Enum<", "Sysreg", "Enum")
+/^UnsignedEnum/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
+ block_push("Enum")
+
expect_fields(3)
field = $3
parse_bitdef(reg, field, $2)
@@ -280,8 +298,9 @@ END {
next
}
-/^Enum/ {
- change_block("Enum", "Sysreg", "Enum")
+/^Enum/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
+ block_push("Enum")
+
expect_fields(3)
field = $3
parse_bitdef(reg, field, $2)
@@ -291,16 +310,18 @@ END {
next
}
-/^EndEnum/ {
- change_block("EndEnum", "Enum", "Sysreg")
+/^EndEnum/ && block_current() == "Enum" {
+
field = null
msb = null
lsb = null
print ""
+
+ block_pop()
next
}
-/0b[01]+/ && block == "Enum" {
+/0b[01]+/ && block_current() == "Enum" {
expect_fields(2)
val = $1
name = $2
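The gen-sysreg.awk rework replaces the single 'block' state variable with an explicit stack, so Enum blocks can appear inside either Sysreg or SysregFields and mismatched terminators are still caught. The same bookkeeping is shown below as a small standalone C program for readers who find the awk harder to follow; the fixed stack depth is an arbitrary choice here.

#include <stdio.h>
#include <stdlib.h>

#define MAX_DEPTH 8

static const char *stack[MAX_DEPTH] = { "Root" };
static int depth;

static const char *block_current(void)
{
	return stack[depth];
}

static void block_push(const char *block)
{
	if (depth + 1 >= MAX_DEPTH) {
		fprintf(stderr, "block nesting too deep\n");
		exit(1);
	}
	stack[++depth] = block;
}

static void block_pop(void)
{
	if (depth == 0) {
		fprintf(stderr, "error: block_pop() in root block\n");
		exit(1);
	}
	depth--;
}

int main(void)
{
	block_push("Sysreg");
	block_push("Enum");
	printf("current: %s\n", block_current());	/* Enum */
	block_pop();
	printf("current: %s\n", block_current());	/* Sysreg */
	block_pop();
	printf("current: %s\n", block_current());	/* Root */
	return 0;
}
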
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index dd5a9c7e310f..77edce16f4f9 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -879,7 +879,30 @@ EndEnum
EndSysreg
Sysreg ID_AA64PFR1_EL1 3 0 0 4 1
-Res0 63:40
+UnsignedEnum 63:60 PFAR
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 59:56 DF2
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 55:52 MTEX
+ 0b0000 MTE
+ 0b0001 MTE4
+EndEnum
+UnsignedEnum 51:48 THE
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 47:44 GCS
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Enum 43:40 MTE_frac
+ 0b0000 ASYNC
+ 0b1111 NI
+EndEnum
UnsignedEnum 39:36 NMI
0b0000 NI
0b0001 IMP
@@ -1866,6 +1889,146 @@ Field 1 ZA
Field 0 SM
EndSysreg
+SysregFields HFGxTR_EL2
+Field 63 nAMIAIR2_EL1
+Field 62 nMAIR2_EL1
+Field 61 nS2POR_EL1
+Field 60 nPOR_EL1
+Field 59 nPOR_EL0
+Field 58 nPIR_EL1
+Field 57 nPIRE0_EL1
+Field 56 nRCWMASK_EL1
+Field 55 nTPIDR2_EL0
+Field 54 nSMPRI_EL1
+Field 53 nGCS_EL1
+Field 52 nGCS_EL0
+Res0 51
+Field 50 nACCDATA_EL1
+Field 49 ERXADDR_EL1
+Field 48 EXRPFGCDN_EL1
+Field 47 EXPFGCTL_EL1
+Field 46 EXPFGF_EL1
+Field 45 ERXMISCn_EL1
+Field 44 ERXSTATUS_EL1
+Field 43 ERXCTLR_EL1
+Field 42 ERXFR_EL1
+Field 41 ERRSELR_EL1
+Field 40 ERRIDR_EL1
+Field 39 ICC_IGRPENn_EL1
+Field 38 VBAR_EL1
+Field 37 TTBR1_EL1
+Field 36 TTBR0_EL1
+Field 35 TPIDR_EL0
+Field 34 TPIDRRO_EL0
+Field 33 TPIDR_EL1
+Field 32 TCR_EL1
+Field 31 SCTXNUM_EL0
+Field 30 SCTXNUM_EL1
+Field 29 SCTLR_EL1
+Field 28 REVIDR_EL1
+Field 27 PAR_EL1
+Field 26 MPIDR_EL1
+Field 25 MIDR_EL1
+Field 24 MAIR_EL1
+Field 23 LORSA_EL1
+Field 22 LORN_EL1
+Field 21 LORID_EL1
+Field 20 LOREA_EL1
+Field 19 LORC_EL1
+Field 18 ISR_EL1
+Field 17 FAR_EL1
+Field 16 ESR_EL1
+Field 15 DCZID_EL0
+Field 14 CTR_EL0
+Field 13 CSSELR_EL1
+Field 12 CPACR_EL1
+Field 11 CONTEXTIDR_EL1
+Field 10 CLIDR_EL1
+Field 9 CCSIDR_EL1
+Field 8 APIBKey
+Field 7 APIAKey
+Field 6 APGAKey
+Field 5 APDBKey
+Field 4 APDAKey
+Field 3 AMAIR_EL1
+Field 2 AIDR_EL1
+Field 1 AFSR1_EL1
+Field 0 AFSR0_EL1
+EndSysregFields
+
+Sysreg HFGRTR_EL2 3 4 1 1 4
+Fields HFGxTR_EL2
+EndSysreg
+
+Sysreg HFGWTR_EL2 3 4 1 1 5
+Fields HFGxTR_EL2
+EndSysreg
+
+Sysreg HFGITR_EL2 3 4 1 1 6
+Res0 63:61
+Field 60 COSPRCTX
+Field 59 nGCSEPP
+Field 58 nGCSSTR_EL1
+Field 57 nGCSPUSHM_EL1
+Field 56 nBRBIALL
+Field 55 nBRBINJ
+Field 54 DCCVAC
+Field 53 SVC_EL1
+Field 52 SVC_EL0
+Field 51 ERET
+Field 50 CPPRCTX
+Field 49 DVPRCTX
+Field 48 CFPRCTX
+Field 47 TLBIVAALE1
+Field 46 TLBIVALE1
+Field 45 TLBIVAAE1
+Field 44 TLBIASIDE1
+Field 43 TLBIVAE1
+Field 42 TLBIVMALLE1
+Field 41 TLBIRVAALE1
+Field 40 TLBIRVALE1
+Field 39 TLBIRVAAE1
+Field 38 TLBIRVAE1
+Field 37 TLBIRVAALE1IS
+Field 36 TLBIRVALE1IS
+Field 35 TLBIRVAAE1IS
+Field 34 TLBIRVAE1IS
+Field 33 TLBIVAALE1IS
+Field 32 TLBIVALE1IS
+Field 31 TLBIVAAE1IS
+Field 30 TLBIASIDE1IS
+Field 29 TLBIVAE1IS
+Field 28 TLBIVMALLE1IS
+Field 27 TLBIRVAALE1OS
+Field 26 TLBIRVALE1OS
+Field 25 TLBIRVAAE1OS
+Field 24 TLBIRVAE1OS
+Field 23 TLBIVAALE1OS
+Field 22 TLBIVALE1OS
+Field 21 TLBIVAAE1OS
+Field 20 TLBIASIDE1OS
+Field 19 TLBIVAE1OS
+Field 18 TLBIVMALLE1OS
+Field 17 ATS1E1WP
+Field 16 ATS1E1RP
+Field 15 ATS1E0W
+Field 14 ATS1E0R
+Field 13 ATS1E1W
+Field 12 ATS1E1R
+Field 11 DCZVA
+Field 10 DCCIVAC
+Field 9 DCCVADP
+Field 8 DCCVAP
+Field 7 DCCVAU
+Field 6 DCCISW
+Field 5 DCCSW
+Field 4 DCISW
+Field 3 DCIVAC
+Field 2 ICIVAU
+Field 1 ICIALLU
+Field 0 ICIALLUIS
+EndSysreg
+
Sysreg ZCR_EL2 3 4 1 2 0
Fields ZCR_ELx
EndSysreg
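The new ID_AA64PFR1_EL1 and HFG*_EL2 descriptions above are turned into C constants by gen-sysreg.awk. Below is a hedged sketch of how one of the new fields would typically be consumed; the ID_AA64PFR1_EL1_GCS_SHIFT and ID_AA64PFR1_EL1_GCS_IMP names follow the generator's usual <register>_<field>_<suffix> pattern and are assumed here rather than quoted from the generated header.

static bool __init cpu_has_gcs_sketch(void)
{
	u64 pfr1 = read_sysreg(id_aa64pfr1_el1);

	/* GCS occupies bits [47:44]; 0b0001 (IMP) or above means present. */
	return cpuid_feature_extract_unsigned_field(pfr1,
			ID_AA64PFR1_EL1_GCS_SHIFT) >= ID_AA64PFR1_EL1_GCS_IMP;
}
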
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 43ff91073d2a..6c10da43b538 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -32,6 +32,11 @@ ENTRY(ftrace_stub)
BR_EX %r14
ENDPROC(ftrace_stub)
+SYM_CODE_START(ftrace_stub_direct_tramp)
+ lgr %r1, %r0
+ BR_EX %r1
+SYM_CODE_END(ftrace_stub_direct_tramp)
+
.macro ftrace_regs_entry, allregs=0
stg %r14,(__SF_GPRS+8*8)(%r15) # save traced function caller
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index a0ed0e4a2c0c..0d9a14528176 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -163,6 +163,11 @@ SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
jmp .Lftrace_ret
SYM_CODE_END(ftrace_regs_caller)
+SYM_FUNC_START(ftrace_stub_direct_tramp)
+ CALL_DEPTH_ACCOUNT
+ RET
+SYM_FUNC_END(ftrace_stub_direct_tramp)
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_CODE_START(ftrace_graph_caller)
pushl %eax
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index fb4f1e01b64a..970d8445fdc4 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -309,6 +309,10 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
SYM_FUNC_END(ftrace_regs_caller)
STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller)
+SYM_FUNC_START(ftrace_stub_direct_tramp)
+ CALL_DEPTH_ACCOUNT
+ RET
+SYM_FUNC_END(ftrace_stub_direct_tramp)
#else /* ! CONFIG_DYNAMIC_FTRACE */