path: root/target-arm
author    Yonghee Han <onstudy@samsung.com>  2016-07-27 16:39:12 +0900
committer Yonghee Han <onstudy@samsung.com>  2016-07-27 16:47:03 +0900
commit    a3b133b0ea0696e42fd876b9a803e28bc6ef5299 (patch)
tree      68d7537fb9ede28b2e4d2b9f44eb70988279b8ba /target-arm
parent    0d6a2f7e595218b5632ba7005128470e65138951 (diff)
Imported Upstream version 2.3.1 (tag: upstream/2.3.1)
Change-Id: I2161522ea1d7ff10cd1d697609d473243c05e1df
Diffstat (limited to 'target-arm')
-rw-r--r--  target-arm/arm-semi.c         11
-rw-r--r--  target-arm/cpu-qom.h           2
-rw-r--r--  target-arm/cpu.c              80
-rw-r--r--  target-arm/cpu.h             511
-rw-r--r--  target-arm/cpu64.c            40
-rw-r--r--  target-arm/crypto_helper.c   114
-rw-r--r--  target-arm/helper-a64.c       25
-rw-r--r--  target-arm/helper.c         1833
-rw-r--r--  target-arm/internals.h        11
-rw-r--r--  target-arm/kvm.c             159
-rw-r--r--  target-arm/kvm32.c           104
-rw-r--r--  target-arm/kvm64.c           189
-rw-r--r--  target-arm/kvm_arm.h          39
-rw-r--r--  target-arm/machine.c          26
-rw-r--r--  target-arm/op_helper.c        10
-rw-r--r--  target-arm/translate-a64.c    84
-rw-r--r--  target-arm/translate.c       148
-rw-r--r--  target-arm/translate.h         8
18 files changed, 2586 insertions(+), 808 deletions(-)
diff --git a/target-arm/arm-semi.c b/target-arm/arm-semi.c
index ebb523552..a8b83e691 100644
--- a/target-arm/arm-semi.c
+++ b/target-arm/arm-semi.c
@@ -58,6 +58,10 @@
#define TARGET_SYS_HEAPINFO 0x16
#define TARGET_SYS_EXIT 0x18
+/* ADP_Stopped_ApplicationExit is used for exit(0),
+ * anything else is implemented as exit(1) */
+#define ADP_Stopped_ApplicationExit (0x20026)
+
#ifndef O_BINARY
#define O_BINARY 0
#endif
@@ -551,8 +555,11 @@ uint32_t do_arm_semihosting(CPUARMState *env)
return 0;
}
case TARGET_SYS_EXIT:
- gdb_exit(env, 0);
- exit(0);
+ /* ARM specifies only Stopped_ApplicationExit as normal
+ * exit; everything else is considered an error */
+ ret = (args == ADP_Stopped_ApplicationExit) ? 0 : 1;
+ gdb_exit(env, ret);
+ exit(ret);
default:
fprintf(stderr, "qemu: Unsupported SemiHosting SWI 0x%02x\n", nr);
cpu_dump_state(cs, stderr, fprintf, 0);
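A minimal standalone sketch of the new exit-status mapping (the helper name here is illustrative, not part of the patch):

#include <stdint.h>

#define ADP_Stopped_ApplicationExit (0x20026)

/* Only ADP_Stopped_ApplicationExit reports success; any other stop
 * reason becomes exit(1), exactly as the hunk above implements. */
static int semihosting_exit_status(uint32_t args)
{
    return (args == ADP_Stopped_ApplicationExit) ? 0 : 1;
}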
diff --git a/target-arm/cpu-qom.h b/target-arm/cpu-qom.h
index dcfda7dfc..ed5a6441b 100644
--- a/target-arm/cpu-qom.h
+++ b/target-arm/cpu-qom.h
@@ -100,6 +100,8 @@ typedef struct ARMCPU {
bool start_powered_off;
/* CPU currently in PSCI powered-off state */
bool powered_off;
+ /* CPU has security extension */
+ bool has_el3;
/* PSCI conduit used to invoke PSCI methods
* 0 - disabled, 1 - smc, 2 - hvc
diff --git a/target-arm/cpu.c b/target-arm/cpu.c
index 5ce7350ce..986f04cfd 100644
--- a/target-arm/cpu.c
+++ b/target-arm/cpu.c
@@ -109,11 +109,18 @@ static void arm_cpu_reset(CPUState *s)
#if defined(CONFIG_USER_ONLY)
env->pstate = PSTATE_MODE_EL0t;
/* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */
- env->cp15.c1_sys |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
+ env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
/* and to the FP/Neon instructions */
env->cp15.c1_coproc = deposit64(env->cp15.c1_coproc, 20, 2, 3);
#else
- env->pstate = PSTATE_MODE_EL1h;
+ /* Reset into the highest available EL */
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ env->pstate = PSTATE_MODE_EL3h;
+ } else if (arm_feature(env, ARM_FEATURE_EL2)) {
+ env->pstate = PSTATE_MODE_EL2h;
+ } else {
+ env->pstate = PSTATE_MODE_EL1h;
+ }
env->pc = cpu->rvbar;
#endif
} else {
@@ -167,7 +174,11 @@ static void arm_cpu_reset(CPUState *s)
env->thumb = initial_pc & 1;
}
- if (env->cp15.c1_sys & SCTLR_V) {
+ /* AArch32 has a hard highvec setting of 0xFFFF0000. If we are currently
+ * executing as AArch32 then check if highvecs are enabled and
+ * adjust the PC accordingly.
+ */
+ if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
env->regs[15] = 0xFFFF0000;
}
@@ -316,6 +327,29 @@ static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
kvm_set_irq(kvm_state, kvm_irq, level ? 1 : 0);
#endif
}
+
+static bool arm_cpu_is_big_endian(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ int cur_el;
+
+ cpu_synchronize_state(cs);
+
+ /* In a 32-bit guest, endianness is determined by the CPSR E bit */
+ if (!is_a64(env)) {
+ return (env->uncached_cpsr & CPSR_E) ? 1 : 0;
+ }
+
+ cur_el = arm_current_el(env);
+
+ if (cur_el == 0) {
+ return (env->cp15.sctlr_el[1] & SCTLR_E0E) != 0;
+ }
+
+ return (env->cp15.sctlr_el[cur_el] & SCTLR_EE) != 0;
+}
+
#endif
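For reference, the bit positions the new hook tests (these match the ARM ARM assignments QEMU uses elsewhere; reproduced here only as a reading aid, not as part of the patch):

#define CPSR_E    (1U << 9)   /* AArch32 CPSR.E: current data endianness */
#define SCTLR_E0E (1U << 24)  /* SCTLR.E0E: endianness of EL0 data accesses */
#define SCTLR_EE  (1U << 25)  /* SCTLR.EE: endianness of EL1+ data accesses */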
static inline void set_feature(CPUARMState *env, int feature)
@@ -323,6 +357,11 @@ static inline void set_feature(CPUARMState *env, int feature)
env->features |= 1ULL << feature;
}
+static inline void unset_feature(CPUARMState *env, int feature)
+{
+ env->features &= ~(1ULL << feature);
+}
+
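Since env->features is a 64-bit bitmap, the helper pair reduces to single-bit operations; a sketch of their effect:

uint64_t features = 0;
features |=  1ULL << ARM_FEATURE_EL3;   /* set_feature(env, ARM_FEATURE_EL3) */
features &= ~(1ULL << ARM_FEATURE_EL3); /* unset_feature(env, ARM_FEATURE_EL3) */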
static void arm_cpu_initfn(Object *obj)
{
CPUState *cs = CPU(obj);
@@ -379,6 +418,9 @@ static Property arm_cpu_reset_hivecs_property =
static Property arm_cpu_rvbar_property =
DEFINE_PROP_UINT64("rvbar", ARMCPU, rvbar, 0);
+static Property arm_cpu_has_el3_property =
+ DEFINE_PROP_BOOL("has_el3", ARMCPU, has_el3, true);
+
static void arm_cpu_post_init(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
@@ -398,6 +440,14 @@ static void arm_cpu_post_init(Object *obj)
qdev_property_add_static(DEVICE(obj), &arm_cpu_rvbar_property,
&error_abort);
}
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
+ /* Add the has_el3 state CPU property only if EL3 is allowed. This will
+ * prevent "has_el3" from existing on CPUs which cannot support EL3.
+ */
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el3_property,
+ &error_abort);
+ }
}
static void arm_cpu_finalizefn(Object *obj)
@@ -467,6 +517,18 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
cpu->reset_sctlr |= (1 << 13);
}
+ if (!cpu->has_el3) {
+ /* If the has_el3 CPU property is disabled then we need to disable the
+ * feature.
+ */
+ unset_feature(env, ARM_FEATURE_EL3);
+
+ /* Disable the security extension feature bits in the processor feature
+ * register as well. This is id_pfr1[7:4].
+ */
+ cpu->id_pfr1 &= ~0xf0;
+ }
+
register_cp_regs_for_features(cpu);
arm_cpu_register_gdb_regs_for_features(cpu);
@@ -482,13 +544,16 @@ static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
{
ObjectClass *oc;
char *typename;
+ char **cpuname;
if (!cpu_model) {
return NULL;
}
- typename = g_strdup_printf("%s-" TYPE_ARM_CPU, cpu_model);
+ cpuname = g_strsplit(cpu_model, ",", 1);
+ typename = g_strdup_printf("%s-" TYPE_ARM_CPU, cpuname[0]);
oc = object_class_by_name(typename);
+ g_strfreev(cpuname);
g_free(typename);
if (!oc || !object_class_dynamic_cast(oc, TYPE_ARM_CPU) ||
object_class_is_abstract(oc)) {
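One caveat with the glib call above: g_strsplit() with max_tokens == 1 does not split at all (glib appends the remainder of the string to the last token), so cpuname[0] still carries any ",feature" suffix. A sketch of the behaviour:

#include <glib.h>

gchar **v = g_strsplit("cortex-a15,has_el3=off", ",", 1);
/* v[0] == "cortex-a15,has_el3=off" -- the comma is NOT split off */
g_strfreev(v);

v = g_strsplit("cortex-a15,has_el3=off", ",", 2);
/* v[0] == "cortex-a15", v[1] == "has_el3=off" */
g_strfreev(v);

So this lookup only behaves as intended for bare model names; splitting with max_tokens == 2 is what would separate the model name from its feature string.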
@@ -548,7 +613,7 @@ static void arm1026_initfn(Object *obj)
ARMCPRegInfo ifar = {
.name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
.access = PL1_RW,
- .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[1]),
+ .fieldoffset = offsetof(CPUARMState, cp15.ifar_ns),
.resetvalue = 0
};
define_one_arm_cp_reg(cpu, &ifar);
@@ -636,6 +701,7 @@ static void arm1176_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
cpu->midr = 0x410fb767;
cpu->reset_fpsid = 0x410120b5;
cpu->mvfr0 = 0x11111111;
@@ -724,6 +790,7 @@ static void cortex_a8_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_NEON);
set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
cpu->midr = 0x410fc080;
cpu->reset_fpsid = 0x410330c0;
cpu->mvfr0 = 0x11110222;
@@ -791,6 +858,7 @@ static void cortex_a9_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_VFP_FP16);
set_feature(&cpu->env, ARM_FEATURE_NEON);
set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
/* Note that A9 supports the MP extensions even for
* A9UP and single-core A9MP (which are both different
* and valid configurations; we don't model A9UP).
@@ -858,6 +926,7 @@ static void cortex_a15_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
set_feature(&cpu->env, ARM_FEATURE_LPAE);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15;
cpu->midr = 0x412fc0f1;
cpu->reset_fpsid = 0x410430f0;
@@ -1153,6 +1222,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->do_interrupt = arm_cpu_do_interrupt;
cc->get_phys_page_debug = arm_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_arm_cpu;
+ cc->virtio_is_big_endian = arm_cpu_is_big_endian;
#endif
cc->gdb_num_core_regs = 26;
cc->gdb_core_xml_file = "arm-core.xml";
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 7f800908f..083211ce3 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -32,6 +32,8 @@
# define ELF_MACHINE EM_ARM
#endif
+#define TARGET_IS_BIENDIAN 1
+
#define CPUArchState struct CPUARMState
#include "qemu-common.h"
@@ -39,8 +41,6 @@
#include "fpu/softfloat.h"
-#define TARGET_HAS_ICE 1
-
#define EXCP_UDEF 1 /* undefined instruction */
#define EXCP_SWI 2 /* software interrupt */
#define EXCP_PREFETCH_ABORT 3
@@ -100,7 +100,7 @@ typedef uint32_t ARMReadCPFunc(void *opaque, int cp_info,
struct arm_boot_info;
-#define NB_MMU_MODES 4
+#define NB_MMU_MODES 7
/* We currently assume float and double are IEEE single and double
precision respectively.
@@ -120,6 +120,12 @@ typedef struct ARMGenericTimer {
#define GTIMER_VIRT 1
#define NUM_GTIMERS 2
+typedef struct {
+ uint64_t raw_tcr;
+ uint32_t mask;
+ uint32_t base_mask;
+} TCR;
+
typedef struct CPUARMState {
/* Regs for current mode. */
uint32_t regs[16];
@@ -177,28 +183,111 @@ typedef struct CPUARMState {
/* System control coprocessor (cp15) */
struct {
uint32_t c0_cpuid;
- uint64_t c0_cssel; /* Cache size selection. */
- uint64_t c1_sys; /* System control register. */
+ union { /* Cache size selection */
+ struct {
+ uint64_t _unused_csselr0;
+ uint64_t csselr_ns;
+ uint64_t _unused_csselr1;
+ uint64_t csselr_s;
+ };
+ uint64_t csselr_el[4];
+ };
+ union { /* System control register. */
+ struct {
+ uint64_t _unused_sctlr;
+ uint64_t sctlr_ns;
+ uint64_t hsctlr;
+ uint64_t sctlr_s;
+ };
+ uint64_t sctlr_el[4];
+ };
uint64_t c1_coproc; /* Coprocessor access register. */
uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */
- uint64_t ttbr0_el1; /* MMU translation table base 0. */
- uint64_t ttbr1_el1; /* MMU translation table base 1. */
- uint64_t c2_control; /* MMU translation table base control. */
- uint32_t c2_mask; /* MMU translation table base selection mask. */
- uint32_t c2_base_mask; /* MMU translation table base 0 mask. */
+ uint64_t sder; /* Secure debug enable register. */
+ uint32_t nsacr; /* Non-secure access control register. */
+ union { /* MMU translation table base 0. */
+ struct {
+ uint64_t _unused_ttbr0_0;
+ uint64_t ttbr0_ns;
+ uint64_t _unused_ttbr0_1;
+ uint64_t ttbr0_s;
+ };
+ uint64_t ttbr0_el[4];
+ };
+ union { /* MMU translation table base 1. */
+ struct {
+ uint64_t _unused_ttbr1_0;
+ uint64_t ttbr1_ns;
+ uint64_t _unused_ttbr1_1;
+ uint64_t ttbr1_s;
+ };
+ uint64_t ttbr1_el[4];
+ };
+ /* MMU translation table base control. */
+ TCR tcr_el[4];
uint32_t c2_data; /* MPU data cachable bits. */
uint32_t c2_insn; /* MPU instruction cachable bits. */
- uint32_t c3; /* MMU domain access control register
- MPU write buffer control. */
+ union { /* MMU domain access control register
+ * MPU write buffer control.
+ */
+ struct {
+ uint64_t dacr_ns;
+ uint64_t dacr_s;
+ };
+ struct {
+ uint64_t dacr32_el2;
+ };
+ };
uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */
uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */
uint64_t hcr_el2; /* Hypervisor configuration register */
uint64_t scr_el3; /* Secure configuration register. */
- uint32_t ifsr_el2; /* Fault status registers. */
- uint64_t esr_el[4];
+ union { /* Fault status registers. */
+ struct {
+ uint64_t ifsr_ns;
+ uint64_t ifsr_s;
+ };
+ struct {
+ uint64_t ifsr32_el2;
+ };
+ };
+ union {
+ struct {
+ uint64_t _unused_dfsr;
+ uint64_t dfsr_ns;
+ uint64_t hsr;
+ uint64_t dfsr_s;
+ };
+ uint64_t esr_el[4];
+ };
uint32_t c6_region[8]; /* MPU base/size registers. */
- uint64_t far_el[4]; /* Fault address registers. */
- uint64_t par_el1; /* Translation result. */
+ union { /* Fault address registers. */
+ struct {
+ uint64_t _unused_far0;
+#ifdef HOST_WORDS_BIGENDIAN
+ uint32_t ifar_ns;
+ uint32_t dfar_ns;
+ uint32_t ifar_s;
+ uint32_t dfar_s;
+#else
+ uint32_t dfar_ns;
+ uint32_t ifar_ns;
+ uint32_t dfar_s;
+ uint32_t ifar_s;
+#endif
+ uint64_t _unused_far3;
+ };
+ uint64_t far_el[4];
+ };
+ union { /* Translation result. */
+ struct {
+ uint64_t _unused_par_0;
+ uint64_t par_ns;
+ uint64_t _unused_par_1;
+ uint64_t par_s;
+ };
+ uint64_t par_el[4];
+ };
uint32_t c9_insn; /* Cache lockdown registers. */
uint32_t c9_data;
uint64_t c9_pmcr; /* performance monitor control register */
@@ -207,13 +296,67 @@ typedef struct CPUARMState {
uint32_t c9_pmxevtyper; /* perf monitor event type */
uint32_t c9_pmuserenr; /* perf monitor user enable */
uint32_t c9_pminten; /* perf monitor interrupt enables */
- uint64_t mair_el1;
- uint64_t vbar_el[4]; /* vector base address register */
- uint32_t c13_fcse; /* FCSE PID. */
- uint64_t contextidr_el1; /* Context ID. */
- uint64_t tpidr_el0; /* User RW Thread register. */
- uint64_t tpidrro_el0; /* User RO Thread register. */
- uint64_t tpidr_el1; /* Privileged Thread register. */
+ union { /* Memory attribute redirection */
+ struct {
+#ifdef HOST_WORDS_BIGENDIAN
+ uint64_t _unused_mair_0;
+ uint32_t mair1_ns;
+ uint32_t mair0_ns;
+ uint64_t _unused_mair_1;
+ uint32_t mair1_s;
+ uint32_t mair0_s;
+#else
+ uint64_t _unused_mair_0;
+ uint32_t mair0_ns;
+ uint32_t mair1_ns;
+ uint64_t _unused_mair_1;
+ uint32_t mair0_s;
+ uint32_t mair1_s;
+#endif
+ };
+ uint64_t mair_el[4];
+ };
+ union { /* vector base address register */
+ struct {
+ uint64_t _unused_vbar;
+ uint64_t vbar_ns;
+ uint64_t hvbar;
+ uint64_t vbar_s;
+ };
+ uint64_t vbar_el[4];
+ };
+ uint32_t mvbar; /* (monitor) vector base address register */
+ struct { /* FCSE PID. */
+ uint32_t fcseidr_ns;
+ uint32_t fcseidr_s;
+ };
+ union { /* Context ID. */
+ struct {
+ uint64_t _unused_contextidr_0;
+ uint64_t contextidr_ns;
+ uint64_t _unused_contextidr_1;
+ uint64_t contextidr_s;
+ };
+ uint64_t contextidr_el[4];
+ };
+ union { /* User RW Thread register. */
+ struct {
+ uint64_t tpidrurw_ns;
+ uint64_t tpidrprw_ns;
+ uint64_t htpidr;
+ uint64_t _tpidr_el3;
+ };
+ uint64_t tpidr_el[4];
+ };
+ /* The secure banks of these registers don't map anywhere */
+ uint64_t tpidrurw_s;
+ uint64_t tpidrprw_s;
+ uint64_t tpidruro_s;
+
+ union { /* User RO Thread register. */
+ uint64_t tpidruro_ns;
+ uint64_t tpidrro_el[1];
+ };
uint64_t c14_cntfrq; /* Counter Frequency register */
uint64_t c14_cntkctl; /* Timer Control register */
ARMGenericTimer c14_timer[NUM_GTIMERS];
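The unions above overlay two views of the same storage. A reduced standalone illustration of the SCTLR case (C11 anonymous struct; not QEMU code):

#include <stdint.h>

union sctlr_bank {
    struct {
        uint64_t _unused_sctlr; /* sctlr_el[0]: no AArch32 counterpart */
        uint64_t sctlr_ns;      /* sctlr_el[1]: non-secure bank */
        uint64_t hsctlr;        /* sctlr_el[2]: Hyp view */
        uint64_t sctlr_s;       /* sctlr_el[3]: secure bank */
    };
    uint64_t sctlr_el[4];       /* AArch64 per-EL view of the same storage */
};

Writing sctlr_ns and reading sctlr_el[1] (or vice versa) touches the same eight bytes, which is what lets AArch32 banked accesses and AArch64 per-EL accesses share state.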
@@ -352,6 +495,8 @@ typedef struct CPUARMState {
ARMCPU *cpu_arm_init(const char *cpu_model);
int cpu_arm_exec(CPUARMState *s);
uint32_t do_arm_semihosting(CPUARMState *env);
+void aarch64_sync_32_to_64(CPUARMState *env);
+void aarch64_sync_64_to_32(CPUARMState *env);
static inline bool is_a64(CPUARMState *env)
{
@@ -817,6 +962,49 @@ static inline bool arm_el_is_aa64(CPUARMState *env, int el)
return arm_feature(env, ARM_FEATURE_AARCH64);
}
+/* Function for determining whether guest cp register reads and writes should
+ * access the secure or non-secure bank of a cp register. When EL3 is
+ * operating in AArch32 state, the NS-bit determines whether the secure
+ * instance of a cp register should be used. When EL3 is AArch64 (or if
+ * it doesn't exist at all) then there is no register banking, and all
+ * accesses are to the non-secure version.
+ */
+static inline bool access_secure_reg(CPUARMState *env)
+{
+ bool ret = (arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_el_is_aa64(env, 3) &&
+ !(env->cp15.scr_el3 & SCR_NS));
+
+ return ret;
+}
+
+/* Macros for accessing a specified CP register bank */
+#define A32_BANKED_REG_GET(_env, _regname, _secure) \
+ ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns)
+
+#define A32_BANKED_REG_SET(_env, _regname, _secure, _val) \
+ do { \
+ if (_secure) { \
+ (_env)->cp15._regname##_s = (_val); \
+ } else { \
+ (_env)->cp15._regname##_ns = (_val); \
+ } \
+ } while (0)
+
+/* Macros for automatically accessing a specific CP register bank depending on
+ * the current secure state of the system. These macros are not intended for
+ * supporting instruction translation reads/writes as these are dependent
+ * solely on the SCR.NS bit and not the mode.
+ */
+#define A32_BANKED_CURRENT_REG_GET(_env, _regname) \
+ A32_BANKED_REG_GET((_env), _regname, \
+ ((!arm_el_is_aa64((_env), 3) && arm_is_secure(_env))))
+
+#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val) \
+ A32_BANKED_REG_SET((_env), _regname, \
+ ((!arm_el_is_aa64((_env), 3) && arm_is_secure(_env))), \
+ (_val))
+
void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf);
unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx);
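Usage sketch for the banking macros (a hypothetical fragment; dacr_s/dacr_ns and sctlr_s/sctlr_ns are the banked fields declared earlier in this patch):

/* explicit bank selection */
uint64_t dacr = A32_BANKED_REG_GET(env, dacr, secure);
A32_BANKED_REG_SET(env, dacr, secure, dacr | 0x1);

/* let the current secure state pick the bank, as the reset path in
 * cpu.c above now does for the SCTLR.V (high vectors) check */
if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
    /* high vectors enabled */
}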
@@ -836,6 +1024,7 @@ void armv7m_nvic_complete_irq(void *opaque, int irq);
* Crn, Crm, opc1, opc2 fields
* 32 or 64 bit register (ie is it accessed via MRC/MCR
* or via MRRC/MCRR?)
+ * non-secure/secure bank (AArch32 only)
* We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field.
* (In this case crn and opc2 should be zero.)
* For AArch64, there is no 32/64 bit size distinction;
@@ -853,9 +1042,16 @@ void armv7m_nvic_complete_irq(void *opaque, int irq);
#define CP_REG_AA64_SHIFT 28
#define CP_REG_AA64_MASK (1 << CP_REG_AA64_SHIFT)
-#define ENCODE_CP_REG(cp, is64, crn, crm, opc1, opc2) \
- (((cp) << 16) | ((is64) << 15) | ((crn) << 11) | \
- ((crm) << 7) | ((opc1) << 3) | (opc2))
+/* To enable banking of coprocessor registers depending on ns-bit we
+ * add a bit to distinguish between secure and non-secure cpregs in the
+ * hashtable.
+ */
+#define CP_REG_NS_SHIFT 29
+#define CP_REG_NS_MASK (1 << CP_REG_NS_SHIFT)
+
+#define ENCODE_CP_REG(cp, is64, ns, crn, crm, opc1, opc2) \
+ ((ns) << CP_REG_NS_SHIFT | ((cp) << 16) | ((is64) << 15) | \
+ ((crn) << 11) | ((crm) << 7) | ((opc1) << 3) | (opc2))
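A worked value for the widened encoding, taking the 32-bit DACR (cp = 15, crn = 3, crm = 0, opc1 = 0, opc2 = 0) in its non-secure bank:

/* ENCODE_CP_REG(15, 0, 1, 3, 0, 0, 0)
 *   = (1 << 29) | (15 << 16) | (3 << 11)
 *   = 0x20000000 | 0x000F0000 | 0x00001800
 *   = 0x200F1800
 * The secure-bank entry for the same register (ns = 0) hashes to
 * 0x000F1800, so the two banks occupy distinct hashtable slots. */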
#define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \
(CP_REG_AA64_MASK | \
@@ -874,8 +1070,15 @@ static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid)
uint32_t cpregid = kvmid;
if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) {
cpregid |= CP_REG_AA64_MASK;
- } else if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
- cpregid |= (1 << 15);
+ } else {
+ if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
+ cpregid |= (1 << 15);
+ }
+
+ /* KVM is always non-secure so add the NS flag on AArch32 register
+ * entries.
+ */
+ cpregid |= 1 << CP_REG_NS_SHIFT;
}
return cpregid;
}
@@ -911,8 +1114,14 @@ static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
* a register definition to override a previous definition for the
* same (cp, is64, crn, crm, opc1, opc2) tuple: either the new or the
* old must have the OVERRIDE bit set.
- * NO_MIGRATE indicates that this register should be ignored for migration;
- * (eg because any state is accessed via some other coprocessor register).
+ * ALIAS indicates that this register is an alias view of some underlying
+ * state which is also visible via another register, and that the other
+ * register is handling migration; registers marked ALIAS will not be migrated
+ * but may have their state set by syncing of register state from KVM.
+ * NO_RAW indicates that this register has no underlying state and does not
+ * support raw access for state saving/loading; it will not be used for either
+ * migration or KVM state synchronization. (Typically this is for "registers"
+ * which are actually used as instructions for cache maintenance and so on.)
* IO indicates that this register does I/O and therefore its accesses
* need to be surrounded by gen_io_start()/gen_io_end(). In particular,
* registers which implement clocks or timers require this.
@@ -922,8 +1131,9 @@ static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
#define ARM_CP_64BIT 4
#define ARM_CP_SUPPRESS_TB_END 8
#define ARM_CP_OVERRIDE 16
-#define ARM_CP_NO_MIGRATE 32
+#define ARM_CP_ALIAS 32
#define ARM_CP_IO 64
+#define ARM_CP_NO_RAW 128
#define ARM_CP_NOP (ARM_CP_SPECIAL | (1 << 8))
#define ARM_CP_WFI (ARM_CP_SPECIAL | (2 << 8))
#define ARM_CP_NZCV (ARM_CP_SPECIAL | (3 << 8))
@@ -933,7 +1143,7 @@ static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
/* Used only as a terminator for ARMCPRegInfo lists */
#define ARM_CP_SENTINEL 0xffff
/* Mask of only the flag bits in a type field */
-#define ARM_CP_FLAG_MASK 0x7f
+#define ARM_CP_FLAG_MASK 0xff
/* Valid values for ARMCPRegInfo state field, indicating which of
* the AArch32 and AArch64 execution states this register is visible in.
@@ -950,6 +1160,21 @@ enum {
ARM_CP_STATE_BOTH = 2,
};
+/* ARM CP register secure state flags. These flags identify security state
+ * attributes for a given CP register entry.
+ * The existence of both or neither secure and non-secure flags indicates that
+ * the register has both a secure and non-secure hash entry. A single one of
+ * these flags causes the register to only be hashed for the specified
+ * security state.
+ * Although definitions may have any combination of the S/NS bits, each
+ * registered entry will only have one to identify whether the entry is secure
+ * or non-secure.
+ */
+enum {
+ ARM_CP_SECSTATE_S = (1 << 0), /* bit[0]: Secure state register */
+ ARM_CP_SECSTATE_NS = (1 << 1), /* bit[1]: Non-secure state register */
+};
+
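Later hunks in this patch use these flags as follows (repeated here only as a cross-reference): a definition carrying ARM_CP_SECSTATE_S alone is hashed for the secure bank only, e.g.

/* hashed only into the secure bank (pattern from helper.c below): */
{ .name = "FCSEIDR(S)", .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
  .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
  .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s), .resetvalue = 0 },

while a definition that leaves .secure unset (or sets both bits) is registered once per bank.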
/* Return true if cptype is a valid type field. This is used to try to
* catch errors where the sentinel has been accidentally left off the end
* of a list of registers.
@@ -997,6 +1222,10 @@ static inline bool cptype_valid(int cptype)
*/
static inline int arm_current_el(CPUARMState *env)
{
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return !((env->v7m.exception == 0) && (env->v7m.control & 1));
+ }
+
if (is_a64(env)) {
return extract32(env->pstate, 2, 2);
}
@@ -1084,6 +1313,8 @@ struct ARMCPRegInfo {
int type;
/* Access rights: PL*_[RW] */
int access;
+ /* Security state: ARM_CP_SECSTATE_* bits/values */
+ int secure;
/* The opaque pointer passed to define_arm_cp_regs_with_opaque() when
* this register was defined: can be used to hand data through to the
* register read/write functions, since they are passed the ARMCPRegInfo*.
@@ -1093,12 +1324,27 @@ struct ARMCPRegInfo {
* fieldoffset is non-zero, the reset value of the register.
*/
uint64_t resetvalue;
- /* Offset of the field in CPUARMState for this register. This is not
- * needed if either:
+ /* Offset of the field in CPUARMState for this register.
+ *
+ * This is not needed if either:
* 1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs
* 2. both readfn and writefn are specified
*/
ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */
+
+ /* Offsets of the secure and non-secure fields in CPUARMState for the
+ * register if it is banked. These fields are only used during the static
+ * registration of a register. During hashing the bank associated
+ * with a given security state is copied to fieldoffset which is used from
+ * there on out.
+ *
+ * It is expected that register definitions use either fieldoffset or
+ * bank_fieldoffsets in the definition but not both. It is also expected
+ * that both bank offsets are set when defining a banked register. This
+ * use indicates that a register is banked.
+ */
+ ptrdiff_t bank_fieldoffsets[2];
+
/* Function for making any access checks for this register in addition to
* those specified by the 'access' permissions bits. If NULL, no extra
* checks required. The access check is performed at runtime, not at
@@ -1247,27 +1493,50 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx)
CPUARMState *env = cs->env_ptr;
unsigned int cur_el = arm_current_el(env);
unsigned int target_el = arm_excp_target_el(cs, excp_idx);
- /* FIXME: Use actual secure state. */
- bool secure = false;
- /* If in EL1/0, Physical IRQ routing to EL2 only happens from NS state. */
- bool irq_can_hyp = !secure && cur_el < 2 && target_el == 2;
-
- /* Don't take exceptions if they target a lower EL. */
+ bool secure = arm_is_secure(env);
+ uint32_t scr;
+ uint32_t hcr;
+ bool pstate_unmasked;
+ int8_t unmasked = 0;
+
+ /* Don't take exceptions if they target a lower EL.
+ * This check should catch any exceptions that would not be taken but left
+ * pending.
+ */
if (cur_el > target_el) {
return false;
}
switch (excp_idx) {
case EXCP_FIQ:
- if (irq_can_hyp && (env->cp15.hcr_el2 & HCR_FMO)) {
- return true;
- }
- return !(env->daif & PSTATE_F);
+ /* If FIQs are routed to EL3 or EL2 then there are cases where we
+ * override the CPSR.F in determining if the exception is masked or
+ * not. If neither of these are set then we fall back to the CPSR.F
+ * setting otherwise we further assess the state below.
+ */
+ hcr = (env->cp15.hcr_el2 & HCR_FMO);
+ scr = (env->cp15.scr_el3 & SCR_FIQ);
+
+ /* When EL3 is 32-bit, the SCR.FW bit controls whether the CPSR.F bit
+ * masks FIQ interrupts when taken in non-secure state. If SCR.FW is
+ * set then FIQs can be masked by CPSR.F in non-secure state, but only
+ * when FIQs are routed solely to EL3.
+ */
+ scr &= !((env->cp15.scr_el3 & SCR_FW) && !hcr);
+ pstate_unmasked = !(env->daif & PSTATE_F);
+ break;
+
case EXCP_IRQ:
- if (irq_can_hyp && (env->cp15.hcr_el2 & HCR_IMO)) {
- return true;
- }
- return !(env->daif & PSTATE_I);
+ /* When EL3 execution state is 32-bit, if HCR.IMO is set then we may
+ * override the CPSR.I masking when in non-secure state. The SCR.IRQ
+ * setting has already been taken into consideration when setting the
+ * target EL, so it does not have a further effect here.
+ */
+ hcr = (env->cp15.hcr_el2 & HCR_IMO);
+ scr = false;
+ pstate_unmasked = !(env->daif & PSTATE_I);
+ break;
+
case EXCP_VFIQ:
if (secure || !(env->cp15.hcr_el2 & HCR_FMO)) {
/* VFIQs are only taken when hypervized and non-secure. */
@@ -1283,29 +1552,114 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx)
default:
g_assert_not_reached();
}
-}
-static inline CPUARMState *cpu_init(const char *cpu_model)
-{
- ARMCPU *cpu = cpu_arm_init(cpu_model);
- if (cpu) {
- return &cpu->env;
+ /* Use the target EL, current execution state and SCR/HCR settings to
+ * determine whether the corresponding CPSR bit is used to mask the
+ * interrupt.
+ */
+ if ((target_el > cur_el) && (target_el != 1)) {
+ if (arm_el_is_aa64(env, 3) || ((scr || hcr) && (!secure))) {
+ unmasked = 1;
+ }
}
- return NULL;
+
+ /* The PSTATE bits only mask the interrupt if we have not overridden the
+ * ability above.
+ */
+ return unmasked || pstate_unmasked;
}
+#define cpu_init(cpu_model) CPU(cpu_arm_init(cpu_model))
+
#define cpu_exec cpu_arm_exec
#define cpu_gen_code cpu_arm_gen_code
#define cpu_signal_handler cpu_arm_signal_handler
#define cpu_list arm_cpu_list
-/* MMU modes definitions */
-#define MMU_MODE0_SUFFIX _user
-#define MMU_MODE1_SUFFIX _kernel
+/* ARM has the following "translation regimes" (as the ARM ARM calls them):
+ *
+ * If EL3 is 64-bit:
+ * + NonSecure EL1 & 0 stage 1
+ * + NonSecure EL1 & 0 stage 2
+ * + NonSecure EL2
+ * + Secure EL1 & EL0
+ * + Secure EL3
+ * If EL3 is 32-bit:
+ * + NonSecure PL1 & 0 stage 1
+ * + NonSecure PL1 & 0 stage 2
+ * + NonSecure PL2
+ * + Secure PL0 & PL1
+ * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
+ *
+ * For QEMU, an mmu_idx is not quite the same as a translation regime because:
+ * 1. we need to split the "EL1 & 0" regimes into two mmu_idxes, because they
+ * may differ in access permissions even if the VA->PA map is the same
+ * 2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
+ * translation, which means that we have one mmu_idx that deals with two
+ * concatenated translation regimes [this sort of combined s1+2 TLB is
+ * architecturally permitted]
+ * 3. we don't need to allocate an mmu_idx to translations that we won't be
+ * handling via the TLB. The only way to do a stage 1 translation without
+ * the immediate stage 2 translation is via the ATS or AT system insns,
+ * which can be slow-pathed and always do a page table walk.
+ * 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
+ * translation regimes, because they map reasonably well to each other
+ * and they can't both be active at the same time.
+ * This gives us the following list of mmu_idx values:
+ *
+ * NS EL0 (aka NS PL0) stage 1+2
+ * NS EL1 (aka NS PL1) stage 1+2
+ * NS EL2 (aka NS PL2)
+ * S EL3 (aka S PL1)
+ * S EL0 (aka S PL0)
+ * S EL1 (not used if EL3 is 32 bit)
+ * NS EL0+1 stage 2
+ *
+ * (The last of these is an mmu_idx because we want to be able to use the TLB
+ * for the accesses done as part of a stage 1 page table walk, rather than
+ * having to walk the stage 2 page table over and over.)
+ *
+ * Our enumeration includes at the end some entries which are not "true"
+ * mmu_idx values in that they don't have corresponding TLBs and are only
+ * valid for doing slow path page table walks.
+ *
+ * The constant names here are patterned after the general style of the names
+ * of the AT/ATS operations.
+ * The values used are carefully arranged to make mmu_idx => EL lookup easy.
+ */
+typedef enum ARMMMUIdx {
+ ARMMMUIdx_S12NSE0 = 0,
+ ARMMMUIdx_S12NSE1 = 1,
+ ARMMMUIdx_S1E2 = 2,
+ ARMMMUIdx_S1E3 = 3,
+ ARMMMUIdx_S1SE0 = 4,
+ ARMMMUIdx_S1SE1 = 5,
+ ARMMMUIdx_S2NS = 6,
+ /* Indexes below here don't have TLBs and are used only for AT system
+ * instructions or for the first stage of an S12 page table walk.
+ */
+ ARMMMUIdx_S1NSE0 = 7,
+ ARMMMUIdx_S1NSE1 = 8,
+} ARMMMUIdx;
+
#define MMU_USER_IDX 0
-static inline int cpu_mmu_index (CPUARMState *env)
+
+/* Return the exception level we're running at if this is our mmu_idx */
+static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
- return arm_current_el(env);
+ assert(mmu_idx < ARMMMUIdx_S2NS);
+ return mmu_idx & 3;
+}
+
+/* Determine the current mmu_idx to use for normal loads/stores */
+static inline int cpu_mmu_index(CPUARMState *env)
+{
+ int el = arm_current_el(env);
+
+ if (el < 2 && arm_is_secure_below_el3(env)) {
+ return ARMMMUIdx_S1SE0 + el;
+ }
+ return el;
}
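Worked values, following directly from the enum ordering above:

/* arm_mmu_idx_to_el() == mmu_idx & 3:
 *   ARMMMUIdx_S12NSE0 (0) -> EL0     ARMMMUIdx_S1E3  (3) -> EL3
 *   ARMMMUIdx_S12NSE1 (1) -> EL1     ARMMMUIdx_S1SE0 (4) -> EL0  (4 & 3)
 *   ARMMMUIdx_S1E2    (2) -> EL2     ARMMMUIdx_S1SE1 (5) -> EL1  (5 & 3)
 *
 * cpu_mmu_index(): non-secure EL1 yields 1 (ARMMMUIdx_S12NSE1);
 * secure EL1 yields ARMMMUIdx_S1SE0 + 1 == 5 (ARMMMUIdx_S1SE1). */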
/* Return the Exception Level targeted by debug exceptions;
@@ -1372,9 +1726,13 @@ static inline bool arm_singlestep_active(CPUARMState *env)
/* Bit usage in the TB flags field: bit 31 indicates whether we are
* in 32 or 64 bit mode. The meaning of the other bits depends on that.
+ * We put flags which are shared between 32 and 64 bit mode at the top
+ * of the word, and flags which apply to only one mode at the bottom.
*/
#define ARM_TBFLAG_AARCH64_STATE_SHIFT 31
#define ARM_TBFLAG_AARCH64_STATE_MASK (1U << ARM_TBFLAG_AARCH64_STATE_SHIFT)
+#define ARM_TBFLAG_MMUIDX_SHIFT 28
+#define ARM_TBFLAG_MMUIDX_MASK (0x7 << ARM_TBFLAG_MMUIDX_SHIFT)
/* Bit usage when in AArch32 state: */
#define ARM_TBFLAG_THUMB_SHIFT 0
@@ -1383,8 +1741,6 @@ static inline bool arm_singlestep_active(CPUARMState *env)
#define ARM_TBFLAG_VECLEN_MASK (0x7 << ARM_TBFLAG_VECLEN_SHIFT)
#define ARM_TBFLAG_VECSTRIDE_SHIFT 4
#define ARM_TBFLAG_VECSTRIDE_MASK (0x3 << ARM_TBFLAG_VECSTRIDE_SHIFT)
-#define ARM_TBFLAG_PRIV_SHIFT 6
-#define ARM_TBFLAG_PRIV_MASK (1 << ARM_TBFLAG_PRIV_SHIFT)
#define ARM_TBFLAG_VFPEN_SHIFT 7
#define ARM_TBFLAG_VFPEN_MASK (1 << ARM_TBFLAG_VFPEN_SHIFT)
#define ARM_TBFLAG_CONDEXEC_SHIFT 8
@@ -1402,10 +1758,14 @@ static inline bool arm_singlestep_active(CPUARMState *env)
*/
#define ARM_TBFLAG_XSCALE_CPAR_SHIFT 20
#define ARM_TBFLAG_XSCALE_CPAR_MASK (3 << ARM_TBFLAG_XSCALE_CPAR_SHIFT)
+/* Indicates whether cp register reads and writes by guest code should access
+ * the secure or nonsecure bank of banked registers; note that this is not
+ * the same thing as the current security state of the processor!
+ */
+#define ARM_TBFLAG_NS_SHIFT 22
+#define ARM_TBFLAG_NS_MASK (1 << ARM_TBFLAG_NS_SHIFT)
/* Bit usage when in AArch64 state */
-#define ARM_TBFLAG_AA64_EL_SHIFT 0
-#define ARM_TBFLAG_AA64_EL_MASK (0x3 << ARM_TBFLAG_AA64_EL_SHIFT)
#define ARM_TBFLAG_AA64_FPEN_SHIFT 2
#define ARM_TBFLAG_AA64_FPEN_MASK (1 << ARM_TBFLAG_AA64_FPEN_SHIFT)
#define ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT 3
@@ -1416,14 +1776,14 @@ static inline bool arm_singlestep_active(CPUARMState *env)
/* some convenience accessor macros */
#define ARM_TBFLAG_AARCH64_STATE(F) \
(((F) & ARM_TBFLAG_AARCH64_STATE_MASK) >> ARM_TBFLAG_AARCH64_STATE_SHIFT)
+#define ARM_TBFLAG_MMUIDX(F) \
+ (((F) & ARM_TBFLAG_MMUIDX_MASK) >> ARM_TBFLAG_MMUIDX_SHIFT)
#define ARM_TBFLAG_THUMB(F) \
(((F) & ARM_TBFLAG_THUMB_MASK) >> ARM_TBFLAG_THUMB_SHIFT)
#define ARM_TBFLAG_VECLEN(F) \
(((F) & ARM_TBFLAG_VECLEN_MASK) >> ARM_TBFLAG_VECLEN_SHIFT)
#define ARM_TBFLAG_VECSTRIDE(F) \
(((F) & ARM_TBFLAG_VECSTRIDE_MASK) >> ARM_TBFLAG_VECSTRIDE_SHIFT)
-#define ARM_TBFLAG_PRIV(F) \
- (((F) & ARM_TBFLAG_PRIV_MASK) >> ARM_TBFLAG_PRIV_SHIFT)
#define ARM_TBFLAG_VFPEN(F) \
(((F) & ARM_TBFLAG_VFPEN_MASK) >> ARM_TBFLAG_VFPEN_SHIFT)
#define ARM_TBFLAG_CONDEXEC(F) \
@@ -1438,14 +1798,14 @@ static inline bool arm_singlestep_active(CPUARMState *env)
(((F) & ARM_TBFLAG_PSTATE_SS_MASK) >> ARM_TBFLAG_PSTATE_SS_SHIFT)
#define ARM_TBFLAG_XSCALE_CPAR(F) \
(((F) & ARM_TBFLAG_XSCALE_CPAR_MASK) >> ARM_TBFLAG_XSCALE_CPAR_SHIFT)
-#define ARM_TBFLAG_AA64_EL(F) \
- (((F) & ARM_TBFLAG_AA64_EL_MASK) >> ARM_TBFLAG_AA64_EL_SHIFT)
#define ARM_TBFLAG_AA64_FPEN(F) \
(((F) & ARM_TBFLAG_AA64_FPEN_MASK) >> ARM_TBFLAG_AA64_FPEN_SHIFT)
#define ARM_TBFLAG_AA64_SS_ACTIVE(F) \
(((F) & ARM_TBFLAG_AA64_SS_ACTIVE_MASK) >> ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT)
#define ARM_TBFLAG_AA64_PSTATE_SS(F) \
(((F) & ARM_TBFLAG_AA64_PSTATE_SS_MASK) >> ARM_TBFLAG_AA64_PSTATE_SS_SHIFT)
+#define ARM_TBFLAG_NS(F) \
+ (((F) & ARM_TBFLAG_NS_MASK) >> ARM_TBFLAG_NS_SHIFT)
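A pack/unpack sketch for the new shared MMUIDX field (hypothetical values):

int flags = 0;
flags |= 5 << ARM_TBFLAG_MMUIDX_SHIFT;   /* e.g. ARMMMUIdx_S1SE1 */
int mmu_idx = ARM_TBFLAG_MMUIDX(flags);  /* ((flags & (0x7 << 28)) >> 28) == 5 */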
static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
@@ -1461,8 +1821,7 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
if (is_a64(env)) {
*pc = env->pc;
- *flags = ARM_TBFLAG_AARCH64_STATE_MASK
- | (arm_current_el(env) << ARM_TBFLAG_AA64_EL_SHIFT);
+ *flags = ARM_TBFLAG_AARCH64_STATE_MASK;
if (fpen == 3 || (fpen == 1 && arm_current_el(env) != 0)) {
*flags |= ARM_TBFLAG_AA64_FPEN_MASK;
}
@@ -1480,20 +1839,14 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
}
}
} else {
- int privmode;
*pc = env->regs[15];
*flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
| (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
| (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
| (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
| (env->bswap_code << ARM_TBFLAG_BSWAP_CODE_SHIFT);
- if (arm_feature(env, ARM_FEATURE_M)) {
- privmode = !((env->v7m.exception == 0) && (env->v7m.control & 1));
- } else {
- privmode = (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR;
- }
- if (privmode) {
- *flags |= ARM_TBFLAG_PRIV_MASK;
+ if (!(access_secure_reg(env))) {
+ *flags |= ARM_TBFLAG_NS_MASK;
}
if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
|| arm_el_is_aa64(env, 1)) {
@@ -1519,6 +1872,8 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
<< ARM_TBFLAG_XSCALE_CPAR_SHIFT);
}
+ *flags |= (cpu_mmu_index(env) << ARM_TBFLAG_MMUIDX_SHIFT);
+
*cs_base = 0;
}
diff --git a/target-arm/cpu64.c b/target-arm/cpu64.c
index bb778b3d9..270bc2fec 100644
--- a/target-arm/cpu64.c
+++ b/target-arm/cpu64.c
@@ -32,6 +32,11 @@ static inline void set_feature(CPUARMState *env, int feature)
env->features |= 1ULL << feature;
}
+static inline void unset_feature(CPUARMState *env, int feature)
+{
+ env->features &= ~(1ULL << feature);
+}
+
#ifndef CONFIG_USER_ONLY
static uint64_t a57_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
@@ -91,6 +96,7 @@ static void aarch64_a57_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ cpu->dtb_compatible = "arm,cortex-a57";
set_feature(&cpu->env, ARM_FEATURE_V8);
set_feature(&cpu->env, ARM_FEATURE_VFP4);
set_feature(&cpu->env, ARM_FEATURE_NEON);
@@ -170,8 +176,42 @@ static const ARMCPUInfo aarch64_cpus[] = {
{ .name = NULL }
};
+static bool aarch64_cpu_get_aarch64(Object *obj, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
+}
+
+static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ /* At this time, this property is only allowed if KVM is enabled. This
+ * restriction allows us to avoid fixing up functionality that assumes a
+ * uniform execution state like do_interrupt.
+ */
+ if (!kvm_enabled()) {
+ error_setg(errp, "'aarch64' feature cannot be disabled "
+ "unless KVM is enabled");
+ return;
+ }
+
+ if (value == false) {
+ unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ } else {
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ }
+}
+
static void aarch64_cpu_initfn(Object *obj)
{
+ object_property_add_bool(obj, "aarch64", aarch64_cpu_get_aarch64,
+ aarch64_cpu_set_aarch64, NULL);
+ object_property_set_description(obj, "aarch64",
+ "Set on/off to enable/disable aarch64 "
+ "execution state ",
+ NULL);
}
static void aarch64_cpu_finalizefn(Object *obj)
diff --git a/target-arm/crypto_helper.c b/target-arm/crypto_helper.c
index dd60d0b81..1fe975d0f 100644
--- a/target-arm/crypto_helper.c
+++ b/target-arm/crypto_helper.c
@@ -22,6 +22,14 @@ union CRYPTO_STATE {
uint64_t l[2];
};
+#ifdef HOST_WORDS_BIGENDIAN
+#define CR_ST_BYTE(state, i) (state.bytes[(15 - (i)) ^ 8])
+#define CR_ST_WORD(state, i) (state.words[(3 - (i)) ^ 2])
+#else
+#define CR_ST_BYTE(state, i) (state.bytes[i])
+#define CR_ST_WORD(state, i) (state.words[i])
+#endif
+
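Worked indices for the big-endian mapping (the CRYPTO_STATE union is held as two host-endian 64-bit lanes, so a BE host sees each lane's bytes and words reversed; these macros recover little-endian element order):

/* big-endian host:
 *   CR_ST_BYTE(st, 0) -> st.bytes[(15 - 0) ^ 8] == st.bytes[7]
 *   CR_ST_BYTE(st, 8) -> st.bytes[(15 - 8) ^ 8] == st.bytes[15]
 *   CR_ST_WORD(st, 0) -> st.words[(3 - 0) ^ 2]  == st.words[1]
 *   CR_ST_WORD(st, 2) -> st.words[(3 - 2) ^ 2]  == st.words[3]
 * little-endian host: both macros are the identity mapping. */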
void HELPER(crypto_aese)(CPUARMState *env, uint32_t rd, uint32_t rm,
uint32_t decrypt)
{
@@ -46,7 +54,7 @@ void HELPER(crypto_aese)(CPUARMState *env, uint32_t rd, uint32_t rm,
/* combine ShiftRows operation and sbox substitution */
for (i = 0; i < 16; i++) {
- st.bytes[i] = sbox[decrypt][rk.bytes[shift[decrypt][i]]];
+ CR_ST_BYTE(st, i) = sbox[decrypt][CR_ST_BYTE(rk, shift[decrypt][i])];
}
env->vfp.regs[rd] = make_float64(st.l[0]);
@@ -198,11 +206,11 @@ void HELPER(crypto_aesmc)(CPUARMState *env, uint32_t rd, uint32_t rm,
assert(decrypt < 2);
for (i = 0; i < 16; i += 4) {
- st.words[i >> 2] = cpu_to_le32(
- mc[decrypt][st.bytes[i]] ^
- rol32(mc[decrypt][st.bytes[i + 1]], 8) ^
- rol32(mc[decrypt][st.bytes[i + 2]], 16) ^
- rol32(mc[decrypt][st.bytes[i + 3]], 24));
+ CR_ST_WORD(st, i >> 2) =
+ mc[decrypt][CR_ST_BYTE(st, i)] ^
+ rol32(mc[decrypt][CR_ST_BYTE(st, i + 1)], 8) ^
+ rol32(mc[decrypt][CR_ST_BYTE(st, i + 2)], 16) ^
+ rol32(mc[decrypt][CR_ST_BYTE(st, i + 3)], 24);
}
env->vfp.regs[rd] = make_float64(st.l[0]);
@@ -255,24 +263,25 @@ void HELPER(crypto_sha1_3reg)(CPUARMState *env, uint32_t rd, uint32_t rn,
switch (op) {
case 0: /* sha1c */
- t = cho(d.words[1], d.words[2], d.words[3]);
+ t = cho(CR_ST_WORD(d, 1), CR_ST_WORD(d, 2), CR_ST_WORD(d, 3));
break;
case 1: /* sha1p */
- t = par(d.words[1], d.words[2], d.words[3]);
+ t = par(CR_ST_WORD(d, 1), CR_ST_WORD(d, 2), CR_ST_WORD(d, 3));
break;
case 2: /* sha1m */
- t = maj(d.words[1], d.words[2], d.words[3]);
+ t = maj(CR_ST_WORD(d, 1), CR_ST_WORD(d, 2), CR_ST_WORD(d, 3));
break;
default:
g_assert_not_reached();
}
- t += rol32(d.words[0], 5) + n.words[0] + m.words[i];
-
- n.words[0] = d.words[3];
- d.words[3] = d.words[2];
- d.words[2] = ror32(d.words[1], 2);
- d.words[1] = d.words[0];
- d.words[0] = t;
+ t += rol32(CR_ST_WORD(d, 0), 5) + CR_ST_WORD(n, 0)
+ + CR_ST_WORD(m, i);
+
+ CR_ST_WORD(n, 0) = CR_ST_WORD(d, 3);
+ CR_ST_WORD(d, 3) = CR_ST_WORD(d, 2);
+ CR_ST_WORD(d, 2) = ror32(CR_ST_WORD(d, 1), 2);
+ CR_ST_WORD(d, 1) = CR_ST_WORD(d, 0);
+ CR_ST_WORD(d, 0) = t;
}
}
env->vfp.regs[rd] = make_float64(d.l[0]);
@@ -286,8 +295,8 @@ void HELPER(crypto_sha1h)(CPUARMState *env, uint32_t rd, uint32_t rm)
float64_val(env->vfp.regs[rm + 1])
} };
- m.words[0] = ror32(m.words[0], 2);
- m.words[1] = m.words[2] = m.words[3] = 0;
+ CR_ST_WORD(m, 0) = ror32(CR_ST_WORD(m, 0), 2);
+ CR_ST_WORD(m, 1) = CR_ST_WORD(m, 2) = CR_ST_WORD(m, 3) = 0;
env->vfp.regs[rd] = make_float64(m.l[0]);
env->vfp.regs[rd + 1] = make_float64(m.l[1]);
@@ -304,10 +313,10 @@ void HELPER(crypto_sha1su1)(CPUARMState *env, uint32_t rd, uint32_t rm)
float64_val(env->vfp.regs[rm + 1])
} };
- d.words[0] = rol32(d.words[0] ^ m.words[1], 1);
- d.words[1] = rol32(d.words[1] ^ m.words[2], 1);
- d.words[2] = rol32(d.words[2] ^ m.words[3], 1);
- d.words[3] = rol32(d.words[3] ^ d.words[0], 1);
+ CR_ST_WORD(d, 0) = rol32(CR_ST_WORD(d, 0) ^ CR_ST_WORD(m, 1), 1);
+ CR_ST_WORD(d, 1) = rol32(CR_ST_WORD(d, 1) ^ CR_ST_WORD(m, 2), 1);
+ CR_ST_WORD(d, 2) = rol32(CR_ST_WORD(d, 2) ^ CR_ST_WORD(m, 3), 1);
+ CR_ST_WORD(d, 3) = rol32(CR_ST_WORD(d, 3) ^ CR_ST_WORD(d, 0), 1);
env->vfp.regs[rd] = make_float64(d.l[0]);
env->vfp.regs[rd + 1] = make_float64(d.l[1]);
@@ -356,20 +365,22 @@ void HELPER(crypto_sha256h)(CPUARMState *env, uint32_t rd, uint32_t rn,
int i;
for (i = 0; i < 4; i++) {
- uint32_t t = cho(n.words[0], n.words[1], n.words[2]) + n.words[3]
- + S1(n.words[0]) + m.words[i];
-
- n.words[3] = n.words[2];
- n.words[2] = n.words[1];
- n.words[1] = n.words[0];
- n.words[0] = d.words[3] + t;
-
- t += maj(d.words[0], d.words[1], d.words[2]) + S0(d.words[0]);
-
- d.words[3] = d.words[2];
- d.words[2] = d.words[1];
- d.words[1] = d.words[0];
- d.words[0] = t;
+ uint32_t t = cho(CR_ST_WORD(n, 0), CR_ST_WORD(n, 1), CR_ST_WORD(n, 2))
+ + CR_ST_WORD(n, 3) + S1(CR_ST_WORD(n, 0))
+ + CR_ST_WORD(m, i);
+
+ CR_ST_WORD(n, 3) = CR_ST_WORD(n, 2);
+ CR_ST_WORD(n, 2) = CR_ST_WORD(n, 1);
+ CR_ST_WORD(n, 1) = CR_ST_WORD(n, 0);
+ CR_ST_WORD(n, 0) = CR_ST_WORD(d, 3) + t;
+
+ t += maj(CR_ST_WORD(d, 0), CR_ST_WORD(d, 1), CR_ST_WORD(d, 2))
+ + S0(CR_ST_WORD(d, 0));
+
+ CR_ST_WORD(d, 3) = CR_ST_WORD(d, 2);
+ CR_ST_WORD(d, 2) = CR_ST_WORD(d, 1);
+ CR_ST_WORD(d, 1) = CR_ST_WORD(d, 0);
+ CR_ST_WORD(d, 0) = t;
}
env->vfp.regs[rd] = make_float64(d.l[0]);
@@ -394,13 +405,14 @@ void HELPER(crypto_sha256h2)(CPUARMState *env, uint32_t rd, uint32_t rn,
int i;
for (i = 0; i < 4; i++) {
- uint32_t t = cho(d.words[0], d.words[1], d.words[2]) + d.words[3]
- + S1(d.words[0]) + m.words[i];
-
- d.words[3] = d.words[2];
- d.words[2] = d.words[1];
- d.words[1] = d.words[0];
- d.words[0] = n.words[3 - i] + t;
+ uint32_t t = cho(CR_ST_WORD(d, 0), CR_ST_WORD(d, 1), CR_ST_WORD(d, 2))
+ + CR_ST_WORD(d, 3) + S1(CR_ST_WORD(d, 0))
+ + CR_ST_WORD(m, i);
+
+ CR_ST_WORD(d, 3) = CR_ST_WORD(d, 2);
+ CR_ST_WORD(d, 2) = CR_ST_WORD(d, 1);
+ CR_ST_WORD(d, 1) = CR_ST_WORD(d, 0);
+ CR_ST_WORD(d, 0) = CR_ST_WORD(n, 3 - i) + t;
}
env->vfp.regs[rd] = make_float64(d.l[0]);
@@ -418,10 +430,10 @@ void HELPER(crypto_sha256su0)(CPUARMState *env, uint32_t rd, uint32_t rm)
float64_val(env->vfp.regs[rm + 1])
} };
- d.words[0] += s0(d.words[1]);
- d.words[1] += s0(d.words[2]);
- d.words[2] += s0(d.words[3]);
- d.words[3] += s0(m.words[0]);
+ CR_ST_WORD(d, 0) += s0(CR_ST_WORD(d, 1));
+ CR_ST_WORD(d, 1) += s0(CR_ST_WORD(d, 2));
+ CR_ST_WORD(d, 2) += s0(CR_ST_WORD(d, 3));
+ CR_ST_WORD(d, 3) += s0(CR_ST_WORD(m, 0));
env->vfp.regs[rd] = make_float64(d.l[0]);
env->vfp.regs[rd + 1] = make_float64(d.l[1]);
@@ -443,10 +455,10 @@ void HELPER(crypto_sha256su1)(CPUARMState *env, uint32_t rd, uint32_t rn,
float64_val(env->vfp.regs[rm + 1])
} };
- d.words[0] += s1(m.words[2]) + n.words[1];
- d.words[1] += s1(m.words[3]) + n.words[2];
- d.words[2] += s1(d.words[0]) + n.words[3];
- d.words[3] += s1(d.words[1]) + m.words[0];
+ CR_ST_WORD(d, 0) += s1(CR_ST_WORD(m, 2)) + CR_ST_WORD(n, 1);
+ CR_ST_WORD(d, 1) += s1(CR_ST_WORD(m, 3)) + CR_ST_WORD(n, 2);
+ CR_ST_WORD(d, 2) += s1(CR_ST_WORD(d, 0)) + CR_ST_WORD(n, 3);
+ CR_ST_WORD(d, 3) += s1(CR_ST_WORD(d, 1)) + CR_ST_WORD(m, 0);
env->vfp.regs[rd] = make_float64(d.l[0]);
env->vfp.regs[rd + 1] = make_float64(d.l[1]);
diff --git a/target-arm/helper-a64.c b/target-arm/helper-a64.c
index 81066ca93..861f6fa69 100644
--- a/target-arm/helper-a64.c
+++ b/target-arm/helper-a64.c
@@ -135,6 +135,9 @@ float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp)
{
float_status *fpst = fpstp;
+ a = float32_squash_input_denormal(a, fpst);
+ b = float32_squash_input_denormal(b, fpst);
+
if ((float32_is_zero(a) && float32_is_infinity(b)) ||
(float32_is_infinity(a) && float32_is_zero(b))) {
/* 2.0 with the sign bit set to sign(A) XOR sign(B) */
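The reason for squashing first, sketched: with flush-to-zero enabled a denormal operand must already read as zero when the FMULX special case is tested, so that

/* illustrative behaviour with FZ set, after this change:
 *   mulx(denormal, +inf) -> mulx(+0, +inf) -> 2.0 (sign = sign(a) ^ sign(b))
 * before the change the 0 * inf test missed the raw denormal and
 * float32_mul() ran instead, raising Invalid and returning a NaN. */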
@@ -148,6 +151,9 @@ float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp)
{
float_status *fpst = fpstp;
+ a = float64_squash_input_denormal(a, fpst);
+ b = float64_squash_input_denormal(b, fpst);
+
if ((float64_is_zero(a) && float64_is_infinity(b)) ||
(float64_is_infinity(a) && float64_is_zero(b))) {
/* 2.0 with the sign bit set to sign(A) XOR sign(B) */
@@ -223,6 +229,9 @@ float32 HELPER(recpsf_f32)(float32 a, float32 b, void *fpstp)
{
float_status *fpst = fpstp;
+ a = float32_squash_input_denormal(a, fpst);
+ b = float32_squash_input_denormal(b, fpst);
+
a = float32_chs(a);
if ((float32_is_infinity(a) && float32_is_zero(b)) ||
(float32_is_infinity(b) && float32_is_zero(a))) {
@@ -235,6 +244,9 @@ float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp)
{
float_status *fpst = fpstp;
+ a = float64_squash_input_denormal(a, fpst);
+ b = float64_squash_input_denormal(b, fpst);
+
a = float64_chs(a);
if ((float64_is_infinity(a) && float64_is_zero(b)) ||
(float64_is_infinity(b) && float64_is_zero(a))) {
@@ -247,6 +259,9 @@ float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, void *fpstp)
{
float_status *fpst = fpstp;
+ a = float32_squash_input_denormal(a, fpst);
+ b = float32_squash_input_denormal(b, fpst);
+
a = float32_chs(a);
if ((float32_is_infinity(a) && float32_is_zero(b)) ||
(float32_is_infinity(b) && float32_is_zero(a))) {
@@ -259,6 +274,9 @@ float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp)
{
float_status *fpst = fpstp;
+ a = float64_squash_input_denormal(a, fpst);
+ b = float64_squash_input_denormal(b, fpst);
+
a = float64_chs(a);
if ((float64_is_infinity(a) && float64_is_zero(b)) ||
(float64_is_infinity(b) && float64_is_zero(a))) {
@@ -448,7 +466,6 @@ void aarch64_cpu_do_interrupt(CPUState *cs)
unsigned int new_el = arm_excp_target_el(cs, cs->exception_index);
target_ulong addr = env->cp15.vbar_el[new_el];
unsigned int new_mode = aarch64_pstate_mode(new_el, true);
- int i;
if (arm_current_el(env) < new_el) {
if (env->aarch64) {
@@ -506,15 +523,13 @@ void aarch64_cpu_do_interrupt(CPUState *cs)
aarch64_save_sp(env, arm_current_el(env));
env->elr_el[new_el] = env->pc;
} else {
- env->banked_spsr[0] = cpsr_read(env);
+ env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
if (!env->thumb) {
env->cp15.esr_el[new_el] |= 1 << 25;
}
env->elr_el[new_el] = env->regs[15];
- for (i = 0; i < 15; i++) {
- env->xregs[i] = env->regs[i];
- }
+ aarch64_sync_32_to_64(env);
env->condexec_bits = 0;
}
diff --git a/target-arm/helper.c b/target-arm/helper.c
index b74d348a3..d77c6de40 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -13,7 +13,7 @@
#ifndef CONFIG_USER_ONLY
static inline int get_phys_addr(CPUARMState *env, target_ulong address,
- int access_type, int is_user,
+ int access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, int *prot,
target_ulong *page_size);
@@ -119,6 +119,7 @@ static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
+ assert(ri->fieldoffset);
if (cpreg_field_is_64bit(ri)) {
return CPREG_FIELD64(env, ri);
} else {
@@ -129,6 +130,7 @@ static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
+ assert(ri->fieldoffset);
if (cpreg_field_is_64bit(ri)) {
CPREG_FIELD64(env, ri) = value;
} else {
@@ -136,6 +138,11 @@ static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
}
}
+static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return (char *)env + ri->fieldoffset;
+}
+
static uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
/* Raw read of a coprocessor register (as needed for migration, etc). */
@@ -169,6 +176,27 @@ static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
}
}
+static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
+{
+ /* Return true if the regdef would cause an assertion if you called
+ * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
+ * program bug for it not to have the NO_RAW flag).
+ * NB that returning false here doesn't necessarily mean that calling
+ * read/write_raw_cp_reg() is safe, because we can't distinguish "has
+ * read/write access functions which are safe for raw use" from "has
+ * read/write access functions which have side effects but has forgotten
+ * to provide raw access functions".
+ * The tests here line up with the conditions in read/write_raw_cp_reg()
+ * and assertions in raw_read()/raw_write().
+ */
+ if ((ri->type & ARM_CP_CONST) ||
+ ri->fieldoffset ||
+ ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
+ return false;
+ }
+ return true;
+}
+
bool write_cpustate_to_list(ARMCPU *cpu)
{
/* Write the coprocessor state from cpu->env to the (index,value) list. */
@@ -184,7 +212,7 @@ bool write_cpustate_to_list(ARMCPU *cpu)
ok = false;
continue;
}
- if (ri->type & ARM_CP_NO_MIGRATE) {
+ if (ri->type & ARM_CP_NO_RAW) {
continue;
}
cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
@@ -207,7 +235,7 @@ bool write_list_to_cpustate(ARMCPU *cpu)
ok = false;
continue;
}
- if (ri->type & ARM_CP_NO_MIGRATE) {
+ if (ri->type & ARM_CP_NO_RAW) {
continue;
}
/* Write value and confirm it reads back as written
@@ -231,7 +259,7 @@ static void add_cpreg_to_list(gpointer key, gpointer opaque)
regidx = *(uint32_t *)key;
ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
- if (!(ri->type & ARM_CP_NO_MIGRATE)) {
+ if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
/* The value array need not be initialized at this point */
cpu->cpreg_array_len++;
@@ -247,7 +275,7 @@ static void count_cpreg(gpointer key, gpointer opaque)
regidx = *(uint32_t *)key;
ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
- if (!(ri->type & ARM_CP_NO_MIGRATE)) {
+ if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
cpu->cpreg_array_len++;
}
}
@@ -419,13 +447,36 @@ static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
}
static const ARMCPRegInfo cp_reginfo[] = {
- { .name = "FCSEIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse),
+ /* Define the secure and non-secure FCSE identifier CP registers
+ * separately because there is no secure bank in V8 (no _EL3). This allows
+ * the secure register to be properly reset and migrated. There is also no
+ * v8 EL1 version of the register so the non-secure instance stands alone.
+ */
+ { .name = "FCSEIDR(NS)",
+ .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
+ .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
.resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
- { .name = "CONTEXTIDR", .state = ARM_CP_STATE_BOTH,
+ { .name = "FCSEIDR(S)",
+ .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
+ .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
+ .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
+ /* Define the secure and non-secure context identifier CP registers
+ * separately because there is no secure bank in V8 (no _EL3). This allows
+ * the secure register to be properly reset and migrated. In the
+ * non-secure case, the 32-bit register will have reset and migration
+ * disabled during registration as it is handled by the 64-bit instance.
+ */
+ { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
- .access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el1),
+ .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
+ .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
+ .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
+ { .name = "CONTEXTIDR(S)", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
+ .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
+ .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
.resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
REGINFO_SENTINEL
};
@@ -435,10 +486,12 @@ static const ARMCPRegInfo not_v8_cp_reginfo[] = {
* definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
*/
/* MMU Domain access control / MPU write buffer control */
- { .name = "DACR", .cp = 15,
- .crn = 3, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
- .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c3),
- .resetvalue = 0, .writefn = dacr_write, .raw_writefn = raw_write, },
+ { .name = "DACR",
+ .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
+ .access = PL1_RW, .resetvalue = 0,
+ .writefn = dacr_write, .raw_writefn = raw_write,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
+ offsetoflow32(CPUARMState, cp15.dacr_ns) } },
/* ??? This covers not just the impdef TLB lockdown registers but also
* some v7VMSA registers relating to TEX remap, so it is overly broad.
*/
@@ -478,7 +531,7 @@ static const ARMCPRegInfo not_v7_cp_reginfo[] = {
.resetvalue = 0 },
/* v6 doesn't have the cache ID registers but Linux reads them anyway */
{ .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
- .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
+ .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
.resetvalue = 0 },
/* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
* implementing it as RAZ means the "debug architecture version" bits
@@ -492,16 +545,16 @@ static const ARMCPRegInfo not_v7_cp_reginfo[] = {
*/
{ .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
.opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
- .type = ARM_CP_NO_MIGRATE },
+ .type = ARM_CP_NO_RAW },
{ .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
.opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
- .type = ARM_CP_NO_MIGRATE },
+ .type = ARM_CP_NO_RAW },
{ .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
.opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
- .type = ARM_CP_NO_MIGRATE },
+ .type = ARM_CP_NO_RAW },
{ .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
.opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
- .type = ARM_CP_NO_MIGRATE },
+ .type = ARM_CP_NO_RAW },
REGINFO_SENTINEL
};
@@ -552,7 +605,8 @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
.access = PL0_W, .type = ARM_CP_NOP },
{ .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
.access = PL1_RW,
- .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[1]),
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
+ offsetof(CPUARMState, cp15.ifar_ns) },
.resetvalue = 0, },
/* Watchpoint Fault Address Register : should actually only be present
* for 1136, 1176, 11MPCore.
@@ -776,7 +830,14 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
ARMCPU *cpu = arm_env_get_cpu(env);
- return cpu->ccsidr[env->cp15.c0_cssel];
+
+ /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
+ * bank
+ */
+ uint32_t index = A32_BANKED_REG_GET(env, csselr,
+ ri->secure & ARM_CP_SECSTATE_S);
+
+ return cpu->ccsidr[index];
}
static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -816,7 +877,7 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
* or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
*/
{ .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
- .access = PL0_RW, .type = ARM_CP_NO_MIGRATE,
+ .access = PL0_RW, .type = ARM_CP_ALIAS,
.fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
.writefn = pmcntenset_write,
.accessfn = pmreg_access,
@@ -831,11 +892,11 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
.accessfn = pmreg_access,
.writefn = pmcntenclr_write,
- .type = ARM_CP_NO_MIGRATE },
+ .type = ARM_CP_ALIAS },
{ .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
.access = PL0_RW, .accessfn = pmreg_access,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
.writefn = pmcntenclr_write },
{ .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
@@ -890,24 +951,23 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.resetvalue = 0,
.writefn = pmintenset_write, .raw_writefn = raw_write },
{ .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
- .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_RW, .type = ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
.resetvalue = 0, .writefn = pmintenclr_write, },
{ .name = "VBAR", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .writefn = vbar_write,
- .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[1]),
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
+ offsetof(CPUARMState, cp15.vbar_ns) },
.resetvalue = 0 },
- { .name = "SCR", .cp = 15, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
- .resetvalue = 0, .writefn = scr_write },
{ .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
- .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_MIGRATE },
+ .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
{ .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
- .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c0_cssel),
- .writefn = csselr_write, .resetvalue = 0 },
+ .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
+ offsetof(CPUARMState, cp15.csselr_ns) } },
/* Auxiliary ID register: this actually has an IMPDEF value but for now
* just RAZ for all cores:
*/
@@ -928,61 +988,67 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
*/
{ .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
- .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el1),
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
.resetvalue = 0 },
/* For non-long-descriptor page tables these are PRRR and NMRR;
* regardless they still act as reads-as-written for QEMU.
* The override is necessary because of the overly-broad TLB_LOCKDOWN
* definition.
*/
+ /* MAIR0/1 are defined separately from their 64-bit counterpart so
+ * that each can be assigned the correct banked fieldoffset; the
+ * endianness concerns are handled in the field definitions.
+ */
{ .name = "MAIR0", .state = ARM_CP_STATE_AA32, .type = ARM_CP_OVERRIDE,
.cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.mair_el1),
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
+ offsetof(CPUARMState, cp15.mair0_ns) },
.resetfn = arm_cp_reset_ignore },
{ .name = "MAIR1", .state = ARM_CP_STATE_AA32, .type = ARM_CP_OVERRIDE,
.cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
- .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el1),
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
+ offsetof(CPUARMState, cp15.mair1_ns) },
.resetfn = arm_cp_reset_ignore },
{ .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_R, .readfn = isr_read },
+ .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
/* 32 bit ITLB invalidates */
{ .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
{ .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
{ .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
/* 32 bit DTLB invalidates */
{ .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
{ .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
{ .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
/* 32 bit TLB invalidates */
{ .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
{ .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
{ .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
{ .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimvaa_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
REGINFO_SENTINEL
};
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
/* 32 bit TLB invalidates, Inner Shareable */
{ .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_is_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
{ .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_is_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
{ .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W,
+ .type = ARM_CP_NO_RAW, .access = PL1_W,
.writefn = tlbiasid_is_write },
{ .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W,
+ .type = ARM_CP_NO_RAW, .access = PL1_W,
.writefn = tlbimvaa_is_write },
REGINFO_SENTINEL
};
@@ -1017,23 +1083,31 @@ static const ARMCPRegInfo v6k_cp_reginfo[] = {
{ .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
.access = PL0_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el0), .resetvalue = 0 },
+ .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
{ .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
.access = PL0_RW,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.tpidr_el0),
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
+ offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
.resetfn = arm_cp_reset_ignore },
{ .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
.access = PL0_R|PL1_W,
- .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el0), .resetvalue = 0 },
+ .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
+ .resetvalue = 0},
{ .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
.access = PL0_R|PL1_W,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.tpidrro_el0),
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
+ offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
.resetfn = arm_cp_reset_ignore },
- { .name = "TPIDR_EL1", .state = ARM_CP_STATE_BOTH,
+ { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
.access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el1), .resetvalue = 0 },
+ .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
+ { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
+ .access = PL1_RW,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
+ offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
+ .resetvalue = 0 },
REGINFO_SENTINEL
};
@@ -1217,7 +1291,7 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
* Our reset value matches the fixed frequency we implement the timer at.
*/
{ .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
.fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
.resetfn = arm_cp_reset_ignore,
@@ -1237,7 +1311,7 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
},
/* per-timer control */
{ .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
- .type = ARM_CP_IO | ARM_CP_NO_MIGRATE, .access = PL1_RW | PL0_R,
+ .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
.accessfn = gt_ptimer_access,
.fieldoffset = offsetoflow32(CPUARMState,
cp15.c14_timer[GTIMER_PHYS].ctl),
@@ -1253,7 +1327,7 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
.writefn = gt_ctl_write, .raw_writefn = raw_write,
},
{ .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
- .type = ARM_CP_IO | ARM_CP_NO_MIGRATE, .access = PL1_RW | PL0_R,
+ .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
.accessfn = gt_vtimer_access,
.fieldoffset = offsetoflow32(CPUARMState,
cp15.c14_timer[GTIMER_VIRT].ctl),
@@ -1270,52 +1344,52 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
},
/* TimerValue views: a 32 bit downcounting view of the underlying state */
{ .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
.accessfn = gt_ptimer_access,
.readfn = gt_tval_read, .writefn = gt_tval_write,
},
{ .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
.readfn = gt_tval_read, .writefn = gt_tval_write,
},
{ .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
.accessfn = gt_vtimer_access,
.readfn = gt_tval_read, .writefn = gt_tval_write,
},
{ .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
.readfn = gt_tval_read, .writefn = gt_tval_write,
},
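
The TVAL views above are derived from the 64-bit compare value rather than
stored separately. A rough sketch of the relationship that
gt_tval_read/gt_tval_write maintain (shown standalone; the real code fetches
the count via gt_get_countervalue):

/* Sketch: TVAL is a signed 32-bit downcount relative to the counter */
static uint32_t tval_read_sketch(uint64_t cval, uint64_t count)
{
    return (uint32_t)(cval - count);   /* distance to the compare value */
}

static uint64_t tval_write_sketch(uint32_t tval, uint64_t count)
{
    return count + (int32_t)tval;      /* sign-extend back to a cval */
}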
/* The counter itself */
{ .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
- .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
+ .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
.accessfn = gt_pct_access,
.readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
},
{ .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
- .access = PL0_R, .type = ARM_CP_NO_MIGRATE | ARM_CP_IO,
+ .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
.accessfn = gt_pct_access,
.readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
},
{ .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
- .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
+ .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
.accessfn = gt_vct_access,
.readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
},
{ .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
- .access = PL0_R, .type = ARM_CP_NO_MIGRATE | ARM_CP_IO,
+ .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
.accessfn = gt_vct_access,
.readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
},
/* Comparison value, indicating when the timer goes off */
{ .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
.access = PL1_RW | PL0_R,
- .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
.accessfn = gt_ptimer_access, .resetfn = arm_cp_reset_ignore,
.writefn = gt_cval_write, .raw_writefn = raw_write,
@@ -1330,7 +1404,7 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
},
{ .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
.access = PL1_RW | PL0_R,
- .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
.accessfn = gt_vtimer_access, .resetfn = arm_cp_reset_ignore,
.writefn = gt_cval_write, .raw_writefn = raw_write,
@@ -1377,29 +1451,30 @@ static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri)
/* Other states are only available with TrustZone; in
* a non-TZ implementation these registers don't exist
* at all, which is an Uncategorized trap. This underdecoding
- * is safe because the reginfo is NO_MIGRATE.
+ * is safe because the reginfo is NO_RAW.
*/
return CP_ACCESS_TRAP_UNCATEGORIZED;
}
return CP_ACCESS_OK;
}
-static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
+static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
+ int access_type, ARMMMUIdx mmu_idx)
{
hwaddr phys_addr;
target_ulong page_size;
int prot;
- int ret, is_user = ri->opc2 & 2;
- int access_type = ri->opc2 & 1;
+ int ret;
+ uint64_t par64;
- ret = get_phys_addr(env, value, access_type, is_user,
+ ret = get_phys_addr(env, value, access_type, mmu_idx,
&phys_addr, &prot, &page_size);
if (extended_addresses_enabled(env)) {
/* ret is a DFSR/IFSR value for the long descriptor
* translation table format, but with WnR always clear.
* Convert it to a 64-bit PAR.
*/
- uint64_t par64 = (1 << 11); /* LPAE bit always set */
+ par64 = (1 << 11); /* LPAE bit always set */
if (ret == 0) {
par64 |= phys_addr & ~0xfffULL;
/* We don't set the ATTR or SH fields in the PAR. */
@@ -1411,7 +1486,6 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
* fault.
*/
}
- env->cp15.par_el1 = par64;
} else {
/* ret is a DFSR/IFSR value for the short descriptor
* translation table format (with WnR always clear).
@@ -1421,28 +1495,126 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
/* We do not set any attribute bits in the PAR */
if (page_size == (1 << 24)
&& arm_feature(env, ARM_FEATURE_V7)) {
- env->cp15.par_el1 = (phys_addr & 0xff000000) | 1 << 1;
+ par64 = (phys_addr & 0xff000000) | (1 << 1);
} else {
- env->cp15.par_el1 = phys_addr & 0xfffff000;
+ par64 = phys_addr & 0xfffff000;
}
} else {
- env->cp15.par_el1 = ((ret & (1 << 10)) >> 5) |
- ((ret & (1 << 12)) >> 6) |
- ((ret & 0xf) << 1) | 1;
+ par64 = ((ret & (1 << 10)) >> 5) | ((ret & (1 << 12)) >> 6) |
+ ((ret & 0xf) << 1) | 1;
+ }
+ }
+ return par64;
+}
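
On a failed short-descriptor translation the FSR value computed by
get_phys_addr is repacked into the 32-bit PAR, as the final expression above
shows. Restated with the bit movements spelled out (fsr value hypothetical):

/* Sketch of the failing-PAR packing done above:
 * FSR bit 10 -> PAR bit 5, FSR bit 12 -> PAR bit 6,
 * FSR bits [3:0] -> PAR bits [4:1], PAR bit 0 = F (translation aborted).
 */
uint32_t fsr = 0x5;                         /* hypothetical fault status */
uint32_t par = ((fsr & (1 << 10)) >> 5)
             | ((fsr & (1 << 12)) >> 6)
             | ((fsr & 0xf) << 1)
             | 1;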
+
+static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
+{
+ int access_type = ri->opc2 & 1;
+ uint64_t par64;
+ ARMMMUIdx mmu_idx;
+ int el = arm_current_el(env);
+ bool secure = arm_is_secure_below_el3(env);
+
+ switch (ri->opc2 & 6) {
+ case 0:
+ /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
+ switch (el) {
+ case 3:
+ mmu_idx = ARMMMUIdx_S1E3;
+ break;
+ case 2:
+ mmu_idx = ARMMMUIdx_S1NSE1;
+ break;
+ case 1:
+ mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case 2:
+ /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
+ switch (el) {
+ case 3:
+ mmu_idx = ARMMMUIdx_S1SE0;
+ break;
+ case 2:
+ mmu_idx = ARMMMUIdx_S1NSE0;
+ break;
+ case 1:
+ mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case 4:
+ /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
+ mmu_idx = ARMMMUIdx_S12NSE1;
+ break;
+ case 6:
+ /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
+ mmu_idx = ARMMMUIdx_S12NSE0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ par64 = do_ats_write(env, value, access_type, mmu_idx);
+
+ A32_BANKED_CURRENT_REG_SET(env, par, par64);
+}
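
The 32-bit ATS operations encode everything in opc2: bit 0 distinguishes the
read (0) and write (1) permission check, while bits [2:1] pick the
translation regime, as the outer switch above decodes. A compact restatement
of that split, mirroring the function body:

/* opc2 decode used by ats_write above:
 *   bit 0      : 0 = check as read, 1 = check as write
 *   bits [2:1] : 0 = stage 1, current state, PL1 (ATS1CPR/ATS1CPW)
 *                1 = stage 1, current state, PL0 (ATS1CUR/ATS1CUW)
 *                2 = stages 1+2, NS PL1          (ATS12NSOPR/ATS12NSOPW)
 *                3 = stages 1+2, NS PL0          (ATS12NSOUR/ATS12NSOUW)
 */
int access_type = ri->opc2 & 1;
int regime = (ri->opc2 >> 1) & 3;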
+
+static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ int access_type = ri->opc2 & 1;
+ ARMMMUIdx mmu_idx;
+ int secure = arm_is_secure_below_el3(env);
+
+ switch (ri->opc2 & 6) {
+ case 0:
+ switch (ri->opc1) {
+ case 0: /* AT S1E1R, AT S1E1W */
+ mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
+ break;
+ case 4: /* AT S1E2R, AT S1E2W */
+ mmu_idx = ARMMMUIdx_S1E2;
+ break;
+ case 6: /* AT S1E3R, AT S1E3W */
+ mmu_idx = ARMMMUIdx_S1E3;
+ break;
+ default:
+ g_assert_not_reached();
}
+ break;
+ case 2: /* AT S1E0R, AT S1E0W */
+ mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
+ break;
+ case 4: /* AT S12E1R, AT S12E1W */
+ mmu_idx = ARMMMUIdx_S12NSE1;
+ break;
+ case 6: /* AT S12E0R, AT S12E0W */
+ mmu_idx = ARMMMUIdx_S12NSE0;
+ break;
+ default:
+ g_assert_not_reached();
}
+
+ env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}
#endif
static const ARMCPRegInfo vapa_cp_reginfo[] = {
{ .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .resetvalue = 0,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.par_el1),
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
+ offsetoflow32(CPUARMState, cp15.par_ns) },
.writefn = par_write },
#ifndef CONFIG_USER_ONLY
{ .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
.access = PL1_W, .accessfn = ats_access,
- .writefn = ats_write, .type = ARM_CP_NO_MIGRATE },
+ .writefn = ats_write, .type = ARM_CP_NO_RAW },
#endif
REGINFO_SENTINEL
};
@@ -1501,12 +1673,12 @@ static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
{ .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_RW, .type = ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
.resetvalue = 0,
.readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
{ .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
- .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_RW, .type = ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
.resetvalue = 0,
.readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
@@ -1555,6 +1727,7 @@ static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
+ TCR *tcr = raw_ptr(env, ri);
int maskshift = extract32(value, 0, 3);
if (!arm_feature(env, ARM_FEATURE_V8)) {
@@ -1573,14 +1746,15 @@ static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
}
}
- /* Note that we always calculate c2_mask and c2_base_mask, but
+ /* Update the masks corresponding to the TCR bank being written.
+ * Note that we always calculate mask and base_mask, but
* they are only used for short-descriptor tables (ie if EAE is 0);
- * for long-descriptor tables the TTBCR fields are used differently
- * and the c2_mask and c2_base_mask values are meaningless.
+ * for long-descriptor tables the TCR fields are used differently
+ * and the mask and base_mask values are meaningless.
*/
- raw_write(env, ri, value);
- env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> maskshift);
- env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> maskshift);
+ tcr->raw_tcr = value;
+ tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
+ tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
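
Since the masks feed the short-descriptor table walker, a worked example
helps: with TTBCR.N = 2, TTBR0 covers the bottom 1GB of the address space
and its table base must be 4KB aligned.

/* Worked example of the computation above for TTBCR.N == 2 */
int maskshift = 2;
uint32_t mask      = ~(0xffffffffu >> maskshift);  /* 0xc0000000 */
uint32_t base_mask = ~(0x3fffu >> maskshift);      /* 0xfffff000 */
/* VAs with (va & mask) == 0, i.e. below 1GB, translate via TTBR0,
 * whose base address is base_mask-aligned (4KB here).
 */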
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -1599,19 +1773,25 @@ static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
- env->cp15.c2_base_mask = 0xffffc000u;
- raw_write(env, ri, 0);
- env->cp15.c2_mask = 0;
+ TCR *tcr = raw_ptr(env, ri);
+
+ /* Reset both the TCR and the masks corresponding to the bank of
+ * the TCR being reset.
+ */
+ tcr->raw_tcr = 0;
+ tcr->mask = 0;
+ tcr->base_mask = 0xffffc000u;
}
static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
+ TCR *tcr = raw_ptr(env, ri);
/* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
tlb_flush(CPU(cpu), 1);
- raw_write(env, ri, value);
+ tcr->raw_tcr = value;
}
static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -1630,38 +1810,46 @@ static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
{ .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
+ .access = PL1_RW, .type = ARM_CP_ALIAS,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
+ offsetoflow32(CPUARMState, cp15.dfsr_ns) },
.resetfn = arm_cp_reset_ignore, },
{ .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
- .access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.ifsr_el2), .resetvalue = 0, },
+ .access = PL1_RW, .resetvalue = 0,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
+ offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
{ .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
.access = PL1_RW,
.fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
{ .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el1),
- .writefn = vmsa_ttbr_write, .resetvalue = 0 },
+ .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
+ offsetof(CPUARMState, cp15.ttbr0_ns) } },
{ .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
- .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el1),
- .writefn = vmsa_ttbr_write, .resetvalue = 0 },
+ .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
+ .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
+ offsetof(CPUARMState, cp15.ttbr1_ns) } },
{ .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
.access = PL1_RW, .writefn = vmsa_tcr_el1_write,
.resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
- .fieldoffset = offsetof(CPUARMState, cp15.c2_control) },
+ .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
{ .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
- .access = PL1_RW, .type = ARM_CP_NO_MIGRATE, .writefn = vmsa_ttbcr_write,
+ .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
.resetfn = arm_cp_reset_ignore, .raw_writefn = vmsa_ttbcr_raw_write,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c2_control) },
- /* 64-bit FAR; this entry also gives us the AArch32 DFAR */
- { .name = "FAR_EL1", .state = ARM_CP_STATE_BOTH,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
+ offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
+ { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
.resetvalue = 0, },
+ { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .resetvalue = 0,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
+ offsetof(CPUARMState, cp15.dfar_ns) } },
REGINFO_SENTINEL
};
@@ -1720,7 +1908,7 @@ static const ARMCPRegInfo omap_cp_reginfo[] = {
.writefn = omap_threadid_write },
{ .name = "TI925T_STATUS", .cp = 15, .crn = 15,
.crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_NO_RAW,
.readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
/* TODO: Peripheral port remap register:
* On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
@@ -1729,7 +1917,7 @@ static const ARMCPRegInfo omap_cp_reginfo[] = {
*/
{ .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
.opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
- .type = ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
.writefn = omap_cachemaint_write },
{ .name = "C9", .cp = 15, .crn = 9,
.crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
@@ -1779,7 +1967,7 @@ static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
{ .name = "C15_IMPDEF", .cp = 15, .crn = 15,
.crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
.access = PL1_RW,
- .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE | ARM_CP_OVERRIDE,
+ .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
.resetvalue = 0 },
REGINFO_SENTINEL
};
@@ -1787,7 +1975,7 @@ static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
/* Cache status: RAZ because we have no cache so it's always clean */
{ .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
- .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
+ .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
.resetvalue = 0 },
REGINFO_SENTINEL
};
@@ -1795,7 +1983,7 @@ static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
/* We never have a block transfer operation in progress */
{ .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
- .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
+ .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
.resetvalue = 0 },
/* The cache ops themselves: these all NOP for QEMU */
{ .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
@@ -1818,10 +2006,10 @@ static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
* to indicate that there are no dirty cache lines.
*/
{ .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
- .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
+ .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
.resetvalue = (1 << 30) },
{ .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
- .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
+ .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
.resetvalue = (1 << 30) },
REGINFO_SENTINEL
};
@@ -1831,7 +2019,7 @@ static const ARMCPRegInfo strongarm_cp_reginfo[] = {
{ .name = "C9_READBUFFER", .cp = 15, .crn = 9,
.crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
.access = PL1_RW, .resetvalue = 0,
- .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE },
+ .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
REGINFO_SENTINEL
};
@@ -1857,7 +2045,7 @@ static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
static const ARMCPRegInfo mpidr_cp_reginfo[] = {
{ .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
- .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_MIGRATE },
+ .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
REGINFO_SENTINEL
};
@@ -1874,15 +2062,18 @@ static const ARMCPRegInfo lpae_cp_reginfo[] = {
.access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
.resetvalue = 0 },
{ .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
- .access = PL1_RW, .type = ARM_CP_64BIT,
- .fieldoffset = offsetof(CPUARMState, cp15.par_el1), .resetvalue = 0 },
+ .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
+ offsetof(CPUARMState, cp15.par_ns)} },
{ .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
- .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE,
- .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el1),
+ .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
+ offsetof(CPUARMState, cp15.ttbr0_ns) },
.writefn = vmsa_ttbr_write, .resetfn = arm_cp_reset_ignore },
{ .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
- .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE,
- .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el1),
+ .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
+ offsetof(CPUARMState, cp15.ttbr1_ns) },
.writefn = vmsa_ttbr_write, .resetfn = arm_cp_reset_ignore },
REGINFO_SENTINEL
};
@@ -1911,7 +2102,7 @@ static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
- if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
+ if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
return CP_ACCESS_TRAP;
}
return CP_ACCESS_OK;
@@ -1929,7 +2120,7 @@ static CPAccessResult aa64_cacheop_access(CPUARMState *env,
/* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
* SCTLR_EL1.UCI is set.
*/
- if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCI)) {
+ if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
return CP_ACCESS_TRAP;
}
return CP_ACCESS_OK;
@@ -2006,7 +2197,7 @@ static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri)
/* We don't implement EL2, so the only control on DC ZVA is the
* bit in the SCTLR which can prohibit access for EL0.
*/
- if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_DZE)) {
+ if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
return CP_ACCESS_TRAP;
}
return CP_ACCESS_OK;
@@ -2045,6 +2236,24 @@ static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
update_spsel(env, val);
}
+static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ if (raw_read(env, ri) == value) {
+ /* Skip the TLB flush if nothing actually changed; Linux likes
+ * to do a lot of pointless SCTLR writes.
+ */
+ return;
+ }
+
+ raw_write(env, ri, value);
+ /* ??? Lots of these bits are not implemented. */
+ /* This may enable/disable the MMU, so do a TLB flush. */
+ tlb_flush(CPU(cpu), 1);
+}
+
static const ARMCPRegInfo v8_cp_reginfo[] = {
/* Minimal set of EL0-visible registers. This will need to be expanded
* significantly for system emulation of AArch64 CPUs.
@@ -2054,7 +2263,7 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.access = PL0_RW, .type = ARM_CP_NZCV },
{ .name = "DAIF", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_NO_RAW,
.access = PL0_RW, .accessfn = aa64_daif_access,
.fieldoffset = offsetof(CPUARMState, daif),
.writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
@@ -2066,7 +2275,7 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
{ .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
- .access = PL0_R, .type = ARM_CP_NO_MIGRATE,
+ .access = PL0_R, .type = ARM_CP_NO_RAW,
.readfn = aa64_dczid_read },
{ .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
@@ -2117,77 +2326,77 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
/* TLBI operations */
{ .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbiall_is_write },
{ .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_va_is_write },
{ .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_asid_is_write },
{ .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_vaa_is_write },
{ .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_va_is_write },
{ .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_vaa_is_write },
{ .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbiall_write },
{ .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_va_write },
{ .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_asid_write },
{ .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_vaa_write },
{ .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_va_write },
{ .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_vaa_write },
#ifndef CONFIG_USER_ONLY
/* 64 bit address translation operations */
{ .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE, .writefn = ats_write },
+ .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
{ .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE, .writefn = ats_write },
+ .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
{ .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE, .writefn = ats_write },
+ .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
{ .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
- .access = PL1_W, .type = ARM_CP_NO_MIGRATE, .writefn = ats_write },
+ .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
#endif
/* TLB invalidate last level of translation table walk */
{ .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_is_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
{ .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W,
+ .type = ARM_CP_NO_RAW, .access = PL1_W,
.writefn = tlbimvaa_is_write },
{ .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
{ .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimvaa_write },
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
/* 32 bit cache operations */
{ .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
.type = ARM_CP_NOP, .access = PL1_W },
@@ -2216,19 +2425,20 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
{ .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
.type = ARM_CP_NOP, .access = PL1_W },
/* MMU Domain access control / MPU write buffer control */
- { .name = "DACR", .cp = 15,
- .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
- .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c3),
- .resetvalue = 0, .writefn = dacr_write, .raw_writefn = raw_write, },
+ { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .resetvalue = 0,
+ .writefn = dacr_write, .raw_writefn = raw_write,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
+ offsetoflow32(CPUARMState, cp15.dacr_ns) } },
{ .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
.access = PL1_RW,
.fieldoffset = offsetof(CPUARMState, elr_el[1]) },
{ .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
- .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[0]) },
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[1]) },
/* We rely on the access checks not allowing the guest to write to the
* state field when SPSel indicates that it's being used as the stack
* pointer.
@@ -2236,11 +2446,15 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
{ .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
.access = PL1_RW, .accessfn = sp_el0_access,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, sp_el[0]) },
+ { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
{ .name = "SPSel", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_NO_RAW,
.access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
REGINFO_SENTINEL
};
@@ -2252,7 +2466,7 @@ static const ARMCPRegInfo v8_el3_no_el2_cp_reginfo[] = {
.access = PL2_RW,
.readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
{ .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_NO_RAW,
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
.access = PL2_RW,
.readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
@@ -2289,20 +2503,29 @@ static const ARMCPRegInfo v8_el2_cp_reginfo[] = {
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
.writefn = hcr_write },
+ { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .resetvalue = 0,
+ .writefn = dacr_write, .raw_writefn = raw_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
{ .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
.access = PL2_RW,
.fieldoffset = offsetof(CPUARMState, elr_el[2]) },
{ .name = "ESR_EL2", .state = ARM_CP_STATE_AA64,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
+ { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
+ .access = PL2_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
{ .name = "FAR_EL2", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
{ .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[6]) },
{ .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
@@ -2310,24 +2533,64 @@ static const ARMCPRegInfo v8_el2_cp_reginfo[] = {
.access = PL2_RW, .writefn = vbar_write,
.fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
.resetvalue = 0 },
+ { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
+ .access = PL3_RW, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
REGINFO_SENTINEL
};
-static const ARMCPRegInfo v8_el3_cp_reginfo[] = {
+static const ARMCPRegInfo el3_cp_reginfo[] = {
+ { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
+ .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
+ .resetvalue = 0, .writefn = scr_write },
+ { .name = "SCR", .type = ARM_CP_ALIAS,
+ .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
+ .access = PL3_RW, .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
+ .resetfn = arm_cp_reset_ignore, .writefn = scr_write },
+ { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
+ .access = PL3_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.sder) },
+ { .name = "SDER",
+ .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
+ .access = PL3_RW, .resetvalue = 0,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
+ /* TODO: Implement NSACR trapping of secure EL1 accesses to EL3 */
+ { .name = "NSACR", .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
+ .access = PL3_W | PL1_R, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.nsacr) },
+ { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
+ .access = PL3_RW, .writefn = vbar_write, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
+ { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
+ .access = PL3_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]) },
+ { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
+ .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
+ { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
+ .access = PL3_RW, .writefn = vmsa_tcr_el1_write,
+ .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
{ .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
.access = PL3_RW,
.fieldoffset = offsetof(CPUARMState, elr_el[3]) },
{ .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
.access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
{ .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
.access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
{ .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
.access = PL3_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[7]) },
{ .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
@@ -2335,38 +2598,15 @@ static const ARMCPRegInfo v8_el3_cp_reginfo[] = {
.access = PL3_RW, .writefn = vbar_write,
.fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
.resetvalue = 0 },
- { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
- .type = ARM_CP_NO_MIGRATE,
- .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
- .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
- .writefn = scr_write },
REGINFO_SENTINEL
};
-static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- ARMCPU *cpu = arm_env_get_cpu(env);
-
- if (raw_read(env, ri) == value) {
- /* Skip the TLB flush if nothing actually changed; Linux likes
- * to do a lot of pointless SCTLR writes.
- */
- return;
- }
-
- raw_write(env, ri, value);
- /* ??? Lots of these bits are not implemented. */
- /* This may enable/disable the MMU, so do a TLB flush. */
- tlb_flush(CPU(cpu), 1);
-}
-
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
/* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
* but the AArch32 CTR has its own reginfo struct)
*/
- if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCT)) {
+ if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
return CP_ACCESS_TRAP;
}
return CP_ACCESS_OK;
@@ -2397,7 +2637,7 @@ static const ARMCPRegInfo debug_cp_reginfo[] = {
*/
{ .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_ALIAS,
.access = PL1_R,
.fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
.resetfn = arm_cp_reset_ignore },
@@ -2850,7 +3090,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
ARMCPRegInfo pmcr = {
.name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
.access = PL0_RW,
- .type = ARM_CP_IO | ARM_CP_NO_MIGRATE,
+ .type = ARM_CP_IO | ARM_CP_ALIAS,
.fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
.accessfn = pmreg_access, .writefn = pmcr_write,
.raw_writefn = raw_write,
@@ -2940,17 +3180,30 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.resetvalue = cpu->mvfr2 },
REGINFO_SENTINEL
};
- ARMCPRegInfo rvbar = {
- .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
- .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
- };
- define_one_arm_cp_reg(cpu, &rvbar);
+ /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
+ if (!arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_feature(env, ARM_FEATURE_EL2)) {
+ ARMCPRegInfo rvbar = {
+ .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
+ .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
+ };
+ define_one_arm_cp_reg(cpu, &rvbar);
+ }
define_arm_cp_regs(cpu, v8_idregs);
define_arm_cp_regs(cpu, v8_cp_reginfo);
}
if (arm_feature(env, ARM_FEATURE_EL2)) {
define_arm_cp_regs(cpu, v8_el2_cp_reginfo);
+ /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
+ if (!arm_feature(env, ARM_FEATURE_EL3)) {
+ ARMCPRegInfo rvbar = {
+ .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
+ .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
+ };
+ define_one_arm_cp_reg(cpu, &rvbar);
+ }
} else {
/* If EL2 is missing but higher ELs are enabled, we need to
* register the no_el2 reginfos.
@@ -2960,7 +3213,13 @@ void register_cp_regs_for_features(ARMCPU *cpu)
}
}
if (arm_feature(env, ARM_FEATURE_EL3)) {
- define_arm_cp_regs(cpu, v8_el3_cp_reginfo);
+ define_arm_cp_regs(cpu, el3_cp_reginfo);
+ ARMCPRegInfo rvbar = {
+ .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
+ .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar
+ };
+ define_one_arm_cp_reg(cpu, &rvbar);
}
if (arm_feature(env, ARM_FEATURE_MPU)) {
/* These are the MPU registers prior to PMSAv6. Any new
@@ -3160,8 +3419,10 @@ void register_cp_regs_for_features(ARMCPU *cpu)
{
ARMCPRegInfo sctlr = {
.name = "SCTLR", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_sys),
+ .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
+ .access = PL1_RW,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
+ offsetof(CPUARMState, cp15.sctlr_ns) },
.writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
.raw_writefn = raw_write,
};
@@ -3287,7 +3548,7 @@ CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
}
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
- void *opaque, int state,
+ void *opaque, int state, int secstate,
int crm, int opc1, int opc2)
{
/* Private utility function for define_one_arm_cp_reg_with_opaque():
@@ -3296,22 +3557,59 @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
uint32_t *key = g_new(uint32_t, 1);
ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
- if (r->state == ARM_CP_STATE_BOTH && state == ARM_CP_STATE_AA32) {
- /* The AArch32 view of a shared register sees the lower 32 bits
- * of a 64 bit backing field. It is not migratable as the AArch64
- * view handles that. AArch64 also handles reset.
- * We assume it is a cp15 register if the .cp field is left unset.
+ int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;
+
+ /* Reset the secure state to the specific incoming state. This is
+ * necessary as the register may have been defined with both states.
+ */
+ r2->secure = secstate;
+
+ if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
+ /* Register is banked (using both entries in the array).
+ * Overwrite fieldoffset: the array is only used when defining
+ * banked registers, and only fieldoffset is consulted later.
+ */
- if (r2->cp == 0) {
- r2->cp = 15;
+ r2->fieldoffset = r->bank_fieldoffsets[ns];
+ }
+
+ if (state == ARM_CP_STATE_AA32) {
+ if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
+ /* If the register is banked then we don't need to migrate or
+ * reset the 32-bit instance in certain cases:
+ *
+ * 1) If the register has both 32-bit and 64-bit instances then we
+ * can count on the 64-bit instance taking care of the
+ * non-secure bank.
+ * 2) If ARMv8 is enabled then we can count on a 64-bit version
+ * taking care of the secure bank. This requires that separate
+ * 32 and 64-bit definitions are provided.
+ */
+ if ((r->state == ARM_CP_STATE_BOTH && ns) ||
+ (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
+ r2->type |= ARM_CP_ALIAS;
+ r2->resetfn = arm_cp_reset_ignore;
+ }
+ } else if ((secstate != r->secure) && !ns) {
+ /* The register is not banked so we only want to allow migration of
+ * the non-secure instance.
+ */
+ r2->type |= ARM_CP_ALIAS;
+ r2->resetfn = arm_cp_reset_ignore;
}
- r2->type |= ARM_CP_NO_MIGRATE;
- r2->resetfn = arm_cp_reset_ignore;
+
+ if (r->state == ARM_CP_STATE_BOTH) {
+ /* We assume it is a cp15 register if the .cp field is left unset.
+ */
+ if (r2->cp == 0) {
+ r2->cp = 15;
+ }
+
#ifdef HOST_WORDS_BIGENDIAN
- if (r2->fieldoffset) {
- r2->fieldoffset += sizeof(uint32_t);
- }
+ if (r2->fieldoffset) {
+ r2->fieldoffset += sizeof(uint32_t);
+ }
#endif
+ }
}
if (state == ARM_CP_STATE_AA64) {
/* To allow abbreviation of ARMCPRegInfo
@@ -3327,7 +3625,7 @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
*key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
r2->opc0, opc1, opc2);
} else {
- *key = ENCODE_CP_REG(r2->cp, is64, r2->crn, crm, opc1, opc2);
+ *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
}
if (opaque) {
r2->opaque = opaque;
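
ENCODE_CP_REG now takes the ns flag so the secure and non-secure instances
land under distinct hashtable keys. A plausible sketch of such an encoding
(the shift values are illustrative; the actual macro is defined in cpu.h):

/* Illustrative key packing: coprocessor coordinates plus an NS bit so
 * that S and NS instances of a banked register do not collide.
 */
#define SKETCH_CP_REG_NS_SHIFT 29
#define SKETCH_ENCODE_CP_REG(cp, is64, ns, crn, crm, opc1, opc2)        \
    (((ns) << SKETCH_CP_REG_NS_SHIFT) | ((cp) << 16) | ((is64) << 15) | \
     ((crn) << 11) | ((crm) << 7) | ((opc1) << 3) | (opc2))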
@@ -3344,15 +3642,25 @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
r2->opc2 = opc2;
/* By convention, for wildcarded registers only the first
* entry is used for migration; the others are marked as
- * NO_MIGRATE so we don't try to transfer the register
+ * ALIAS so we don't try to transfer the register
* multiple times. Special registers (ie NOP/WFI) are
- * never migratable.
+ * never migratable and not even raw-accessible.
*/
- if ((r->type & ARM_CP_SPECIAL) ||
- ((r->crm == CP_ANY) && crm != 0) ||
+ if ((r->type & ARM_CP_SPECIAL)) {
+ r2->type |= ARM_CP_NO_RAW;
+ }
+ if (((r->crm == CP_ANY) && crm != 0) ||
((r->opc1 == CP_ANY) && opc1 != 0) ||
((r->opc2 == CP_ANY) && opc2 != 0)) {
- r2->type |= ARM_CP_NO_MIGRATE;
+ r2->type |= ARM_CP_ALIAS;
+ }
+
+ /* Check that raw accesses are either forbidden or handled. Note that
+ * we can't assert this earlier because the setup of fieldoffset for
+ * banked registers has to be done first.
+ */
+ if (!(r2->type & ARM_CP_NO_RAW)) {
+ assert(!raw_accessors_invalid(r2));
}
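
The new assertion relies on raw_accessors_invalid() reporting regdefs whose
state cannot actually be read or written raw. A hedged sketch of what such a
predicate needs to check (simplified; the real helper also has to account
for ARM_CP_CONST and similar types):

static bool raw_accessors_invalid_sketch(const ARMCPRegInfo *ri)
{
    /* A raw-accessible register must expose its state somehow: either
     * a backing fieldoffset or explicit (raw or normal) accessors.
     */
    return ri->fieldoffset == 0 &&
           !(ri->readfn || ri->raw_readfn) &&
           !(ri->writefn || ri->raw_writefn);
}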
/* Overriding of an existing definition must be explicitly
@@ -3460,10 +3768,14 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
*/
if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
if (r->access & PL3_R) {
- assert(r->fieldoffset || r->readfn);
+ assert((r->fieldoffset ||
+ (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
+ r->readfn);
}
if (r->access & PL3_W) {
- assert(r->fieldoffset || r->writefn);
+ assert((r->fieldoffset ||
+ (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
+ r->writefn);
}
}
/* Bad type field probably means missing sentinel at end of reg list */
@@ -3476,8 +3788,32 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
continue;
}
- add_cpreg_to_hashtable(cpu, r, opaque, state,
- crm, opc1, opc2);
+ if (state == ARM_CP_STATE_AA32) {
+ /* Under AArch32 CP registers can be common
+ * (same for secure and non-secure world) or banked.
+ */
+ switch (r->secure) {
+ case ARM_CP_SECSTATE_S:
+ case ARM_CP_SECSTATE_NS:
+ add_cpreg_to_hashtable(cpu, r, opaque, state,
+ r->secure, crm, opc1, opc2);
+ break;
+ default:
+ add_cpreg_to_hashtable(cpu, r, opaque, state,
+ ARM_CP_SECSTATE_S,
+ crm, opc1, opc2);
+ add_cpreg_to_hashtable(cpu, r, opaque, state,
+ ARM_CP_SECSTATE_NS,
+ crm, opc1, opc2);
+ break;
+ }
+ } else {
+ /* AArch64 registers get mapped to the non-secure instance
+ * of the AArch32 register */
+ add_cpreg_to_hashtable(cpu, r, opaque, state,
+ ARM_CP_SECSTATE_NS,
+ crm, opc1, opc2);
+ }
}
}
}
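
With this dispatch, one AArch32 regdef carrying bank_fieldoffsets produces
two hashtable entries, one per security state. A hypothetical definition
showing the shape (register name, coordinates and fields are made up):

/* Hypothetical banked register: reads/writes cp15.foo_s in the secure
 * world and cp15.foo_ns otherwise; the code above registers an S and
 * an NS instance from this single entry.
 */
static const ARMCPRegInfo foo_reginfo = {
    .name = "FOO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 7,
    .access = PL1_RW, .resetvalue = 0,
    .bank_fieldoffsets = { offsetof(CPUARMState, cp15.foo_s),
                           offsetof(CPUARMState, cp15.foo_ns) },
};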
@@ -3551,6 +3887,8 @@ uint32_t cpsr_read(CPUARMState *env)
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
+ uint32_t changed_daif;
+
if (mask & CPSR_NZCV) {
env->ZF = (~val) & CPSR_Z;
env->NF = val;
@@ -3573,6 +3911,58 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
env->GE = (val >> 16) & 0xf;
}
+ /* In a V7 implementation that includes the security extensions but does
+ * not include Virtualization Extensions, the SCR.FW and SCR.AW bits control
+ * whether non-secure software is allowed to change the CPSR_F and CPSR_A
+ * bits respectively.
+ *
+ * In a V8 implementation, it is permitted for privileged software to
+ * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
+ */
+ if (!arm_feature(env, ARM_FEATURE_V8) &&
+ arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_feature(env, ARM_FEATURE_EL2) &&
+ !arm_is_secure(env)) {
+
+ changed_daif = (env->daif ^ val) & mask;
+
+ if (changed_daif & CPSR_A) {
+ /* Check to see if we are allowed to change the masking of async
+ * abort exceptions from a non-secure state.
+ */
+ if (!(env->cp15.scr_el3 & SCR_AW)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Ignoring attempt to switch CPSR_A flag from "
+ "non-secure world with SCR.AW bit clear\n");
+ mask &= ~CPSR_A;
+ }
+ }
+
+ if (changed_daif & CPSR_F) {
+ /* Check to see if we are allowed to change the masking of FIQ
+ * exceptions from a non-secure state.
+ */
+ if (!(env->cp15.scr_el3 & SCR_FW)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Ignoring attempt to switch CPSR_F flag from "
+ "non-secure world with SCR.FW bit clear\n");
+ mask &= ~CPSR_F;
+ }
+
+ /* Check whether non-maskable FIQ (NMFI) support is enabled.
+ * If this bit is set software is not allowed to mask
+ * FIQs, but is allowed to set CPSR_F to 0.
+ */
+ if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
+ (val & CPSR_F)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Ignoring attempt to enable CPSR_F flag "
+ "(non-maskable FIQ [NMFI] support enabled)\n");
+ mask &= ~CPSR_F;
+ }
+ }
+ }
+
env->daif &= ~(CPSR_AIF & mask);
env->daif |= val & CPSR_AIF & mask;
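
The final read-modify-write only touches A/I/F bits that survived the gating
above. For instance, a non-secure write attempting to set CPSR_A with SCR.AW
clear arrives here with CPSR_A already stripped from mask, so daif is left
untouched:

/* Sketch of the update above as a pure function (illustrative) */
static uint32_t daif_update_sketch(uint32_t daif, uint32_t val, uint32_t mask)
{
    daif &= ~(CPSR_AIF & mask);    /* clear only unmasked A/I/F bits */
    daif |= val & CPSR_AIF & mask; /* then set those requested in val */
    return daif;
}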
@@ -3706,6 +4096,11 @@ unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx)
return 1;
}
+void aarch64_sync_64_to_32(CPUARMState *env)
+{
+ g_assert_not_reached();
+}
+
#else
/* Map CPU modes onto saved register banks. */
@@ -3761,6 +4156,101 @@ void switch_mode(CPUARMState *env, int mode)
env->spsr = env->banked_spsr[i];
}
+/* Physical Interrupt Target EL Lookup Table
+ *
+ * [ From ARM ARM section G1.13.4 (Table G1-15) ]
+ *
+ * The below multi-dimensional table is used for looking up the target
+ * exception level given numerous condition criteria. Specifically, the
+ * target EL is based on SCR and HCR routing controls as well as the
+ * currently executing EL and secure state.
+ *
+ * Dimensions:
+ * target_el_table[2][2][2][2][2][4]
+ * | | | | | +--- Current EL
+ * | | | | +------ Non-secure(0)/Secure(1)
+ * | | | +--------- HCR mask override
+ * | | +------------ SCR exec state control
+ * | +--------------- SCR mask override
+ * +------------------ 32-bit(0)/64-bit(1) EL3
+ *
+ * The table values are as such:
+ * 0-3 = EL0-EL3
+ * -1 = Cannot occur
+ *
+ * The ARM ARM target EL table includes entries indicating that an "exception
+ * is not taken". The two cases where this is applicable are:
+ * 1) An exception is taken from EL3 but the SCR does not have the exception
+ * routed to EL3.
+ * 2) An exception is taken from EL2 but the HCR does not have the exception
+ * routed to EL2.
+ * In these two cases, the below table contains a target of EL1. This value is
+ * returned as it is expected that the consumer of the table data will check
+ * for "target EL >= current EL" to ensure the exception is not taken.
+ *
+ * SCR HCR
+ * 64 EA AMO From
+ * BIT IRQ IMO Non-secure Secure
+ * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3
+ */
+const int8_t target_el_table[2][2][2][2][2][4] = {
+ {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
+ {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},
+ {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
+ {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},},
+ {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
+ {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},
+ {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
+ {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},},
+ {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },},
+ {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},
+ {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, -1, 1 },},
+ {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},},
+ {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
+ {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},
+ {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
+ {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},},},
+};
+
+/*
+ * Determine the target EL for physical exceptions
+ */
+static inline uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
+ uint32_t cur_el, bool secure)
+{
+ CPUARMState *env = cs->env_ptr;
+ int rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
+ int scr;
+ int hcr;
+ int target_el;
+ int is64 = arm_el_is_aa64(env, 3);
+
+ switch (excp_idx) {
+ case EXCP_IRQ:
+ scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
+ hcr = ((env->cp15.hcr_el2 & HCR_IMO) == HCR_IMO);
+ break;
+ case EXCP_FIQ:
+ scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
+ hcr = ((env->cp15.hcr_el2 & HCR_FMO) == HCR_FMO);
+ break;
+ default:
+ scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
+ hcr = ((env->cp15.hcr_el2 & HCR_AMO) == HCR_AMO);
+ break;
+    }
+
+ /* If HCR.TGE is set then HCR is treated as being 1 */
+ hcr |= ((env->cp15.hcr_el2 & HCR_TGE) == HCR_TGE);
+
+ /* Perform a table-lookup for the target EL given the current state */
+ target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
+
+ assert(target_el > 0);
+
+ return target_el;
+}
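
To make the six-level indexing concrete, here is a stand-alone check of two entries (the array is copied verbatim from the hunk above; the scenarios are illustrative):

    #include <assert.h>
    #include <stdint.h>

    static const int8_t target_el_table[2][2][2][2][2][4] = {
        {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
           {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
          {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
           {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
         {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
           {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
          {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
           {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
        {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
           {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
          {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
           {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
         {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
           {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
          {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
           {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
    };

    int main(void)
    {
        /* IRQ from non-secure EL0; 32-bit EL3; SCR.IRQ=0, SCR.RW=0, HCR.IMO=0 */
        assert(target_el_table[0][0][0][0][0][0] == 1);
        /* Same exception once SCR.IRQ is set: routed to EL3 (monitor) */
        assert(target_el_table[0][1][0][0][0][0] == 3);
        return 0;
    }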
+
/*
* Determine the target EL for a given exception type.
*/
@@ -3770,13 +4260,7 @@ unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx)
CPUARMState *env = &cpu->env;
unsigned int cur_el = arm_current_el(env);
unsigned int target_el;
- /* FIXME: Use actual secure state. */
- bool secure = false;
-
- if (!env->aarch64) {
- /* TODO: Add EL2 and 3 exception handling for AArch32. */
- return 1;
- }
+ bool secure = arm_is_secure(env);
switch (excp_idx) {
case EXCP_HVC:
@@ -3788,19 +4272,8 @@ unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx)
break;
case EXCP_FIQ:
case EXCP_IRQ:
- {
- const uint64_t hcr_mask = excp_idx == EXCP_FIQ ? HCR_FMO : HCR_IMO;
- const uint32_t scr_mask = excp_idx == EXCP_FIQ ? SCR_FIQ : SCR_IRQ;
-
- target_el = 1;
- if (!secure && (env->cp15.hcr_el2 & hcr_mask)) {
- target_el = 2;
- }
- if (env->cp15.scr_el3 & scr_mask) {
- target_el = 3;
- }
+ target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
break;
- }
case EXCP_VIRQ:
case EXCP_VFIQ:
target_el = 1;
@@ -3861,6 +4334,16 @@ static void do_v7m_exception_exit(CPUARMState *env)
env->regs[12] = v7m_pop(env);
env->regs[14] = v7m_pop(env);
env->regs[15] = v7m_pop(env);
+ if (env->regs[15] & 1) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "M profile return from interrupt with misaligned "
+ "PC is UNPREDICTABLE\n");
+ /* Actual hardware seems to ignore the lsbit, and there are several
+ * RTOSes out there which incorrectly assume the r15 in the stack
+ * frame should be a Thumb-style "lsbit indicates ARM/Thumb" value.
+ */
+ env->regs[15] &= ~1U;
+ }
xpsr = v7m_pop(env);
xpsr_write(env, xpsr, 0xfffffdff);
/* Undo stack alignment. */
@@ -3957,6 +4440,212 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
env->thumb = addr & 1;
}
+/* Function used to synchronize QEMU's AArch64 register set with the AArch32
+ * register set. This is necessary when switching between AArch32 and AArch64
+ * execution state.
+ */
+void aarch64_sync_32_to_64(CPUARMState *env)
+{
+ int i;
+ uint32_t mode = env->uncached_cpsr & CPSR_M;
+
+ /* We can blanket copy R[0:7] to X[0:7] */
+ for (i = 0; i < 8; i++) {
+ env->xregs[i] = env->regs[i];
+ }
+
+ /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
+ * Otherwise, they come from the banked user regs.
+ */
+ if (mode == ARM_CPU_MODE_FIQ) {
+ for (i = 8; i < 13; i++) {
+ env->xregs[i] = env->usr_regs[i - 8];
+ }
+ } else {
+ for (i = 8; i < 13; i++) {
+ env->xregs[i] = env->regs[i];
+ }
+ }
+
+    /* Registers x13-x23 are the various mode SP and LR registers. Registers
+ * r13 and r14 are only copied if we are in that mode, otherwise we copy
+ * from the mode banked register.
+ */
+ if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
+ env->xregs[13] = env->regs[13];
+ env->xregs[14] = env->regs[14];
+ } else {
+ env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
+ /* HYP is an exception in that it is copied from r14 */
+ if (mode == ARM_CPU_MODE_HYP) {
+ env->xregs[14] = env->regs[14];
+ } else {
+ env->xregs[14] = env->banked_r14[bank_number(ARM_CPU_MODE_USR)];
+ }
+ }
+
+ if (mode == ARM_CPU_MODE_HYP) {
+ env->xregs[15] = env->regs[13];
+ } else {
+ env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
+ }
+
+ if (mode == ARM_CPU_MODE_IRQ) {
+ env->xregs[16] = env->regs[13];
+ env->xregs[17] = env->regs[14];
+ } else {
+ env->xregs[16] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
+ env->xregs[17] = env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)];
+ }
+
+ if (mode == ARM_CPU_MODE_SVC) {
+ env->xregs[18] = env->regs[13];
+ env->xregs[19] = env->regs[14];
+ } else {
+ env->xregs[18] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
+ env->xregs[19] = env->banked_r14[bank_number(ARM_CPU_MODE_SVC)];
+ }
+
+ if (mode == ARM_CPU_MODE_ABT) {
+ env->xregs[20] = env->regs[13];
+ env->xregs[21] = env->regs[14];
+ } else {
+ env->xregs[20] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
+ env->xregs[21] = env->banked_r14[bank_number(ARM_CPU_MODE_ABT)];
+ }
+
+ if (mode == ARM_CPU_MODE_UND) {
+ env->xregs[22] = env->regs[13];
+ env->xregs[23] = env->regs[14];
+ } else {
+ env->xregs[22] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
+ env->xregs[23] = env->banked_r14[bank_number(ARM_CPU_MODE_UND)];
+ }
+
+ /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
+ * mode, then we can copy from r8-r14. Otherwise, we copy from the
+ * FIQ bank for r8-r14.
+ */
+ if (mode == ARM_CPU_MODE_FIQ) {
+ for (i = 24; i < 31; i++) {
+ env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */
+ }
+ } else {
+ for (i = 24; i < 29; i++) {
+ env->xregs[i] = env->fiq_regs[i - 24];
+ }
+ env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
+ env->xregs[30] = env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)];
+ }
+
+ env->pc = env->regs[15];
+}
+
+/* Function used to synchronize QEMU's AArch32 register set with the AArch64
+ * register set. This is necessary when switching between AArch32 and AArch64
+ * execution state.
+ */
+void aarch64_sync_64_to_32(CPUARMState *env)
+{
+ int i;
+ uint32_t mode = env->uncached_cpsr & CPSR_M;
+
+ /* We can blanket copy X[0:7] to R[0:7] */
+ for (i = 0; i < 8; i++) {
+ env->regs[i] = env->xregs[i];
+ }
+
+ /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
+ * Otherwise, we copy x8-x12 into the banked user regs.
+ */
+ if (mode == ARM_CPU_MODE_FIQ) {
+ for (i = 8; i < 13; i++) {
+ env->usr_regs[i - 8] = env->xregs[i];
+ }
+ } else {
+ for (i = 8; i < 13; i++) {
+ env->regs[i] = env->xregs[i];
+ }
+ }
+
+ /* Registers r13 & r14 depend on the current mode.
+ * If we are in a given mode, we copy the corresponding x registers to r13
+ * and r14. Otherwise, we copy the x register to the banked r13 and r14
+ * for the mode.
+ */
+ if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
+ env->regs[13] = env->xregs[13];
+ env->regs[14] = env->xregs[14];
+ } else {
+ env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
+
+ /* HYP is an exception in that it does not have its own banked r14 but
+ * shares the USR r14
+ */
+ if (mode == ARM_CPU_MODE_HYP) {
+ env->regs[14] = env->xregs[14];
+ } else {
+ env->banked_r14[bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
+ }
+ }
+
+ if (mode == ARM_CPU_MODE_HYP) {
+ env->regs[13] = env->xregs[15];
+ } else {
+ env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
+ }
+
+ if (mode == ARM_CPU_MODE_IRQ) {
+ env->regs[13] = env->xregs[16];
+ env->regs[14] = env->xregs[17];
+ } else {
+ env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
+ env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
+ }
+
+ if (mode == ARM_CPU_MODE_SVC) {
+ env->regs[13] = env->xregs[18];
+ env->regs[14] = env->xregs[19];
+ } else {
+ env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
+ env->banked_r14[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
+ }
+
+ if (mode == ARM_CPU_MODE_ABT) {
+ env->regs[13] = env->xregs[20];
+ env->regs[14] = env->xregs[21];
+ } else {
+ env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
+ env->banked_r14[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
+ }
+
+ if (mode == ARM_CPU_MODE_UND) {
+ env->regs[13] = env->xregs[22];
+ env->regs[14] = env->xregs[23];
+ } else {
+ env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
+ env->banked_r14[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
+ }
+
+ /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
+ * mode, then we can copy to r8-r14. Otherwise, we copy to the
+ * FIQ bank for r8-r14.
+ */
+ if (mode == ARM_CPU_MODE_FIQ) {
+ for (i = 24; i < 31; i++) {
+ env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */
+ }
+ } else {
+ for (i = 24; i < 29; i++) {
+ env->fiq_regs[i - 24] = env->xregs[i];
+ }
+ env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
+ env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
+ }
+
+ env->regs[15] = env->pc;
+}
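
The two sync functions are deliberate mirror images, so for any fixed mode a 32-to-64 copy followed by a 64-to-32 copy must round-trip. A cut-down illustration for the SVC pair x18/x19 <-> r13/r14 (MiniState is a hypothetical stand-in for CPUARMState):

    #include <assert.h>
    #include <stdint.h>

    typedef struct {
        uint32_t regs[16];
        uint64_t xregs[32];
    } MiniState;

    /* SVC r13/r14 live in x18/x19 per the mapping above */
    static void svc_32_to_64(MiniState *s)
    {
        s->xregs[18] = s->regs[13];
        s->xregs[19] = s->regs[14];
    }

    static void svc_64_to_32(MiniState *s)
    {
        s->regs[13] = (uint32_t)s->xregs[18];
        s->regs[14] = (uint32_t)s->xregs[19];
    }

    int main(void)
    {
        MiniState s = { .regs = { [13] = 0x8000, [14] = 0x8004 } };
        svc_32_to_64(&s);
        s.regs[13] = s.regs[14] = 0;
        svc_64_to_32(&s);
        assert(s.regs[13] == 0x8000 && s.regs[14] == 0x8004);
        return 0;
    }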
+
/* Handle a CPU exception. */
void arm_cpu_do_interrupt(CPUState *cs)
{
@@ -4055,22 +4744,20 @@ void arm_cpu_do_interrupt(CPUState *cs)
env->exception.fsr = 2;
/* Fall through to prefetch abort. */
case EXCP_PREFETCH_ABORT:
- env->cp15.ifsr_el2 = env->exception.fsr;
- env->cp15.far_el[1] = deposit64(env->cp15.far_el[1], 32, 32,
- env->exception.vaddress);
+ A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
+ A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
- env->cp15.ifsr_el2, (uint32_t)env->exception.vaddress);
+ env->exception.fsr, (uint32_t)env->exception.vaddress);
new_mode = ARM_CPU_MODE_ABT;
addr = 0x0c;
mask = CPSR_A | CPSR_I;
offset = 4;
break;
case EXCP_DATA_ABORT:
- env->cp15.esr_el[1] = env->exception.fsr;
- env->cp15.far_el[1] = deposit64(env->cp15.far_el[1], 0, 32,
- env->exception.vaddress);
+ A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
+ A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
- (uint32_t)env->cp15.esr_el[1],
+ env->exception.fsr,
(uint32_t)env->exception.vaddress);
new_mode = ARM_CPU_MODE_ABT;
addr = 0x10;
@@ -4083,12 +4770,21 @@ void arm_cpu_do_interrupt(CPUState *cs)
/* Disable IRQ and imprecise data aborts. */
mask = CPSR_A | CPSR_I;
offset = 4;
+ if (env->cp15.scr_el3 & SCR_IRQ) {
+ /* IRQ routed to monitor mode */
+ new_mode = ARM_CPU_MODE_MON;
+ mask |= CPSR_F;
+ }
break;
case EXCP_FIQ:
new_mode = ARM_CPU_MODE_FIQ;
addr = 0x1c;
/* Disable FIQ, IRQ and imprecise data aborts. */
mask = CPSR_A | CPSR_I | CPSR_F;
+ if (env->cp15.scr_el3 & SCR_FIQ) {
+ /* FIQ routed to monitor mode */
+ new_mode = ARM_CPU_MODE_MON;
+ }
offset = 4;
break;
case EXCP_SMC:
@@ -4101,19 +4797,19 @@ void arm_cpu_do_interrupt(CPUState *cs)
cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
return; /* Never happens. Keep compiler happy. */
}
- /* High vectors. */
- if (env->cp15.c1_sys & SCTLR_V) {
- /* when enabled, base address cannot be remapped. */
+
+ if (new_mode == ARM_CPU_MODE_MON) {
+ addr += env->cp15.mvbar;
+ } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
+ /* High vectors. When enabled, base address cannot be remapped. */
addr += 0xffff0000;
} else {
/* ARM v7 architectures provide a vector base address register to remap
* the interrupt vector table.
- * This register is only followed in non-monitor mode, and has a secure
- * and un-secure copy. Since the cpu is always in a un-secure operation
- * and is never in monitor mode this feature is always active.
+ * This register is only followed in non-monitor mode, and is banked.
* Note: only bits 31:5 are valid.
*/
- addr += env->cp15.vbar_el[1];
+ addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
}
if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
@@ -4134,91 +4830,280 @@ void arm_cpu_do_interrupt(CPUState *cs)
/* this is a lie, as there was no c1_sys on V4T/V5, but who cares
* and we should just guard the thumb mode on V4 */
if (arm_feature(env, ARM_FEATURE_V4T)) {
- env->thumb = (env->cp15.c1_sys & SCTLR_TE) != 0;
+ env->thumb = (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
}
env->regs[14] = env->regs[15] + offset;
env->regs[15] = addr;
cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
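
The vector computation above collapses to three mutually exclusive bases: MVBAR for exceptions routed to monitor mode, the fixed high-vectors region when SCTLR.V is set, and the (banked) VBAR otherwise. A small sketch for an IRQ, whose table slot is 0x18 (the VBAR value is illustrative):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t addr = 0x18;            /* IRQ slot in the vector table */
        int to_monitor = 0;
        int sctlr_v = 1;                 /* high vectors (Hivecs) enabled */
        uint32_t mvbar = 0, vbar = 0x40000020;

        if (to_monitor) {
            addr += mvbar;
        } else if (sctlr_v) {
            addr += 0xffff0000;          /* fixed, not remappable */
        } else {
            addr += vbar & ~0x1fu;       /* only bits [31:5] of VBAR are valid */
        }
        assert(addr == 0xffff0018);
        return 0;
    }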
-/* Check section/page access permissions.
- Returns the page protection flags, or zero if the access is not
- permitted. */
-static inline int check_ap(CPUARMState *env, int ap, int domain_prot,
- int access_type, int is_user)
-{
- int prot_ro;
-
- if (domain_prot == 3) {
- return PAGE_READ | PAGE_WRITE;
- }
-
- if (access_type == 1)
- prot_ro = 0;
- else
- prot_ro = PAGE_READ;
-
- switch (ap) {
- case 0:
- if (arm_feature(env, ARM_FEATURE_V7)) {
- return 0;
- }
- if (access_type == 1)
- return 0;
- switch (env->cp15.c1_sys & (SCTLR_S | SCTLR_R)) {
- case SCTLR_S:
- return is_user ? 0 : PAGE_READ;
- case SCTLR_R:
- return PAGE_READ;
- default:
- return 0;
- }
- case 1:
- return is_user ? 0 : PAGE_READ | PAGE_WRITE;
- case 2:
- if (is_user)
- return prot_ro;
- else
- return PAGE_READ | PAGE_WRITE;
- case 3:
- return PAGE_READ | PAGE_WRITE;
- case 4: /* Reserved. */
- return 0;
- case 5:
- return is_user ? 0 : prot_ro;
- case 6:
- return prot_ro;
- case 7:
- if (!arm_feature (env, ARM_FEATURE_V6K))
- return 0;
- return prot_ro;
- default:
- abort();
- }
-}
-
-static bool get_level1_table_address(CPUARMState *env, uint32_t *table,
- uint32_t address)
-{
- if (address & env->cp15.c2_mask) {
- if ((env->cp15.c2_control & TTBCR_PD1)) {
+
+/* Return the exception level which controls this address translation regime */
+static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ switch (mmu_idx) {
+ case ARMMMUIdx_S2NS:
+ case ARMMMUIdx_S1E2:
+ return 2;
+ case ARMMMUIdx_S1E3:
+ return 3;
+ case ARMMMUIdx_S1SE0:
+ return arm_el_is_aa64(env, 3) ? 1 : 3;
+ case ARMMMUIdx_S1SE1:
+ case ARMMMUIdx_S1NSE0:
+ case ARMMMUIdx_S1NSE1:
+ return 1;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* Return the SCTLR value which controls this address translation regime */
+static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
+}
+
+/* Return true if the specified stage of address translation is disabled */
+static inline bool regime_translation_disabled(CPUARMState *env,
+ ARMMMUIdx mmu_idx)
+{
+ if (mmu_idx == ARMMMUIdx_S2NS) {
+ return (env->cp15.hcr_el2 & HCR_VM) == 0;
+ }
+ return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
+}
+
+/* Return the TCR controlling this translation regime */
+static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ if (mmu_idx == ARMMMUIdx_S2NS) {
+ /* TODO: return VTCR_EL2 */
+ g_assert_not_reached();
+ }
+ return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
+}
+
+/* Return true if the translation regime is using LPAE format page tables */
+static inline bool regime_using_lpae_format(CPUARMState *env,
+ ARMMMUIdx mmu_idx)
+{
+ int el = regime_el(env, mmu_idx);
+ if (el == 2 || arm_el_is_aa64(env, el)) {
+ return true;
+ }
+ if (arm_feature(env, ARM_FEATURE_LPAE)
+ && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
+ return true;
+ }
+ return false;
+}
+
+static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ switch (mmu_idx) {
+ case ARMMMUIdx_S1SE0:
+ case ARMMMUIdx_S1NSE0:
+ return true;
+ default:
+ return false;
+ case ARMMMUIdx_S12NSE0:
+ case ARMMMUIdx_S12NSE1:
+ g_assert_not_reached();
+ }
+}
+
+/* Translate section/page access permissions to page
+ * R/W protection flags
+ *
+ * @env: CPUARMState
+ * @mmu_idx: MMU index indicating required translation regime
+ * @ap: The 3-bit access permissions (AP[2:0])
+ * @domain_prot: The 2-bit domain access permissions
+ */
+static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
+ int ap, int domain_prot)
+{
+ bool is_user = regime_is_user(env, mmu_idx);
+
+ if (domain_prot == 3) {
+ return PAGE_READ | PAGE_WRITE;
+ }
+
+ switch (ap) {
+ case 0:
+ if (arm_feature(env, ARM_FEATURE_V7)) {
+ return 0;
+ }
+ switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
+ case SCTLR_S:
+ return is_user ? 0 : PAGE_READ;
+ case SCTLR_R:
+ return PAGE_READ;
+ default:
+ return 0;
+ }
+ case 1:
+ return is_user ? 0 : PAGE_READ | PAGE_WRITE;
+ case 2:
+ if (is_user) {
+ return PAGE_READ;
+ } else {
+ return PAGE_READ | PAGE_WRITE;
+ }
+ case 3:
+ return PAGE_READ | PAGE_WRITE;
+ case 4: /* Reserved. */
+ return 0;
+ case 5:
+ return is_user ? 0 : PAGE_READ;
+ case 6:
+ return PAGE_READ;
+ case 7:
+ if (!arm_feature(env, ARM_FEATURE_V6K)) {
+ return 0;
+ }
+ return PAGE_READ;
+ default:
+ g_assert_not_reached();
+ }
+}
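
For reference, the v7 subset of the AP[2:0] decode above can be tabulated without any CPU state. A hypothetical standalone helper plus a spot check (the real function also consults SCTLR.S/R for the pre-v7 case 0, and case 7 requires v6K):

    #include <assert.h>

    #define PAGE_READ  0x1
    #define PAGE_WRITE 0x2

    static int ap_decode_v7(int ap, int is_user)
    {
        switch (ap) {
        case 0: return 0;                                   /* no access (v7) */
        case 1: return is_user ? 0 : PAGE_READ | PAGE_WRITE;
        case 2: return is_user ? PAGE_READ : PAGE_READ | PAGE_WRITE;
        case 3: return PAGE_READ | PAGE_WRITE;
        case 5: return is_user ? 0 : PAGE_READ;
        case 6: return PAGE_READ;
        case 7: return PAGE_READ;
        }
        return 0;                                           /* case 4: reserved */
    }

    int main(void)
    {
        /* AP=2: privileged read/write, unprivileged read-only */
        assert(ap_decode_v7(2, 1) == PAGE_READ);
        assert(ap_decode_v7(2, 0) == (PAGE_READ | PAGE_WRITE));
        return 0;
    }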
+
+/* Translate section/page access permissions to page
+ * R/W protection flags.
+ *
+ * @ap: The 2-bit simple AP (AP[2:1])
+ * @is_user: TRUE if accessing from PL0
+ */
+static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
+{
+ switch (ap) {
+ case 0:
+ return is_user ? 0 : PAGE_READ | PAGE_WRITE;
+ case 1:
+ return PAGE_READ | PAGE_WRITE;
+ case 2:
+ return is_user ? 0 : PAGE_READ;
+ case 3:
+ return PAGE_READ;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static inline int
+simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
+{
+ return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
+}
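
The simple-AP scheme is small enough to verify exhaustively: AP[2] selects read-only and AP[1] grants unprivileged access. A local copy with a spot check (the PAGE_* values are illustrative stand-ins for QEMU's):

    #include <assert.h>

    #define PAGE_READ  0x1
    #define PAGE_WRITE 0x2

    /* Local copy of the decode above; ap is AP[2:1] */
    static int simple_ap(int ap, int is_user)
    {
        switch (ap) {
        case 0: return is_user ? 0 : PAGE_READ | PAGE_WRITE;
        case 1: return PAGE_READ | PAGE_WRITE;
        case 2: return is_user ? 0 : PAGE_READ;
        default: return PAGE_READ;   /* case 3 */
        }
    }

    int main(void)
    {
        assert(simple_ap(2, 1) == 0);          /* priv-only read-only, from EL0 */
        assert(simple_ap(2, 0) == PAGE_READ);
        assert(simple_ap(3, 1) == PAGE_READ);  /* read-only at any level */
        return 0;
    }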
+
+/* Translate section/page access permissions to protection flags
+ *
+ * @env: CPUARMState
+ * @mmu_idx: MMU index indicating required translation regime
+ * @is_aa64: TRUE if AArch64
+ * @ap: The 2-bit simple AP (AP[2:1])
+ * @ns: NS (non-secure) bit
+ * @xn: XN (execute-never) bit
+ * @pxn: PXN (privileged execute-never) bit
+ */
+static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
+ int ap, int ns, int xn, int pxn)
+{
+ bool is_user = regime_is_user(env, mmu_idx);
+ int prot_rw, user_rw;
+ bool have_wxn;
+ int wxn = 0;
+
+ assert(mmu_idx != ARMMMUIdx_S2NS);
+
+ user_rw = simple_ap_to_rw_prot_is_user(ap, true);
+ if (is_user) {
+ prot_rw = user_rw;
+ } else {
+ prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
+ }
+
+ if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
+ return prot_rw;
+ }
+
+ /* TODO have_wxn should be replaced with
+ * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
+ * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
+ * compatible processors have EL2, which is required for [U]WXN.
+ */
+ have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
+
+ if (have_wxn) {
+ wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
+ }
+
+ if (is_aa64) {
+ switch (regime_el(env, mmu_idx)) {
+ case 1:
+ if (!is_user) {
+ xn = pxn || (user_rw & PAGE_WRITE);
+ }
+ break;
+ case 2:
+ case 3:
+ break;
+ }
+ } else if (arm_feature(env, ARM_FEATURE_V7)) {
+ switch (regime_el(env, mmu_idx)) {
+ case 1:
+ case 3:
+ if (is_user) {
+ xn = xn || !(user_rw & PAGE_READ);
+ } else {
+ int uwxn = 0;
+ if (have_wxn) {
+ uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
+ }
+ xn = xn || !(prot_rw & PAGE_READ) || pxn ||
+ (uwxn && (user_rw & PAGE_WRITE));
+ }
+ break;
+ case 2:
+ break;
+ }
+ } else {
+ xn = wxn = 0;
+ }
+
+ if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
+ return prot_rw;
+ }
+ return prot_rw | PAGE_EXEC;
+}
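
One consequence worth calling out: with SCTLR.WXN set, the final step above withholds PAGE_EXEC from any writable page. A minimal sketch of just that rule (values illustrative):

    #include <assert.h>

    #define PAGE_READ  0x1
    #define PAGE_WRITE 0x2
    #define PAGE_EXEC  0x4

    int main(void)
    {
        int prot_rw = PAGE_READ | PAGE_WRITE;
        int wxn = 1;                     /* SCTLR.WXN set */
        int xn = 0;                      /* page itself not execute-never */
        int prot;

        if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
            prot = prot_rw;              /* exec permission withheld */
        } else {
            prot = prot_rw | PAGE_EXEC;
        }
        assert(prot == (PAGE_READ | PAGE_WRITE));
        return 0;
    }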
+
+static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
+ uint32_t *table, uint32_t address)
+{
+ /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
+ int el = regime_el(env, mmu_idx);
+ TCR *tcr = regime_tcr(env, mmu_idx);
+
+ if (address & tcr->mask) {
+ if (tcr->raw_tcr & TTBCR_PD1) {
/* Translation table walk disabled for TTBR1 */
return false;
}
- *table = env->cp15.ttbr1_el1 & 0xffffc000;
+ *table = env->cp15.ttbr1_el[el] & 0xffffc000;
} else {
- if ((env->cp15.c2_control & TTBCR_PD0)) {
+ if (tcr->raw_tcr & TTBCR_PD0) {
/* Translation table walk disabled for TTBR0 */
return false;
}
- *table = env->cp15.ttbr0_el1 & env->cp15.c2_base_mask;
+ *table = env->cp15.ttbr0_el[el] & tcr->base_mask;
}
*table |= (address >> 18) & 0x3ffc;
return true;
}
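
The index arithmetic here packs VA[31:20] into a 4-byte-aligned offset: (address >> 18) & 0x3ffc is exactly VA[31:20] << 2, selecting one of 4096 level-1 descriptors. A quick check (the table base is illustrative):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t ttbr = 0x80004000;                 /* illustrative 16KB-aligned base */
        uint32_t va   = 0x12345678;                 /* VA[31:20] == 0x123 */
        uint32_t table = (ttbr & 0xffffc000) | ((va >> 18) & 0x3ffc);
        assert(table == (0x80004000u | (0x123u << 2)));
        return 0;
    }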
static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
- int is_user, hwaddr *phys_ptr,
+ ARMMMUIdx mmu_idx, hwaddr *phys_ptr,
int *prot, target_ulong *page_size)
{
CPUState *cs = CPU(arm_env_get_cpu(env));
@@ -4230,10 +5115,11 @@ static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
int domain = 0;
int domain_prot;
hwaddr phys_addr;
+ uint32_t dacr;
/* Pagetable walk. */
/* Lookup l1 descriptor. */
- if (!get_level1_table_address(env, &table, address)) {
+ if (!get_level1_table_address(env, mmu_idx, &table, address)) {
/* Section translation fault if page walk is disabled by PD0 or PD1 */
code = 5;
goto do_fault;
@@ -4241,7 +5127,12 @@ static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
desc = ldl_phys(cs->as, table);
type = (desc & 3);
domain = (desc >> 5) & 0x0f;
- domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
+ if (regime_el(env, mmu_idx) == 1) {
+ dacr = env->cp15.dacr_ns;
+ } else {
+ dacr = env->cp15.dacr_s;
+ }
+ domain_prot = (dacr >> (domain * 2)) & 3;
if (type == 0) {
/* Section translation fault. */
code = 5;
@@ -4262,13 +5153,13 @@ static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
*page_size = 1024 * 1024;
} else {
/* Lookup l2 entry. */
- if (type == 1) {
- /* Coarse pagetable. */
- table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
- } else {
- /* Fine pagetable. */
- table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
- }
+ if (type == 1) {
+ /* Coarse pagetable. */
+ table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
+ } else {
+ /* Fine pagetable. */
+ table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
+ }
desc = ldl_phys(cs->as, table);
switch (desc & 3) {
case 0: /* Page translation fault. */
@@ -4285,17 +5176,17 @@ static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
*page_size = 0x1000;
break;
case 3: /* 1k page. */
- if (type == 1) {
- if (arm_feature(env, ARM_FEATURE_XSCALE)) {
- phys_addr = (desc & 0xfffff000) | (address & 0xfff);
- } else {
- /* Page translation fault. */
- code = 7;
- goto do_fault;
- }
- } else {
- phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
- }
+ if (type == 1) {
+ if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+ phys_addr = (desc & 0xfffff000) | (address & 0xfff);
+ } else {
+ /* Page translation fault. */
+ code = 7;
+ goto do_fault;
+ }
+ } else {
+ phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
+ }
ap = (desc >> 4) & 3;
*page_size = 0x400;
break;
@@ -4305,12 +5196,12 @@ static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
}
code = 15;
}
- *prot = check_ap(env, ap, domain_prot, access_type, is_user);
- if (!*prot) {
+ *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
+ *prot |= *prot ? PAGE_EXEC : 0;
+ if (!(*prot & (1 << access_type))) {
/* Access permission fault. */
goto do_fault;
}
- *prot |= PAGE_EXEC;
*phys_ptr = phys_addr;
return 0;
do_fault:
@@ -4318,7 +5209,7 @@ do_fault:
}
static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
- int is_user, hwaddr *phys_ptr,
+ ARMMMUIdx mmu_idx, hwaddr *phys_ptr,
int *prot, target_ulong *page_size)
{
CPUState *cs = CPU(arm_env_get_cpu(env));
@@ -4332,10 +5223,11 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
int domain = 0;
int domain_prot;
hwaddr phys_addr;
+ uint32_t dacr;
/* Pagetable walk. */
/* Lookup l1 descriptor. */
- if (!get_level1_table_address(env, &table, address)) {
+ if (!get_level1_table_address(env, mmu_idx, &table, address)) {
/* Section translation fault if page walk is disabled by PD0 or PD1 */
code = 5;
goto do_fault;
@@ -4353,7 +5245,12 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
/* Page or Section. */
domain = (desc >> 5) & 0x0f;
}
- domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
+ if (regime_el(env, mmu_idx) == 1) {
+ dacr = env->cp15.dacr_ns;
+ } else {
+ dacr = env->cp15.dacr_s;
+ }
+ domain_prot = (dacr >> (domain * 2)) & 3;
if (domain_prot == 0 || domain_prot == 2) {
if (type != 1) {
code = 9; /* Section domain fault. */
@@ -4407,26 +5304,31 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
if (domain_prot == 3) {
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
} else {
- if (pxn && !is_user) {
+ if (pxn && !regime_is_user(env, mmu_idx)) {
xn = 1;
}
if (xn && access_type == 2)
goto do_fault;
- /* The simplified model uses AP[0] as an access control bit. */
- if ((env->cp15.c1_sys & SCTLR_AFE) && (ap & 1) == 0) {
- /* Access flag fault. */
- code = (code == 15) ? 6 : 3;
- goto do_fault;
+ if (arm_feature(env, ARM_FEATURE_V6K) &&
+ (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
+ /* The simplified model uses AP[0] as an access control bit. */
+ if ((ap & 1) == 0) {
+ /* Access flag fault. */
+ code = (code == 15) ? 6 : 3;
+ goto do_fault;
+ }
+ *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
+ } else {
+ *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
+ }
+ if (*prot && !xn) {
+ *prot |= PAGE_EXEC;
}
- *prot = check_ap(env, ap, domain_prot, access_type, is_user);
- if (!*prot) {
+ if (!(*prot & (1 << access_type))) {
/* Access permission fault. */
goto do_fault;
}
- if (!xn) {
- *prot |= PAGE_EXEC;
- }
}
*phys_ptr = phys_addr;
return 0;
@@ -4444,7 +5346,7 @@ typedef enum {
} MMUFaultType;
static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
- int access_type, int is_user,
+ int access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, int *prot,
target_ulong *page_size_ptr)
{
@@ -4464,13 +5366,22 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
int32_t granule_sz = 9;
int32_t va_size = 32;
int32_t tbi = 0;
-
- if (arm_el_is_aa64(env, 1)) {
+ TCR *tcr = regime_tcr(env, mmu_idx);
+ int ap, ns, xn, pxn;
+
+ /* TODO:
+ * This code assumes we're either a 64-bit EL1 or a 32-bit PL1;
+ * it doesn't handle the different format TCR for TCR_EL2, TCR_EL3,
+ * and VTCR_EL2, or the fact that those regimes don't have a split
+ * TTBR0/TTBR1. Attribute and permission bit handling should also
+ * be checked when adding support for those page table walks.
+ */
+ if (arm_el_is_aa64(env, regime_el(env, mmu_idx))) {
va_size = 64;
if (extract64(address, 55, 1))
- tbi = extract64(env->cp15.c2_control, 38, 1);
+ tbi = extract64(tcr->raw_tcr, 38, 1);
else
- tbi = extract64(env->cp15.c2_control, 37, 1);
+ tbi = extract64(tcr->raw_tcr, 37, 1);
tbi *= 8;
}
@@ -4479,13 +5390,13 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
* This is a Non-secure PL0/1 stage 1 translation, so controlled by
* TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
*/
- uint32_t t0sz = extract32(env->cp15.c2_control, 0, 6);
- if (arm_el_is_aa64(env, 1)) {
+ uint32_t t0sz = extract32(tcr->raw_tcr, 0, 6);
+ if (va_size == 64) {
t0sz = MIN(t0sz, 39);
t0sz = MAX(t0sz, 16);
}
- uint32_t t1sz = extract32(env->cp15.c2_control, 16, 6);
- if (arm_el_is_aa64(env, 1)) {
+ uint32_t t1sz = extract32(tcr->raw_tcr, 16, 6);
+ if (va_size == 64) {
t1sz = MIN(t1sz, 39);
t1sz = MAX(t1sz, 16);
}
@@ -4515,11 +5426,11 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
* we will always flush the TLB any time the ASID is changed).
*/
if (ttbr_select == 0) {
- ttbr = env->cp15.ttbr0_el1;
- epd = extract32(env->cp15.c2_control, 7, 1);
+ ttbr = A32_BANKED_CURRENT_REG_GET(env, ttbr0);
+ epd = extract32(tcr->raw_tcr, 7, 1);
tsz = t0sz;
- tg = extract32(env->cp15.c2_control, 14, 2);
+ tg = extract32(tcr->raw_tcr, 14, 2);
if (tg == 1) { /* 64KB pages */
granule_sz = 13;
}
@@ -4527,11 +5438,11 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
granule_sz = 11;
}
} else {
- ttbr = env->cp15.ttbr1_el1;
- epd = extract32(env->cp15.c2_control, 23, 1);
+ ttbr = A32_BANKED_CURRENT_REG_GET(env, ttbr1);
+ epd = extract32(tcr->raw_tcr, 23, 1);
tsz = t1sz;
- tg = extract32(env->cp15.c2_control, 30, 2);
+ tg = extract32(tcr->raw_tcr, 30, 2);
if (tg == 3) { /* 64KB pages */
granule_sz = 13;
}
@@ -4540,6 +5451,10 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
}
}
+ /* Here we should have set up all the parameters for the translation:
+ * va_size, ttbr, epd, tsz, granule_sz, tbi
+ */
+
if (epd) {
/* Translation table walk disabled => Translation fault on TLB miss */
goto do_fault;
@@ -4613,7 +5528,7 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
if (extract32(tableattrs, 2, 1)) {
attrs &= ~(1 << 4);
}
- /* Since we're always in the Non-secure state, NSTable is ignored. */
+ attrs |= extract32(tableattrs, 4, 1) << 3; /* NS */
break;
}
/* Here descaddr is the final physical address, and attributes
@@ -4624,30 +5539,18 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
/* Access flag */
goto do_fault;
}
+
+ ap = extract32(attrs, 4, 2);
+ ns = extract32(attrs, 3, 1);
+ xn = extract32(attrs, 12, 1);
+ pxn = extract32(attrs, 11, 1);
+
+ *prot = get_S1prot(env, mmu_idx, va_size == 64, ap, ns, xn, pxn);
+
fault_type = permission_fault;
- if (is_user && !(attrs & (1 << 4))) {
- /* Unprivileged access not enabled */
+ if (!(*prot & (1 << access_type))) {
goto do_fault;
}
- *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- if ((arm_feature(env, ARM_FEATURE_V8) && is_user && (attrs & (1 << 12))) ||
- (!arm_feature(env, ARM_FEATURE_V8) && (attrs & (1 << 12))) ||
- (!is_user && (attrs & (1 << 11)))) {
- /* XN/UXN or PXN. Since we only implement EL0/EL1 we unconditionally
- * treat XN/UXN as UXN for v8.
- */
- if (access_type == 2) {
- goto do_fault;
- }
- *prot &= ~PAGE_EXEC;
- }
- if (attrs & (1 << 5)) {
- /* Write access forbidden */
- if (access_type == 1) {
- goto do_fault;
- }
- *prot &= ~PAGE_WRITE;
- }
*phys_ptr = descaddr;
*page_size_ptr = page_size;
@@ -4659,27 +5562,31 @@ do_fault:
}
static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
- int access_type, int is_user,
+ int access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, int *prot)
{
int n;
uint32_t mask;
uint32_t base;
+ bool is_user = regime_is_user(env, mmu_idx);
*phys_ptr = address;
for (n = 7; n >= 0; n--) {
- base = env->cp15.c6_region[n];
- if ((base & 1) == 0)
- continue;
- mask = 1 << ((base >> 1) & 0x1f);
- /* Keep this shift separate from the above to avoid an
- (undefined) << 32. */
- mask = (mask << 1) - 1;
- if (((base ^ address) & ~mask) == 0)
- break;
- }
- if (n < 0)
- return 2;
+ base = env->cp15.c6_region[n];
+ if ((base & 1) == 0) {
+ continue;
+ }
+ mask = 1 << ((base >> 1) & 0x1f);
+ /* Keep this shift separate from the above to avoid an
+ (undefined) << 32. */
+ mask = (mask << 1) - 1;
+ if (((base ^ address) & ~mask) == 0) {
+ break;
+ }
+ }
+ if (n < 0) {
+ return 2;
+ }
if (access_type == 2) {
mask = env->cp15.pmsav5_insn_ap;
@@ -4689,31 +5596,34 @@ static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
mask = (mask >> (n * 4)) & 0xf;
switch (mask) {
case 0:
- return 1;
+ return 1;
case 1:
- if (is_user)
- return 1;
- *prot = PAGE_READ | PAGE_WRITE;
- break;
+ if (is_user) {
+ return 1;
+ }
+ *prot = PAGE_READ | PAGE_WRITE;
+ break;
case 2:
- *prot = PAGE_READ;
- if (!is_user)
- *prot |= PAGE_WRITE;
- break;
+ *prot = PAGE_READ;
+ if (!is_user) {
+ *prot |= PAGE_WRITE;
+ }
+ break;
case 3:
- *prot = PAGE_READ | PAGE_WRITE;
- break;
+ *prot = PAGE_READ | PAGE_WRITE;
+ break;
case 5:
- if (is_user)
- return 1;
- *prot = PAGE_READ;
- break;
+ if (is_user) {
+ return 1;
+ }
+ *prot = PAGE_READ;
+ break;
case 6:
- *prot = PAGE_READ;
- break;
+ *prot = PAGE_READ;
+ break;
default:
- /* Bad permission. */
- return 1;
+ /* Bad permission. */
+ return 1;
}
*prot |= PAGE_EXEC;
return 0;
@@ -4737,38 +5647,60 @@ static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
* @env: CPUARMState
* @address: virtual address to get physical address for
* @access_type: 0 for read, 1 for write, 2 for execute
- * @is_user: 0 for privileged access, 1 for user
+ * @mmu_idx: MMU index indicating required translation regime
* @phys_ptr: set to the physical address corresponding to the virtual address
* @prot: set to the permissions for the page containing phys_ptr
* @page_size: set to the size of the page containing phys_ptr
*/
static inline int get_phys_addr(CPUARMState *env, target_ulong address,
- int access_type, int is_user,
+ int access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, int *prot,
target_ulong *page_size)
{
- /* Fast Context Switch Extension. */
- if (address < 0x02000000)
- address += env->cp15.c13_fcse;
+ if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
+        /* TODO: when we support EL2 we should call ourselves recursively here
+ * to do the stage 1 and then stage 2 translations. The ldl_phys
+ * calls for stage 1 will also need changing.
+ * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
+ */
+ assert(!arm_feature(env, ARM_FEATURE_EL2));
+ mmu_idx += ARMMMUIdx_S1NSE0;
+ }
+
+ /* Fast Context Switch Extension. This doesn't exist at all in v8.
+ * In v7 and earlier it affects all stage 1 translations.
+ */
+ if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
+ && !arm_feature(env, ARM_FEATURE_V8)) {
+ if (regime_el(env, mmu_idx) == 3) {
+ address += env->cp15.fcseidr_s;
+ } else {
+ address += env->cp15.fcseidr_ns;
+ }
+ }
- if ((env->cp15.c1_sys & SCTLR_M) == 0) {
+ if (regime_translation_disabled(env, mmu_idx)) {
/* MMU/MPU disabled. */
*phys_ptr = address;
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
*page_size = TARGET_PAGE_SIZE;
return 0;
- } else if (arm_feature(env, ARM_FEATURE_MPU)) {
+ }
+
+ if (arm_feature(env, ARM_FEATURE_MPU)) {
*page_size = TARGET_PAGE_SIZE;
- return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
- prot);
- } else if (extended_addresses_enabled(env)) {
- return get_phys_addr_lpae(env, address, access_type, is_user, phys_ptr,
+ return get_phys_addr_mpu(env, address, access_type, mmu_idx, phys_ptr,
+ prot);
+ }
+
+ if (regime_using_lpae_format(env, mmu_idx)) {
+ return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr,
prot, page_size);
- } else if (env->cp15.c1_sys & SCTLR_XP) {
- return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
+ } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
+ return get_phys_addr_v6(env, address, access_type, mmu_idx, phys_ptr,
prot, page_size);
} else {
- return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
+ return get_phys_addr_v5(env, address, access_type, mmu_idx, phys_ptr,
prot, page_size);
}
}
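
The FCSE relocation above only ever touches the bottom 32MB of the address space: such VAs have the FCSEIDR value added before translation. A worked example (the ProcID value is illustrative):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t fcseidr = 0x0a000000;   /* current ProcID base */
        uint32_t va = 0x00001000;        /* below 32MB, so remapped */
        uint32_t mva = (va < 0x02000000) ? va + fcseidr : va;
        assert(mva == 0x0a001000);
        return 0;
    }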
@@ -4781,12 +5713,11 @@ int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
hwaddr phys_addr;
target_ulong page_size;
int prot;
- int ret, is_user;
+ int ret;
uint32_t syn;
bool same_el = (arm_current_el(env) != 0);
- is_user = mmu_idx == MMU_USER_IDX;
- ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
+ ret = get_phys_addr(env, address, access_type, mmu_idx, &phys_addr, &prot,
&page_size);
if (ret == 0) {
/* Map a single [sub]page. */
@@ -4822,12 +5753,14 @@ int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
hwaddr phys_addr;
target_ulong page_size;
int prot;
int ret;
- ret = get_phys_addr(&cpu->env, addr, 0, 0, &phys_addr, &prot, &page_size);
+ ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env), &phys_addr,
+ &prot, &page_size);
if (ret != 0) {
return -1;
@@ -5908,7 +6841,7 @@ float64 HELPER(recpe_f64)(float64 input, void *fpstp)
} else {
return float64_set_sign(float64_maxnorm, float64_is_neg(f64));
}
- } else if (f64_exp >= 1023 && fpst->flush_to_zero) {
+ } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
float_raise(float_flag_underflow, fpst);
return float64_set_sign(float64_zero, float64_is_neg(f64));
}
diff --git a/target-arm/internals.h b/target-arm/internals.h
index 2dff4ffb1..2cc301762 100644
--- a/target-arm/internals.h
+++ b/target-arm/internals.h
@@ -82,11 +82,14 @@ static inline void arm_log_exception(int idx)
/*
* For AArch64, map a given EL to an index in the banked_spsr array.
+ * Note that this mapping and the AArch32 mapping defined in bank_number()
+ * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
+ * mandated mapping between each other.
*/
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
static const unsigned int map[4] = {
- [1] = 0, /* EL1. */
+ [1] = 1, /* EL1. */
[2] = 6, /* EL2. */
[3] = 7, /* EL3. */
};
@@ -153,9 +156,9 @@ static inline void update_spsel(CPUARMState *env, uint32_t imm)
*/
static inline bool extended_addresses_enabled(CPUARMState *env)
{
- return arm_el_is_aa64(env, 1)
- || ((arm_feature(env, ARM_FEATURE_LPAE)
- && (env->cp15.c2_control & TTBCR_EAE)));
+ TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
+ return arm_el_is_aa64(env, 1) ||
+ (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
}
/* Valid Syndrome Register EC field values */
diff --git a/target-arm/kvm.c b/target-arm/kvm.c
index 319784d68..fdd9ba3f1 100644
--- a/target-arm/kvm.c
+++ b/target-arm/kvm.c
@@ -21,12 +21,15 @@
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "cpu.h"
+#include "internals.h"
#include "hw/arm/arm.h"
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_LAST_INFO
};
+static bool cap_has_mp_state;
+
int kvm_arm_vcpu_init(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
@@ -149,13 +152,15 @@ static const TypeInfo host_arm_cpu_type_info = {
.class_size = sizeof(ARMHostCPUClass),
};
-int kvm_arch_init(KVMState *s)
+int kvm_arch_init(MachineState *ms, KVMState *s)
{
/* For ARM interrupt delivery is always asynchronous,
* whether we are using an in-kernel VGIC or not.
*/
kvm_async_interrupts_allowed = true;
+ cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);
+
type_register_static(&host_arm_cpu_type_info);
return 0;
@@ -279,6 +284,94 @@ void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
memory_region_ref(kd->mr);
}
+static int compare_u64(const void *a, const void *b)
+{
+ if (*(uint64_t *)a > *(uint64_t *)b) {
+ return 1;
+ }
+ if (*(uint64_t *)a < *(uint64_t *)b) {
+ return -1;
+ }
+ return 0;
+}
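
compare_u64() spells out the three-way result with explicit comparisons because the common "return a - b" shortcut cannot represent a 64-bit unsigned difference in an int. Usage with qsort(), as in the list setup below:

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    static int compare_u64(const void *a, const void *b)
    {
        if (*(const uint64_t *)a > *(const uint64_t *)b) {
            return 1;
        }
        if (*(const uint64_t *)a < *(const uint64_t *)b) {
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        uint64_t regidx[] = { 3, UINT64_MAX, 0, 42 };
        qsort(regidx, 4, sizeof(regidx[0]), compare_u64);
        assert(regidx[0] == 0 && regidx[3] == UINT64_MAX);
        return 0;
    }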
+
+/* Initialize the CPUState's cpreg list according to the kernel's
+ * definition of what CPU registers it knows about (and throw away
+ * the previous TCG-created cpreg list).
+ */
+int kvm_arm_init_cpreg_list(ARMCPU *cpu)
+{
+ struct kvm_reg_list rl;
+ struct kvm_reg_list *rlp;
+ int i, ret, arraylen;
+ CPUState *cs = CPU(cpu);
+
+ rl.n = 0;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl);
+ if (ret != -E2BIG) {
+ return ret;
+ }
+ rlp = g_malloc(sizeof(struct kvm_reg_list) + rl.n * sizeof(uint64_t));
+ rlp->n = rl.n;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp);
+ if (ret) {
+ goto out;
+ }
+ /* Sort the list we get back from the kernel, since cpreg_tuples
+ * must be in strictly ascending order.
+ */
+ qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64);
+
+ for (i = 0, arraylen = 0; i < rlp->n; i++) {
+ if (!kvm_arm_reg_syncs_via_cpreg_list(rlp->reg[i])) {
+ continue;
+ }
+ switch (rlp->reg[i] & KVM_REG_SIZE_MASK) {
+ case KVM_REG_SIZE_U32:
+ case KVM_REG_SIZE_U64:
+ break;
+ default:
+ fprintf(stderr, "Can't handle size of register in kernel list\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ arraylen++;
+ }
+
+ cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen);
+ cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen);
+ cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes,
+ arraylen);
+ cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values,
+ arraylen);
+ cpu->cpreg_array_len = arraylen;
+ cpu->cpreg_vmstate_array_len = arraylen;
+
+ for (i = 0, arraylen = 0; i < rlp->n; i++) {
+ uint64_t regidx = rlp->reg[i];
+ if (!kvm_arm_reg_syncs_via_cpreg_list(regidx)) {
+ continue;
+ }
+ cpu->cpreg_indexes[arraylen] = regidx;
+ arraylen++;
+ }
+ assert(cpu->cpreg_array_len == arraylen);
+
+ if (!write_kvmstate_to_list(cpu)) {
+ /* Shouldn't happen unless kernel is inconsistent about
+ * what registers exist.
+ */
+ fprintf(stderr, "Initial read of kernel register state failed\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ g_free(rlp);
+ return ret;
+}
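
The probe-then-fill shape of the KVM_GET_REG_LIST calls above (a first call with n == 0 fails with E2BIG but reports the element count; a second call fills the array) is a standard two-call ioctl pattern. A self-contained mock of the pattern (the struct and the stubbed call are illustrative, not the real KVM ABI):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct reg_list { uint64_t n; uint64_t reg[]; };

    /* Stand-in for the kernel side of KVM_GET_REG_LIST */
    static int mock_get_reg_list(struct reg_list *rl)
    {
        const uint64_t kernel_count = 3;
        if (rl->n < kernel_count) {
            rl->n = kernel_count;        /* tell the caller how big to allocate */
            return -E2BIG;
        }
        for (uint64_t i = 0; i < kernel_count; i++) {
            rl->reg[i] = 0x100 + i;
        }
        return 0;
    }

    int main(void)
    {
        struct reg_list probe = { .n = 0 };
        if (mock_get_reg_list(&probe) != -E2BIG) {
            return 1;
        }
        struct reg_list *full = malloc(sizeof(*full) + probe.n * sizeof(uint64_t));
        full->n = probe.n;
        if (mock_get_reg_list(full) != 0) {
            return 1;
        }
        printf("%llu registers\n", (unsigned long long)full->n);
        free(full);
        return 0;
    }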
+
bool write_kvmstate_to_list(ARMCPU *cpu)
{
CPUState *cs = CPU(cpu);
@@ -351,6 +444,64 @@ bool write_list_to_kvmstate(ARMCPU *cpu)
return ok;
}
+void kvm_arm_reset_vcpu(ARMCPU *cpu)
+{
+ int ret;
+
+ /* Re-init VCPU so that all registers are set to
+ * their respective reset values.
+ */
+ ret = kvm_arm_vcpu_init(CPU(cpu));
+ if (ret < 0) {
+ fprintf(stderr, "kvm_arm_vcpu_init failed: %s\n", strerror(-ret));
+ abort();
+ }
+ if (!write_kvmstate_to_list(cpu)) {
+ fprintf(stderr, "write_kvmstate_to_list failed\n");
+ abort();
+ }
+}
+
+/*
+ * Update KVM's MP_STATE based on what QEMU thinks it is
+ */
+int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu)
+{
+ if (cap_has_mp_state) {
+ struct kvm_mp_state mp_state = {
+ .mp_state =
+ cpu->powered_off ? KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE
+ };
+ int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
+ if (ret) {
+ fprintf(stderr, "%s: failed to set MP_STATE %d/%s\n",
+ __func__, ret, strerror(-ret));
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Sync the KVM MP_STATE into QEMU
+ */
+int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu)
+{
+ if (cap_has_mp_state) {
+ struct kvm_mp_state mp_state;
+ int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MP_STATE, &mp_state);
+ if (ret) {
+ fprintf(stderr, "%s: failed to get MP_STATE %d/%s\n",
+ __func__, ret, strerror(-ret));
+ abort();
+ }
+ cpu->powered_off = (mp_state.mp_state == KVM_MP_STATE_STOPPED);
+ }
+
+ return 0;
+}
+
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}
@@ -441,3 +592,9 @@ int kvm_arch_irqchip_create(KVMState *s)
return 0;
}
+
+int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
+ uint64_t address, uint32_t data)
+{
+ return 0;
+}
diff --git a/target-arm/kvm32.c b/target-arm/kvm32.c
index 5ec4eb1f3..49b6babc0 100644
--- a/target-arm/kvm32.c
+++ b/target-arm/kvm32.c
@@ -51,17 +51,17 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
struct kvm_one_reg idregs[] = {
{
.id = KVM_REG_ARM | KVM_REG_SIZE_U32
- | ENCODE_CP_REG(15, 0, 0, 0, 0, 0),
+ | ENCODE_CP_REG(15, 0, 0, 0, 0, 0, 0),
.addr = (uintptr_t)&midr,
},
{
.id = KVM_REG_ARM | KVM_REG_SIZE_U32
- | ENCODE_CP_REG(15, 0, 0, 1, 0, 0),
+ | ENCODE_CP_REG(15, 0, 0, 0, 1, 0, 0),
.addr = (uintptr_t)&id_pfr0,
},
{
.id = KVM_REG_ARM | KVM_REG_SIZE_U32
- | ENCODE_CP_REG(15, 0, 0, 2, 0, 0),
+ | ENCODE_CP_REG(15, 0, 0, 0, 2, 0, 0),
.addr = (uintptr_t)&id_isar0,
},
{
@@ -138,7 +138,7 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
return true;
}
-static bool reg_syncs_via_tuple_list(uint64_t regidx)
+bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
/* Return true if the regidx is a register we should synchronize
* via the cpreg_tuples array (ie is not a core reg we sync by
@@ -153,24 +153,11 @@ static bool reg_syncs_via_tuple_list(uint64_t regidx)
}
}
-static int compare_u64(const void *a, const void *b)
-{
- if (*(uint64_t *)a > *(uint64_t *)b) {
- return 1;
- }
- if (*(uint64_t *)a < *(uint64_t *)b) {
- return -1;
- }
- return 0;
-}
-
int kvm_arch_init_vcpu(CPUState *cs)
{
- int i, ret, arraylen;
+ int ret;
uint64_t v;
struct kvm_one_reg r;
- struct kvm_reg_list rl;
- struct kvm_reg_list *rlp;
ARMCPU *cpu = ARM_CPU(cs);
if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
@@ -206,73 +193,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
return -EINVAL;
}
- /* Populate the cpreg list based on the kernel's idea
- * of what registers exist (and throw away the TCG-created list).
- */
- rl.n = 0;
- ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl);
- if (ret != -E2BIG) {
- return ret;
- }
- rlp = g_malloc(sizeof(struct kvm_reg_list) + rl.n * sizeof(uint64_t));
- rlp->n = rl.n;
- ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp);
- if (ret) {
- goto out;
- }
- /* Sort the list we get back from the kernel, since cpreg_tuples
- * must be in strictly ascending order.
- */
- qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64);
-
- for (i = 0, arraylen = 0; i < rlp->n; i++) {
- if (!reg_syncs_via_tuple_list(rlp->reg[i])) {
- continue;
- }
- switch (rlp->reg[i] & KVM_REG_SIZE_MASK) {
- case KVM_REG_SIZE_U32:
- case KVM_REG_SIZE_U64:
- break;
- default:
- fprintf(stderr, "Can't handle size of register in kernel list\n");
- ret = -EINVAL;
- goto out;
- }
-
- arraylen++;
- }
-
- cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen);
- cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen);
- cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes,
- arraylen);
- cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values,
- arraylen);
- cpu->cpreg_array_len = arraylen;
- cpu->cpreg_vmstate_array_len = arraylen;
-
- for (i = 0, arraylen = 0; i < rlp->n; i++) {
- uint64_t regidx = rlp->reg[i];
- if (!reg_syncs_via_tuple_list(regidx)) {
- continue;
- }
- cpu->cpreg_indexes[arraylen] = regidx;
- arraylen++;
- }
- assert(cpu->cpreg_array_len == arraylen);
-
- if (!write_kvmstate_to_list(cpu)) {
- /* Shouldn't happen unless kernel is inconsistent about
- * what registers exist.
- */
- fprintf(stderr, "Initial read of kernel register state failed\n");
- ret = -EINVAL;
- goto out;
- }
-
-out:
- g_free(rlp);
- return ret;
+ return kvm_arm_init_cpreg_list(cpu);
}
typedef struct Reg {
@@ -435,6 +356,8 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return EINVAL;
}
+ kvm_arm_sync_mpstate_to_kvm(cpu);
+
return ret;
}
@@ -506,14 +429,7 @@ int kvm_arch_get_registers(CPUState *cs)
*/
write_list_to_cpustate(cpu);
- return 0;
-}
+ kvm_arm_sync_mpstate_to_qemu(cpu);
-void kvm_arm_reset_vcpu(ARMCPU *cpu)
-{
- /* Re-init VCPU so that all registers are set to
- * their respective reset values.
- */
- kvm_arm_vcpu_init(CPU(cpu));
- write_kvmstate_to_list(cpu);
+ return 0;
}
diff --git a/target-arm/kvm64.c b/target-arm/kvm64.c
index c61528615..93c1ca8b2 100644
--- a/target-arm/kvm64.c
+++ b/target-arm/kvm64.c
@@ -15,6 +15,7 @@
#include <linux/kvm.h>
+#include "config-host.h"
#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
@@ -82,7 +83,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
ARMCPU *cpu = ARM_CPU(cs);
if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
- !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
fprintf(stderr, "KVM is not supported for this guest CPU type\n");
return -EINVAL;
}
@@ -96,6 +97,9 @@ int kvm_arch_init_vcpu(CPUState *cs)
cpu->psci_version = 2;
cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
}
+ if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
+ }
/* Do KVM_ARM_VCPU_INIT ioctl */
ret = kvm_arm_vcpu_init(cs);
@@ -103,24 +107,51 @@ int kvm_arch_init_vcpu(CPUState *cs)
return ret;
}
- /* TODO : support for save/restore/reset of system regs via tuple list */
+ return kvm_arm_init_cpreg_list(cpu);
+}
- return 0;
+bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
+{
+ /* Return true if the regidx is a register we should synchronize
+ * via the cpreg_tuples array (ie is not a core reg we sync by
+ * hand in kvm_arch_get/put_registers())
+ */
+ switch (regidx & KVM_REG_ARM_COPROC_MASK) {
+ case KVM_REG_ARM_CORE:
+ return false;
+ default:
+ return true;
+ }
}
#define AARCH64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
+#define AARCH64_SIMD_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
+ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
+
+#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
+ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
+
int kvm_arch_put_registers(CPUState *cs, int level)
{
struct kvm_one_reg reg;
+ uint32_t fpr;
uint64_t val;
int i;
int ret;
+ unsigned int el;
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
+ /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
+ * AArch64 registers before pushing them out to 64-bit KVM.
+ */
+ if (!is_a64(env)) {
+ aarch64_sync_32_to_64(env);
+ }
+
for (i = 0; i < 31; i++) {
reg.id = AARCH64_CORE_REG(regs.regs[i]);
reg.addr = (uintptr_t) &env->xregs[i];
@@ -150,7 +181,11 @@ int kvm_arch_put_registers(CPUState *cs, int level)
}
/* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
- val = pstate_read(env);
+ if (is_a64(env)) {
+ val = pstate_read(env);
+ } else {
+ val = cpsr_read(env);
+ }
reg.id = AARCH64_CORE_REG(regs.pstate);
reg.addr = (uintptr_t) &val;
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
@@ -172,19 +207,70 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return ret;
}
+ /* Saved Program State Registers
+ *
+ * Before we restore from the banked_spsr[] array we need to
+ * ensure that any modifications to env->spsr are correctly
+ * reflected in the banks.
+ */
+ el = arm_current_el(env);
+ if (el > 0 && !is_a64(env)) {
+ i = bank_number(env->uncached_cpsr & CPSR_M);
+ env->banked_spsr[i] = env->spsr;
+ }
+
+ /* KVM 0-4 map to QEMU banks 1-5 */
for (i = 0; i < KVM_NR_SPSR; i++) {
reg.id = AARCH64_CORE_REG(spsr[i]);
- reg.addr = (uintptr_t) &env->banked_spsr[i - 1];
+ reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret) {
return ret;
}
}
- /* TODO:
- * FP state
- * system registers
+ /* Advanced SIMD and FP registers
+ * We map Qn = regs[2n+1]:regs[2n]
*/
+ for (i = 0; i < 32; i++) {
+ int rd = i << 1;
+ uint64_t fp_val[2];
+#ifdef HOST_WORDS_BIGENDIAN
+ fp_val[0] = env->vfp.regs[rd + 1];
+ fp_val[1] = env->vfp.regs[rd];
+#else
+ fp_val[1] = env->vfp.regs[rd + 1];
+ fp_val[0] = env->vfp.regs[rd];
+#endif
+ reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
+ reg.addr = (uintptr_t)(&fp_val);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ reg.addr = (uintptr_t)(&fpr);
+ fpr = vfp_get_fpsr(env);
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ fpr = vfp_get_fpcr(env);
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ if (!write_list_to_kvmstate(cpu)) {
+ return EINVAL;
+ }
+
+ kvm_arm_sync_mpstate_to_kvm(cpu);
+
return ret;
}
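
The Qn transfer above hands KVM each 128-bit vector register as a little-endian pair of 64-bit halves, which is why the big-endian host branch swaps them. A host-side sketch of the packing (the final byte check assumes a little-endian host):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        /* Qn = regs[2n+1]:regs[2n] -- low doubleword first in memory */
        uint64_t regs[2] = { 0x1111111111111111ull, 0x2222222222222222ull };
        uint64_t fp_val[2];
        fp_val[0] = regs[0];         /* bits [63:0]   */
        fp_val[1] = regs[1];         /* bits [127:64] */

        uint8_t raw[16];
        memcpy(raw, fp_val, sizeof(raw));
        assert(raw[0] == 0x11 && raw[15] == 0x22);   /* little-endian host */
        return 0;
    }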
@@ -192,6 +278,8 @@ int kvm_arch_get_registers(CPUState *cs)
{
struct kvm_one_reg reg;
uint64_t val;
+ uint32_t fpr;
+ unsigned int el;
int i;
int ret;
@@ -227,7 +315,14 @@ int kvm_arch_get_registers(CPUState *cs)
if (ret) {
return ret;
}
- pstate_write(env, val);
+
+ env->aarch64 = ((val & PSTATE_nRW) == 0);
+ if (is_a64(env)) {
+ pstate_write(env, val);
+ } else {
+ env->uncached_cpsr = val & CPSR_M;
+ cpsr_write(env, val, 0xffffffff);
+ }
/* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
* QEMU side we keep the current SP in xregs[31] as well.
@@ -241,6 +336,15 @@ int kvm_arch_get_registers(CPUState *cs)
return ret;
}
+ /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
+ * incoming AArch64 regs received from 64-bit KVM.
+ * We must perform this after all of the registers have been acquired from
+ * the kernel.
+ */
+ if (!is_a64(env)) {
+ aarch64_sync_64_to_32(env);
+ }
+
reg.id = AARCH64_CORE_REG(elr_el1);
reg.addr = (uintptr_t) &env->elr_el[1];
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
@@ -248,23 +352,72 @@ int kvm_arch_get_registers(CPUState *cs)
return ret;
}
+ /* Fetch the SPSR registers
+ *
+ * KVM SPSRs 0-4 map to QEMU banks 1-5
+ */
for (i = 0; i < KVM_NR_SPSR; i++) {
reg.id = AARCH64_CORE_REG(spsr[i]);
- reg.addr = (uintptr_t) &env->banked_spsr[i - 1];
+ reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
if (ret) {
return ret;
}
}
- /* TODO: other registers */
- return ret;
-}
+ el = arm_current_el(env);
+ if (el > 0 && !is_a64(env)) {
+ i = bank_number(env->uncached_cpsr & CPSR_M);
+ env->spsr = env->banked_spsr[i];
+ }
-void kvm_arm_reset_vcpu(ARMCPU *cpu)
-{
- /* Re-init VCPU so that all registers are set to
- * their respective reset values.
+ /* Advanced SIMD and FP registers
+ * We map Qn = regs[2n+1]:regs[2n]
+ */
+ for (i = 0; i < 32; i++) {
+ uint64_t fp_val[2];
+ reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
+ reg.addr = (uintptr_t)(&fp_val);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ } else {
+ int rd = i << 1;
+#ifdef HOST_WORDS_BIGENDIAN
+ env->vfp.regs[rd + 1] = fp_val[0];
+ env->vfp.regs[rd] = fp_val[1];
+#else
+ env->vfp.regs[rd + 1] = fp_val[1];
+ env->vfp.regs[rd] = fp_val[0];
+#endif
+ }
+ }
+
+ reg.addr = (uintptr_t)(&fpr);
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ vfp_set_fpsr(env, fpr);
+
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ vfp_set_fpcr(env, fpr);
+
+ if (!write_kvmstate_to_list(cpu)) {
+ return EINVAL;
+ }
+ /* Note that it's OK to have registers which aren't in CPUState,
+ * so we can ignore a failure return here.
*/
- kvm_arm_vcpu_init(CPU(cpu));
+ write_list_to_cpustate(cpu);
+
+ kvm_arm_sync_mpstate_to_qemu(cpu);
+
+ /* TODO: other registers */
+ return ret;
}
diff --git a/target-arm/kvm_arm.h b/target-arm/kvm_arm.h
index af9310551..5abd5916d 100644
--- a/target-arm/kvm_arm.h
+++ b/target-arm/kvm_arm.h
@@ -47,6 +47,28 @@ void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
uint64_t attr, int dev_fd);
/**
+ * kvm_arm_init_cpreg_list:
+ * @cpu: ARMCPU
+ *
+ * Initialize the CPUState's cpreg list according to the kernel's
+ * definition of what CPU registers it knows about (and throw away
+ * the previous TCG-created cpreg list).
+ *
+ * Returns: 0 if success, else < 0 error code
+ */
+int kvm_arm_init_cpreg_list(ARMCPU *cpu);
+
+/**
+ * kvm_arm_reg_syncs_via_cpreg_list:
+ * @regidx: KVM register index
+ *
+ * Return true if this KVM register should be synchronized via the
+ * cpreg list of arbitrary system registers, false if it is synchronized
+ * by hand using code in kvm_arch_get/put_registers().
+ */
+bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx);
+
+/**
* write_list_to_kvmstate:
* @cpu: ARMCPU
*
@@ -140,6 +162,23 @@ typedef struct ARMHostCPUClass {
*/
bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc);
+
+/**
+ * kvm_arm_sync_mpstate_to_kvm:
+ * @cpu: ARMCPU
+ *
+ * If supported set the KVM MP_STATE based on QEMU's model.
+ */
+int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu);
+
+/**
+ * kvm_arm_sync_mpstate_to_qemu:
+ * @cpu: ARMCPU
+ *
+ * If supported, get the MP_STATE from KVM and store it in QEMU's model.
+ */
+int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu);
+
#endif
#endif
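
Taken together, the new declarations slot into the get-registers path in a fixed order: hand-synced core registers first, then the cpreg list, then the MP state. A hedged sketch of that ordering (error handling trimmed; example_get_flow is an illustrative name):

    static int example_get_flow(ARMCPU *cpu)
    {
        /* 1. Core registers are fetched by hand (kvm64.c above). */

        /* 2. Arbitrary system registers travel via the cpreg list. */
        if (!write_kvmstate_to_list(cpu)) {
            return -EINVAL;
        }
        /* Registers with no CPUState field may fail here; that is OK. */
        write_list_to_cpustate(cpu);

        /* 3. Finally mirror the MP state into QEMU's model. */
        return kvm_arm_sync_mpstate_to_qemu(cpu);
    }
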
diff --git a/target-arm/machine.c b/target-arm/machine.c
index 6437690af..9446e5a8a 100644
--- a/target-arm/machine.c
+++ b/target-arm/machine.c
@@ -127,6 +127,13 @@ static int get_cpsr(QEMUFile *f, void *opaque, size_t size)
CPUARMState *env = &cpu->env;
uint32_t val = qemu_get_be32(f);
+ env->aarch64 = ((val & PSTATE_nRW) == 0);
+
+ if (is_a64(env)) {
+ pstate_write(env, val);
+ return 0;
+ }
+
/* Avoid mode switch when restoring CPSR */
env->uncached_cpsr = val & CPSR_M;
cpsr_write(env, val, 0xffffffff);
@@ -137,8 +144,15 @@ static void put_cpsr(QEMUFile *f, void *opaque, size_t size)
{
ARMCPU *cpu = opaque;
CPUARMState *env = &cpu->env;
+ uint32_t val;
+
+ if (is_a64(env)) {
+ val = pstate_read(env);
+ } else {
+ val = cpsr_read(env);
+ }
- qemu_put_be32(f, cpsr_read(env));
+ qemu_put_be32(f, val);
}
static const VMStateInfo vmstate_cpsr = {
@@ -222,12 +236,14 @@ static int cpu_post_load(void *opaque, int version_id)
const VMStateDescription vmstate_arm_cpu = {
.name = "cpu",
- .version_id = 21,
- .minimum_version_id = 21,
+ .version_id = 22,
+ .minimum_version_id = 22,
.pre_save = cpu_pre_save,
.post_load = cpu_post_load,
.fields = (VMStateField[]) {
VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
+ VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32),
+ VMSTATE_UINT64(env.pc, ARMCPU),
{
.name = "cpsr",
.version_id = 0,
@@ -261,8 +277,8 @@ const VMStateDescription vmstate_arm_cpu = {
VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
VMSTATE_UINT32(env.exception.fsr, ARMCPU),
VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
- VMSTATE_TIMER(gt_timer[GTIMER_PHYS], ARMCPU),
- VMSTATE_TIMER(gt_timer[GTIMER_VIRT], ARMCPU),
+ VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU),
+ VMSTATE_TIMER_PTR(gt_timer[GTIMER_VIRT], ARMCPU),
VMSTATE_BOOL(powered_off, ARMCPU),
VMSTATE_END_OF_LIST()
},
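
With version 22 the single cpsr field carries either a CPSR (AArch32) or a PSTATE (AArch64) image, distinguished by PSTATE.nRW, which is set in all AArch32 modes and clear in AArch64 state. A minimal sketch of the discrimination rule used by get_cpsr above (PSTATE_nRW is this tree's bit-4 constant):

    /* Decide how to interpret the 32-bit value restored from the stream. */
    static bool saved_value_is_aarch64(uint32_t val)
    {
        return (val & PSTATE_nRW) == 0;   /* nRW clear => AArch64 PSTATE */
    }
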
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index 62012c3a6..771302275 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -361,7 +361,7 @@ void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
* Note that SPSel is never OK from EL0; we rely on handle_msr_i()
* to catch that case at translate time.
*/
- if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
+ if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
raise_exception(env, EXCP_UDEF);
}
@@ -465,7 +465,7 @@ void HELPER(exception_return)(CPUARMState *env)
int cur_el = arm_current_el(env);
unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
uint32_t spsr = env->banked_spsr[spsr_idx];
- int new_el, i;
+ int new_el;
aarch64_save_sp(env, cur_el);
@@ -491,9 +491,7 @@ void HELPER(exception_return)(CPUARMState *env)
if (!arm_singlestep_active(env)) {
env->uncached_cpsr &= ~PSTATE_SS;
}
- for (i = 0; i < 15; i++) {
- env->regs[i] = env->xregs[i];
- }
+ aarch64_sync_64_to_32(env);
env->regs[15] = env->elr_el[1] & ~0x1;
} else {
@@ -575,7 +573,7 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
* short descriptor format (in which case it holds both PROCID and ASID),
* since we don't implement the optional v7 context ID masking.
*/
- contextidr = extract64(env->cp15.contextidr_el1, 0, 32);
+ contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);
switch (bt) {
case 3: /* linked context ID match */
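
The contextidr line above uses extract64() from QEMU's bitops.h to take the low 32 bits of the now array-indexed register. For reference, a minimal equivalent of that helper, assuming 0 < length <= 64 and start + length <= 64:

    #include <stdint.h>

    static uint64_t extract64_sketch(uint64_t value, int start, int length)
    {
        /* 'length' bits of 'value' starting at 'start', zero-extended */
        return (value >> start) & (~0ULL >> (64 - length));
    }
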
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index 80d2c07e8..0b192a1f5 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -123,6 +123,23 @@ void a64_translate_init(void)
#endif
}
+static inline ARMMMUIdx get_a64_user_mem_index(DisasContext *s)
+{
+ /* Return the mmu_idx to use for A64 "unprivileged load/store" insns:
+ * if EL1, access as if EL0; otherwise access at current EL
+ */
+ switch (s->mmu_idx) {
+ case ARMMMUIdx_S12NSE1:
+ return ARMMMUIdx_S12NSE0;
+ case ARMMMUIdx_S1SE1:
+ return ARMMMUIdx_S1SE0;
+ case ARMMMUIdx_S2NS:
+ g_assert_not_reached();
+ default:
+ return s->mmu_idx;
+ }
+}
+
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
fprintf_function cpu_fprintf, int flags)
{
@@ -1060,7 +1077,7 @@ static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;
- if (insn & (1 << 31)) {
+ if (insn & (1U << 31)) {
/* C5.6.26 BL Branch with link */
tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
}
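
The 1 << 31 to 1U << 31 conversions in this file are not cosmetic: forming bit 31 by shifting a signed int is undefined behaviour in C, while the unsigned form is well defined. A two-line illustration:

    #include <stdint.h>

    uint32_t bit31_bad(void)  { return 1 << 31;  /* UB: overflows int */ }
    uint32_t bit31_good(void) { return 1U << 31; /* defined: 0x80000000 */ }
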
@@ -1079,7 +1096,7 @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
unsigned int sf, op, rt;
uint64_t addr;
- int label_match;
+ TCGLabel *label_match;
TCGv_i64 tcg_cmp;
sf = extract32(insn, 31, 1);
@@ -1108,7 +1125,7 @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
unsigned int bit_pos, op, rt;
uint64_t addr;
- int label_match;
+ TCGLabel *label_match;
TCGv_i64 tcg_cmp;
bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
@@ -1147,7 +1164,7 @@ static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
if (cond < 0x0e) {
/* genuinely conditional branches */
- int label_match = gen_new_label();
+ TCGLabel *label_match = gen_new_label();
arm_gen_test_cc(cond, label_match);
gen_goto_tb(s, 0, s->pc);
gen_set_label(label_match);
@@ -1254,7 +1271,7 @@ static void gen_get_nzcv(TCGv_i64 tcg_rt)
TCGv_i32 nzcv = tcg_temp_new_i32();
/* build bit 31, N */
- tcg_gen_andi_i32(nzcv, cpu_NF, (1 << 31));
+ tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
/* build bit 30, Z */
tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
@@ -1279,7 +1296,7 @@ static void gen_set_nzcv(TCGv_i64 tcg_rt)
tcg_gen_trunc_i64_i32(nzcv, tcg_rt);
/* bit 31, N */
- tcg_gen_andi_i32(cpu_NF, nzcv, (1 << 31));
+ tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
/* bit 30, Z */
tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
@@ -1372,7 +1389,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
break;
}
- if (use_icount && (ri->type & ARM_CP_IO)) {
+ if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
gen_io_start();
}
@@ -1403,7 +1420,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
}
}
- if (use_icount && (ri->type & ARM_CP_IO)) {
+ if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
/* I/O operations must end the TB here (whether read or write) */
gen_io_end();
s->is_jmp = DISAS_UPDATE;
@@ -1694,8 +1711,8 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
* }
* env->exclusive_addr = -1;
*/
- int fail_label = gen_new_label();
- int done_label = gen_new_label();
+ TCGLabel *fail_label = gen_new_label();
+ TCGLabel *done_label = gen_new_label();
TCGv_i64 addr = tcg_temp_local_new_i64();
TCGv_i64 tmp;
@@ -1900,7 +1917,7 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
int rt = extract32(insn, 0, 5);
int rn = extract32(insn, 5, 5);
int rt2 = extract32(insn, 10, 5);
- int64_t offset = sextract32(insn, 15, 7);
+ uint64_t offset = sextract64(insn, 15, 7);
int index = extract32(insn, 23, 2);
bool is_vector = extract32(insn, 26, 1);
bool is_load = extract32(insn, 22, 1);
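
Switching the pair offset to uint64_t plus sextract64() keeps the later scaling shift in unsigned 64-bit arithmetic rather than left-shifting a negative signed value. The semantics of the bitops.h sextract64() relied on here, as a sketch (QEMU, like this sketch, assumes arithmetic right shift of signed values):

    #include <stdint.h>

    static int64_t sextract64_sketch(uint64_t value, int start, int length)
    {
        /* sign-extend 'length' bits of 'value' starting at 'start' */
        return ((int64_t)(value << (64 - length - start))) >> (64 - length);
    }

    /* e.g. a 7-bit field holding 0x40 sign-extends to -64; 0x3f to 63 */
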
@@ -2107,7 +2124,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
}
} else {
TCGv_i64 tcg_rt = cpu_reg(s, rt);
- int memidx = is_unpriv ? 1 : get_mem_index(s);
+ int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
if (is_store) {
do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx);
@@ -2645,11 +2662,12 @@ static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
{
unsigned int page, rd;
uint64_t base;
- int64_t offset;
+ uint64_t offset;
page = extract32(insn, 31, 1);
/* SignExtend(immhi:immlo) -> offset */
- offset = ((int64_t)sextract32(insn, 5, 19) << 2) | extract32(insn, 29, 2);
+ offset = sextract64(insn, 5, 19);
+ offset = offset << 2 | extract32(insn, 29, 2);
rd = extract32(insn, 0, 5);
base = s->pc - 4;
@@ -2803,7 +2821,10 @@ static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
* by r within the element (which is e bits wide)...
*/
mask = bitmask64(s + 1);
- mask = (mask >> r) | (mask << (e - r));
+ if (r) {
+ mask = (mask >> r) | (mask << (e - r));
+ mask &= bitmask64(e);
+ }
/* ...then replicate the element over the whole 64 bit value */
mask = bitfield_replicate(mask, e);
*result = mask;
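
The added guard fixes two latent issues in the rotate: for r == 0 the old expression shifted a 64-bit value by e (undefined when e == 64), and for r > 0 the left shift could smear set bits above the element, which the new mask &= bitmask64(e) clears. A worked sketch of the ROR-within-element step, assuming bitmask64(n) yields the n low bits set:

    #include <stdint.h>

    static uint64_t bitmask64(unsigned int length)
    {
        return ~0ULL >> (64 - length);    /* valid for 1 <= length <= 64 */
    }

    /* Rotate an (s+1)-bit run of ones right by r within an e-bit element. */
    static uint64_t ror_element(unsigned s, unsigned r, unsigned e)
    {
        uint64_t mask = bitmask64(s + 1);
        if (r) {                     /* r == 0 with e == 64 would be UB */
            mask = (mask >> r) | (mask << (e - r));
            mask &= bitmask64(e);    /* keep the rotate inside the element */
        }
        return mask;
    }

    /* e = 8, s = 2, r = 1: 0b00000111 -> 0b10000011 */
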
@@ -3516,7 +3537,7 @@ static void disas_adc_sbc(DisasContext *s, uint32_t insn)
static void disas_cc(DisasContext *s, uint32_t insn)
{
unsigned int sf, op, y, cond, rn, nzcv, is_imm;
- int label_continue = -1;
+ TCGLabel *label_continue = NULL;
TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
if (!extract32(insn, 29, 1)) {
@@ -3536,7 +3557,7 @@ static void disas_cc(DisasContext *s, uint32_t insn)
nzcv = extract32(insn, 0, 4);
if (cond < 0x0e) { /* not always */
- int label_match = gen_new_label();
+ TCGLabel *label_match = gen_new_label();
label_continue = gen_new_label();
arm_gen_test_cc(cond, label_match);
/* nomatch: */
@@ -3609,8 +3630,8 @@ static void disas_cond_select(DisasContext *s, uint32_t insn)
/* OPTME: we could use movcond here, at the cost of duplicating
* a lot of the arm_gen_test_cc() logic.
*/
- int label_match = gen_new_label();
- int label_continue = gen_new_label();
+ TCGLabel *label_match = gen_new_label();
+ TCGLabel *label_continue = gen_new_label();
arm_gen_test_cc(cond, label_match);
/* nomatch: */
@@ -4083,7 +4104,7 @@ static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
{
unsigned int mos, type, rm, cond, rn, op, nzcv;
TCGv_i64 tcg_flags;
- int label_continue = -1;
+ TCGLabel *label_continue = NULL;
mos = extract32(insn, 29, 3);
type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
@@ -4103,7 +4124,7 @@ static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
}
if (cond < 0x0e) { /* not always */
- int label_match = gen_new_label();
+ TCGLabel *label_match = gen_new_label();
label_continue = gen_new_label();
arm_gen_test_cc(cond, label_match);
/* nomatch: */
@@ -4144,7 +4165,7 @@ static void gen_mov_fp2fp(DisasContext *s, int type, int dst, int src)
static void disas_fp_csel(DisasContext *s, uint32_t insn)
{
unsigned int mos, type, rm, cond, rn, rd;
- int label_continue = -1;
+ TCGLabel *label_continue = NULL;
mos = extract32(insn, 29, 3);
type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
@@ -4163,7 +4184,7 @@ static void disas_fp_csel(DisasContext *s, uint32_t insn)
}
if (cond < 0x0e) { /* not always */
- int label_match = gen_new_label();
+ TCGLabel *label_match = gen_new_label();
label_continue = gen_new_label();
arm_gen_test_cc(cond, label_match);
/* nomatch: */
@@ -10899,7 +10920,6 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
CPUARMState *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
CPUBreakpoint *bp;
- uint16_t *gen_opc_end;
int j, lj;
target_ulong pc_start;
target_ulong next_page_start;
@@ -10910,8 +10930,6 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
dc->tb = tb;
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
-
dc->is_jmp = DISAS_NEXT;
dc->pc = pc_start;
dc->singlestep_enabled = cs->singlestep_enabled;
@@ -10922,14 +10940,15 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
dc->bswap_code = 0;
dc->condexec_mask = 0;
dc->condexec_cond = 0;
+ dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
+ dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
- dc->user = (ARM_TBFLAG_AA64_EL(tb->flags) == 0);
+ dc->user = (dc->current_el == 0);
#endif
dc->cpacr_fpen = ARM_TBFLAG_AA64_FPEN(tb->flags);
dc->vec_len = 0;
dc->vec_stride = 0;
dc->cp_regs = cpu->cp_regs;
- dc->current_el = arm_current_el(env);
dc->features = env->features;
/* Single step state. The code-generation logic here is:
@@ -10962,7 +10981,7 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
max_insns = CF_COUNT_MASK;
}
- gen_tb_start();
+ gen_tb_start(tb);
tcg_clear_temp_count();
@@ -10980,7 +10999,7 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
}
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j) {
@@ -11030,7 +11049,7 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
* ensures prefetch aborts occur at the right place.
*/
num_insns++;
- } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
+ } while (!dc->is_jmp && !tcg_op_buf_full() &&
!cs->singlestep_enabled &&
!singlestep &&
!dc->ss_active &&
@@ -11090,7 +11109,6 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
done_generating:
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
@@ -11102,7 +11120,7 @@ done_generating:
}
#endif
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
lj++;
while (lj <= j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
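
A recurring theme in both translators is that dc->current_el is now derived from the TB's MMU index instead of the live env state, since a cached TB may later run at a different point and only the TB flags are safe inputs. In this tree the stage-1 ARMMMUIdx values encode their EL in the low two bits, so the mapping reduces to a mask, as a sketch:

    /* Mirrors this tree's arm_mmu_idx_to_el(); valid for stage-1 indexes
     * only (S12NSE0=0, S12NSE1=1, S1E2=2, S1E3=3, S1SE0=4, S1SE1=5).
     */
    static inline int mmu_idx_to_el_sketch(int mmu_idx)
    {
        return mmu_idx & 3;
    }
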
diff --git a/target-arm/translate.c b/target-arm/translate.c
index af5156857..f8f72bede 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -113,6 +113,28 @@ void arm_translate_init(void)
a64_translate_init();
}
+static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
+{
+ /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
+ * insns:
+ * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
+ * otherwise, access as if at PL0.
+ */
+ switch (s->mmu_idx) {
+ case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
+ case ARMMMUIdx_S12NSE0:
+ case ARMMMUIdx_S12NSE1:
+ return ARMMMUIdx_S12NSE0;
+ case ARMMMUIdx_S1E3:
+ case ARMMMUIdx_S1SE0:
+ case ARMMMUIdx_S1SE1:
+ return ARMMMUIdx_S1SE0;
+ case ARMMMUIdx_S2NS:
+ default:
+ g_assert_not_reached();
+ }
+}
+
static inline TCGv_i32 load_cpu_offset(int offset)
{
TCGv_i32 tmp = tcg_temp_new_i32();
@@ -714,10 +736,10 @@ static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
* generate a conditional branch based on ARM condition code cc.
* This is common between ARM and Aarch64 targets.
*/
-void arm_gen_test_cc(int cc, int label)
+void arm_gen_test_cc(int cc, TCGLabel *label)
{
TCGv_i32 tmp;
- int inv;
+ TCGLabel *inv;
switch (cc) {
case 0: /* eq: Z */
@@ -7091,7 +7113,7 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
rt = (insn >> 12) & 0xf;
ri = get_arm_cp_reginfo(s->cp_regs,
- ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
+ ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
if (ri) {
/* Check access permissions */
if (!cp_access_ok(s->current_el, ri, isread)) {
@@ -7170,7 +7192,7 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
break;
}
- if (use_icount && (ri->type & ARM_CP_IO)) {
+ if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
gen_io_start();
}
@@ -7261,7 +7283,7 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
}
}
- if (use_icount && (ri->type & ARM_CP_IO)) {
+ if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
/* I/O operations must end the TB here (whether read or write) */
gen_io_end();
gen_lookup_tb(s);
@@ -7281,12 +7303,16 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
*/
if (is64) {
qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
- "64 bit system register cp:%d opc1: %d crm:%d\n",
- isread ? "read" : "write", cpnum, opc1, crm);
+ "64 bit system register cp:%d opc1: %d crm:%d "
+ "(%s)\n",
+ isread ? "read" : "write", cpnum, opc1, crm,
+ s->ns ? "non-secure" : "secure");
} else {
qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
- "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d\n",
- isread ? "read" : "write", cpnum, opc1, crn, crm, opc2);
+ "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
+ "(%s)\n",
+ isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
+ s->ns ? "non-secure" : "secure");
}
return 1;
@@ -7414,8 +7440,8 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
{
TCGv_i32 tmp;
TCGv_i64 val64, extaddr;
- int done_label;
- int fail_label;
+ TCGLabel *done_label;
+ TCGLabel *fail_label;
/* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
[addr] = {Rt};
@@ -8397,34 +8423,30 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
}
} else {
int address_offset;
- int load;
+ bool load = insn & (1 << 20);
+ bool doubleword = false;
/* Misc load/store */
rn = (insn >> 16) & 0xf;
rd = (insn >> 12) & 0xf;
+
+ if (!load && (sh & 2)) {
+ /* doubleword */
+ ARCH(5TE);
+ if (rd & 1) {
+ /* UNPREDICTABLE; we choose to UNDEF */
+ goto illegal_op;
+ }
+ load = (sh & 1) == 0;
+ doubleword = true;
+ }
+
addr = load_reg(s, rn);
if (insn & (1 << 24))
gen_add_datah_offset(s, insn, 0, addr);
address_offset = 0;
- if (insn & (1 << 20)) {
- /* load */
- tmp = tcg_temp_new_i32();
- switch(sh) {
- case 1:
- gen_aa32_ld16u(tmp, addr, get_mem_index(s));
- break;
- case 2:
- gen_aa32_ld8s(tmp, addr, get_mem_index(s));
- break;
- default:
- case 3:
- gen_aa32_ld16s(tmp, addr, get_mem_index(s));
- break;
- }
- load = 1;
- } else if (sh & 2) {
- ARCH(5TE);
- /* doubleword */
- if (sh & 1) {
+
+ if (doubleword) {
+ if (!load) {
/* store */
tmp = load_reg(s, rd);
gen_aa32_st32(tmp, addr, get_mem_index(s));
@@ -8433,7 +8455,6 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
tmp = load_reg(s, rd + 1);
gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
- load = 0;
} else {
/* load */
tmp = tcg_temp_new_i32();
@@ -8443,15 +8464,28 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
tmp = tcg_temp_new_i32();
gen_aa32_ld32u(tmp, addr, get_mem_index(s));
rd++;
- load = 1;
}
address_offset = -4;
+ } else if (load) {
+ /* load */
+ tmp = tcg_temp_new_i32();
+ switch (sh) {
+ case 1:
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
+ break;
+ case 2:
+ gen_aa32_ld8s(tmp, addr, get_mem_index(s));
+ break;
+ default:
+ case 3:
+ gen_aa32_ld16s(tmp, addr, get_mem_index(s));
+ break;
+ }
} else {
/* store */
tmp = load_reg(s, rd);
gen_aa32_st16(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
- load = 0;
}
/* Perform base writeback before the loaded value to
ensure correct behavior with overlapping index registers.
@@ -8735,6 +8769,10 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
ARCH(6T2);
shift = (insn >> 7) & 0x1f;
i = (insn >> 16) & 0x1f;
+ if (i < shift) {
+ /* UNPREDICTABLE; we choose to UNDEF */
+ goto illegal_op;
+ }
i = i + 1 - shift;
if (rm == 15) {
tmp = tcg_temp_new_i32();
@@ -8789,7 +8827,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
tmp2 = load_reg(s, rn);
if ((insn & 0x01200000) == 0x00200000) {
/* ldrt/strt */
- i = MMU_USER_IDX;
+ i = get_a32_user_mem_index(s);
} else {
i = get_mem_index(s);
}
@@ -8829,17 +8867,23 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
case 0x08:
case 0x09:
{
- int j, n, user, loaded_base;
+ int j, n, loaded_base;
+ bool exc_return = false;
+ bool is_load = extract32(insn, 20, 1);
+ bool user = false;
TCGv_i32 loaded_var;
/* load/store multiple words */
/* XXX: store correct base if write back */
- user = 0;
if (insn & (1 << 22)) {
+ /* LDM (user), LDM (exception return) and STM (user) */
if (IS_USER(s))
goto illegal_op; /* only usable in supervisor mode */
- if ((insn & (1 << 15)) == 0)
- user = 1;
+ if (is_load && extract32(insn, 15, 1)) {
+ exc_return = true;
+ } else {
+ user = true;
+ }
}
rn = (insn >> 16) & 0xf;
addr = load_reg(s, rn);
@@ -8873,7 +8917,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
j = 0;
for(i=0;i<16;i++) {
if (insn & (1 << i)) {
- if (insn & (1 << 20)) {
+ if (is_load) {
/* load */
tmp = tcg_temp_new_i32();
gen_aa32_ld32u(tmp, addr, get_mem_index(s));
@@ -8938,7 +8982,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
if (loaded_base) {
store_reg(s, rn, loaded_var);
}
- if ((insn & (1 << 22)) && !user) {
+ if (exc_return) {
/* Restore CPSR from SPSR. */
tmp = load_cpu_field(spsr);
gen_set_cpsr(tmp, CPSR_ERET_MASK);
@@ -10169,7 +10213,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
break;
case 0xe: /* User privilege. */
tcg_gen_addi_i32(addr, addr, imm);
- memidx = MMU_USER_IDX;
+ memidx = get_a32_user_mem_index(s);
break;
case 0x9: /* Post-decrement. */
imm = -imm;
@@ -10995,7 +11039,6 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
CPUARMState *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
CPUBreakpoint *bp;
- uint16_t *gen_opc_end;
int j, lj;
target_ulong pc_start;
target_ulong next_page_start;
@@ -11016,8 +11059,6 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
dc->tb = tb;
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
-
dc->is_jmp = DISAS_NEXT;
dc->pc = pc_start;
dc->singlestep_enabled = cs->singlestep_enabled;
@@ -11028,16 +11069,18 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
+ dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
+ dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
- dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
+ dc->user = (dc->current_el == 0);
#endif
+ dc->ns = ARM_TBFLAG_NS(tb->flags);
dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
dc->cp_regs = cpu->cp_regs;
- dc->current_el = arm_current_el(env);
dc->features = env->features;
/* Single step state. The code-generation logic here is:
@@ -11075,7 +11118,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
if (max_insns == 0)
max_insns = CF_COUNT_MASK;
- gen_tb_start();
+ gen_tb_start(tb);
tcg_clear_temp_count();
@@ -11150,7 +11193,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
}
}
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j)
@@ -11216,7 +11259,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
* Also stop translation when a page boundary is reached. This
* ensures prefetch aborts occur at the right place. */
num_insns ++;
- } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
+ } while (!dc->is_jmp && !tcg_op_buf_full() &&
!cs->singlestep_enabled &&
!singlestep &&
!dc->ss_active &&
@@ -11325,7 +11368,6 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
done_generating:
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
@@ -11337,7 +11379,7 @@ done_generating:
}
#endif
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
lj++;
while (lj <= j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
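
In the bitfield decode above, shift holds the LSB (insn[11:7]) and i initially the MSB (insn[20:16]) of the destination field; an MSB below the LSB is UNPREDICTABLE, which this tree now maps to UNDEF instead of silently computing a non-positive width. A sketch of the decode and check:

    #include <stdint.h>

    /* Returns the inserted field width, or -1 for the UNPREDICTABLE case. */
    static int bfi_width(uint32_t insn)
    {
        unsigned int lsb = (insn >> 7) & 0x1f;
        unsigned int msb = (insn >> 16) & 0x1f;
        if (msb < lsb) {
            return -1;           /* treated as UNDEF by the hunk above */
        }
        return msb + 1 - lsb;
    }
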
diff --git a/target-arm/translate.h b/target-arm/translate.h
index 41a907157..9829576ab 100644
--- a/target-arm/translate.h
+++ b/target-arm/translate.h
@@ -9,7 +9,7 @@ typedef struct DisasContext {
/* Nonzero if this instruction has been conditionally skipped. */
int condjmp;
/* The label that will be jumped to when the instruction is skipped. */
- int condlabel;
+ TCGLabel *condlabel;
/* Thumb-2 conditional execution bits. */
int condexec_mask;
int condexec_cond;
@@ -20,6 +20,8 @@ typedef struct DisasContext {
#if !defined(CONFIG_USER_ONLY)
int user;
#endif
+ ARMMMUIdx mmu_idx; /* MMU index to use for normal loads/stores */
+ bool ns; /* Use non-secure CPREG bank on access */
bool cpacr_fpen; /* FP enabled via CPACR.FPEN */
bool vfp_enabled; /* FP enabled via FPSCR.EN */
int vec_len;
@@ -68,7 +70,7 @@ static inline int arm_dc_feature(DisasContext *dc, int feature)
static inline int get_mem_index(DisasContext *s)
{
- return s->current_el;
+ return s->mmu_idx;
}
/* target-specific extra values for is_jmp */
@@ -117,6 +119,6 @@ static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
}
#endif
-void arm_gen_test_cc(int cc, int label);
+void arm_gen_test_cc(int cc, TCGLabel *label);
#endif /* TARGET_ARM_TRANSLATE_H */
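
Finally, the int -> TCGLabel* churn throughout this import tracks a TCG core change: label handles are now opaque pointers allocated by gen_new_label() rather than integer indices. The usage idiom is unchanged apart from the type:

    TCGLabel *label_match = gen_new_label();   /* was: int label_match */

    arm_gen_test_cc(cond, label_match);        /* emit branch to the label */
    /* ... no-match path ... */
    gen_set_label(label_match);                /* bind the label here */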