author    Chanho Park <chanho61.park@samsung.com>  2014-12-10 15:42:55 +0900
committer Chanho Park <chanho61.park@samsung.com>  2014-12-10 15:42:55 +0900
commit    0d6a2f7e595218b5632ba7005128470e65138951 (patch)
tree      596b09930ef1538e6606450e2d8b88ec2e296a9b /target-arm
parent    16b1353a36171ae06d63fd309f4772dbfb1da113 (diff)
Imported Upstream version 2.2.0 (tags: upstream/2.2.0, upstream/2.2.1)
Diffstat (limited to 'target-arm')
-rw-r--r--  target-arm/Makefile.objs   |    1
-rw-r--r--  target-arm/cpu-qom.h       |    9
-rw-r--r--  target-arm/cpu.c           |  186
-rw-r--r--  target-arm/cpu.h           |  377
-rw-r--r--  target-arm/cpu64.c         |    9
-rw-r--r--  target-arm/helper-a64.c    |   43
-rw-r--r--  target-arm/helper.c        | 1038
-rw-r--r--  target-arm/helper.h        |    3
-rw-r--r--  target-arm/internals.h     |  119
-rw-r--r--  target-arm/kvm-consts.h    |   89
-rw-r--r--  target-arm/kvm64.c         |   13
-rw-r--r--  target-arm/machine.c       |   13
-rw-r--r--  target-arm/op_helper.c     |  358
-rw-r--r--  target-arm/psci.c          |  242
-rw-r--r--  target-arm/translate-a64.c |  145
-rw-r--r--  target-arm/translate.c     |  508
-rw-r--r--  target-arm/translate.h     |   20
17 files changed, 2673 insertions(+), 500 deletions(-)
diff --git a/target-arm/Makefile.objs b/target-arm/Makefile.objs
index dcd167e0d..9460b409a 100644
--- a/target-arm/Makefile.objs
+++ b/target-arm/Makefile.objs
@@ -7,5 +7,6 @@ obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
obj-y += translate.o op_helper.o helper.o cpu.o
obj-y += neon_helper.o iwmmxt_helper.o
obj-y += gdbstub.o
+obj-$(CONFIG_SOFTMMU) += psci.o
obj-$(TARGET_AARCH64) += cpu64.o translate-a64.o helper-a64.o gdbstub64.o
obj-y += crypto_helper.o
diff --git a/target-arm/cpu-qom.h b/target-arm/cpu-qom.h
index ee4fbb1da..dcfda7dfc 100644
--- a/target-arm/cpu-qom.h
+++ b/target-arm/cpu-qom.h
@@ -98,6 +98,13 @@ typedef struct ARMCPU {
/* Should CPU start in PSCI powered-off state? */
bool start_powered_off;
+ /* CPU currently in PSCI powered-off state */
+ bool powered_off;
+
+ /* PSCI conduit used to invoke PSCI methods
+ * 0 - disabled, 1 - smc, 2 - hvc
+ */
+ uint32_t psci_conduit;
/* [QEMU_]KVM_ARM_TARGET_* constant for this CPU, or
* QEMU_KVM_ARM_TARGET_NONE if the kernel doesn't support this CPU type.
@@ -148,6 +155,7 @@ typedef struct ARMCPU {
uint64_t id_aa64isar1;
uint64_t id_aa64mmfr0;
uint64_t id_aa64mmfr1;
+ uint32_t dbgdidr;
uint32_t clidr;
/* The elements of this array are the CCSIDR values for each cache,
* in the order L1DCache, L1ICache, L2DCache, L2ICache, etc.
@@ -191,6 +199,7 @@ void init_cpreg_list(ARMCPU *cpu);
void arm_cpu_do_interrupt(CPUState *cpu);
void arm_v7m_cpu_do_interrupt(CPUState *cpu);
+bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req);
void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
int flags);
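The psci_conduit field above surfaces as the "psci-conduit" qdev property added to arm_cpu_properties later in this patch, with values from the QEMU_PSCI_CONDUIT_* enum added to cpu.h. A minimal sketch of how a board model might select the HVC conduit before the CPU is realized; example_cpu_setup is hypothetical, while qdev_prop_set_uint32() is the standard qdev helper:

/* Hypothetical board-model helper (not part of the patch); assumes the
 * "psci-conduit" property and QEMU_PSCI_CONDUIT_* values this series adds.
 */
#include "hw/qdev-properties.h"   /* qdev_prop_set_uint32() */
#include "cpu.h"                  /* ARMCPU, QEMU_PSCI_CONDUIT_HVC */

static void example_cpu_setup(ARMCPU *cpu)
{
    /* Must run before the CPU is realized; 0/1/2 = disabled/SMC/HVC. */
    qdev_prop_set_uint32(DEVICE(cpu), "psci-conduit", QEMU_PSCI_CONDUIT_HVC);
}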
diff --git a/target-arm/cpu.c b/target-arm/cpu.c
index 05e52e0e8..5ce7350ce 100644
--- a/target-arm/cpu.c
+++ b/target-arm/cpu.c
@@ -40,8 +40,13 @@ static void arm_cpu_set_pc(CPUState *cs, vaddr value)
static bool arm_cpu_has_work(CPUState *cs)
{
- return cs->interrupt_request &
- (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB);
+ ARMCPU *cpu = ARM_CPU(cs);
+
+ return !cpu->powered_off
+ && cs->interrupt_request &
+ (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
+ | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
+ | CPU_INTERRUPT_EXITTB);
}
static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
@@ -91,6 +96,9 @@ static void arm_cpu_reset(CPUState *s)
env->vfp.xregs[ARM_VFP_MVFR1] = cpu->mvfr1;
env->vfp.xregs[ARM_VFP_MVFR2] = cpu->mvfr2;
+ cpu->powered_off = cpu->start_powered_off;
+ s->halted = cpu->start_powered_off;
+
if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
}
@@ -100,8 +108,8 @@ static void arm_cpu_reset(CPUState *s)
env->aarch64 = 1;
#if defined(CONFIG_USER_ONLY)
env->pstate = PSTATE_MODE_EL0t;
- /* Userspace expects access to CTL_EL0 and the cache ops */
- env->cp15.c1_sys |= SCTLR_UCT | SCTLR_UCI;
+ /* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */
+ env->cp15.c1_sys |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
/* and to the FP/Neon instructions */
env->cp15.c1_coproc = deposit64(env->cp15.c1_coproc, 20, 2, 3);
#else
@@ -129,26 +137,38 @@ static void arm_cpu_reset(CPUState *s)
env->uncached_cpsr = ARM_CPU_MODE_SVC;
env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;
/* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is
- clear at reset. Initial SP and PC are loaded from ROM. */
+ * clear at reset. Initial SP and PC are loaded from ROM.
+ */
if (IS_M(env)) {
- uint32_t pc;
+ uint32_t initial_msp; /* Loaded from 0x0 */
+ uint32_t initial_pc; /* Loaded from 0x4 */
uint8_t *rom;
+
env->daif &= ~PSTATE_I;
rom = rom_ptr(0);
if (rom) {
- /* We should really use ldl_phys here, in case the guest
- modified flash and reset itself. However images
- loaded via -kernel have not been copied yet, so load the
- values directly from there. */
- env->regs[13] = ldl_p(rom) & 0xFFFFFFFC;
- pc = ldl_p(rom + 4);
- env->thumb = pc & 1;
- env->regs[15] = pc & ~1;
+ /* Address zero is covered by ROM which hasn't yet been
+ * copied into physical memory.
+ */
+ initial_msp = ldl_p(rom);
+ initial_pc = ldl_p(rom + 4);
+ } else {
+ /* Address zero not covered by a ROM blob, or the ROM blob
+ * is in non-modifiable memory and this is a second reset after
+ * it got copied into memory. In the latter case, rom_ptr
+ * will return a NULL pointer and we should use ldl_phys instead.
+ */
+ initial_msp = ldl_phys(s->as, 0);
+ initial_pc = ldl_phys(s->as, 4);
}
+
+ env->regs[13] = initial_msp & 0xFFFFFFFC;
+ env->regs[15] = initial_pc & ~1;
+ env->thumb = initial_pc & 1;
}
if (env->cp15.c1_sys & SCTLR_V) {
- env->regs[15] = 0xFFFF0000;
+ env->regs[15] = 0xFFFF0000;
}
env->vfp.xregs[ARM_VFP_FPEXC] = 0;
@@ -161,38 +181,113 @@ static void arm_cpu_reset(CPUState *s)
set_float_detect_tininess(float_tininess_before_rounding,
&env->vfp.standard_fp_status);
tlb_flush(s, 1);
- /* Reset is a state change for some CPUARMState fields which we
- * bake assumptions about into translated code, so we need to
- * tb_flush().
- */
- tb_flush(env);
#ifndef CONFIG_USER_ONLY
if (kvm_enabled()) {
kvm_arm_reset_vcpu(cpu);
}
#endif
+
+ hw_breakpoint_update_all(cpu);
+ hw_watchpoint_update_all(cpu);
+}
+
+bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ bool ret = false;
+
+ if (interrupt_request & CPU_INTERRUPT_FIQ
+ && arm_excp_unmasked(cs, EXCP_FIQ)) {
+ cs->exception_index = EXCP_FIQ;
+ cc->do_interrupt(cs);
+ ret = true;
+ }
+ if (interrupt_request & CPU_INTERRUPT_HARD
+ && arm_excp_unmasked(cs, EXCP_IRQ)) {
+ cs->exception_index = EXCP_IRQ;
+ cc->do_interrupt(cs);
+ ret = true;
+ }
+ if (interrupt_request & CPU_INTERRUPT_VIRQ
+ && arm_excp_unmasked(cs, EXCP_VIRQ)) {
+ cs->exception_index = EXCP_VIRQ;
+ cc->do_interrupt(cs);
+ ret = true;
+ }
+ if (interrupt_request & CPU_INTERRUPT_VFIQ
+ && arm_excp_unmasked(cs, EXCP_VFIQ)) {
+ cs->exception_index = EXCP_VFIQ;
+ cc->do_interrupt(cs);
+ ret = true;
+ }
+
+ return ret;
+}
+
+#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
+static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ bool ret = false;
+
+ if (interrupt_request & CPU_INTERRUPT_FIQ
+ && !(env->daif & PSTATE_F)) {
+ cs->exception_index = EXCP_FIQ;
+ cc->do_interrupt(cs);
+ ret = true;
+ }
+ /* ARMv7-M interrupt return works by loading a magic value
+ * into the PC. On real hardware the load causes the
+ * return to occur. The qemu implementation performs the
+ * jump normally, then does the exception return when the
+ * CPU tries to execute code at the magic address.
+ * This will cause the magic PC value to be pushed to
+ * the stack if an interrupt occurred at the wrong time.
+ * We avoid this by disabling interrupts when
+ * pc contains a magic address.
+ */
+ if (interrupt_request & CPU_INTERRUPT_HARD
+ && !(env->daif & PSTATE_I)
+ && (env->regs[15] < 0xfffffff0)) {
+ cs->exception_index = EXCP_IRQ;
+ cc->do_interrupt(cs);
+ ret = true;
+ }
+ return ret;
}
+#endif
#ifndef CONFIG_USER_ONLY
static void arm_cpu_set_irq(void *opaque, int irq, int level)
{
ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
CPUState *cs = CPU(cpu);
+ static const int mask[] = {
+ [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
+ [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
+ [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
+ [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
+ };
switch (irq) {
- case ARM_CPU_IRQ:
- if (level) {
- cpu_interrupt(cs, CPU_INTERRUPT_HARD);
- } else {
- cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ case ARM_CPU_VIRQ:
+ case ARM_CPU_VFIQ:
+ if (!arm_feature(env, ARM_FEATURE_EL2)) {
+ hw_error("%s: Virtual interrupt line %d with no EL2 support\n",
+ __func__, irq);
}
- break;
+ /* fall through */
+ case ARM_CPU_IRQ:
case ARM_CPU_FIQ:
if (level) {
- cpu_interrupt(cs, CPU_INTERRUPT_FIQ);
+ cpu_interrupt(cs, mask[irq]);
} else {
- cpu_reset_interrupt(cs, CPU_INTERRUPT_FIQ);
+ cpu_reset_interrupt(cs, mask[irq]);
}
break;
default:
@@ -242,9 +337,12 @@ static void arm_cpu_initfn(Object *obj)
#ifndef CONFIG_USER_ONLY
/* Our inbound IRQ and FIQ lines */
if (kvm_enabled()) {
- qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 2);
+ /* VIRQ and VFIQ are unused with KVM but we add them to maintain
+ * the same interface as non-KVM CPUs.
+ */
+ qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
} else {
- qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 2);
+ qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
}
cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
@@ -263,9 +361,12 @@ static void arm_cpu_initfn(Object *obj)
cpu->psci_version = 1; /* By default assume PSCI v0.1 */
cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
- if (tcg_enabled() && !inited) {
- inited = true;
- arm_translate_init();
+ if (tcg_enabled()) {
+ cpu->psci_version = 2; /* TCG implements PSCI 0.2 */
+ if (!inited) {
+ inited = true;
+ arm_translate_init();
+ }
}
}
@@ -447,7 +548,7 @@ static void arm1026_initfn(Object *obj)
ARMCPRegInfo ifar = {
.name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
.access = PL1_RW,
- .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el1),
+ .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[1]),
.resetvalue = 0
};
define_one_arm_cp_reg(cpu, &ifar);
@@ -596,11 +697,13 @@ static void cortex_m3_initfn(Object *obj)
static void arm_v7m_class_init(ObjectClass *oc, void *data)
{
-#ifndef CONFIG_USER_ONLY
CPUClass *cc = CPU_CLASS(oc);
+#ifndef CONFIG_USER_ONLY
cc->do_interrupt = arm_v7m_cpu_do_interrupt;
#endif
+
+ cc->cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt;
}
static const ARMCPRegInfo cortexa8_cp_reginfo[] = {
@@ -640,6 +743,7 @@ static void cortex_a8_initfn(Object *obj)
cpu->id_isar2 = 0x21232031;
cpu->id_isar3 = 0x11112131;
cpu->id_isar4 = 0x00111142;
+ cpu->dbgdidr = 0x15141000;
cpu->clidr = (1 << 27) | (2 << 24) | 3;
cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */
cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */
@@ -712,9 +816,10 @@ static void cortex_a9_initfn(Object *obj)
cpu->id_isar2 = 0x21232041;
cpu->id_isar3 = 0x11112131;
cpu->id_isar4 = 0x00111142;
+ cpu->dbgdidr = 0x35141000;
cpu->clidr = (1 << 27) | (1 << 24) | 3;
- cpu->ccsidr[0] = 0xe00fe015; /* 16k L1 dcache. */
- cpu->ccsidr[1] = 0x200fe015; /* 16k L1 icache. */
+ cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
+ cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */
define_arm_cp_regs(cpu, cortexa9_cp_reginfo);
}
@@ -773,6 +878,7 @@ static void cortex_a15_initfn(Object *obj)
cpu->id_isar2 = 0x21232041;
cpu->id_isar3 = 0x11112131;
cpu->id_isar4 = 0x10011142;
+ cpu->dbgdidr = 0x3515f021;
cpu->clidr = 0x0a200023;
cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
@@ -1016,6 +1122,7 @@ static const ARMCPUInfo arm_cpus[] = {
static Property arm_cpu_properties[] = {
DEFINE_PROP_BOOL("start-powered-off", ARMCPU, start_powered_off, false),
+ DEFINE_PROP_UINT32("psci-conduit", ARMCPU, psci_conduit, 0),
DEFINE_PROP_UINT32("midr", ARMCPU, midr, 0),
DEFINE_PROP_END_OF_LIST()
};
@@ -1035,7 +1142,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = arm_cpu_class_by_name;
cc->has_work = arm_cpu_has_work;
- cc->do_interrupt = arm_cpu_do_interrupt;
+ cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
cc->dump_state = arm_cpu_dump_state;
cc->set_pc = arm_cpu_set_pc;
cc->gdb_read_register = arm_cpu_gdb_read_register;
@@ -1043,11 +1150,14 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = arm_cpu_handle_mmu_fault;
#else
+ cc->do_interrupt = arm_cpu_do_interrupt;
cc->get_phys_page_debug = arm_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_arm_cpu;
#endif
cc->gdb_num_core_regs = 26;
cc->gdb_core_xml_file = "arm-core.xml";
+ cc->gdb_stop_before_watchpoint = true;
+ cc->debug_excp_handler = arm_debug_excp_handler;
}
static void cpu_register(const ARMCPUInfo *info)
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 369d4727a..7f800908f 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -51,6 +51,11 @@
#define EXCP_EXCEPTION_EXIT 8 /* Return from v7M exception. */
#define EXCP_KERNEL_TRAP 9 /* Jumped to kernel code page. */
#define EXCP_STREX 10
+#define EXCP_HVC 11 /* HyperVisor Call */
+#define EXCP_HYP_TRAP 12
+#define EXCP_SMC 13 /* Secure Monitor Call */
+#define EXCP_VIRQ 14
+#define EXCP_VFIQ 15
#define ARMV7M_EXCP_RESET 1
#define ARMV7M_EXCP_NMI 2
@@ -65,6 +70,8 @@
/* ARM-specific interrupt pending bits. */
#define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1
+#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_EXT_2
+#define CPU_INTERRUPT_VFIQ CPU_INTERRUPT_TGT_EXT_3
/* The usual mapping for an AArch64 system register to its AArch32
* counterpart is for the 32 bit world to have access to the lower
@@ -80,9 +87,11 @@
#define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#endif
-/* Meanings of the ARMCPU object's two inbound GPIO lines */
+/* Meanings of the ARMCPU object's four inbound GPIO lines */
#define ARM_CPU_IRQ 0
#define ARM_CPU_FIQ 1
+#define ARM_CPU_VIRQ 2
+#define ARM_CPU_VFIQ 3
typedef void ARMWriteCPFunc(void *opaque, int cp_info,
int srcreg, int operand, uint32_t value);
@@ -91,7 +100,7 @@ typedef uint32_t ARMReadCPFunc(void *opaque, int cp_info,
struct arm_boot_info;
-#define NB_MMU_MODES 2
+#define NB_MMU_MODES 4
/* We currently assume float and double are IEEE single and double
precision respectively.
@@ -144,8 +153,8 @@ typedef struct CPUARMState {
/* Banked registers. */
uint64_t banked_spsr[8];
- uint32_t banked_r13[6];
- uint32_t banked_r14[6];
+ uint32_t banked_r13[8];
+ uint32_t banked_r14[8];
/* These hold r8-r12. */
uint32_t usr_regs[5];
@@ -172,7 +181,6 @@ typedef struct CPUARMState {
uint64_t c1_sys; /* System control register. */
uint64_t c1_coproc; /* Coprocessor access register. */
uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */
- uint32_t c1_scr; /* secure config register. */
uint64_t ttbr0_el1; /* MMU translation table base 0. */
uint64_t ttbr1_el1; /* MMU translation table base 1. */
uint64_t c2_control; /* MMU translation table base control. */
@@ -184,15 +192,17 @@ typedef struct CPUARMState {
MPU write buffer control. */
uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */
uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */
+ uint64_t hcr_el2; /* Hypervisor configuration register */
+ uint64_t scr_el3; /* Secure configuration register. */
uint32_t ifsr_el2; /* Fault status registers. */
- uint64_t esr_el[2];
+ uint64_t esr_el[4];
uint32_t c6_region[8]; /* MPU base/size registers. */
- uint64_t far_el1; /* Fault address registers. */
+ uint64_t far_el[4]; /* Fault address registers. */
uint64_t par_el1; /* Translation result. */
uint32_t c9_insn; /* Cache lockdown registers. */
uint32_t c9_data;
- uint32_t c9_pmcr; /* performance monitor control register */
- uint32_t c9_pmcnten; /* perf monitor counter enables */
+ uint64_t c9_pmcr; /* performance monitor control register */
+ uint64_t c9_pmcnten; /* perf monitor counter enables */
uint32_t c9_pmovsr; /* perf monitor overflow status */
uint32_t c9_pmxevtyper; /* perf monitor event type */
uint32_t c9_pmuserenr; /* perf monitor user enable */
@@ -220,10 +230,12 @@ typedef struct CPUARMState {
uint64_t dbgbcr[16]; /* breakpoint control registers */
uint64_t dbgwvr[16]; /* watchpoint value registers */
uint64_t dbgwcr[16]; /* watchpoint control registers */
+ uint64_t mdscr_el1;
/* If the counter is enabled, this stores the last time the counter
* was reset. Otherwise it stores the counter value
*/
- uint32_t c15_ccnt;
+ uint64_t c15_ccnt;
+ uint64_t pmccfiltr_el0; /* Performance Monitor Filter Register */
} cp15;
struct {
@@ -321,6 +333,9 @@ typedef struct CPUARMState {
int eabi;
#endif
+ struct CPUBreakpoint *cpu_breakpoint[16];
+ struct CPUWatchpoint *cpu_watchpoint[16];
+
CPU_COMMON
/* These fields after the common ones so they are preserved on reset. */
@@ -351,6 +366,17 @@ int cpu_arm_signal_handler(int host_signum, void *pinfo,
int arm_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int mmu_idx);
+/**
+ * pmccntr_sync
+ * @env: CPUARMState
+ *
+ * Synchronises the counter in the PMCCNTR. This must always be called twice,
+ * once before any action that might affect the timer and again afterwards.
+ * The function is used to swap the state of the register if required.
+ * This only happens when not in user mode (!CONFIG_USER_ONLY)
+ */
+void pmccntr_sync(CPUARMState *env);
+
/* SCTLR bit meanings. Several bits have been reused in newer
* versions of the architecture; in that case we define constants
* for both old and new bit meanings. Code which tests against those
@@ -411,7 +437,13 @@ int arm_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
#define CPSR_E (1U << 9)
#define CPSR_IT_2_7 (0xfc00U)
#define CPSR_GE (0xfU << 16)
-#define CPSR_RESERVED (0xfU << 20)
+#define CPSR_IL (1U << 20)
+/* Note that the RESERVED bits include bit 21, which is PSTATE_SS in
+ * an AArch64 SPSR but RES0 in AArch32 SPSR and CPSR. In QEMU we use
+ * env->uncached_cpsr bit 21 to store PSTATE.SS when executing in AArch32,
+ * where it is live state but not accessible to the AArch32 code.
+ */
+#define CPSR_RESERVED (0x7U << 21)
#define CPSR_J (1U << 24)
#define CPSR_IT_0_1 (3U << 25)
#define CPSR_Q (1U << 27)
@@ -428,7 +460,9 @@ int arm_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
/* Bits writable in user mode. */
#define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE)
/* Execution state bits. MRS read as zero, MSR writes ignored. */
-#define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J)
+#define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J | CPSR_IL)
+/* Mask of bits which may be set by exception return copying them from SPSR */
+#define CPSR_ERET_MASK (~CPSR_RESERVED)
#define TTBCR_N (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ (7U << 0) /* TTBCR.EAE==1 */
@@ -475,6 +509,12 @@ int arm_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
#define PSTATE_MODE_EL1t 4
#define PSTATE_MODE_EL0t 0
+/* Map EL and handler into a PSTATE_MODE. */
+static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler)
+{
+ return (el << 2) | handler;
+}
+
/* Return the current PSTATE value. For the moment we don't support 32<->64 bit
* interprocessing, so we don't attempt to sync with the cpsr state used by
* the 32 bit decoder.
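As a standalone check of the encoding above (nothing QEMU-specific: the EL sits in PSTATE bits [3:2], the SPx-vs-SP0 "handler" selection in bit 0, so EL1h is 0x5 and EL0t is 0x0):

#include <assert.h>
#include <stdbool.h>

static unsigned int aarch64_pstate_mode(unsigned int el, bool handler)
{
    return (el << 2) | handler;   /* same body as the inline above */
}

int main(void)
{
    assert(aarch64_pstate_mode(1, true) == 5);   /* PSTATE_MODE_EL1h */
    assert(aarch64_pstate_mode(0, false) == 0);  /* PSTATE_MODE_EL0t */
    return 0;
}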
@@ -542,6 +582,58 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
}
}
+#define HCR_VM (1ULL << 0)
+#define HCR_SWIO (1ULL << 1)
+#define HCR_PTW (1ULL << 2)
+#define HCR_FMO (1ULL << 3)
+#define HCR_IMO (1ULL << 4)
+#define HCR_AMO (1ULL << 5)
+#define HCR_VF (1ULL << 6)
+#define HCR_VI (1ULL << 7)
+#define HCR_VSE (1ULL << 8)
+#define HCR_FB (1ULL << 9)
+#define HCR_BSU_MASK (3ULL << 10)
+#define HCR_DC (1ULL << 12)
+#define HCR_TWI (1ULL << 13)
+#define HCR_TWE (1ULL << 14)
+#define HCR_TID0 (1ULL << 15)
+#define HCR_TID1 (1ULL << 16)
+#define HCR_TID2 (1ULL << 17)
+#define HCR_TID3 (1ULL << 18)
+#define HCR_TSC (1ULL << 19)
+#define HCR_TIDCP (1ULL << 20)
+#define HCR_TACR (1ULL << 21)
+#define HCR_TSW (1ULL << 22)
+#define HCR_TPC (1ULL << 23)
+#define HCR_TPU (1ULL << 24)
+#define HCR_TTLB (1ULL << 25)
+#define HCR_TVM (1ULL << 26)
+#define HCR_TGE (1ULL << 27)
+#define HCR_TDZ (1ULL << 28)
+#define HCR_HCD (1ULL << 29)
+#define HCR_TRVM (1ULL << 30)
+#define HCR_RW (1ULL << 31)
+#define HCR_CD (1ULL << 32)
+#define HCR_ID (1ULL << 33)
+#define HCR_MASK ((1ULL << 34) - 1)
+
+#define SCR_NS (1U << 0)
+#define SCR_IRQ (1U << 1)
+#define SCR_FIQ (1U << 2)
+#define SCR_EA (1U << 3)
+#define SCR_FW (1U << 4)
+#define SCR_AW (1U << 5)
+#define SCR_NET (1U << 6)
+#define SCR_SMD (1U << 7)
+#define SCR_HCE (1U << 8)
+#define SCR_SIF (1U << 9)
+#define SCR_RW (1U << 10)
+#define SCR_ST (1U << 11)
+#define SCR_TWI (1U << 12)
+#define SCR_TWE (1U << 13)
+#define SCR_AARCH32_MASK (0x3fff & ~(SCR_RW | SCR_ST))
+#define SCR_AARCH64_MASK (0x3fff & ~SCR_NET)
+
/* Return the current FPSCR value. */
uint32_t vfp_get_fpscr(CPUARMState *env);
void vfp_set_fpscr(CPUARMState *env, uint32_t val);
@@ -661,14 +753,62 @@ static inline int arm_feature(CPUARMState *env, int feature)
return (env->features & (1ULL << feature)) != 0;
}
+#if !defined(CONFIG_USER_ONLY)
+/* Return true if exception levels below EL3 are in secure state,
+ * or would be following an exception return to that level.
+ * Unlike arm_is_secure() (which is always a question about the
+ * _current_ state of the CPU) this doesn't care about the current
+ * EL or mode.
+ */
+static inline bool arm_is_secure_below_el3(CPUARMState *env)
+{
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ return !(env->cp15.scr_el3 & SCR_NS);
+ } else {
+ /* If EL3 is not supported then the secure state is implementation
+ * defined, in which case QEMU defaults to non-secure.
+ */
+ return false;
+ }
+}
+
+/* Return true if the processor is in secure state */
+static inline bool arm_is_secure(CPUARMState *env)
+{
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ if (is_a64(env) && extract32(env->pstate, 2, 2) == 3) {
+ /* CPU currently in AArch64 state and EL3 */
+ return true;
+ } else if (!is_a64(env) &&
+ (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
+ /* CPU currently in AArch32 state and monitor mode */
+ return true;
+ }
+ }
+ return arm_is_secure_below_el3(env);
+}
+
+#else
+static inline bool arm_is_secure_below_el3(CPUARMState *env)
+{
+ return false;
+}
+
+static inline bool arm_is_secure(CPUARMState *env)
+{
+ return false;
+}
+#endif
+
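A standalone model of the secure-state decision above, with a local scr standing in for env->cp15.scr_el3 and a bool for the ARM_FEATURE_EL3 check:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SCR_NS (1U << 0)

static bool secure_below_el3(uint64_t scr, bool have_el3)
{
    if (have_el3) {
        return !(scr & SCR_NS);   /* NS clear: lower ELs are secure */
    }
    /* No EL3: IMPDEF, and QEMU picks non-secure */
    return false;
}

int main(void)
{
    assert(secure_below_el3(0, true));          /* NS == 0: secure */
    assert(!secure_below_el3(SCR_NS, true));    /* NS == 1: non-secure */
    assert(!secure_below_el3(0, false));        /* no EL3 at all */
    return 0;
}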
/* Return true if the specified exception level is running in AArch64 state. */
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
{
- /* We don't currently support EL2 or EL3, and this isn't valid for EL0
+ /* We don't currently support EL2, and this isn't valid for EL0
* (if we're in EL0, is_a64() is what you want, and if we're not in EL0
* then the state of EL0 isn't well defined.)
*/
- assert(el == 1);
+ assert(el == 1 || el == 3);
+
/* AArch64-capable CPUs always run with EL1 in AArch64 mode. This
* is a QEMU-imposed simplification which we may wish to change later.
* If we in future support EL2 and/or EL3, then the state of lower
@@ -678,6 +818,7 @@ static inline bool arm_el_is_aa64(CPUARMState *env, int el)
}
void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx);
/* Interface between CPU and Interrupt controller. */
void armv7m_nvic_set_pending(void *opaque, int irq);
@@ -851,19 +992,32 @@ static inline bool cptype_valid(int cptype)
#define PL1_RW (PL1_R | PL1_W)
#define PL0_RW (PL0_R | PL0_W)
-static inline int arm_current_pl(CPUARMState *env)
+/* Return the current Exception Level (as per ARMv8; note that this differs
+ * from the ARMv7 Privilege Level).
+ */
+static inline int arm_current_el(CPUARMState *env)
{
- if (env->aarch64) {
+ if (is_a64(env)) {
return extract32(env->pstate, 2, 2);
}
- if ((env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR) {
+ switch (env->uncached_cpsr & 0x1f) {
+ case ARM_CPU_MODE_USR:
return 0;
+ case ARM_CPU_MODE_HYP:
+ return 2;
+ case ARM_CPU_MODE_MON:
+ return 3;
+ default:
+ if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
+ /* If EL3 is 32-bit then all secure privileged modes run in
+ * EL3
+ */
+ return 3;
+ }
+
+ return 1;
}
- /* We don't currently implement the Virtualization or TrustZone
- * extensions, so PL2 and PL3 don't exist for us.
- */
- return 1;
}
typedef struct ARMCPRegInfo ARMCPRegInfo;
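Worked examples of the mode-to-EL mapping above, as a self-contained sketch using the architectural M-field encodings (USR=0x10, SVC=0x13, MON=0x16, HYP=0x1a); the 32-bit-EL3 corner case handled in the default arm is left out:

#include <assert.h>
#include <stdint.h>

/* Simplified: AArch32, no 32-bit EL3, as in the common path above. */
static int current_el_aarch32(uint32_t cpsr)
{
    switch (cpsr & 0x1f) {
    case 0x10: return 0;            /* USR */
    case 0x1a: return 2;            /* HYP */
    case 0x16: return 3;            /* MON */
    default:   return 1;            /* SVC, ABT, UND, IRQ, FIQ, SYS */
    }
}

int main(void)
{
    assert(current_el_aarch32(0x10) == 0);
    assert(current_el_aarch32(0x13) == 1);
    assert(current_el_aarch32(0x1a) == 2);
    assert(current_el_aarch32(0x16) == 3);
    return 0;
}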
@@ -1024,10 +1178,10 @@ static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri)
return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT);
}
-static inline bool cp_access_ok(int current_pl,
+static inline bool cp_access_ok(int current_el,
const ARMCPRegInfo *ri, int isread)
{
- return (ri->access >> ((current_pl * 2) + isread)) & 1;
+ return (ri->access >> ((current_el * 2) + isread)) & 1;
}
/**
@@ -1088,6 +1242,49 @@ bool write_cpustate_to_list(ARMCPU *cpu);
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
+static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx)
+{
+ CPUARMState *env = cs->env_ptr;
+ unsigned int cur_el = arm_current_el(env);
+ unsigned int target_el = arm_excp_target_el(cs, excp_idx);
+ /* FIXME: Use actual secure state. */
+ bool secure = false;
+ /* If in EL1/0, Physical IRQ routing to EL2 only happens from NS state. */
+ bool irq_can_hyp = !secure && cur_el < 2 && target_el == 2;
+
+ /* Don't take exceptions if they target a lower EL. */
+ if (cur_el > target_el) {
+ return false;
+ }
+
+ switch (excp_idx) {
+ case EXCP_FIQ:
+ if (irq_can_hyp && (env->cp15.hcr_el2 & HCR_FMO)) {
+ return true;
+ }
+ return !(env->daif & PSTATE_F);
+ case EXCP_IRQ:
+ if (irq_can_hyp && (env->cp15.hcr_el2 & HCR_IMO)) {
+ return true;
+ }
+ return !(env->daif & PSTATE_I);
+ case EXCP_VFIQ:
+ if (secure || !(env->cp15.hcr_el2 & HCR_FMO)) {
+ /* VFIQs are only taken when hypervized and non-secure. */
+ return false;
+ }
+ return !(env->daif & PSTATE_F);
+ case EXCP_VIRQ:
+ if (secure || !(env->cp15.hcr_el2 & HCR_IMO)) {
+ /* VIRQs are only taken when hypervized and non-secure. */
+ return false;
+ }
+ return !(env->daif & PSTATE_I);
+ default:
+ g_assert_not_reached();
+ }
+}
+
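A standalone sketch of just the EXCP_FIQ arm of this logic: once HCR_EL2.FMO routes physical FIQs to EL2 from non-secure EL0/EL1, PSTATE.F no longer masks them; otherwise the usual F-bit check applies:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define HCR_FMO  (1ULL << 3)
#define PSTATE_F (1U << 6)

/* Mirrors the EXCP_FIQ case above for cur_el < 2 and target EL 1 or 2. */
static bool fiq_unmasked(uint64_t hcr_el2, uint32_t daif, bool irq_can_hyp)
{
    if (irq_can_hyp && (hcr_el2 & HCR_FMO)) {
        return true;                 /* routed to EL2: F bit is ignored */
    }
    return !(daif & PSTATE_F);
}

int main(void)
{
    assert(fiq_unmasked(HCR_FMO, PSTATE_F, true));   /* taken despite F */
    assert(!fiq_unmasked(0, PSTATE_F, true));        /* masked by F */
    assert(fiq_unmasked(0, 0, false));               /* normal unmasked */
    return 0;
}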
static inline CPUARMState *cpu_init(const char *cpu_model)
{
ARMCPU *cpu = cpu_arm_init(cpu_model);
@@ -1108,7 +1305,67 @@ static inline CPUARMState *cpu_init(const char *cpu_model)
#define MMU_USER_IDX 0
static inline int cpu_mmu_index (CPUARMState *env)
{
- return arm_current_pl(env);
+ return arm_current_el(env);
+}
+
+/* Return the Exception Level targeted by debug exceptions;
+ * currently always EL1 since we don't implement EL2 or EL3.
+ */
+static inline int arm_debug_target_el(CPUARMState *env)
+{
+ return 1;
+}
+
+static inline bool aa64_generate_debug_exceptions(CPUARMState *env)
+{
+ if (arm_current_el(env) == arm_debug_target_el(env)) {
+ if ((extract32(env->cp15.mdscr_el1, 13, 1) == 0)
+ || (env->daif & PSTATE_D)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static inline bool aa32_generate_debug_exceptions(CPUARMState *env)
+{
+ if (arm_current_el(env) == 0 && arm_el_is_aa64(env, 1)) {
+ return aa64_generate_debug_exceptions(env);
+ }
+ return arm_current_el(env) != 2;
+}
+
+/* Return true if debugging exceptions are currently enabled.
+ * This corresponds to what in ARM ARM pseudocode would be
+ * if UsingAArch32() then
+ * return AArch32.GenerateDebugExceptions()
+ * else
+ * return AArch64.GenerateDebugExceptions()
+ * We choose to push the if() down into this function for clarity,
+ * since the pseudocode has it at all callsites except for the one in
+ * CheckSoftwareStep(), where it is elided because both branches would
+ * always return the same value.
+ *
+ * Parts of the pseudocode relating to EL2 and EL3 are omitted because we
+ * don't yet implement those exception levels or their associated trap bits.
+ */
+static inline bool arm_generate_debug_exceptions(CPUARMState *env)
+{
+ if (env->aarch64) {
+ return aa64_generate_debug_exceptions(env);
+ } else {
+ return aa32_generate_debug_exceptions(env);
+ }
+}
+
+/* Is single-stepping active? (Note that the "is EL_D AArch64?" check
+ * implicitly means this always returns false in pre-v8 CPUs.)
+ */
+static inline bool arm_singlestep_active(CPUARMState *env)
+{
+ return extract32(env->cp15.mdscr_el1, 0, 1)
+ && arm_el_is_aa64(env, arm_debug_target_el(env))
+ && arm_generate_debug_exceptions(env);
}
#include "exec/cpu-all.h"
@@ -1136,12 +1393,25 @@ static inline int cpu_mmu_index (CPUARMState *env)
#define ARM_TBFLAG_BSWAP_CODE_MASK (1 << ARM_TBFLAG_BSWAP_CODE_SHIFT)
#define ARM_TBFLAG_CPACR_FPEN_SHIFT 17
#define ARM_TBFLAG_CPACR_FPEN_MASK (1 << ARM_TBFLAG_CPACR_FPEN_SHIFT)
+#define ARM_TBFLAG_SS_ACTIVE_SHIFT 18
+#define ARM_TBFLAG_SS_ACTIVE_MASK (1 << ARM_TBFLAG_SS_ACTIVE_SHIFT)
+#define ARM_TBFLAG_PSTATE_SS_SHIFT 19
+#define ARM_TBFLAG_PSTATE_SS_MASK (1 << ARM_TBFLAG_PSTATE_SS_SHIFT)
+/* We store the bottom two bits of the CPAR as TB flags and handle
+ * checks on the other bits at runtime
+ */
+#define ARM_TBFLAG_XSCALE_CPAR_SHIFT 20
+#define ARM_TBFLAG_XSCALE_CPAR_MASK (3 << ARM_TBFLAG_XSCALE_CPAR_SHIFT)
/* Bit usage when in AArch64 state */
#define ARM_TBFLAG_AA64_EL_SHIFT 0
#define ARM_TBFLAG_AA64_EL_MASK (0x3 << ARM_TBFLAG_AA64_EL_SHIFT)
#define ARM_TBFLAG_AA64_FPEN_SHIFT 2
#define ARM_TBFLAG_AA64_FPEN_MASK (1 << ARM_TBFLAG_AA64_FPEN_SHIFT)
+#define ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT 3
+#define ARM_TBFLAG_AA64_SS_ACTIVE_MASK (1 << ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT)
+#define ARM_TBFLAG_AA64_PSTATE_SS_SHIFT 4
+#define ARM_TBFLAG_AA64_PSTATE_SS_MASK (1 << ARM_TBFLAG_AA64_PSTATE_SS_SHIFT)
/* some convenience accessor macros */
#define ARM_TBFLAG_AARCH64_STATE(F) \
@@ -1162,23 +1432,53 @@ static inline int cpu_mmu_index (CPUARMState *env)
(((F) & ARM_TBFLAG_BSWAP_CODE_MASK) >> ARM_TBFLAG_BSWAP_CODE_SHIFT)
#define ARM_TBFLAG_CPACR_FPEN(F) \
(((F) & ARM_TBFLAG_CPACR_FPEN_MASK) >> ARM_TBFLAG_CPACR_FPEN_SHIFT)
+#define ARM_TBFLAG_SS_ACTIVE(F) \
+ (((F) & ARM_TBFLAG_SS_ACTIVE_MASK) >> ARM_TBFLAG_SS_ACTIVE_SHIFT)
+#define ARM_TBFLAG_PSTATE_SS(F) \
+ (((F) & ARM_TBFLAG_PSTATE_SS_MASK) >> ARM_TBFLAG_PSTATE_SS_SHIFT)
+#define ARM_TBFLAG_XSCALE_CPAR(F) \
+ (((F) & ARM_TBFLAG_XSCALE_CPAR_MASK) >> ARM_TBFLAG_XSCALE_CPAR_SHIFT)
#define ARM_TBFLAG_AA64_EL(F) \
(((F) & ARM_TBFLAG_AA64_EL_MASK) >> ARM_TBFLAG_AA64_EL_SHIFT)
#define ARM_TBFLAG_AA64_FPEN(F) \
(((F) & ARM_TBFLAG_AA64_FPEN_MASK) >> ARM_TBFLAG_AA64_FPEN_SHIFT)
+#define ARM_TBFLAG_AA64_SS_ACTIVE(F) \
+ (((F) & ARM_TBFLAG_AA64_SS_ACTIVE_MASK) >> ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT)
+#define ARM_TBFLAG_AA64_PSTATE_SS(F) \
+ (((F) & ARM_TBFLAG_AA64_PSTATE_SS_MASK) >> ARM_TBFLAG_AA64_PSTATE_SS_SHIFT)
static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
{
- int fpen = extract32(env->cp15.c1_coproc, 20, 2);
+ int fpen;
+
+ if (arm_feature(env, ARM_FEATURE_V6)) {
+ fpen = extract32(env->cp15.c1_coproc, 20, 2);
+ } else {
+ /* CPACR doesn't exist before v6, so VFP is always accessible */
+ fpen = 3;
+ }
if (is_a64(env)) {
*pc = env->pc;
*flags = ARM_TBFLAG_AARCH64_STATE_MASK
- | (arm_current_pl(env) << ARM_TBFLAG_AA64_EL_SHIFT);
- if (fpen == 3 || (fpen == 1 && arm_current_pl(env) != 0)) {
+ | (arm_current_el(env) << ARM_TBFLAG_AA64_EL_SHIFT);
+ if (fpen == 3 || (fpen == 1 && arm_current_el(env) != 0)) {
*flags |= ARM_TBFLAG_AA64_FPEN_MASK;
}
+ /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
+ * states defined in the ARM ARM for software singlestep:
+ * SS_ACTIVE PSTATE.SS State
+ * 0 x Inactive (the TB flag for SS is always 0)
+ * 1 0 Active-pending
+ * 1 1 Active-not-pending
+ */
+ if (arm_singlestep_active(env)) {
+ *flags |= ARM_TBFLAG_AA64_SS_ACTIVE_MASK;
+ if (env->pstate & PSTATE_SS) {
+ *flags |= ARM_TBFLAG_AA64_PSTATE_SS_MASK;
+ }
+ }
} else {
int privmode;
*pc = env->regs[15];
@@ -1199,9 +1499,24 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
|| arm_el_is_aa64(env, 1)) {
*flags |= ARM_TBFLAG_VFPEN_MASK;
}
- if (fpen == 3 || (fpen == 1 && arm_current_pl(env) != 0)) {
+ if (fpen == 3 || (fpen == 1 && arm_current_el(env) != 0)) {
*flags |= ARM_TBFLAG_CPACR_FPEN_MASK;
}
+ /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
+ * states defined in the ARM ARM for software singlestep:
+ * SS_ACTIVE PSTATE.SS State
+ * 0 x Inactive (the TB flag for SS is always 0)
+ * 1 0 Active-pending
+ * 1 1 Active-not-pending
+ */
+ if (arm_singlestep_active(env)) {
+ *flags |= ARM_TBFLAG_SS_ACTIVE_MASK;
+ if (env->uncached_cpsr & PSTATE_SS) {
+ *flags |= ARM_TBFLAG_PSTATE_SS_MASK;
+ }
+ }
+ *flags |= (extract32(env->cp15.c15_cpar, 0, 2)
+ << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
}
*cs_base = 0;
@@ -1218,4 +1533,10 @@ static inline void cpu_pc_from_tb(CPUARMState *env, TranslationBlock *tb)
}
}
+enum {
+ QEMU_PSCI_CONDUIT_DISABLED = 0,
+ QEMU_PSCI_CONDUIT_SMC = 1,
+ QEMU_PSCI_CONDUIT_HVC = 2,
+};
+
#endif
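The SS_ACTIVE/PSTATE_SS TB-flag pairs added above encode the three software single-step states; a self-contained decode matching the tables in the comments:

#include <assert.h>
#include <stdbool.h>

typedef enum {
    SS_INACTIVE,            /* SS_ACTIVE == 0 (PSTATE.SS is don't-care) */
    SS_ACTIVE_PENDING,      /* SS_ACTIVE == 1, PSTATE.SS == 0 */
    SS_ACTIVE_NOT_PENDING,  /* SS_ACTIVE == 1, PSTATE.SS == 1 */
} SSState;

static SSState ss_state(bool ss_active, bool pstate_ss)
{
    if (!ss_active) {
        return SS_INACTIVE;
    }
    return pstate_ss ? SS_ACTIVE_NOT_PENDING : SS_ACTIVE_PENDING;
}

int main(void)
{
    assert(ss_state(false, true) == SS_INACTIVE);
    assert(ss_state(true, false) == SS_ACTIVE_PENDING);
    assert(ss_state(true, true) == SS_ACTIVE_NOT_PENDING);
    return 0;
}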
diff --git a/target-arm/cpu64.c b/target-arm/cpu64.c
index 8b2081c24..bb778b3d9 100644
--- a/target-arm/cpu64.c
+++ b/target-arm/cpu64.c
@@ -123,10 +123,12 @@ static void aarch64_a57_initfn(Object *obj)
cpu->id_isar2 = 0x21232042;
cpu->id_isar3 = 0x01112131;
cpu->id_isar4 = 0x00011142;
+ cpu->id_isar5 = 0x00011121;
cpu->id_aa64pfr0 = 0x00002222;
cpu->id_aa64dfr0 = 0x10305106;
- cpu->id_aa64isar0 = 0x00010000;
+ cpu->id_aa64isar0 = 0x00011120;
cpu->id_aa64mmfr0 = 0x00001124;
+ cpu->dbgdidr = 0x3516d000;
cpu->clidr = 0x0a200023;
cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
@@ -149,7 +151,7 @@ static void aarch64_any_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
set_feature(&cpu->env, ARM_FEATURE_CRC);
- cpu->ctr = 0x80030003; /* 32 byte I and D cacheline size, VIPT icache */
+ cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */
cpu->dcz_blocksize = 7; /* 512 bytes */
}
#endif
@@ -194,7 +196,10 @@ static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
{
CPUClass *cc = CPU_CLASS(oc);
+#if !defined(CONFIG_USER_ONLY)
cc->do_interrupt = aarch64_cpu_do_interrupt;
+#endif
+ cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
cc->set_pc = aarch64_cpu_set_pc;
cc->gdb_read_register = aarch64_cpu_gdb_read_register;
cc->gdb_write_register = aarch64_cpu_gdb_write_register;
diff --git a/target-arm/helper-a64.c b/target-arm/helper-a64.c
index 2b4ce6ac6..81066ca93 100644
--- a/target-arm/helper-a64.c
+++ b/target-arm/helper-a64.c
@@ -438,15 +438,19 @@ uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes)
return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
+#if !defined(CONFIG_USER_ONLY)
+
/* Handle a CPU exception. */
void aarch64_cpu_do_interrupt(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
- target_ulong addr = env->cp15.vbar_el[1];
+ unsigned int new_el = arm_excp_target_el(cs, cs->exception_index);
+ target_ulong addr = env->cp15.vbar_el[new_el];
+ unsigned int new_mode = aarch64_pstate_mode(new_el, true);
int i;
- if (arm_current_pl(env) == 0) {
+ if (arm_current_el(env) < new_el) {
if (env->aarch64) {
addr += 0x400;
} else {
@@ -457,30 +461,40 @@ void aarch64_cpu_do_interrupt(CPUState *cs)
}
arm_log_exception(cs->exception_index);
- qemu_log_mask(CPU_LOG_INT, "...from EL%d\n", arm_current_pl(env));
+ qemu_log_mask(CPU_LOG_INT, "...from EL%d\n", arm_current_el(env));
if (qemu_loglevel_mask(CPU_LOG_INT)
&& !excp_is_internal(cs->exception_index)) {
qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%" PRIx32 "\n",
env->exception.syndrome);
}
- env->cp15.esr_el[1] = env->exception.syndrome;
- env->cp15.far_el1 = env->exception.vaddress;
+ if (arm_is_psci_call(cpu, cs->exception_index)) {
+ arm_handle_psci_call(cpu);
+ qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
+ return;
+ }
switch (cs->exception_index) {
case EXCP_PREFETCH_ABORT:
case EXCP_DATA_ABORT:
+ env->cp15.far_el[new_el] = env->exception.vaddress;
qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
- env->cp15.far_el1);
- break;
+ env->cp15.far_el[new_el]);
+ /* fall through */
case EXCP_BKPT:
case EXCP_UDEF:
case EXCP_SWI:
+ case EXCP_HVC:
+ case EXCP_HYP_TRAP:
+ case EXCP_SMC:
+ env->cp15.esr_el[new_el] = env->exception.syndrome;
break;
case EXCP_IRQ:
+ case EXCP_VIRQ:
addr += 0x80;
break;
case EXCP_FIQ:
+ case EXCP_VFIQ:
addr += 0x100;
break;
default:
@@ -488,16 +502,15 @@ void aarch64_cpu_do_interrupt(CPUState *cs)
}
if (is_a64(env)) {
- env->banked_spsr[aarch64_banked_spsr_index(1)] = pstate_read(env);
- env->sp_el[arm_current_pl(env)] = env->xregs[31];
- env->xregs[31] = env->sp_el[1];
- env->elr_el[1] = env->pc;
+ env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
+ aarch64_save_sp(env, arm_current_el(env));
+ env->elr_el[new_el] = env->pc;
} else {
env->banked_spsr[0] = cpsr_read(env);
if (!env->thumb) {
- env->cp15.esr_el[1] |= 1 << 25;
+ env->cp15.esr_el[new_el] |= 1 << 25;
}
- env->elr_el[1] = env->regs[15];
+ env->elr_el[new_el] = env->regs[15];
for (i = 0; i < 15; i++) {
env->xregs[i] = env->regs[i];
@@ -506,9 +519,11 @@ void aarch64_cpu_do_interrupt(CPUState *cs)
env->condexec_bits = 0;
}
- pstate_write(env, PSTATE_DAIF | PSTATE_MODE_EL1h);
+ pstate_write(env, PSTATE_DAIF | new_mode);
env->aarch64 = 1;
+ aarch64_restore_sp(env, new_el);
env->pc = addr;
cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
+#endif
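The vector offsets this function applies can be checked in isolation. A standalone sketch covering only the cases visible in the hunks above (base from VBAR_ELx, +0x400 when the exception came from a lower EL running AArch64, +0x80 for IRQ/vIRQ, +0x100 for FIQ/vFIQ; the AArch32-origin branch is elided here as it is in the hunk):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

enum { EXC_SYNC, EXC_IRQ, EXC_FIQ };

static uint64_t vector_addr(uint64_t vbar, bool from_lower_el_aa64, int kind)
{
    uint64_t addr = vbar;

    if (from_lower_el_aa64) {
        addr += 0x400;               /* lower EL, using AArch64 */
    }
    switch (kind) {
    case EXC_IRQ: addr += 0x80;  break;
    case EXC_FIQ: addr += 0x100; break;
    default:      break;             /* synchronous: offset 0 */
    }
    return addr;
}

int main(void)
{
    assert(vector_addr(0x1000, false, EXC_SYNC) == 0x1000);
    assert(vector_addr(0x1000, true,  EXC_IRQ)  == 0x1480);
    assert(vector_addr(0x1000, true,  EXC_FIQ)  == 0x1500);
    return 0;
}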
diff --git a/target-arm/helper.c b/target-arm/helper.c
index d3438560e..b74d348a3 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -304,17 +304,6 @@ void init_cpreg_list(ARMCPU *cpu)
g_list_free(keys);
}
-/* Return true if extended addresses are enabled.
- * This is always the case if our translation regime is 64 bit,
- * but depends on TTBCR.EAE for 32 bit.
- */
-static inline bool extended_addresses_enabled(CPUARMState *env)
-{
- return arm_el_is_aa64(env, 1)
- || ((arm_feature(env, ARM_FEATURE_LPAE)
- && (env->cp15.c2_control & TTBCR_EAE)));
-}
-
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
@@ -388,13 +377,48 @@ static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
+/* IS variants of TLB operations must affect all cores */
+static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush(other_cs, 1);
+ }
+}
+
+static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush(other_cs, value == 0);
+ }
+}
+
+static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
+ }
+}
+
+static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
+ }
+}
+
static const ARMCPRegInfo cp_reginfo[] = {
- /* DBGDIDR: just RAZ. In particular this means the "debug architecture
- * version" bits will read as a reserved value, which should cause
- * Linux to not try to use the debug hardware.
- */
- { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
{ .name = "FCSEIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse),
.resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
@@ -420,21 +444,6 @@ static const ARMCPRegInfo not_v8_cp_reginfo[] = {
*/
{ .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = CP_ANY,
.opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
- /* MMU TLB control. Note that the wildcarding means we cover not just
- * the unified TLB ops but also the dside/iside/inner-shareable variants.
- */
- { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
- .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
- .type = ARM_CP_NO_MIGRATE },
- { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
- .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
- .type = ARM_CP_NO_MIGRATE },
- { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
- .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
- .type = ARM_CP_NO_MIGRATE },
- { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
- .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
- .type = ARM_CP_NO_MIGRATE },
/* Cache maintenance ops; some of this space may be overridden later. */
{ .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
.opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
@@ -471,6 +480,28 @@ static const ARMCPRegInfo not_v7_cp_reginfo[] = {
{ .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
.access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
.resetvalue = 0 },
+ /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
+ * implementing it as RAZ means the "debug architecture version" bits
+ * will read as a reserved value, which should cause Linux to not try
+ * to use the debug hardware.
+ */
+ { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ /* MMU TLB control. Note that the wildcarding means we cover not just
+ * the unified TLB ops but also the dside/iside/inner-shareable variants.
+ */
+ { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
+ .type = ARM_CP_NO_MIGRATE },
+ { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
+ .type = ARM_CP_NO_MIGRATE },
+ { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
+ .type = ARM_CP_NO_MIGRATE },
+ { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
+ .type = ARM_CP_NO_MIGRATE },
REGINFO_SENTINEL
};
@@ -521,7 +552,7 @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
.access = PL0_W, .type = ARM_CP_NOP },
{ .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
.access = PL1_RW,
- .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el1),
+ .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[1]),
.resetvalue = 0, },
/* Watchpoint Fault Address Register : should actually only be present
* for 1136, 1176, 11MPCore.
@@ -540,32 +571,47 @@ static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri)
/* Performance monitor registers user accessibility is controlled
* by PMUSERENR.
*/
- if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
+ if (arm_current_el(env) == 0 && !env->cp15.c9_pmuserenr) {
return CP_ACCESS_TRAP;
}
return CP_ACCESS_OK;
}
#ifndef CONFIG_USER_ONLY
-static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
+
+static inline bool arm_ccnt_enabled(CPUARMState *env)
{
- /* Don't computer the number of ticks in user mode */
- uint32_t temp_ticks;
+ /* This does not support checking PMCCFILTR_EL0 register */
- temp_ticks = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) *
- get_ticks_per_sec() / 1000000;
+ if (!(env->cp15.c9_pmcr & PMCRE)) {
+ return false;
+ }
- if (env->cp15.c9_pmcr & PMCRE) {
- /* If the counter is enabled */
- if (env->cp15.c9_pmcr & PMCRD) {
- /* Increment once every 64 processor clock cycles */
- env->cp15.c15_ccnt = (temp_ticks/64) - env->cp15.c15_ccnt;
- } else {
- env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
- }
+ return true;
+}
+
+void pmccntr_sync(CPUARMState *env)
+{
+ uint64_t temp_ticks;
+
+ temp_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL),
+ get_ticks_per_sec(), 1000000);
+
+ if (env->cp15.c9_pmcr & PMCRD) {
+ /* Increment once every 64 processor clock cycles */
+ temp_ticks /= 64;
}
+ if (arm_ccnt_enabled(env)) {
+ env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
+ }
+}
+
+static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmccntr_sync(env);
+
if (value & PMCRC) {
/* The counter has been reset */
env->cp15.c15_ccnt = 0;
@@ -575,26 +621,20 @@ static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
env->cp15.c9_pmcr &= ~0x39;
env->cp15.c9_pmcr |= (value & 0x39);
- if (env->cp15.c9_pmcr & PMCRE) {
- if (env->cp15.c9_pmcr & PMCRD) {
- /* Increment once every 64 processor clock cycles */
- temp_ticks /= 64;
- }
- env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
- }
+ pmccntr_sync(env);
}
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
- uint32_t total_ticks;
+ uint64_t total_ticks;
- if (!(env->cp15.c9_pmcr & PMCRE)) {
+ if (!arm_ccnt_enabled(env)) {
/* Counter is disabled, do not change value */
return env->cp15.c15_ccnt;
}
- total_ticks = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) *
- get_ticks_per_sec() / 1000000;
+ total_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL),
+ get_ticks_per_sec(), 1000000);
if (env->cp15.c9_pmcr & PMCRD) {
/* Increment once every 64 processor clock cycles */
@@ -606,16 +646,16 @@ static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
- uint32_t total_ticks;
+ uint64_t total_ticks;
- if (!(env->cp15.c9_pmcr & PMCRE)) {
+ if (!arm_ccnt_enabled(env)) {
/* Counter is disabled, set the absolute value */
env->cp15.c15_ccnt = value;
return;
}
- total_ticks = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) *
- get_ticks_per_sec() / 1000000;
+ total_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL),
+ get_ticks_per_sec(), 1000000);
if (env->cp15.c9_pmcr & PMCRD) {
/* Increment once every 64 processor clock cycles */
@@ -623,8 +663,31 @@ static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
}
env->cp15.c15_ccnt = total_ticks - value;
}
+
+static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint64_t cur_val = pmccntr_read(env, NULL);
+
+ pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
+}
+
+#else /* CONFIG_USER_ONLY */
+
+void pmccntr_sync(CPUARMState *env)
+{
+}
+
#endif
+static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmccntr_sync(env);
+ env->cp15.pmccfiltr_el0 = value & 0x7E000000;
+ pmccntr_sync(env);
+}
+
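pmccfiltr_write() above shows the call-twice pattern the pmccntr_sync() documentation in cpu.h requires. A standalone model of the representation trick it depends on: while the counter runs, c15_ccnt holds (total_ticks - count), so applying ticks - c15_ccnt flips the field between offset form and raw count, and bracketing a reprogramming step with two calls keeps the guest-visible count unchanged:

#include <assert.h>
#include <stdint.h>

/* ccnt holds (ticks - count) while the counter runs; each sync() call
 * toggles it between that offset form and the raw count.
 */
static uint64_t ccnt;

static void sync_ccnt(uint64_t now)
{
    ccnt = now - ccnt;
}

int main(void)
{
    ccnt = 1000 - 250;        /* offset form: counter reads 250 at tick 1000 */

    /* Bracket a configuration change at tick 1200 with two syncs: */
    sync_ccnt(1200);          /* now raw count: 1200 - 750 = 450 */
    assert(ccnt == 450);
    /* ... reprogram PMCCFILTR / PMCR here ... */
    sync_ccnt(1200);          /* back to offset form: 1200 - 450 = 750 */
    assert(ccnt == 750);
    return 0;
}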
static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@@ -684,6 +747,32 @@ static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
raw_write(env, ri, value & ~0x1FULL);
}
+static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
+{
+ /* We only mask off bits that are RES0 both for AArch64 and AArch32.
+ * For bits that vary between AArch32/64, code needs to check the
+ * current execution mode before directly using the feature bit.
+ */
+ uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;
+
+ if (!arm_feature(env, ARM_FEATURE_EL2)) {
+ valid_mask &= ~SCR_HCE;
+
+ /* On ARMv7, SMD (or SCD as it is called in v7) is only
+ * supported if EL2 exists. The bit is UNK/SBZP when
+ * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
+ * when EL2 is unavailable.
+ */
+ if (arm_feature(env, ARM_FEATURE_V7)) {
+ valid_mask &= ~SCR_SMD;
+ }
+ }
+
+ /* Clear all-context RES0 bits. */
+ value &= valid_mask;
+ raw_write(env, ri, value);
+}
+
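The mask arithmetic in scr_write() works out as follows; a standalone check using the SCR_* values defined in cpu.h earlier in this patch:

#include <assert.h>
#include <stdint.h>

#define SCR_NET (1U << 6)
#define SCR_SMD (1U << 7)
#define SCR_HCE (1U << 8)
#define SCR_RW  (1U << 10)
#define SCR_ST  (1U << 11)
#define SCR_AARCH32_MASK (0x3fff & ~(SCR_RW | SCR_ST))
#define SCR_AARCH64_MASK (0x3fff & ~SCR_NET)

int main(void)
{
    uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;

    assert(valid_mask == 0x3fff);   /* the union covers all 14 bits */
    valid_mask &= ~SCR_HCE;         /* no EL2: HCE is RES0 */
    assert(valid_mask == 0x3eff);
    valid_mask &= ~SCR_SMD;         /* v7 without EL2: SMD forced to 0 too */
    assert(valid_mask == 0x3e7f);
    return 0;
}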
static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
ARMCPU *cpu = arm_env_get_cpu(env);
@@ -712,13 +801,6 @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
}
static const ARMCPRegInfo v7_cp_reginfo[] = {
- /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
- * debug components
- */
- { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
/* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
{ .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
.access = PL1_W, .type = ARM_CP_NOP },
@@ -734,16 +816,28 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
* or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
*/
{ .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
- .access = PL0_RW, .resetvalue = 0,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
+ .access = PL0_RW, .type = ARM_CP_NO_MIGRATE,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
.writefn = pmcntenset_write,
.accessfn = pmreg_access,
.raw_writefn = raw_write },
+ { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
+ .writefn = pmcntenset_write, .raw_writefn = raw_write },
{ .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
- .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
+ .access = PL0_RW,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
.accessfn = pmreg_access,
.writefn = pmcntenclr_write,
.type = ARM_CP_NO_MIGRATE },
+ { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .type = ARM_CP_NO_MIGRATE,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
+ .writefn = pmcntenclr_write },
{ .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
.access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
.accessfn = pmreg_access,
@@ -761,9 +855,21 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
#ifndef CONFIG_USER_ONLY
{ .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
.access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
- .readfn = pmccntr_read, .writefn = pmccntr_write,
+ .readfn = pmccntr_read, .writefn = pmccntr_write32,
.accessfn = pmreg_access },
+ { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .type = ARM_CP_IO,
+ .readfn = pmccntr_read, .writefn = pmccntr_write, },
#endif
+ { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
+ .writefn = pmccfiltr_write,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .type = ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
+ .resetvalue = 0, },
{ .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
.access = PL0_RW,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper),
@@ -793,8 +899,8 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.fieldoffset = offsetof(CPUARMState, cp15.vbar_el[1]),
.resetvalue = 0 },
{ .name = "SCR", .cp = 15, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_scr),
- .resetvalue = 0, },
+ .access = PL1_RW, .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
+ .resetvalue = 0, .writefn = scr_write },
{ .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
.access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_MIGRATE },
@@ -840,6 +946,44 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
{ .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
.type = ARM_CP_NO_MIGRATE, .access = PL1_R, .readfn = isr_read },
+ /* 32 bit ITLB invalidates */
+ { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
+ { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
+ { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
+ /* 32 bit DTLB invalidates */
+ { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
+ { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
+ { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
+ /* 32 bit TLB invalidates */
+ { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
+ { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
+ { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
+ { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimvaa_write },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo v7mp_cp_reginfo[] = {
+ /* 32 bit TLB invalidates, Inner Shareable */
+ { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_is_write },
+ { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_is_write },
+ { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W,
+ .writefn = tlbiasid_is_write },
+ { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W,
+ .writefn = tlbimvaa_is_write },
REGINFO_SENTINEL
};
@@ -852,7 +996,7 @@ static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
- if (arm_current_pl(env) == 0 && (env->teecr & 1)) {
+ if (arm_current_el(env) == 0 && (env->teecr & 1)) {
return CP_ACCESS_TRAP;
}
return CP_ACCESS_OK;
@@ -898,7 +1042,7 @@ static const ARMCPRegInfo v6k_cp_reginfo[] = {
static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
/* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero */
- if (arm_current_pl(env) == 0 && !extract32(env->cp15.c14_cntkctl, 0, 2)) {
+ if (arm_current_el(env) == 0 && !extract32(env->cp15.c14_cntkctl, 0, 2)) {
return CP_ACCESS_TRAP;
}
return CP_ACCESS_OK;
@@ -907,7 +1051,7 @@ static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri)
static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx)
{
/* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
- if (arm_current_pl(env) == 0 &&
+ if (arm_current_el(env) == 0 &&
!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
return CP_ACCESS_TRAP;
}
@@ -919,7 +1063,7 @@ static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx)
/* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
* EL0[PV]TEN is zero.
*/
- if (arm_current_pl(env) == 0 &&
+ if (arm_current_el(env) == 0 &&
!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
return CP_ACCESS_TRAP;
}
@@ -1516,7 +1660,7 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = {
/* 64-bit FAR; this entry also gives us the AArch32 DFAR */
{ .name = "FAR_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el1),
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
.resetvalue = 0, },
REGINFO_SENTINEL
};
@@ -1596,12 +1740,7 @@ static const ARMCPRegInfo omap_cp_reginfo[] = {
static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
- value &= 0x3fff;
- if (env->cp15.c15_cpar != value) {
- /* Changes cp0 to cp13 behavior, so needs a TB flush. */
- tb_flush(env);
- env->cp15.c15_cpar = value;
- }
+ env->cp15.c15_cpar = value & 0x3fff;
}
static const ARMCPRegInfo xscale_cp_reginfo[] = {
@@ -1734,11 +1873,6 @@ static const ARMCPRegInfo lpae_cp_reginfo[] = {
{ .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
.access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
.resetvalue = 0 },
- /* 64 bit access versions of the (dummy) debug registers */
- { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
- .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
- { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
- .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
{ .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
.access = PL1_RW, .type = ARM_CP_64BIT,
.fieldoffset = offsetof(CPUARMState, cp15.par_el1), .resetvalue = 0 },
@@ -1777,7 +1911,7 @@ static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
- if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
+ if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
return CP_ACCESS_TRAP;
}
return CP_ACCESS_OK;
@@ -1795,18 +1929,23 @@ static CPAccessResult aa64_cacheop_access(CPUARMState *env,
/* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
* SCTLR_EL1.UCI is set.
*/
- if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCI)) {
+ if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCI)) {
return CP_ACCESS_TRAP;
}
return CP_ACCESS_OK;
}
+/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
+ * Page D4-1736 (DDI0487A.b)
+ */
+
static void tlbi_aa64_va_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate by VA (AArch64 version) */
ARMCPU *cpu = arm_env_get_cpu(env);
- uint64_t pageaddr = value << 12;
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
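+ /* E.g. an operand with VA[55:12] all ones expands to pageaddr
+ * 0xfffffffffffff000: bit 55 is sign-extended into the top byte.
+ */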
+
tlb_flush_page(CPU(cpu), pageaddr);
}
@@ -1815,7 +1954,8 @@ static void tlbi_aa64_vaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
/* Invalidate by VA, all ASIDs (AArch64 version) */
ARMCPU *cpu = arm_env_get_cpu(env);
- uint64_t pageaddr = value << 12;
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
tlb_flush_page(CPU(cpu), pageaddr);
}
@@ -1828,12 +1968,45 @@ static void tlbi_aa64_asid_write(CPUARMState *env, const ARMCPRegInfo *ri,
tlb_flush(CPU(cpu), asid == 0);
}
+static void tlbi_aa64_va_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_page(other_cs, pageaddr);
+ }
+}
+
+static void tlbi_aa64_vaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_page(other_cs, pageaddr);
+ }
+}
+
+static void tlbi_aa64_asid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+ int asid = extract64(value, 48, 16);
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush(other_cs, asid == 0);
+ }
+}
+
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
/* We don't implement EL2, so the only control on DC ZVA is the
* bit in the SCTLR which can prohibit access for EL0.
*/
- if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_DZE)) {
+ if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_DZE)) {
return CP_ACCESS_TRAP;
}
return CP_ACCESS_OK;
@@ -1845,7 +2018,7 @@ static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
int dzp_bit = 1 << 4;
/* DZP indicates whether DC ZVA access is allowed */
- if (aa64_zva_access(env, NULL) != CP_ACCESS_OK) {
+ if (aa64_zva_access(env, NULL) == CP_ACCESS_OK) {
dzp_bit = 0;
}
return cpu->dcz_blocksize | dzp_bit;
@@ -1853,7 +2026,7 @@ static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
- if (!env->pstate & PSTATE_SP) {
+ if (!(env->pstate & PSTATE_SP)) {
/* Access to SP_EL0 is undefined if it's being used as
* the stack pointer.
*/
@@ -1945,27 +2118,27 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
{ .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
.access = PL1_W, .type = ARM_CP_NO_MIGRATE,
- .writefn = tlbiall_write },
+ .writefn = tlbiall_is_write },
{ .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
.access = PL1_W, .type = ARM_CP_NO_MIGRATE,
- .writefn = tlbi_aa64_va_write },
+ .writefn = tlbi_aa64_va_is_write },
{ .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
.access = PL1_W, .type = ARM_CP_NO_MIGRATE,
- .writefn = tlbi_aa64_asid_write },
+ .writefn = tlbi_aa64_asid_is_write },
{ .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
.access = PL1_W, .type = ARM_CP_NO_MIGRATE,
- .writefn = tlbi_aa64_vaa_write },
+ .writefn = tlbi_aa64_vaa_is_write },
{ .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
.access = PL1_W, .type = ARM_CP_NO_MIGRATE,
- .writefn = tlbi_aa64_va_write },
+ .writefn = tlbi_aa64_va_is_write },
{ .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
.access = PL1_W, .type = ARM_CP_NO_MIGRATE,
- .writefn = tlbi_aa64_vaa_write },
+ .writefn = tlbi_aa64_vaa_is_write },
{ .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
.access = PL1_W, .type = ARM_CP_NO_MIGRATE,
@@ -2005,42 +2178,12 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
.access = PL1_W, .type = ARM_CP_NO_MIGRATE, .writefn = ats_write },
#endif
- /* 32 bit TLB invalidates, Inner Shareable */
- { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
- { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
- { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
- { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimvaa_write },
+ /* TLB invalidate last level of translation table walk */
{ .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_is_write },
{ .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimvaa_write },
- /* 32 bit ITLB invalidates */
- { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
- { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
- { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
- /* 32 bit DTLB invalidates */
- { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
- { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
- { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
- /* 32 bit TLB invalidates */
- { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
- { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
- { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
- { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
- .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimvaa_write },
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W,
+ .writefn = tlbimvaa_is_write },
{ .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
.type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
{ .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
@@ -2077,16 +2220,6 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c3),
.resetvalue = 0, .writefn = dacr_write, .raw_writefn = raw_write, },
- /* Dummy implementation of monitor debug system control register:
- * we don't support debug.
- */
- { .name = "MDSCR_EL1", .state = ARM_CP_STATE_AA64,
- .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- /* We define a dummy WI OSLAR_EL1, because Linux writes to it. */
- { .name = "OSLAR_EL1", .state = ARM_CP_STATE_AA64,
- .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
- .access = PL1_W, .type = ARM_CP_NOP },
{ .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_NO_MIGRATE,
.opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
@@ -2118,15 +2251,56 @@ static const ARMCPRegInfo v8_el3_no_el2_cp_reginfo[] = {
.opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
.access = PL2_RW,
.readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
+ { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_NO_MIGRATE,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
+ .access = PL2_RW,
+ .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
REGINFO_SENTINEL
};
+static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ uint64_t valid_mask = HCR_MASK;
+
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ valid_mask &= ~HCR_HCD;
+ } else {
+ valid_mask &= ~HCR_TSC;
+ }
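+ /* E.g. when EL3 is implemented HCR_EL2.HCD is treated as RES0 here,
+ * since SCR_EL3.HCE governs HVC behaviour instead.
+ */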
+
+ /* Clear RES0 bits. */
+ value &= valid_mask;
+
+ /* These bits change the MMU setup:
+ * HCR_VM enables stage 2 translation
+ * HCR_PTW forbids certain page-table setups
+ * HCR_DC disables stage 1 and enables stage 2 translation
+ */
+ if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
+ tlb_flush(CPU(cpu), 1);
+ }
+ raw_write(env, ri, value);
+}
+
static const ARMCPRegInfo v8_el2_cp_reginfo[] = {
+ { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
+ .writefn = hcr_write },
{ .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_NO_MIGRATE,
.opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
.access = PL2_RW,
.fieldoffset = offsetof(CPUARMState, elr_el[2]) },
+ { .name = "ESR_EL2", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_NO_MIGRATE,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
+ .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
+ { .name = "FAR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
{ .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_NO_MIGRATE,
.opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
@@ -2145,6 +2319,13 @@ static const ARMCPRegInfo v8_el3_cp_reginfo[] = {
.opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
.access = PL3_RW,
.fieldoffset = offsetof(CPUARMState, elr_el[3]) },
+ { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_NO_MIGRATE,
+ .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
+ .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
+ { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
+ .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
{ .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_NO_MIGRATE,
.opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
@@ -2154,6 +2335,11 @@ static const ARMCPRegInfo v8_el3_cp_reginfo[] = {
.access = PL3_RW, .writefn = vbar_write,
.fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
.resetvalue = 0 },
+ { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_NO_MIGRATE,
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
+ .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
+ .writefn = scr_write },
REGINFO_SENTINEL
};
@@ -2180,38 +2366,380 @@ static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
/* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
* but the AArch32 CTR has its own reginfo struct)
*/
- if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCT)) {
+ if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCT)) {
return CP_ACCESS_TRAP;
}
return CP_ACCESS_OK;
}
-static void define_aarch64_debug_regs(ARMCPU *cpu)
+static const ARMCPRegInfo debug_cp_reginfo[] = {
+ /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
+ * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
+ * unlike DBGDRAR it is never accessible from EL0.
+ * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
+ * accessor.
+ */
+ { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
+ { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
+ .resetvalue = 0 },
+ /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
+ * We don't implement the configurable EL0 access.
+ */
+ { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
+ .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_R,
+ .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
+ .resetfn = arm_cp_reset_ignore },
+ /* We define a dummy WI OSLAR_EL1, because Linux writes to it. */
+ { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
+ .access = PL1_W, .type = ARM_CP_NOP },
+ /* Dummy OSDLR_EL1: 32-bit Linux will read this */
+ { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
+ .access = PL1_RW, .type = ARM_CP_NOP },
+ /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
+ * implement vector catch debug events yet.
+ */
+ { .name = "DBGVCR",
+ .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_NOP },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
+ /* 64 bit access versions of the (dummy) debug registers */
+ { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
+ .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
+ { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
+ .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+void hw_watchpoint_update(ARMCPU *cpu, int n)
{
- /* Define breakpoint and watchpoint registers. These do nothing
- * but read as written, for now.
+ CPUARMState *env = &cpu->env;
+ vaddr len = 0;
+ vaddr wvr = env->cp15.dbgwvr[n];
+ uint64_t wcr = env->cp15.dbgwcr[n];
+ int mask;
+ int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
+
+ if (env->cpu_watchpoint[n]) {
+ cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
+ env->cpu_watchpoint[n] = NULL;
+ }
+
+ if (!extract64(wcr, 0, 1)) {
+ /* E bit clear : watchpoint disabled */
+ return;
+ }
+
+ switch (extract64(wcr, 3, 2)) {
+ case 0:
+ /* LSC 00 is reserved and must behave as if the wp is disabled */
+ return;
+ case 1:
+ flags |= BP_MEM_READ;
+ break;
+ case 2:
+ flags |= BP_MEM_WRITE;
+ break;
+ case 3:
+ flags |= BP_MEM_ACCESS;
+ break;
+ }
+
+ /* Attempts to use both MASK and BAS fields simultaneously are
+ * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
+ * thus generating a watchpoint for every byte in the masked region.
*/
+ mask = extract64(wcr, 24, 4);
+ if (mask == 1 || mask == 2) {
+ /* Reserved values of MASK; we must act as if the mask value was
+ * some non-reserved value, or as if the watchpoint were disabled.
+ * We choose the latter.
+ */
+ return;
+ } else if (mask) {
+ /* Watchpoint covers an aligned area up to 2GB in size */
+ len = 1ULL << mask;
+ /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
+ * whether the watchpoint fires when the unmasked bits match; we opt
+ * to generate the exceptions.
+ */
+ wvr &= ~(len - 1);
+ } else {
+ /* Watchpoint covers bytes defined by the byte address select bits */
+ int bas = extract64(wcr, 5, 8);
+ int basstart;
+
+ if (bas == 0) {
+ /* This must act as if the watchpoint is disabled */
+ return;
+ }
+
+ if (extract64(wvr, 2, 1)) {
+ /* Deprecated case of an address aligned only to 4 bytes. BAS[7:4]
+ * are ignored, and BAS[3:0] define which bytes to watch.
+ */
+ bas &= 0xf;
+ }
+ /* The BAS bits are supposed to be programmed to indicate a contiguous
+ * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
+ * we fire for each byte in the word/doubleword addressed by the WVR.
+ * We choose to ignore any non-zero bits after the first range of 1s.
+ */
+ basstart = ctz32(bas);
+ len = cto32(bas >> basstart);
+ wvr += basstart;
+ }
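+ /* E.g. BAS == 0b00111100 gives basstart == 2 and len == 4: a
+ * four byte watchpoint starting at WVR + 2.
+ */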
+
+ cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
+ &env->cpu_watchpoint[n]);
+}
+
+void hw_watchpoint_update_all(ARMCPU *cpu)
+{
int i;
+ CPUARMState *env = &cpu->env;
- for (i = 0; i < 16; i++) {
+ /* Completely clear out existing QEMU watchpoints and our array, to
+ * avoid possible stale entries following migration load.
+ */
+ cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
+ memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
+
+ for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
+ hw_watchpoint_update(cpu, i);
+ }
+}
+
+static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ int i = ri->crm;
+
+ /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
+ * register reads and behaves as if values written are sign extended.
+ * Bits [1:0] are RES0.
+ */
+ value = sextract64(value, 0, 49) & ~3ULL;
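+ /* E.g. writing 0x0001800000000000 (bit 48 set) stores
+ * 0xffff800000000000, matching that sign extension.
+ */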
+
+ raw_write(env, ri, value);
+ hw_watchpoint_update(cpu, i);
+}
+
+static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ int i = ri->crm;
+
+ raw_write(env, ri, value);
+ hw_watchpoint_update(cpu, i);
+}
+
+void hw_breakpoint_update(ARMCPU *cpu, int n)
+{
+ CPUARMState *env = &cpu->env;
+ uint64_t bvr = env->cp15.dbgbvr[n];
+ uint64_t bcr = env->cp15.dbgbcr[n];
+ vaddr addr;
+ int bt;
+ int flags = BP_CPU;
+
+ if (env->cpu_breakpoint[n]) {
+ cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
+ env->cpu_breakpoint[n] = NULL;
+ }
+
+ if (!extract64(bcr, 0, 1)) {
+ /* E bit clear : breakpoint disabled */
+ return;
+ }
+
+ bt = extract64(bcr, 20, 4);
+
+ switch (bt) {
+ case 4: /* unlinked address mismatch (reserved if AArch64) */
+ case 5: /* linked address mismatch (reserved if AArch64) */
+ qemu_log_mask(LOG_UNIMP,
+ "arm: address mismatch breakpoint types not implemented");
+ return;
+ case 0: /* unlinked address match */
+ case 1: /* linked address match */
+ {
+ /* Bits [63:49] are hardwired to the value of bit [48]; that is,
+ * we behave as if the register was sign extended. Bits [1:0] are
+ * RES0. The BAS field is used to allow setting breakpoints on 16
+ * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
+ * a bp will fire if the addresses covered by the bp and the addresses
+ * covered by the insn overlap but the insn doesn't start at the
+ * start of the bp address range. We choose to require the insn and
+ * the bp to have the same address. The constraints on writing to
+ * BAS enforced in dbgbcr_write mean we have only four cases:
+ * 0b0000 => no breakpoint
+ * 0b0011 => breakpoint on addr
+ * 0b1100 => breakpoint on addr + 2
+ * 0b1111 => breakpoint on addr
+ * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
+ */
+ int bas = extract64(bcr, 5, 4);
+ addr = sextract64(bvr, 0, 49) & ~3ULL;
+ if (bas == 0) {
+ return;
+ }
+ if (bas == 0xc) {
+ addr += 2;
+ }
+ break;
+ }
+ case 2: /* unlinked context ID match */
+ case 8: /* unlinked VMID match (reserved if no EL2) */
+ case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
+ qemu_log_mask(LOG_UNIMP,
+ "arm: unlinked context breakpoint types not implemented");
+ return;
+ case 9: /* linked VMID match (reserved if no EL2) */
+ case 11: /* linked context ID and VMID match (reserved if no EL2) */
+ case 3: /* linked context ID match */
+ default:
+ /* We must generate no events for linked context matches (unless
+ * they are linked to by some other bp/wp, which is handled in
+ * updates for the linking bp/wp). We choose to also generate no events
+ * for reserved values.
+ */
+ return;
+ }
+
+ cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
+}
+
+void hw_breakpoint_update_all(ARMCPU *cpu)
+{
+ int i;
+ CPUARMState *env = &cpu->env;
+
+ /* Completely clear out existing QEMU breakpoints and our array, to
+ * avoid possible stale entries following migration load.
+ */
+ cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
+ memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
+
+ for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
+ hw_breakpoint_update(cpu, i);
+ }
+}
+
+static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ int i = ri->crm;
+
+ raw_write(env, ri, value);
+ hw_breakpoint_update(cpu, i);
+}
+
+static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ int i = ri->crm;
+
+ /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
+ * copy of BAS[0].
+ */
+ value = deposit64(value, 6, 1, extract64(value, 5, 1));
+ value = deposit64(value, 8, 1, extract64(value, 7, 1));
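+ /* E.g. a BAS write of 0b0001 reads back as 0b0011 and 0b0100 as
+ * 0b1100, so only the four patterns handled in hw_breakpoint_update
+ * are observable.
+ */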
+
+ raw_write(env, ri, value);
+ hw_breakpoint_update(cpu, i);
+}
+
+static void define_debug_regs(ARMCPU *cpu)
+{
+ /* Define v7 and v8 architectural debug registers.
+ * These are just dummy implementations for now.
+ */
+ int i;
+ int wrps, brps, ctx_cmps;
+ ARMCPRegInfo dbgdidr = {
+ .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
+ };
+
+ /* Note that all these register fields hold "number of Xs minus 1". */
+ brps = extract32(cpu->dbgdidr, 24, 4);
+ wrps = extract32(cpu->dbgdidr, 28, 4);
+ ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
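+ /* E.g. a DBGDIDR BRPs field (bits [27:24]) of 5 means six
+ * breakpoints are implemented, so the loops below run brps + 1 times.
+ */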
+
+ assert(ctx_cmps <= brps);
+
+ /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
+ * of the debug registers, such as the number of breakpoints;
+ * check that if they both exist then they agree.
+ */
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
+ assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
+ assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
+ }
+
+ define_one_arm_cp_reg(cpu, &dbgdidr);
+ define_arm_cp_regs(cpu, debug_cp_reginfo);
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
+ define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
+ }
+
+ for (i = 0; i < brps + 1; i++) {
ARMCPRegInfo dbgregs[] = {
- { .name = "DBGBVR", .state = ARM_CP_STATE_AA64,
- .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
+ { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
.access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]) },
- { .name = "DBGBCR", .state = ARM_CP_STATE_AA64,
- .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
+ .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
+ .writefn = dbgbvr_write, .raw_writefn = raw_write
+ },
+ { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
.access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]) },
- { .name = "DBGWVR", .state = ARM_CP_STATE_AA64,
- .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
+ .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
+ .writefn = dbgbcr_write, .raw_writefn = raw_write
+ },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, dbgregs);
+ }
+
+ for (i = 0; i < wrps + 1; i++) {
+ ARMCPRegInfo dbgregs[] = {
+ { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
.access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]) },
- { .name = "DBGWCR", .state = ARM_CP_STATE_AA64,
- .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
+ .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
+ .writefn = dbgwvr_write, .raw_writefn = raw_write
+ },
+ { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
.access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]) },
- REGINFO_SENTINEL
+ .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
+ .writefn = dbgwcr_write, .raw_writefn = raw_write
+ },
+ REGINFO_SENTINEL
};
define_arm_cp_regs(cpu, dbgregs);
}
@@ -2310,6 +2838,9 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (arm_feature(env, ARM_FEATURE_V6K)) {
define_arm_cp_regs(cpu, v6k_cp_reginfo);
}
+ if (arm_feature(env, ARM_FEATURE_V7MP)) {
+ define_arm_cp_regs(cpu, v7mp_cp_reginfo);
+ }
if (arm_feature(env, ARM_FEATURE_V7)) {
/* v7 performance monitor control register: same implementor
* field as main ID register, and we implement only the cycle
@@ -2318,13 +2849,23 @@ void register_cp_regs_for_features(ARMCPU *cpu)
#ifndef CONFIG_USER_ONLY
ARMCPRegInfo pmcr = {
.name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
- .access = PL0_RW, .resetvalue = cpu->midr & 0xff000000,
- .type = ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
+ .access = PL0_RW,
+ .type = ARM_CP_IO | ARM_CP_NO_MIGRATE,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
.accessfn = pmreg_access, .writefn = pmcr_write,
.raw_writefn = raw_write,
};
+ ARMCPRegInfo pmcr64 = {
+ .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .type = ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
+ .resetvalue = cpu->midr & 0xff000000,
+ .writefn = pmcr_write, .raw_writefn = raw_write,
+ };
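+ /* The 32-bit PMCR is NO_MIGRATE and maps onto the low half of the
+ * same field, so state is migrated via PMCR_EL0 only.
+ */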
define_one_arm_cp_reg(cpu, &pmcr);
+ define_one_arm_cp_reg(cpu, &pmcr64);
#endif
ARMCPRegInfo clidr = {
.name = "CLIDR", .state = ARM_CP_STATE_BOTH,
@@ -2333,6 +2874,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
};
define_one_arm_cp_reg(cpu, &clidr);
define_arm_cp_regs(cpu, v7_cp_reginfo);
+ define_debug_regs(cpu);
} else {
define_arm_cp_regs(cpu, not_v7_cp_reginfo);
}
@@ -2406,7 +2948,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_one_arm_cp_reg(cpu, &rvbar);
define_arm_cp_regs(cpu, v8_idregs);
define_arm_cp_regs(cpu, v8_cp_reginfo);
- define_aarch64_debug_regs(cpu);
}
if (arm_feature(env, ARM_FEATURE_EL2)) {
define_arm_cp_regs(cpu, v8_el2_cp_reginfo);
@@ -2759,9 +3300,11 @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
/* The AArch32 view of a shared register sees the lower 32 bits
* of a 64 bit backing field. It is not migratable as the AArch64
* view handles that. AArch64 also handles reset.
- * We assume it is a cp15 register.
+ * We assume it is a cp15 register if the .cp field is left unset.
*/
- r2->cp = 15;
+ if (r2->cp == 0) {
+ r2->cp = 15;
+ }
r2->type |= ARM_CP_NO_MIGRATE;
r2->resetfn = arm_cp_reset_ignore;
#ifdef HOST_WORDS_BIGENDIAN
@@ -2774,8 +3317,11 @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
/* To allow abbreviation of ARMCPRegInfo
* definitions, we treat cp == 0 as equivalent to
* the value for "standard guest-visible sysreg".
+ * STATE_BOTH definitions are also always "standard
+ * sysreg" in their AArch64 view (the .cp value may
+ * be non-zero for the benefit of the AArch32 view).
*/
- if (r->cp == 0) {
+ if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
r2->cp = CP_REG_ARM64_SYSREG_CP;
}
*key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
@@ -2985,6 +3531,8 @@ static int bad_mode_switch(CPUARMState *env, int mode)
case ARM_CPU_MODE_IRQ:
case ARM_CPU_MODE_FIQ:
return 0;
+ case ARM_CPU_MODE_MON:
+ return !arm_is_secure(env);
default:
return 1;
}
@@ -3098,11 +3646,6 @@ uint32_t HELPER(rbit)(uint32_t x)
#if defined(CONFIG_USER_ONLY)
-void arm_cpu_do_interrupt(CPUState *cs)
-{
- cs->exception_index = -1;
-}
-
int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int mmu_idx)
{
@@ -3158,6 +3701,11 @@ uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
return 0;
}
+unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx)
+{
+ return 1;
+}
+
#else
/* Map CPU modes onto saved register banks. */
@@ -3213,6 +3761,57 @@ void switch_mode(CPUARMState *env, int mode)
env->spsr = env->banked_spsr[i];
}
+/*
+ * Determine the target EL for a given exception type.
+ */
+unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ unsigned int cur_el = arm_current_el(env);
+ unsigned int target_el;
+ /* FIXME: Use actual secure state. */
+ bool secure = false;
+
+ if (!env->aarch64) {
+ /* TODO: Add EL2 and 3 exception handling for AArch32. */
+ return 1;
+ }
+
+ switch (excp_idx) {
+ case EXCP_HVC:
+ case EXCP_HYP_TRAP:
+ target_el = 2;
+ break;
+ case EXCP_SMC:
+ target_el = 3;
+ break;
+ case EXCP_FIQ:
+ case EXCP_IRQ:
+ {
+ const uint64_t hcr_mask = excp_idx == EXCP_FIQ ? HCR_FMO : HCR_IMO;
+ const uint32_t scr_mask = excp_idx == EXCP_FIQ ? SCR_FIQ : SCR_IRQ;
+
+ target_el = 1;
+ if (!secure && (env->cp15.hcr_el2 & hcr_mask)) {
+ target_el = 2;
+ }
+ if (env->cp15.scr_el3 & scr_mask) {
+ target_el = 3;
+ }
+ break;
+ }
+ case EXCP_VIRQ:
+ case EXCP_VFIQ:
+ target_el = 1;
+ break;
+ default:
+ target_el = MAX(cur_el, 1);
+ break;
+ }
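+ /* E.g. a non-secure physical IRQ with HCR_EL2.IMO set routes to
+ * EL2, unless SCR_EL3.IRQ is also set, in which case the SCR check
+ * above sends it to EL3 instead.
+ */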
+ return target_el;
+}
+
static void v7m_push(CPUARMState *env, uint32_t val)
{
CPUState *cs = CPU(arm_env_get_cpu(env));
@@ -3367,11 +3966,43 @@ void arm_cpu_do_interrupt(CPUState *cs)
uint32_t mask;
int new_mode;
uint32_t offset;
+ uint32_t moe;
assert(!IS_M(env));
arm_log_exception(cs->exception_index);
+ if (arm_is_psci_call(cpu, cs->exception_index)) {
+ arm_handle_psci_call(cpu);
+ qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
+ return;
+ }
+
+ /* If this is a debug exception we must update the DBGDSCR.MOE bits */
+ switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) {
+ case EC_BREAKPOINT:
+ case EC_BREAKPOINT_SAME_EL:
+ moe = 1;
+ break;
+ case EC_WATCHPOINT:
+ case EC_WATCHPOINT_SAME_EL:
+ moe = 10;
+ break;
+ case EC_AA32_BKPT:
+ moe = 3;
+ break;
+ case EC_VECTORCATCH:
+ moe = 5;
+ break;
+ default:
+ moe = 0;
+ break;
+ }
+
+ if (moe) {
+ env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
+ }
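+ /* E.g. an AArch32 BKPT sets MOE (DBGDSCR bits [5:2]) to 0b0011,
+ * which is what the deposit64() above writes.
+ */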
+
/* TODO: Vectored interrupt controller. */
switch (cs->exception_index) {
case EXCP_UDEF:
@@ -3425,8 +4056,8 @@ void arm_cpu_do_interrupt(CPUState *cs)
/* Fall through to prefetch abort. */
case EXCP_PREFETCH_ABORT:
env->cp15.ifsr_el2 = env->exception.fsr;
- env->cp15.far_el1 = deposit64(env->cp15.far_el1, 32, 32,
- env->exception.vaddress);
+ env->cp15.far_el[1] = deposit64(env->cp15.far_el[1], 32, 32,
+ env->exception.vaddress);
qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
env->cp15.ifsr_el2, (uint32_t)env->exception.vaddress);
new_mode = ARM_CPU_MODE_ABT;
@@ -3436,8 +4067,8 @@ void arm_cpu_do_interrupt(CPUState *cs)
break;
case EXCP_DATA_ABORT:
env->cp15.esr_el[1] = env->exception.fsr;
- env->cp15.far_el1 = deposit64(env->cp15.far_el1, 0, 32,
- env->exception.vaddress);
+ env->cp15.far_el[1] = deposit64(env->cp15.far_el[1], 0, 32,
+ env->exception.vaddress);
qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
(uint32_t)env->cp15.esr_el[1],
(uint32_t)env->exception.vaddress);
@@ -3460,6 +4091,12 @@ void arm_cpu_do_interrupt(CPUState *cs)
mask = CPSR_A | CPSR_I | CPSR_F;
offset = 4;
break;
+ case EXCP_SMC:
+ new_mode = ARM_CPU_MODE_MON;
+ addr = 0x08;
+ mask = CPSR_A | CPSR_I | CPSR_F;
+ offset = 0;
+ break;
default:
cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
return; /* Never happens. Keep compiler happy. */
@@ -3478,7 +4115,16 @@ void arm_cpu_do_interrupt(CPUState *cs)
*/
addr += env->cp15.vbar_el[1];
}
+
+ if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
+ env->cp15.scr_el3 &= ~SCR_NS;
+ }
+
switch_mode (env, new_mode);
+ /* For exceptions taken to AArch32 we must clear the SS bit in both
+ * PSTATE and the old-state value we save to SPSR_<mode>, so zero it now.
+ */
+ env->uncached_cpsr &= ~PSTATE_SS;
env->spsr = cpsr_read(env);
/* Clear IT bits. */
env->condexec_bits = 0;
@@ -3899,16 +4545,18 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
goto do_fault;
}
- /* The starting level depends on the virtual address size which can be
- * up to 48-bits and the translation granule size.
+ /* The starting level depends on the virtual address size (which can be
+ * up to 48 bits) and the translation granule size. It indicates the number
+ * of strides (granule_sz bits at a time) needed to consume the bits
+ * of the input address. In the pseudocode this is:
+ * level = 4 - RoundUp((inputsize - grainsize) / stride)
+ * where their 'inputsize' is our 'va_size - tsz', 'grainsize' is
+ * our 'granule_sz + 3' and 'stride' is our 'granule_sz'.
+ * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
+ * = 4 - (va_size - tsz - granule_sz - 3 + granule_sz - 1) / granule_sz
+ * = 4 - (va_size - tsz - 4) / granule_sz;
*/
- if ((va_size - tsz) > (granule_sz * 4 + 3)) {
- level = 0;
- } else if ((va_size - tsz) > (granule_sz * 3 + 3)) {
- level = 1;
- } else {
- level = 2;
- }
+ level = 4 - (va_size - tsz - 4) / granule_sz;
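+ /* E.g. a 4K granule (granule_sz == 9) with tsz == 16 and
+ * va_size == 64 gives level = 4 - (64 - 16 - 4) / 9 = 0, i.e. a
+ * full four-level walk for a 48-bit input address.
+ */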
/* Clear the vaddr bits which aren't part of the within-region address,
* so that we don't have to special case things when calculating the
@@ -4135,15 +4783,15 @@ int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
int prot;
int ret, is_user;
uint32_t syn;
- bool same_el = (arm_current_pl(env) != 0);
+ bool same_el = (arm_current_el(env) != 0);
is_user = mmu_idx == MMU_USER_IDX;
ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
&page_size);
if (ret == 0) {
/* Map a single [sub]page. */
- phys_addr &= ~(hwaddr)0x3ff;
- address &= ~(target_ulong)0x3ff;
+ phys_addr &= TARGET_PAGE_MASK;
+ address &= TARGET_PAGE_MASK;
tlb_set_page(cs, address, phys_addr, prot, mmu_idx, page_size);
return 0;
}
diff --git a/target-arm/helper.h b/target-arm/helper.h
index facfcd2c1..dec372879 100644
--- a/target-arm/helper.h
+++ b/target-arm/helper.h
@@ -50,6 +50,8 @@ DEF_HELPER_2(exception_internal, void, env, i32)
DEF_HELPER_3(exception_with_syndrome, void, env, i32, i32)
DEF_HELPER_1(wfi, void, env)
DEF_HELPER_1(wfe, void, env)
+DEF_HELPER_1(pre_hvc, void, env)
+DEF_HELPER_2(pre_smc, void, env, i32)
DEF_HELPER_3(cpsr_write, void, env, i32, i32)
DEF_HELPER_1(cpsr_read, i32, env)
@@ -64,6 +66,7 @@ DEF_HELPER_3(set_cp_reg64, void, env, ptr, i64)
DEF_HELPER_2(get_cp_reg64, i64, env, ptr)
DEF_HELPER_3(msr_i_pstate, void, env, i32, i32)
+DEF_HELPER_1(clear_pstate_ss, void, env)
DEF_HELPER_1(exception_return, void, env)
DEF_HELPER_2(get_r13_banked, i32, env, i32)
diff --git a/target-arm/internals.h b/target-arm/internals.h
index 564b5fa60..2dff4ffb1 100644
--- a/target-arm/internals.h
+++ b/target-arm/internals.h
@@ -53,6 +53,11 @@ static const char * const excnames[] = {
[EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
[EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
[EXCP_STREX] = "QEMU intercept of STREX",
+ [EXCP_HVC] = "Hypervisor Call",
+ [EXCP_HYP_TRAP] = "Hypervisor Trap",
+ [EXCP_SMC] = "Secure Monitor Call",
+ [EXCP_VIRQ] = "Virtual IRQ",
+ [EXCP_VFIQ] = "Virtual FIQ",
};
static inline void arm_log_exception(int idx)
@@ -105,30 +110,52 @@ enum arm_fprounding {
int arm_rmode_to_sf(int rmode);
+static inline void aarch64_save_sp(CPUARMState *env, int el)
+{
+ if (env->pstate & PSTATE_SP) {
+ env->sp_el[el] = env->xregs[31];
+ } else {
+ env->sp_el[0] = env->xregs[31];
+ }
+}
+
+static inline void aarch64_restore_sp(CPUARMState *env, int el)
+{
+ if (env->pstate & PSTATE_SP) {
+ env->xregs[31] = env->sp_el[el];
+ } else {
+ env->xregs[31] = env->sp_el[0];
+ }
+}
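+/* E.g. aarch64_save_sp(env, 1) at EL1 spills xregs[31] to sp_el[1]
+ * when PSTATE.SP selects SP_EL1, or to sp_el[0] when the EL0 stack is
+ * in use; kvm64.c uses this pair when syncing SP with the kernel.
+ */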
+
static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
- unsigned int cur_el = arm_current_pl(env);
+ unsigned int cur_el = arm_current_el(env);
/* Update PSTATE SPSel bit; this requires us to update the
* working stack pointer in xregs[31].
*/
if (!((imm ^ env->pstate) & PSTATE_SP)) {
return;
}
+ aarch64_save_sp(env, cur_el);
env->pstate = deposit32(env->pstate, 0, 1, imm);
/* We rely on illegal updates to SPsel from EL0 to get trapped
* at translation time.
*/
assert(cur_el >= 1 && cur_el <= 3);
- if (env->pstate & PSTATE_SP) {
- /* Switch from using SP_EL0 to using SP_ELx */
- env->sp_el[0] = env->xregs[31];
- env->xregs[31] = env->sp_el[cur_el];
- } else {
- /* Switch from SP_EL0 to SP_ELx */
- env->sp_el[cur_el] = env->xregs[31];
- env->xregs[31] = env->sp_el[0];
- }
+ aarch64_restore_sp(env, cur_el);
+}
+
+/* Return true if extended addresses are enabled.
+ * This is always the case if our translation regime is 64 bit,
+ * but depends on TTBCR.EAE for 32 bit.
+ */
+static inline bool extended_addresses_enabled(CPUARMState *env)
+{
+ return arm_el_is_aa64(env, 1)
+ || ((arm_feature(env, ARM_FEATURE_LPAE)
+ && (env->cp15.c2_control & TTBCR_EAE)));
}
/* Valid Syndrome Register EC field values */
@@ -193,12 +220,32 @@ static inline uint32_t syn_aa64_svc(uint32_t imm16)
return (EC_AA64_SVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}
+static inline uint32_t syn_aa64_hvc(uint32_t imm16)
+{
+ return (EC_AA64_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
+
+static inline uint32_t syn_aa64_smc(uint32_t imm16)
+{
+ return (EC_AA64_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
+
static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_thumb)
{
return (EC_AA32_SVC << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
| (is_thumb ? 0 : ARM_EL_IL);
}
+static inline uint32_t syn_aa32_hvc(uint32_t imm16)
+{
+ return (EC_AA32_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
+
+static inline uint32_t syn_aa32_smc(void)
+{
+ return (EC_AA32_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL;
+}
+
static inline uint32_t syn_aa64_bkpt(uint32_t imm16)
{
return (EC_AA64_BKPT << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
@@ -279,4 +326,56 @@ static inline uint32_t syn_data_abort(int same_el, int ea, int cm, int s1ptw,
| (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
}
+static inline uint32_t syn_swstep(int same_el, int isv, int ex)
+{
+ return (EC_SOFTWARESTEP << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
+ | (isv << 24) | (ex << 6) | 0x22;
+}
+
+static inline uint32_t syn_watchpoint(int same_el, int cm, int wnr)
+{
+ return (EC_WATCHPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
+ | (cm << 8) | (wnr << 6) | 0x22;
+}
+
+static inline uint32_t syn_breakpoint(int same_el)
+{
+ return (EC_BREAKPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
+ | ARM_EL_IL | 0x22;
+}
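+/* E.g. syn_breakpoint(1) yields EC 0x31 (breakpoint from the same EL)
+ * with IL set and ISS 0x22, the debug exception fault status code.
+ */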
+
+/* Update a QEMU watchpoint based on the information the guest has set in the
+ * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
+ */
+void hw_watchpoint_update(ARMCPU *cpu, int n);
+/* Update the QEMU watchpoints for every guest watchpoint. This does a
+ * complete delete-and-reinstate of the QEMU watchpoint list and so is
+ * suitable for use after migration or on reset.
+ */
+void hw_watchpoint_update_all(ARMCPU *cpu);
+/* Update a QEMU breakpoint based on the information the guest has set in the
+ * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
+ */
+void hw_breakpoint_update(ARMCPU *cpu, int n);
+/* Update the QEMU breakpoints for every guest breakpoint. This does a
+ * complete delete-and-reinstate of the QEMU breakpoint list and so is
+ * suitable for use after migration or on reset.
+ */
+void hw_breakpoint_update_all(ARMCPU *cpu);
+
+/* Callback function for when a watchpoint or breakpoint triggers. */
+void arm_debug_excp_handler(CPUState *cs);
+
+#ifdef CONFIG_USER_ONLY
+static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
+{
+ return false;
+}
+#else
+/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
+bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
+/* Actually handle a PSCI call */
+void arm_handle_psci_call(ARMCPU *cpu);
+#endif
+
#endif
diff --git a/target-arm/kvm-consts.h b/target-arm/kvm-consts.h
index 6009a33f1..aea12f1bc 100644
--- a/target-arm/kvm-consts.h
+++ b/target-arm/kvm-consts.h
@@ -17,6 +17,7 @@
#ifdef CONFIG_KVM
#include "qemu/compiler.h"
#include <linux/kvm.h>
+#include <linux/psci.h>
#define MISMATCH_CHECK(X, Y) QEMU_BUILD_BUG_ON(X != Y)
@@ -38,17 +39,83 @@ MISMATCH_CHECK(CP_REG_SIZE_U64, KVM_REG_SIZE_U64)
MISMATCH_CHECK(CP_REG_ARM, KVM_REG_ARM)
MISMATCH_CHECK(CP_REG_ARCH_MASK, KVM_REG_ARCH_MASK)
-#define PSCI_FN_BASE 0x95c1ba5e
-#define PSCI_FN(n) (PSCI_FN_BASE + (n))
-#define PSCI_FN_CPU_SUSPEND PSCI_FN(0)
-#define PSCI_FN_CPU_OFF PSCI_FN(1)
-#define PSCI_FN_CPU_ON PSCI_FN(2)
-#define PSCI_FN_MIGRATE PSCI_FN(3)
-
-MISMATCH_CHECK(PSCI_FN_CPU_SUSPEND, KVM_PSCI_FN_CPU_SUSPEND)
-MISMATCH_CHECK(PSCI_FN_CPU_OFF, KVM_PSCI_FN_CPU_OFF)
-MISMATCH_CHECK(PSCI_FN_CPU_ON, KVM_PSCI_FN_CPU_ON)
-MISMATCH_CHECK(PSCI_FN_MIGRATE, KVM_PSCI_FN_MIGRATE)
+#define QEMU_PSCI_0_1_FN_BASE 0x95c1ba5e
+#define QEMU_PSCI_0_1_FN(n) (QEMU_PSCI_0_1_FN_BASE + (n))
+#define QEMU_PSCI_0_1_FN_CPU_SUSPEND QEMU_PSCI_0_1_FN(0)
+#define QEMU_PSCI_0_1_FN_CPU_OFF QEMU_PSCI_0_1_FN(1)
+#define QEMU_PSCI_0_1_FN_CPU_ON QEMU_PSCI_0_1_FN(2)
+#define QEMU_PSCI_0_1_FN_MIGRATE QEMU_PSCI_0_1_FN(3)
+
+MISMATCH_CHECK(QEMU_PSCI_0_1_FN_CPU_SUSPEND, KVM_PSCI_FN_CPU_SUSPEND)
+MISMATCH_CHECK(QEMU_PSCI_0_1_FN_CPU_OFF, KVM_PSCI_FN_CPU_OFF)
+MISMATCH_CHECK(QEMU_PSCI_0_1_FN_CPU_ON, KVM_PSCI_FN_CPU_ON)
+MISMATCH_CHECK(QEMU_PSCI_0_1_FN_MIGRATE, KVM_PSCI_FN_MIGRATE)
+
+#define QEMU_PSCI_0_2_FN_BASE 0x84000000
+#define QEMU_PSCI_0_2_FN(n) (QEMU_PSCI_0_2_FN_BASE + (n))
+
+#define QEMU_PSCI_0_2_64BIT 0x40000000
+#define QEMU_PSCI_0_2_FN64_BASE \
+ (QEMU_PSCI_0_2_FN_BASE + QEMU_PSCI_0_2_64BIT)
+#define QEMU_PSCI_0_2_FN64(n) (QEMU_PSCI_0_2_FN64_BASE + (n))
+
+#define QEMU_PSCI_0_2_FN_PSCI_VERSION QEMU_PSCI_0_2_FN(0)
+#define QEMU_PSCI_0_2_FN_CPU_SUSPEND QEMU_PSCI_0_2_FN(1)
+#define QEMU_PSCI_0_2_FN_CPU_OFF QEMU_PSCI_0_2_FN(2)
+#define QEMU_PSCI_0_2_FN_CPU_ON QEMU_PSCI_0_2_FN(3)
+#define QEMU_PSCI_0_2_FN_AFFINITY_INFO QEMU_PSCI_0_2_FN(4)
+#define QEMU_PSCI_0_2_FN_MIGRATE QEMU_PSCI_0_2_FN(5)
+#define QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE QEMU_PSCI_0_2_FN(6)
+#define QEMU_PSCI_0_2_FN_MIGRATE_INFO_UP_CPU QEMU_PSCI_0_2_FN(7)
+#define QEMU_PSCI_0_2_FN_SYSTEM_OFF QEMU_PSCI_0_2_FN(8)
+#define QEMU_PSCI_0_2_FN_SYSTEM_RESET QEMU_PSCI_0_2_FN(9)
+
+#define QEMU_PSCI_0_2_FN64_CPU_SUSPEND QEMU_PSCI_0_2_FN64(1)
+#define QEMU_PSCI_0_2_FN64_CPU_OFF QEMU_PSCI_0_2_FN64(2)
+#define QEMU_PSCI_0_2_FN64_CPU_ON QEMU_PSCI_0_2_FN64(3)
+#define QEMU_PSCI_0_2_FN64_AFFINITY_INFO QEMU_PSCI_0_2_FN64(4)
+#define QEMU_PSCI_0_2_FN64_MIGRATE QEMU_PSCI_0_2_FN64(5)
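+/* E.g. QEMU_PSCI_0_2_FN64_CPU_ON expands to
+ * 0x84000000 + 0x40000000 + 3 == 0xc4000003, the standard SMC64
+ * function ID for CPU_ON.
+ */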
+
+MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_SUSPEND, PSCI_0_2_FN_CPU_SUSPEND)
+MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_OFF, PSCI_0_2_FN_CPU_OFF)
+MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_ON, PSCI_0_2_FN_CPU_ON)
+MISMATCH_CHECK(QEMU_PSCI_0_2_FN_MIGRATE, PSCI_0_2_FN_MIGRATE)
+MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_CPU_SUSPEND, PSCI_0_2_FN64_CPU_SUSPEND)
+MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_CPU_ON, PSCI_0_2_FN64_CPU_ON)
+MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_MIGRATE, PSCI_0_2_FN64_MIGRATE)
+
+/* PSCI v0.2 return values used by TCG emulation of PSCI */
+
+/* No Trusted OS migration to worry about when offlining CPUs */
+#define QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED 2
+
+/* We implement version 0.2 only */
+#define QEMU_PSCI_0_2_RET_VERSION_0_2 2
+
+MISMATCH_CHECK(QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED, PSCI_0_2_TOS_MP)
+MISMATCH_CHECK(QEMU_PSCI_0_2_RET_VERSION_0_2,
+ (PSCI_VERSION_MAJOR(0) | PSCI_VERSION_MINOR(2)))
+
+/* PSCI return values (inclusive of all PSCI versions) */
+#define QEMU_PSCI_RET_SUCCESS 0
+#define QEMU_PSCI_RET_NOT_SUPPORTED -1
+#define QEMU_PSCI_RET_INVALID_PARAMS -2
+#define QEMU_PSCI_RET_DENIED -3
+#define QEMU_PSCI_RET_ALREADY_ON -4
+#define QEMU_PSCI_RET_ON_PENDING -5
+#define QEMU_PSCI_RET_INTERNAL_FAILURE -6
+#define QEMU_PSCI_RET_NOT_PRESENT -7
+#define QEMU_PSCI_RET_DISABLED -8
+
+MISMATCH_CHECK(QEMU_PSCI_RET_SUCCESS, PSCI_RET_SUCCESS)
+MISMATCH_CHECK(QEMU_PSCI_RET_NOT_SUPPORTED, PSCI_RET_NOT_SUPPORTED)
+MISMATCH_CHECK(QEMU_PSCI_RET_INVALID_PARAMS, PSCI_RET_INVALID_PARAMS)
+MISMATCH_CHECK(QEMU_PSCI_RET_DENIED, PSCI_RET_DENIED)
+MISMATCH_CHECK(QEMU_PSCI_RET_ALREADY_ON, PSCI_RET_ALREADY_ON)
+MISMATCH_CHECK(QEMU_PSCI_RET_ON_PENDING, PSCI_RET_ON_PENDING)
+MISMATCH_CHECK(QEMU_PSCI_RET_INTERNAL_FAILURE, PSCI_RET_INTERNAL_FAILURE)
+MISMATCH_CHECK(QEMU_PSCI_RET_NOT_PRESENT, PSCI_RET_NOT_PRESENT)
+MISMATCH_CHECK(QEMU_PSCI_RET_DISABLED, PSCI_RET_DISABLED)
/* Note that KVM uses overlapping values for AArch32 and AArch64
* target CPU numbers. AArch32 targets:
diff --git a/target-arm/kvm64.c b/target-arm/kvm64.c
index 5d217ca2a..c61528615 100644
--- a/target-arm/kvm64.c
+++ b/target-arm/kvm64.c
@@ -21,6 +21,7 @@
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "cpu.h"
+#include "internals.h"
#include "hw/arm/arm.h"
static inline void set_feature(uint64_t *features, int feature)
@@ -132,11 +133,7 @@ int kvm_arch_put_registers(CPUState *cs, int level)
/* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
* QEMU side we keep the current SP in xregs[31] as well.
*/
- if (env->pstate & PSTATE_SP) {
- env->sp_el[1] = env->xregs[31];
- } else {
- env->sp_el[0] = env->xregs[31];
- }
+ aarch64_save_sp(env, 1);
reg.id = AARCH64_CORE_REG(regs.sp);
reg.addr = (uintptr_t) &env->sp_el[0];
@@ -235,11 +232,7 @@ int kvm_arch_get_registers(CPUState *cs)
/* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
* QEMU side we keep the current SP in xregs[31] as well.
*/
- if (env->pstate & PSTATE_SP) {
- env->xregs[31] = env->sp_el[1];
- } else {
- env->xregs[31] = env->sp_el[0];
- }
+ aarch64_restore_sp(env, 1);
reg.id = AARCH64_CORE_REG(regs.pc);
reg.addr = (uintptr_t) &env->pc;
diff --git a/target-arm/machine.c b/target-arm/machine.c
index 3bcc7cc83..6437690af 100644
--- a/target-arm/machine.c
+++ b/target-arm/machine.c
@@ -2,6 +2,7 @@
#include "hw/boards.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
+#include "internals.h"
static bool vfp_needed(void *opaque)
{
@@ -213,13 +214,16 @@ static int cpu_post_load(void *opaque, int version_id)
}
}
+ hw_breakpoint_update_all(cpu);
+ hw_watchpoint_update_all(cpu);
+
return 0;
}
const VMStateDescription vmstate_arm_cpu = {
.name = "cpu",
- .version_id = 20,
- .minimum_version_id = 20,
+ .version_id = 21,
+ .minimum_version_id = 21,
.pre_save = cpu_pre_save,
.post_load = cpu_post_load,
.fields = (VMStateField[]) {
@@ -234,8 +238,8 @@ const VMStateDescription vmstate_arm_cpu = {
},
VMSTATE_UINT32(env.spsr, ARMCPU),
VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 8),
- VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 6),
- VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 6),
+ VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 8),
+ VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 8),
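+ /* The banked r13/r14 arrays grew from 6 to 8 entries to cover the
+ * Hyp and Monitor banks, hence the version_id bump to 21 above.
+ */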
VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
VMSTATE_UINT64_ARRAY(env.elr_el, ARMCPU, 4),
@@ -259,6 +263,7 @@ const VMStateDescription vmstate_arm_cpu = {
VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
VMSTATE_TIMER(gt_timer[GTIMER_PHYS], ARMCPU),
VMSTATE_TIMER(gt_timer[GTIMER_VIRT], ARMCPU),
+ VMSTATE_BOOL(powered_off, ARMCPU),
VMSTATE_END_OF_LIST()
},
.subsections = (VMStateSubsection[]) {
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index 9c1ef525a..62012c3a6 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -258,7 +258,7 @@ void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
- return cpsr_read(env) & ~CPSR_EXEC;
+ return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}
void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
@@ -301,6 +301,17 @@ void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome)
{
const ARMCPRegInfo *ri = rip;
+
+ if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
+ && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
+ env->exception.syndrome = syndrome;
+ raise_exception(env, EXCP_UDEF);
+ }
+
+ if (!ri->accessfn) {
+ return;
+ }
+
switch (ri->accessfn(env, ri)) {
case CP_ACCESS_OK:
return;
@@ -350,7 +361,7 @@ void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
* Note that SPSel is never OK from EL0; we rely on handle_msr_i()
* to catch that case at translate time.
*/
- if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
+ if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
raise_exception(env, EXCP_UDEF);
}
@@ -369,27 +380,117 @@ void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
}
}
+void HELPER(clear_pstate_ss)(CPUARMState *env)
+{
+ env->pstate &= ~PSTATE_SS;
+}
+
+void HELPER(pre_hvc)(CPUARMState *env)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ int cur_el = arm_current_el(env);
+ /* FIXME: Use actual secure state. */
+ bool secure = false;
+ bool undef;
+
+ if (arm_is_psci_call(cpu, EXCP_HVC)) {
+ /* If PSCI is enabled and this looks like a valid PSCI call then
+ * that overrides the architecturally mandated HVC behaviour.
+ */
+ return;
+ }
+
+ if (!arm_feature(env, ARM_FEATURE_EL2)) {
+ /* If EL2 doesn't exist, HVC always UNDEFs */
+ undef = true;
+ } else if (arm_feature(env, ARM_FEATURE_EL3)) {
+ /* EL3.HCE has priority over EL2.HCD. */
+ undef = !(env->cp15.scr_el3 & SCR_HCE);
+ } else {
+ undef = env->cp15.hcr_el2 & HCR_HCD;
+ }
+
+ /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
+ * For ARMv8/AArch64, HVC is allowed in EL3.
+ * Note that we've already trapped HVC from EL0 at translation
+ * time.
+ */
+ if (secure && (!is_a64(env) || cur_el == 1)) {
+ undef = true;
+ }
+
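+ /* E.g. with EL3 implemented but SCR_EL3.HCE clear, an HVC from
+ * NS EL1 UNDEFs here even though EL2 exists.
+ */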
+ if (undef) {
+ env->exception.syndrome = syn_uncategorized();
+ raise_exception(env, EXCP_UDEF);
+ }
+}
+
+void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ int cur_el = arm_current_el(env);
+ bool secure = arm_is_secure(env);
+ bool smd = env->cp15.scr_el3 & SCR_SMD;
+ /* On ARMv8 AArch32, SMD only applies to NS state.
+ * On ARMv7 SMD only applies to NS state and only if EL2 is available.
+ * For ARMv7 without EL2, we force SMD to zero so we don't need to re-check
+ * the EL2 condition here.
+ */
+ bool undef = is_a64(env) ? smd : (!secure && smd);
+
+ if (arm_is_psci_call(cpu, EXCP_SMC)) {
+ /* If PSCI is enabled and this looks like a valid PSCI call then
+ * that overrides the architecturally mandated SMC behaviour.
+ */
+ return;
+ }
+
+ if (!arm_feature(env, ARM_FEATURE_EL3)) {
+ /* If we have no EL3 then SMC always UNDEFs */
+ undef = true;
+ } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
+ /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. */
+ env->exception.syndrome = syndrome;
+ raise_exception(env, EXCP_HYP_TRAP);
+ }
+
+ if (undef) {
+ env->exception.syndrome = syn_uncategorized();
+ raise_exception(env, EXCP_UDEF);
+ }
+}
+
void HELPER(exception_return)(CPUARMState *env)
{
- int cur_el = arm_current_pl(env);
+ int cur_el = arm_current_el(env);
unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
uint32_t spsr = env->banked_spsr[spsr_idx];
int new_el, i;
- if (env->pstate & PSTATE_SP) {
- env->sp_el[cur_el] = env->xregs[31];
- } else {
- env->sp_el[0] = env->xregs[31];
- }
+ aarch64_save_sp(env, cur_el);
env->exclusive_addr = -1;
+ /* We must squash the PSTATE.SS bit to zero unless both of the
+ * following hold:
+ * 1. debug exceptions are currently disabled
+ * 2. singlestep will be active in the EL we return to
+ * We check 1 here and 2 after we've done the pstate/cpsr write() to
+ * transition to the EL we're going to.
+ */
+ if (arm_generate_debug_exceptions(env)) {
+ spsr &= ~PSTATE_SS;
+ }
+
if (spsr & PSTATE_nRW) {
/* TODO: We currently assume EL1/2/3 are running in AArch64. */
env->aarch64 = 0;
new_el = 0;
env->uncached_cpsr = 0x10;
cpsr_write(env, spsr, ~0);
+ if (!arm_singlestep_active(env)) {
+ env->uncached_cpsr &= ~PSTATE_SS;
+ }
for (i = 0; i < 15; i++) {
env->regs[i] = env->xregs[i];
}
@@ -414,7 +515,10 @@ void HELPER(exception_return)(CPUARMState *env)
}
env->aarch64 = 1;
pstate_write(env, spsr);
- env->xregs[31] = env->sp_el[new_el];
+ if (!arm_singlestep_active(env)) {
+ env->pstate &= ~PSTATE_SS;
+ }
+ aarch64_restore_sp(env, new_el);
env->pc = env->elr_el[cur_el];
}
@@ -433,6 +537,242 @@ illegal_return:
spsr &= PSTATE_NZCV | PSTATE_DAIF;
spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
pstate_write(env, spsr);
+ if (!arm_singlestep_active(env)) {
+ env->pstate &= ~PSTATE_SS;
+ }
+}
+
+/* Return true if the linked breakpoint entry lbn passes its checks */
+static bool linked_bp_matches(ARMCPU *cpu, int lbn)
+{
+ CPUARMState *env = &cpu->env;
+ uint64_t bcr = env->cp15.dbgbcr[lbn];
+ int brps = extract32(cpu->dbgdidr, 24, 4);
+ int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
+ int bt;
+ uint32_t contextidr;
+
+ /* Links to unimplemented or non-context aware breakpoints are
+ * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
+ * as if linked to an UNKNOWN context-aware breakpoint (in which
+ * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
+ * We choose the former.
+ */
+ if (lbn > brps || lbn < (brps - ctx_cmps)) {
+ return false;
+ }
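+ /* E.g. with brps == 5 and ctx_cmps == 1, only breakpoints 4 and 5
+ * are context-aware, so any other lbn fails the check above.
+ */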
+
+ if (extract64(bcr, 0, 1) == 0) {
+ /* Linked breakpoint disabled : generate no events */
+ return false;
+ }
+
+ bt = extract64(bcr, 20, 4);
+
+ /* We match the whole register even if this is AArch32 using the
+ * short descriptor format (in which case it holds both PROCID and ASID),
+ * since we don't implement the optional v7 context ID masking.
+ */
+ contextidr = extract64(env->cp15.contextidr_el1, 0, 32);
+
+ switch (bt) {
+ case 3: /* linked context ID match */
+ if (arm_current_el(env) > 1) {
+ /* Context matches never fire in EL2 or (AArch64) EL3 */
+ return false;
+ }
+ return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
+ case 5: /* linked address mismatch (reserved in AArch64) */
+ case 9: /* linked VMID match (reserved if no EL2) */
+ case 11: /* linked context ID and VMID match (reserved if no EL2) */
+ default:
+ /* Links to unlinked context breakpoints must generate no
+ * events; we choose to do the same for reserved values too.
+ */
+ return false;
+ }
+
+ return false;
+}
+
+static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
+{
+ CPUARMState *env = &cpu->env;
+ uint64_t cr;
+ int pac, hmc, ssc, wt, lbn;
+ /* TODO: check against CPU security state when we implement TrustZone */
+ bool is_secure = false;
+
+ if (is_wp) {
+ if (!env->cpu_watchpoint[n]
+ || !(env->cpu_watchpoint[n]->flags & BP_WATCHPOINT_HIT)) {
+ return false;
+ }
+ cr = env->cp15.dbgwcr[n];
+ } else {
+ uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
+
+ if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
+ return false;
+ }
+ cr = env->cp15.dbgbcr[n];
+ }
+ /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
+ * enabled and that the address and access type match; for breakpoints
+ * we know the address matched; check the remaining fields, including
+ * linked breakpoints. We rely on WCR and BCR having the same layout
+ * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
+ * Note that some combinations of {PAC, HMC, SSC} are reserved and
+ * must act either like some valid combination or as if the watchpoint
+ * were disabled. We choose the former, and use this together with
+ * the fact that EL3 must always be Secure and EL2 must always be
+ * Non-Secure to simplify the code slightly compared to the full
+ * table in the ARM ARM.
+ */
+ pac = extract64(cr, 1, 2);
+ hmc = extract64(cr, 13, 1);
+ ssc = extract64(cr, 14, 2);
+
+ switch (ssc) {
+ case 0:
+ break;
+ case 1:
+ case 3:
+ if (is_secure) {
+ return false;
+ }
+ break;
+ case 2:
+ if (!is_secure) {
+ return false;
+ }
+ break;
+ }
+
+ /* TODO: this is not strictly correct because the LDRT/STRT/LDT/STT
+ * "unprivileged access" instructions should match watchpoints as if
+ * they were accesses done at EL0, even if the CPU is at EL1 or higher.
+ * Implementing this would require reworking the core watchpoint code
+ * to plumb the mmu_idx through to this point. Luckily Linux does not
+ * rely on this behaviour currently.
+ * For breakpoints we do want to use the current CPU state.
+ */
+ switch (arm_current_el(env)) {
+ case 3:
+ case 2:
+ if (!hmc) {
+ return false;
+ }
+ break;
+ case 1:
+ if (extract32(pac, 0, 1) == 0) {
+ return false;
+ }
+ break;
+ case 0:
+ if (extract32(pac, 1, 1) == 0) {
+ return false;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ wt = extract64(cr, 20, 1);
+ lbn = extract64(cr, 16, 4);
+
+ if (wt && !linked_bp_matches(cpu, lbn)) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool check_watchpoints(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
+ int n;
+
+ /* If watchpoints are disabled globally or we can't take debug
+ * exceptions here then watchpoint firings are ignored.
+ */
+ if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
+ || !arm_generate_debug_exceptions(env)) {
+ return false;
+ }
+
+ for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
+ if (bp_wp_matches(cpu, n, true)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool check_breakpoints(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
+ int n;
+
+ /* If breakpoints are disabled globally or we can't take debug
+ * exceptions here then breakpoint firings are ignored.
+ */
+ if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
+ || !arm_generate_debug_exceptions(env)) {
+ return false;
+ }
+
+ for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
+ if (bp_wp_matches(cpu, n, false)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void arm_debug_excp_handler(CPUState *cs)
+{
+ /* Called by core code when a watchpoint or breakpoint fires;
+ * we need to check which one and raise the appropriate exception.
+ */
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ CPUWatchpoint *wp_hit = cs->watchpoint_hit;
+
+ if (wp_hit) {
+ if (wp_hit->flags & BP_CPU) {
+ cs->watchpoint_hit = NULL;
+ if (check_watchpoints(cpu)) {
+ bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
+ bool same_el = arm_debug_target_el(env) == arm_current_el(env);
+
+ env->exception.syndrome = syn_watchpoint(same_el, 0, wnr);
+ if (extended_addresses_enabled(env)) {
+ env->exception.fsr = (1 << 9) | 0x22;
+ } else {
+ env->exception.fsr = 0x2;
+ }
+ env->exception.vaddress = wp_hit->hitaddr;
+ raise_exception(env, EXCP_DATA_ABORT);
+ } else {
+ cpu_resume_from_signal(cs, NULL);
+ }
+ }
+ } else {
+ if (check_breakpoints(cpu)) {
+ bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
+ env->exception.syndrome = syn_breakpoint(same_el);
+ if (extended_addresses_enabled(env)) {
+ env->exception.fsr = (1 << 9) | 0x22;
+ } else {
+ env->exception.fsr = 0x2;
+ }
+ /* FAR is UNKNOWN, so doesn't need setting */
+ raise_exception(env, EXCP_PREFETCH_ABORT);
+ }
+ }
}
/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
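For orientation: the path above only fires for breakpoints the guest has actually programmed. A minimal bare-metal sketch of the guest side (illustrative code, not part of this patch; it assumes an AArch64 EL1 payload and uses the same bit offsets that bp_wp_matches() extracts):

    /* Arm hardware breakpoint 0 at 'addr' so that the checks in
     * bp_wp_matches() pass for execution at EL1 or EL0.
     */
    static void arm_bp0(unsigned long addr)
    {
        unsigned long bcr = 0, mdscr;

        bcr |= 1UL << 0;  /* E: enabled (bit 0, read via extract64(bcr, 0, 1)) */
        bcr |= 3UL << 1;  /* PMC: match at EL1 and EL0 (the 'pac' field above) */
        /* HMC (bit 13), SSC (bits 14-15) and BT (bits 20-23) left zero:
         * an unlinked address match.
         */
        asm volatile("msr dbgbvr0_el1, %0" : : "r" (addr));
        asm volatile("msr dbgbcr0_el1, %0" : : "r" (bcr));

        /* MDSCR_EL1.MDE (bit 15) is the global enable that
         * check_breakpoints() and check_watchpoints() test first.
         */
        asm volatile("mrs %0, mdscr_el1" : "=r" (mdscr));
        asm volatile("msr mdscr_el1, %0" : : "r" (mdscr | (1UL << 15)));
        asm volatile("isb");
    }

Whether the exception is finally delivered also depends on arm_generate_debug_exceptions() (for example MDSCR_EL1.KDE when executing at EL1), which both check_* functions consult before scanning the arrays.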
diff --git a/target-arm/psci.c b/target-arm/psci.c
new file mode 100644
index 000000000..d8fafab2f
--- /dev/null
+++ b/target-arm/psci.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) 2014 - Linaro
+ * Author: Rob Herring <rob.herring@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include <cpu.h>
+#include <cpu-qom.h>
+#include <exec/helper-proto.h>
+#include <kvm-consts.h>
+#include <sysemu/sysemu.h>
+#include "internals.h"
+
+bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
+{
+ /* Return true if the r0/x0 value indicates a PSCI call and
+ * the exception type matches the configured PSCI conduit. This is
+ * called before the SMC/HVC instruction is executed, to decide whether
+ * we should treat it as a PSCI call or with the architecturally
+ * defined behaviour for an SMC or HVC (which might be UNDEF or trap
+ * to EL2 or to EL3).
+ */
+ CPUARMState *env = &cpu->env;
+ uint64_t param = is_a64(env) ? env->xregs[0] : env->regs[0];
+
+ switch (excp_type) {
+ case EXCP_HVC:
+ if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_HVC) {
+ return false;
+ }
+ break;
+ case EXCP_SMC:
+ if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
+ return false;
+ }
+ break;
+ default:
+ return false;
+ }
+
+ switch (param) {
+ case QEMU_PSCI_0_2_FN_PSCI_VERSION:
+ case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
+ case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
+ case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
+ case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
+ case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
+ case QEMU_PSCI_0_1_FN_CPU_ON:
+ case QEMU_PSCI_0_2_FN_CPU_ON:
+ case QEMU_PSCI_0_2_FN64_CPU_ON:
+ case QEMU_PSCI_0_1_FN_CPU_OFF:
+ case QEMU_PSCI_0_2_FN_CPU_OFF:
+ case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
+ case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
+ case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
+ case QEMU_PSCI_0_1_FN_MIGRATE:
+ case QEMU_PSCI_0_2_FN_MIGRATE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void arm_handle_psci_call(ARMCPU *cpu)
+{
+ /*
+ * This function partially implements the logic for dispatching Power State
+ * Coordination Interface (PSCI) calls (as described in ARM DEN 0022B.b),
+ * to the extent required for bringing up and taking down secondary cores,
+ * and for handling reset and poweroff requests.
+ * Additional information about the calling convention used is available in
+ * the document 'SMC Calling Convention' (ARM DEN 0028).
+ */
+ CPUState *cs = CPU(cpu);
+ CPUARMState *env = &cpu->env;
+ uint64_t param[4];
+ uint64_t context_id, mpidr;
+ target_ulong entry;
+ int32_t ret = 0;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ /*
+ * All PSCI functions take explicit 32-bit or native-int-sized
+ * arguments, so we can simply zero-extend all arguments regardless
+ * of which exact function we are about to call.
+ */
+ param[i] = is_a64(env) ? env->xregs[i] : env->regs[i];
+ }
+
+ if ((param[0] & QEMU_PSCI_0_2_64BIT) && !is_a64(env)) {
+ ret = QEMU_PSCI_RET_INVALID_PARAMS;
+ goto err;
+ }
+
+ switch (param[0]) {
+ CPUState *target_cpu_state;
+ ARMCPU *target_cpu;
+ CPUClass *target_cpu_class;
+
+ case QEMU_PSCI_0_2_FN_PSCI_VERSION:
+ ret = QEMU_PSCI_0_2_RET_VERSION_0_2;
+ break;
+ case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
+ ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */
+ break;
+ case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
+ case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
+ mpidr = param[1];
+
+ switch (param[2]) {
+ case 0:
+ target_cpu_state = qemu_get_cpu(mpidr & 0xff);
+ if (!target_cpu_state) {
+ ret = QEMU_PSCI_RET_INVALID_PARAMS;
+ break;
+ }
+ target_cpu = ARM_CPU(target_cpu_state);
+ ret = target_cpu->powered_off ? 1 : 0;
+ break;
+ default:
+ /* Everything above affinity level 0 is always on. */
+ ret = 0;
+ }
+ break;
+ case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
+ qemu_system_reset_request();
+ /* QEMU reset and shutdown are async requests, but PSCI
+ * mandates that we never return from the reset/shutdown
+ * call, so power the CPU off now so it doesn't execute
+ * anything further.
+ */
+ goto cpu_off;
+ case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
+ qemu_system_shutdown_request();
+ goto cpu_off;
+ case QEMU_PSCI_0_1_FN_CPU_ON:
+ case QEMU_PSCI_0_2_FN_CPU_ON:
+ case QEMU_PSCI_0_2_FN64_CPU_ON:
+ mpidr = param[1];
+ entry = param[2];
+ context_id = param[3];
+
+ /* Look up the CPU we are powering up */
+ target_cpu_state = qemu_get_cpu(mpidr & 0xff);
+ if (!target_cpu_state) {
+ ret = QEMU_PSCI_RET_INVALID_PARAMS;
+ break;
+ }
+ target_cpu = ARM_CPU(target_cpu_state);
+ if (!target_cpu->powered_off) {
+ ret = QEMU_PSCI_RET_ALREADY_ON;
+ break;
+ }
+ target_cpu_class = CPU_GET_CLASS(target_cpu);
+
+ /* Initialize the cpu we are turning on */
+ cpu_reset(target_cpu_state);
+ target_cpu->powered_off = false;
+ target_cpu_state->halted = 0;
+
+ /*
+ * The PSCI spec mandates that newly brought up CPUs enter the
+ * exception level of the caller in the same execution mode as
+ * the caller, with context_id in x0 (AArch64) or r0 (AArch32).
+ *
+ * For now, it is sufficient to assert() that CPUs come out of
+ * reset in the same mode as the calling CPU, since we only
+ * implement EL1, which means that
+ * (a) there is no EL2 for the calling CPU to trap into to change
+ * its state
+ * (b) the newly brought up CPU enters EL1 immediately after coming
+ * out of reset in the default state
+ */
+ assert(is_a64(env) == is_a64(&target_cpu->env));
+ if (is_a64(env)) {
+ if (entry & 1) {
+ ret = QEMU_PSCI_RET_INVALID_PARAMS;
+ break;
+ }
+ target_cpu->env.xregs[0] = context_id;
+ } else {
+ target_cpu->env.regs[0] = context_id;
+ target_cpu->env.thumb = entry & 1;
+ }
+ target_cpu_class->set_pc(target_cpu_state, entry);
+
+ ret = 0;
+ break;
+ case QEMU_PSCI_0_1_FN_CPU_OFF:
+ case QEMU_PSCI_0_2_FN_CPU_OFF:
+ goto cpu_off;
+ case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
+ case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
+ case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
+ /* Affinity levels are not supported in QEMU */
+ if (param[1] & 0xfffe0000) {
+ ret = QEMU_PSCI_RET_INVALID_PARAMS;
+ break;
+ }
+ /* Powerdown is not supported; we always go into WFI instead */
+ if (is_a64(env)) {
+ env->xregs[0] = 0;
+ } else {
+ env->regs[0] = 0;
+ }
+ helper_wfi(env);
+ break;
+ case QEMU_PSCI_0_1_FN_MIGRATE:
+ case QEMU_PSCI_0_2_FN_MIGRATE:
+ ret = QEMU_PSCI_RET_NOT_SUPPORTED;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+err:
+ if (is_a64(env)) {
+ env->xregs[0] = ret;
+ } else {
+ env->regs[0] = ret;
+ }
+ return;
+
+cpu_off:
+ cpu->powered_off = true;
+ cs->halted = 1;
+ cs->exception_index = EXCP_HLT;
+ cpu_loop_exit(cs);
+ /* notreached */
+}
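Seen from the guest, the conduit is an ordinary call: function ID in x0/r0, arguments in x1-x3, result returned in x0/r0. A minimal AArch64 sketch of CPU_ON over the HVC conduit (illustrative guest code, not part of this patch; 0xc4000003 is the standard PSCI 0.2 SMC64 CPU_ON function ID, which QEMU_PSCI_0_2_FN64_CPU_ON in kvm-consts.h is expected to match):

    static long psci_cpu_on(unsigned long mpidr, unsigned long entry,
                            unsigned long context_id)
    {
        /* arm_is_psci_call() recognizes the ID in x0;
         * arm_handle_psci_call() leaves the PSCI return code in x0.
         */
        register unsigned long x0 asm("x0") = 0xc4000003UL; /* CPU_ON */
        register unsigned long x1 asm("x1") = mpidr;
        register unsigned long x2 asm("x2") = entry;
        register unsigned long x3 asm("x3") = context_id;

        asm volatile("hvc #0"
                     : "+r" (x0)
                     : "r" (x1), "r" (x2), "r" (x3)
                     : "memory");
        return x0;
    }

A nonzero return is one of the QEMU_PSCI_RET_* codes written back by the err path above; 0 means the target CPU was reset and released at 'entry'.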
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index 33b5025fe..80d2c07e8 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -35,6 +35,8 @@
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
+#include "trace-tcg.h"
+
static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;
static TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
@@ -203,10 +205,39 @@ static void gen_exception_insn(DisasContext *s, int offset, int excp,
s->is_jmp = DISAS_EXC;
}
+static void gen_ss_advance(DisasContext *s)
+{
+ /* If the singlestep state is Active-not-pending, advance to
+ * Active-pending.
+ */
+ if (s->ss_active) {
+ s->pstate_ss = 0;
+ gen_helper_clear_pstate_ss(cpu_env);
+ }
+}
+
+static void gen_step_complete_exception(DisasContext *s)
+{
+ /* We just completed a step of an insn. Move from Active-not-pending
+ * to Active-pending, and then also take the swstep exception.
+ * This corresponds to making the (IMPDEF) choice to prioritize
+ * swstep exceptions over asynchronous exceptions taken to an exception
+ * level where debug is disabled. This choice has the advantage that
+ * we do not need to maintain internal state corresponding to the
+ * ISV/EX syndrome bits between completion of the step and generation
+ * of the exception, and our syndrome information is always correct.
+ */
+ gen_ss_advance(s);
+ gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex));
+ s->is_jmp = DISAS_EXC;
+}
+
static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
{
- /* No direct tb linking with singlestep or deterministic io */
- if (s->singlestep_enabled || (s->tb->cflags & CF_LAST_IO)) {
+ /* No direct tb linking with singlestep (either QEMU's or the ARM
+ * debug architecture kind) or deterministic io
+ */
+ if (s->singlestep_enabled || s->ss_active || (s->tb->cflags & CF_LAST_IO)) {
return false;
}
@@ -230,11 +261,14 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
s->is_jmp = DISAS_TB_JUMP;
} else {
gen_a64_set_pc_im(dest);
- if (s->singlestep_enabled) {
+ if (s->ss_active) {
+ gen_step_complete_exception(s);
+ } else if (s->singlestep_enabled) {
gen_exception_internal(EXCP_DEBUG);
+ } else {
+ tcg_gen_exit_tb(0);
+ s->is_jmp = DISAS_TB_JUMP;
}
- tcg_gen_exit_tb(0);
- s->is_jmp = DISAS_JUMP;
}
}
@@ -714,7 +748,6 @@ static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
} else {
TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s), MO_TEQ);
- tcg_gen_qemu_st64(tmp, tcg_addr, get_mem_index(s));
tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
tcg_gen_qemu_st_i64(tmp, tcg_hiaddr, get_mem_index(s), MO_TEQ);
@@ -1192,7 +1225,7 @@ static void handle_msr_i(DisasContext *s, uint32_t insn,
int op = op1 << 3 | op2;
switch (op) {
case 0x05: /* SPSel */
- if (s->current_pl == 0) {
+ if (s->current_el == 0) {
unallocated_encoding(s);
return;
}
@@ -1289,7 +1322,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
}
/* Check access permissions */
- if (!cp_access_ok(s->current_pl, ri, isread)) {
+ if (!cp_access_ok(s->current_el, ri, isread)) {
unallocated_encoding(s);
return;
}
@@ -1328,7 +1361,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
* guaranteed to be constant by the tb flags.
*/
tcg_rt = cpu_reg(s, rt);
- tcg_gen_movi_i64(tcg_rt, s->current_pl << 2);
+ tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
return;
case ARM_CP_DC_ZVA:
/* Writes clear the aligned block of memory which rt points into. */
@@ -1436,17 +1469,49 @@ static void disas_exc(DisasContext *s, uint32_t insn)
int opc = extract32(insn, 21, 3);
int op2_ll = extract32(insn, 0, 5);
int imm16 = extract32(insn, 5, 16);
+ TCGv_i32 tmp;
switch (opc) {
case 0:
- /* SVC, HVC, SMC; since we don't support the Virtualization
- * or TrustZone extensions these all UNDEF except SVC.
+ /* For SVC, HVC and SMC we advance the single-step state
+ * machine before taking the exception. This is architecturally
+ * mandated, to ensure that single-stepping a system call
+ * instruction works properly.
*/
- if (op2_ll != 1) {
+ switch (op2_ll) {
+ case 1:
+ gen_ss_advance(s);
+ gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16));
+ break;
+ case 2:
+ if (s->current_el == 0) {
+ unallocated_encoding(s);
+ break;
+ }
+ /* The pre HVC helper handles cases when HVC gets trapped
+ * as an undefined insn by runtime configuration.
+ */
+ gen_a64_set_pc_im(s->pc - 4);
+ gen_helper_pre_hvc(cpu_env);
+ gen_ss_advance(s);
+ gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16));
+ break;
+ case 3:
+ if (s->current_el == 0) {
+ unallocated_encoding(s);
+ break;
+ }
+ gen_a64_set_pc_im(s->pc - 4);
+ tmp = tcg_const_i32(syn_aa64_smc(imm16));
+ gen_helper_pre_smc(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ gen_ss_advance(s);
+ gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16));
+ break;
+ default:
unallocated_encoding(s);
break;
}
- gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16));
break;
case 1:
if (op2_ll != 0) {
@@ -1454,7 +1519,7 @@ static void disas_exc(DisasContext *s, uint32_t insn)
break;
}
/* BRK */
- gen_exception_insn(s, 0, EXCP_BKPT, syn_aa64_bkpt(imm16));
+ gen_exception_insn(s, 4, EXCP_BKPT, syn_aa64_bkpt(imm16));
break;
case 2:
if (op2_ll != 0) {
@@ -1509,7 +1574,7 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
break;
case 4: /* ERET */
- if (s->current_pl == 0) {
+ if (s->current_el == 0) {
unallocated_encoding(s);
return;
}
@@ -1726,6 +1791,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
if (is_excl) {
if (!is_store) {
+ s->is_ldex = true;
gen_load_exclusive(s, rt, rt2, tcg_addr, size, is_pair);
} else {
gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, is_pair);
@@ -10863,9 +10929,29 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
dc->vec_len = 0;
dc->vec_stride = 0;
dc->cp_regs = cpu->cp_regs;
- dc->current_pl = arm_current_pl(env);
+ dc->current_el = arm_current_el(env);
dc->features = env->features;
+ /* Single step state. The code-generation logic here is:
+ * SS_ACTIVE == 0:
+ * generate code with no special handling for single-stepping (except
+ * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
+ * this happens anyway because those changes are all system register or
+ * PSTATE writes).
+ * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
+ * emit code for one insn
+ * emit code to clear PSTATE.SS
+ * emit code to generate software step exception for completed step
+ * end TB (as usual for having generated an exception)
+ * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
+ * emit code to generate a software step exception
+ * end the TB
+ */
+ dc->ss_active = ARM_TBFLAG_AA64_SS_ACTIVE(tb->flags);
+ dc->pstate_ss = ARM_TBFLAG_AA64_PSTATE_SS(tb->flags);
+ dc->is_ldex = false;
+ dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el);
+
init_tmp_a64_array(dc);
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
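The comment above reduces to a three-way decision per translated insn; as a paraphrase of the control flow (not the literal generated code):

    if (!dc->ss_active) {
        /* translate normally; anything that can set SS_ACTIVE ends the TB */
    } else if (dc->pstate_ss) {
        /* active-not-pending: translate exactly one insn, emit code to
         * clear PSTATE.SS, then emit the swstep exception (syndrome ISV=1)
         */
    } else {
        /* active-pending: emit the swstep exception before translating
         * anything (syndrome ISV=0, EX=0), as the hunk below does
         */
    }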
@@ -10914,6 +11000,23 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
tcg_gen_debug_insn_start(dc->pc);
}
+ if (dc->ss_active && !dc->pstate_ss) {
+ /* Singlestep state is Active-pending.
+ * If we're in this state at the start of a TB then either
+ * a) we just took an exception to an EL which is being debugged
+ * and this is the first insn in the exception handler
+ * b) debug exceptions were masked and we just unmasked them
+ * without changing EL (eg by clearing PSTATE.D)
+ * In either case we're going to take a swstep exception in the
+ * "did not step an insn" case, and so the syndrome ISV and EX
+ * bits should be zero.
+ */
+ assert(num_insns == 0);
+ gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0));
+ dc->is_jmp = DISAS_EXC;
+ break;
+ }
+
disas_a64_insn(env, dc);
if (tcg_check_temp_count()) {
@@ -10930,6 +11033,7 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
} while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
!cs->singlestep_enabled &&
!singlestep &&
+ !dc->ss_active &&
dc->pc < next_page_start &&
num_insns < max_insns);
@@ -10937,7 +11041,8 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
gen_io_end();
}
- if (unlikely(cs->singlestep_enabled) && dc->is_jmp != DISAS_EXC) {
+ if (unlikely(cs->singlestep_enabled || dc->ss_active)
+ && dc->is_jmp != DISAS_EXC) {
/* Note that this means single stepping WFI doesn't halt the CPU.
* For conditional branch insns this is harmless unreachable code as
* gen_goto_tb() has already handled emitting the debug exception
@@ -10947,7 +11052,11 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
if (dc->is_jmp != DISAS_JUMP) {
gen_a64_set_pc_im(dc->pc);
}
- gen_exception_internal(EXCP_DEBUG);
+ if (cs->singlestep_enabled) {
+ gen_exception_internal(EXCP_DEBUG);
+ } else {
+ gen_step_complete_exception(dc);
+ }
} else {
switch (dc->is_jmp) {
case DISAS_NEXT:
diff --git a/target-arm/translate.c b/target-arm/translate.c
index cf4e767ff..af5156857 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -35,16 +35,19 @@
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
-#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
-#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
+#include "trace-tcg.h"
+
+
+#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
+#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
-#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
+#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J 0
-#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
-#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
-#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
-#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
-#define ENABLE_ARCH_8 arm_feature(env, ARM_FEATURE_V8)
+#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
+#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
+#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
+#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
+#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
@@ -202,6 +205,33 @@ static void gen_exception(int excp, uint32_t syndrome)
tcg_temp_free_i32(tcg_excp);
}
+static void gen_ss_advance(DisasContext *s)
+{
+ /* If the singlestep state is Active-not-pending, advance to
+ * Active-pending.
+ */
+ if (s->ss_active) {
+ s->pstate_ss = 0;
+ gen_helper_clear_pstate_ss(cpu_env);
+ }
+}
+
+static void gen_step_complete_exception(DisasContext *s)
+{
+ /* We just completed a step of an insn. Move from Active-not-pending
+ * to Active-pending, and then also take the swstep exception.
+ * This corresponds to making the (IMPDEF) choice to prioritize
+ * swstep exceptions over asynchronous exceptions taken to an exception
+ * level where debug is disabled. This choice has the advantage that
+ * we do not need to maintain internal state corresponding to the
+ * ISV/EX syndrome bits between completion of the step and generation
+ * of the exception, and our syndrome information is always correct.
+ */
+ gen_ss_advance(s);
+ gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex));
+ s->is_jmp = DISAS_EXC;
+}
+
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
TCGv_i32 tmp1 = tcg_temp_new_i32();
@@ -804,8 +834,7 @@ static inline void gen_bx(DisasContext *s, TCGv_i32 var)
/* Variant of store_reg which uses branch&exchange logic when storing
to r15 in ARM architecture v7 and above. The source must be a temporary
and will be marked as dead. */
-static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
- int reg, TCGv_i32 var)
+static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
if (reg == 15 && ENABLE_ARCH_7) {
gen_bx(s, var);
@@ -818,8 +847,7 @@ static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
* to r15 in ARM architecture v5T and above. This is used for storing
* the results of a LDR/LDM/POP into r15, and corresponds to the cases
* in the ARM ARM which use the LoadWritePC() pseudocode function. */
-static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
- int reg, TCGv_i32 var)
+static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
if (reg == 15 && ENABLE_ARCH_5) {
gen_bx(s, var);
@@ -911,6 +939,39 @@ static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
tcg_gen_movi_i32(cpu_R[15], val);
}
+static inline void gen_hvc(DisasContext *s, int imm16)
+{
+ /* The pre HVC helper handles cases when HVC gets trapped
+ * as an undefined insn by runtime configuration (ie before
+ * the insn really executes).
+ */
+ gen_set_pc_im(s, s->pc - 4);
+ gen_helper_pre_hvc(cpu_env);
+ /* Otherwise we will treat this as a real exception which
+ * happens after execution of the insn. (The distinction matters
+ * for the PC value reported to the exception handler and also
+ * for single stepping.)
+ */
+ s->svc_imm = imm16;
+ gen_set_pc_im(s, s->pc);
+ s->is_jmp = DISAS_HVC;
+}
+
+static inline void gen_smc(DisasContext *s)
+{
+ /* As with HVC, we may take an exception either before or after
+ * the insn executes.
+ */
+ TCGv_i32 tmp;
+
+ gen_set_pc_im(s, s->pc - 4);
+ tmp = tcg_const_i32(syn_aa32_smc());
+ gen_helper_pre_smc(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ gen_set_pc_im(s, s->pc);
+ s->is_jmp = DISAS_SMC;
+}
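The pre-HVC/pre-SMC helpers called here live in op_helper.c, outside the hunks shown. A conceptual sketch of the HVC half, under the assumption that it defers to PSCI first (the PSCI hook is visible in psci.c above):

    /* Conceptual sketch only; not the actual HELPER(pre_hvc) body. */
    void helper_pre_hvc_sketch(CPUARMState *env)
    {
        ARMCPU *cpu = arm_env_get_cpu(env);

        if (arm_is_psci_call(cpu, EXCP_HVC)) {
            /* A valid PSCI call through the HVC conduit executes as an
             * insn, so it is handled after the second gen_set_pc_im().
             */
            return;
        }
        /* Otherwise, if HVC is disabled by runtime configuration (no
         * EL2, or the HCE enable bit clear), UNDEF at the pre-insn PC
         * stored by gen_set_pc_im(s, s->pc - 4) above.
         */
        raise_exception(env, EXCP_UDEF);
    }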
+
static inline void
gen_set_condexec (DisasContext *s)
{
@@ -1478,7 +1539,7 @@ static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
(ie. an undefined instruction). */
-static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
+static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
{
int rd, wrd;
int rdhi, rdlo, rd0, rd1, i;
@@ -2484,7 +2545,7 @@ static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
(ie. an undefined instruction). */
-static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
+static int disas_dsp_insn(DisasContext *s, uint32_t insn)
{
int acc, rd0, rd1, rdhi, rdlo;
TCGv_i32 tmp, tmp2;
@@ -2556,7 +2617,7 @@ static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
#define VFP_SREG(insn, bigbit, smallbit) \
((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
- if (arm_feature(env, ARM_FEATURE_VFP3)) { \
+ if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
reg = (((insn) >> (bigbit)) & 0x0f) \
| (((insn) >> ((smallbit) - 4)) & 0x10); \
} else { \
@@ -2903,11 +2964,11 @@ static const uint8_t fp_decode_rm[] = {
FPROUNDING_NEGINF,
};
-static int disas_vfp_v8_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
+static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
{
uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
- if (!arm_feature(env, ARM_FEATURE_V8)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
return 1;
}
@@ -2939,7 +3000,7 @@ static int disas_vfp_v8_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
/* Disassemble a VFP instruction. Returns nonzero if an error occurred
(ie. an undefined instruction). */
-static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
+static int disas_vfp_insn(DisasContext *s, uint32_t insn)
{
uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
int dp, veclen;
@@ -2947,8 +3008,9 @@ static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
TCGv_i32 tmp;
TCGv_i32 tmp2;
- if (!arm_feature(env, ARM_FEATURE_VFP))
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
return 1;
+ }
/* FIXME: this access check should not take precedence over UNDEF
* for invalid encodings; we will generate incorrect syndrome information
@@ -2975,7 +3037,7 @@ static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
/* Encodings with T=1 (Thumb) or unconditional (ARM):
* only used in v8 and above.
*/
- return disas_vfp_v8_insn(env, s, insn);
+ return disas_vfp_v8_insn(s, insn);
}
dp = ((insn & 0xf00) == 0xb00);
@@ -2992,8 +3054,9 @@ static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
if (insn & 0xf)
return 1;
if (insn & 0x00c00060
- && !arm_feature(env, ARM_FEATURE_NEON))
+ && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
return 1;
+ }
pass = (insn >> 21) & 1;
if (insn & (1 << 22)) {
@@ -3088,8 +3151,9 @@ static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
VFP3 restricts all id registers to privileged
accesses. */
if (IS_USER(s)
- && arm_feature(env, ARM_FEATURE_VFP3))
+ && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
return 1;
+ }
tmp = load_cpu_field(vfp.xregs[rn]);
break;
case ARM_VFP_FPEXC:
@@ -3101,8 +3165,9 @@ static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
case ARM_VFP_FPINST2:
/* Not present in VFP3. */
if (IS_USER(s)
- || arm_feature(env, ARM_FEATURE_VFP3))
+ || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
return 1;
+ }
tmp = load_cpu_field(vfp.xregs[rn]);
break;
case ARM_VFP_FPSCR:
@@ -3115,15 +3180,16 @@ static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
}
break;
case ARM_VFP_MVFR2:
- if (!arm_feature(env, ARM_FEATURE_V8)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
return 1;
}
/* fall through */
case ARM_VFP_MVFR0:
case ARM_VFP_MVFR1:
if (IS_USER(s)
- || !arm_feature(env, ARM_FEATURE_MVFR))
+ || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
return 1;
+ }
tmp = load_cpu_field(vfp.xregs[rn]);
break;
default:
@@ -3169,6 +3235,9 @@ static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
break;
case ARM_VFP_FPINST:
case ARM_VFP_FPINST2:
+ if (IS_USER(s)) {
+ return 1;
+ }
tmp = load_reg(s, rd);
store_cpu_field(tmp, vfp.xregs[rn]);
break;
@@ -3301,8 +3370,8 @@ static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
* UNPREDICTABLE if bit 8 is set prior to ARMv8
* (we choose to UNDEF)
*/
- if ((dp && !arm_feature(env, ARM_FEATURE_V8)) ||
- !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
+ if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
+ !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
return 1;
}
if (!extract32(rn, 1, 1)) {
@@ -3381,7 +3450,7 @@ static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
* correct : an input NaN should come out with its sign bit
* flipped if it is a negated-input.
*/
- if (!arm_feature(env, ARM_FEATURE_VFP4)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
return 1;
}
if (dp) {
@@ -3422,8 +3491,9 @@ static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
}
break;
case 14: /* fconst */
- if (!arm_feature(env, ARM_FEATURE_VFP3))
- return 1;
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
n = (insn << 12) & 0x80000000;
i = ((insn >> 12) & 0x70) | (insn & 0xf);
@@ -3578,23 +3648,27 @@ static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
gen_vfp_sito(dp, 0);
break;
case 20: /* fshto */
- if (!arm_feature(env, ARM_FEATURE_VFP3))
- return 1;
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
gen_vfp_shto(dp, 16 - rm, 0);
break;
case 21: /* fslto */
- if (!arm_feature(env, ARM_FEATURE_VFP3))
- return 1;
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
gen_vfp_slto(dp, 32 - rm, 0);
break;
case 22: /* fuhto */
- if (!arm_feature(env, ARM_FEATURE_VFP3))
- return 1;
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
gen_vfp_uhto(dp, 16 - rm, 0);
break;
case 23: /* fulto */
- if (!arm_feature(env, ARM_FEATURE_VFP3))
- return 1;
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
gen_vfp_ulto(dp, 32 - rm, 0);
break;
case 24: /* ftoui */
@@ -3610,23 +3684,27 @@ static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
gen_vfp_tosiz(dp, 0);
break;
case 28: /* ftosh */
- if (!arm_feature(env, ARM_FEATURE_VFP3))
- return 1;
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
gen_vfp_tosh(dp, 16 - rm, 0);
break;
case 29: /* ftosl */
- if (!arm_feature(env, ARM_FEATURE_VFP3))
- return 1;
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
gen_vfp_tosl(dp, 32 - rm, 0);
break;
case 30: /* ftouh */
- if (!arm_feature(env, ARM_FEATURE_VFP3))
- return 1;
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
gen_vfp_touh(dp, 16 - rm, 0);
break;
case 31: /* ftoul */
- if (!arm_feature(env, ARM_FEATURE_VFP3))
- return 1;
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
gen_vfp_toul(dp, 32 - rm, 0);
break;
default: /* undefined */
@@ -3857,7 +3935,7 @@ static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
- if (unlikely(s->singlestep_enabled)) {
+ if (unlikely(s->singlestep_enabled || s->ss_active)) {
/* An indirect jump so that we still trigger the debug exception. */
if (s->thumb)
dest |= 1;
@@ -3882,7 +3960,8 @@ static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
}
/* Return the mask of PSR bits set by a MSR instruction. */
-static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
+static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
+{
uint32_t mask;
mask = 0;
@@ -3897,17 +3976,22 @@ static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr)
/* Mask out undefined bits. */
mask &= ~CPSR_RESERVED;
- if (!arm_feature(env, ARM_FEATURE_V4T))
+ if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
mask &= ~CPSR_T;
- if (!arm_feature(env, ARM_FEATURE_V5))
+ }
+ if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
mask &= ~CPSR_Q; /* V5TE in reality */
- if (!arm_feature(env, ARM_FEATURE_V6))
+ }
+ if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
mask &= ~(CPSR_E | CPSR_GE);
- if (!arm_feature(env, ARM_FEATURE_THUMB2))
+ }
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
mask &= ~CPSR_IT;
- /* Mask out execution state bits. */
- if (!spsr)
- mask &= ~CPSR_EXEC;
+ }
+ /* Mask out execution state and reserved bits. */
+ if (!spsr) {
+ mask &= ~(CPSR_EXEC | CPSR_RESERVED);
+ }
/* Mask out privileged bits. */
if (IS_USER(s))
mask &= CPSR_USER;
@@ -3951,7 +4035,7 @@ static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
TCGv_i32 tmp;
store_reg(s, 15, pc);
tmp = load_cpu_field(spsr);
- gen_set_cpsr(tmp, 0xffffffff);
+ gen_set_cpsr(tmp, CPSR_ERET_MASK);
tcg_temp_free_i32(tmp);
s->is_jmp = DISAS_UPDATE;
}
@@ -3959,7 +4043,7 @@ static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
/* Generate a v6 exception return. Marks both values as dead. */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
- gen_set_cpsr(cpsr, 0xffffffff);
+ gen_set_cpsr(cpsr, CPSR_ERET_MASK);
tcg_temp_free_i32(cpsr);
store_reg(s, 15, pc);
s->is_jmp = DISAS_UPDATE;
@@ -4227,7 +4311,7 @@ static struct {
/* Translate a NEON load/store element instruction. Return nonzero if the
instruction is invalid. */
-static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
+static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
int rd, rn, rm;
int op;
@@ -4969,7 +5053,7 @@ static const uint8_t neon_2rm_sizes[] = {
We process data in a mixture of 32-bit and 64-bit chunks.
Mostly we use 32-bit chunks so we can use normal scalar instructions. */
-static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
+static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
int op;
int q;
@@ -5025,7 +5109,7 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins
return 1;
}
if (!u) { /* SHA-1 */
- if (!arm_feature(env, ARM_FEATURE_V8_SHA1)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
return 1;
}
tmp = tcg_const_i32(rd);
@@ -5035,7 +5119,7 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins
gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
tcg_temp_free_i32(tmp4);
} else { /* SHA-256 */
- if (!arm_feature(env, ARM_FEATURE_V8_SHA256) || size == 3) {
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
return 1;
}
tmp = tcg_const_i32(rd);
@@ -5170,7 +5254,7 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins
break;
case NEON_3R_FLOAT_MISC:
/* VMAXNM/VMINNM in ARMv8 */
- if (u && !arm_feature(env, ARM_FEATURE_V8)) {
+ if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
return 1;
}
break;
@@ -5181,7 +5265,7 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins
}
break;
case NEON_3R_VFM:
- if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
return 1;
}
break;
@@ -6000,7 +6084,7 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins
if (op == 14 && size == 2) {
TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
- if (!arm_feature(env, ARM_FEATURE_V8_PMULL)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
return 1;
}
tcg_rn = tcg_temp_new_i64();
@@ -6488,7 +6572,7 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins
}
break;
case NEON_2RM_VCVT_F16_F32:
- if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
q || (rm & 1)) {
return 1;
}
@@ -6512,7 +6596,7 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins
tcg_temp_free_i32(tmp);
break;
case NEON_2RM_VCVT_F32_F16:
- if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
q || (rd & 1)) {
return 1;
}
@@ -6536,7 +6620,7 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins
tcg_temp_free_i32(tmp3);
break;
case NEON_2RM_AESE: case NEON_2RM_AESMC:
- if (!arm_feature(env, ARM_FEATURE_V8_AES)
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
|| ((rm | rd) & 1)) {
return 1;
}
@@ -6558,7 +6642,7 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins
tcg_temp_free_i32(tmp3);
break;
case NEON_2RM_SHA1H:
- if (!arm_feature(env, ARM_FEATURE_V8_SHA1)
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
|| ((rm | rd) & 1)) {
return 1;
}
@@ -6576,10 +6660,10 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins
}
/* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
if (q) {
- if (!arm_feature(env, ARM_FEATURE_V8_SHA256)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
return 1;
}
- } else if (!arm_feature(env, ARM_FEATURE_V8_SHA1)) {
+ } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
return 1;
}
tmp = tcg_const_i32(rd);
@@ -6964,28 +7048,24 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins
return 0;
}
-static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
+static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
const ARMCPRegInfo *ri;
cpnum = (insn >> 8) & 0xf;
- if (arm_feature(env, ARM_FEATURE_XSCALE)
- && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
- return 1;
-
- /* First check for coprocessor space used for actual instructions */
- switch (cpnum) {
- case 0:
- case 1:
- if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
- return disas_iwmmxt_insn(env, s, insn);
- } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
- return disas_dsp_insn(env, s, insn);
- }
- return 1;
- default:
- break;
+
+ /* First check for coprocessor space used for XScale/iwMMXt insns */
+ if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
+ if (extract32(s->c15_cpar, cpnum, 1) == 0) {
+ return 1;
+ }
+ if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
+ return disas_iwmmxt_insn(s, insn);
+ } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
+ return disas_dsp_insn(s, insn);
+ }
+ return 1;
}
/* Otherwise treat as a generic register access */
@@ -7014,13 +7094,16 @@ static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
if (ri) {
/* Check access permissions */
- if (!cp_access_ok(s->current_pl, ri, isread)) {
+ if (!cp_access_ok(s->current_el, ri, isread)) {
return 1;
}
- if (ri->accessfn) {
+ if (ri->accessfn ||
+ (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
/* Emit code to perform further access permissions checks at
* runtime; this may result in an exception.
+ * Note that on XScale all cp0..cp13 registers do an access check
+ * call in order to handle c15_cpar.
*/
TCGv_ptr tmpptr;
TCGv_i32 tcg_syn;
@@ -7059,7 +7142,7 @@ static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
* in which case the syndrome information won't actually be
* guest visible.
*/
- assert(!arm_feature(env, ARM_FEATURE_V8));
+ assert(!arm_dc_feature(s, ARM_FEATURE_V8));
syndrome = syn_uncategorized();
break;
}
@@ -7277,6 +7360,8 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
{
TCGv_i32 tmp = tcg_temp_new_i32();
+ s->is_ldex = true;
+
switch (size) {
case 0:
gen_aa32_ld8u(tmp, addr, get_mem_index(s));
@@ -7475,21 +7560,19 @@ static void gen_srs(DisasContext *s,
tcg_temp_free_i32(addr);
}
-static void disas_arm_insn(CPUARMState * env, DisasContext *s)
+static void disas_arm_insn(DisasContext *s, unsigned int insn)
{
- unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
+ unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
TCGv_i32 tmp;
TCGv_i32 tmp2;
TCGv_i32 tmp3;
TCGv_i32 addr;
TCGv_i64 tmp64;
- insn = arm_ldl_code(env, s->pc, s->bswap_code);
- s->pc += 4;
-
/* M variants do not implement ARM mode. */
- if (IS_M(env))
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
goto illegal_op;
+ }
cond = insn >> 28;
if (cond == 0xf){
/* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
@@ -7501,25 +7584,29 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
/* Unconditional instructions. */
if (((insn >> 25) & 7) == 1) {
/* NEON Data processing. */
- if (!arm_feature(env, ARM_FEATURE_NEON))
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
goto illegal_op;
+ }
- if (disas_neon_data_insn(env, s, insn))
+ if (disas_neon_data_insn(s, insn)) {
goto illegal_op;
+ }
return;
}
if ((insn & 0x0f100000) == 0x04000000) {
/* NEON load/store. */
- if (!arm_feature(env, ARM_FEATURE_NEON))
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
goto illegal_op;
+ }
- if (disas_neon_ls_insn(env, s, insn))
+ if (disas_neon_ls_insn(s, insn)) {
goto illegal_op;
+ }
return;
}
if ((insn & 0x0f000e10) == 0x0e000a00) {
/* VFP. */
- if (disas_vfp_insn(env, s, insn)) {
+ if (disas_vfp_insn(s, insn)) {
goto illegal_op;
}
return;
@@ -7528,7 +7615,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
((insn & 0x0f30f010) == 0x0710f000)) {
if ((insn & (1 << 22)) == 0) {
/* PLDW; v7MP */
- if (!arm_feature(env, ARM_FEATURE_V7MP)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
goto illegal_op;
}
}
@@ -7543,7 +7630,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
}
if (((insn & 0x0f700000) == 0x04100000) ||
((insn & 0x0f700010) == 0x06100000)) {
- if (!arm_feature(env, ARM_FEATURE_V7MP)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
goto illegal_op;
}
return; /* v7MP: Unallocated memory hint: must NOP */
@@ -7640,11 +7727,13 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
gen_bx_im(s, val);
return;
} else if ((insn & 0x0e000f00) == 0x0c000100) {
- if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
+ if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
/* iWMMXt register transfer. */
- if (env->cp15.c15_cpar & (1 << 1))
- if (!disas_iwmmxt_insn(env, s, insn))
+ if (extract32(s->c15_cpar, 1, 1)) {
+ if (!disas_iwmmxt_insn(s, insn)) {
return;
+ }
+ }
}
} else if ((insn & 0x0fe00000) == 0x0c400000) {
/* Coprocessor double register transfer. */
@@ -7714,8 +7803,10 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
if (shift)
val = (val >> shift) | (val << (32 - shift));
i = ((insn & (1 << 22)) != 0);
- if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
+ if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
+ i, val)) {
goto illegal_op;
+ }
}
}
} else if ((insn & 0x0f900000) == 0x01000000
@@ -7730,7 +7821,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
/* PSR = reg */
tmp = load_reg(s, rm);
i = ((op1 & 2) != 0);
- if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
+ if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
goto illegal_op;
} else {
/* reg = PSR */
@@ -7794,7 +7885,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
* op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
* Bits 8, 10 and 11 should be zero.
*/
- if (!arm_feature(env, ARM_FEATURE_CRC) || op1 == 0x3 ||
+ if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
(c & 0xd) != 0) {
goto illegal_op;
}
@@ -7838,15 +7929,32 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
case 7:
{
int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
- /* SMC instruction (op1 == 3)
- and undefined instructions (op1 == 0 || op1 == 2)
- will trap */
- if (op1 != 1) {
+ switch (op1) {
+ case 1:
+ /* bkpt */
+ ARCH(5);
+ gen_exception_insn(s, 4, EXCP_BKPT,
+ syn_aa32_bkpt(imm16, false));
+ break;
+ case 2:
+ /* Hypervisor call (v7) */
+ ARCH(7);
+ if (IS_USER(s)) {
+ goto illegal_op;
+ }
+ gen_hvc(s, imm16);
+ break;
+ case 3:
+ /* Secure monitor call (v6+) */
+ ARCH(6K);
+ if (IS_USER(s)) {
+ goto illegal_op;
+ }
+ gen_smc(s);
+ break;
+ default:
goto illegal_op;
}
- /* bkpt */
- ARCH(5);
- gen_exception_insn(s, 4, EXCP_BKPT, syn_aa32_bkpt(imm16, false));
break;
}
case 0x8: /* signed multiply */
@@ -7951,14 +8059,14 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
if (logic_cc) {
gen_logic_CC(tmp);
}
- store_reg_bx(env, s, rd, tmp);
+ store_reg_bx(s, rd, tmp);
break;
case 0x01:
tcg_gen_xor_i32(tmp, tmp, tmp2);
if (logic_cc) {
gen_logic_CC(tmp);
}
- store_reg_bx(env, s, rd, tmp);
+ store_reg_bx(s, rd, tmp);
break;
case 0x02:
if (set_cc && rd == 15) {
@@ -7974,7 +8082,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
} else {
tcg_gen_sub_i32(tmp, tmp, tmp2);
}
- store_reg_bx(env, s, rd, tmp);
+ store_reg_bx(s, rd, tmp);
}
break;
case 0x03:
@@ -7983,7 +8091,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
} else {
tcg_gen_sub_i32(tmp, tmp2, tmp);
}
- store_reg_bx(env, s, rd, tmp);
+ store_reg_bx(s, rd, tmp);
break;
case 0x04:
if (set_cc) {
@@ -7991,7 +8099,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
} else {
tcg_gen_add_i32(tmp, tmp, tmp2);
}
- store_reg_bx(env, s, rd, tmp);
+ store_reg_bx(s, rd, tmp);
break;
case 0x05:
if (set_cc) {
@@ -7999,7 +8107,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
} else {
gen_add_carry(tmp, tmp, tmp2);
}
- store_reg_bx(env, s, rd, tmp);
+ store_reg_bx(s, rd, tmp);
break;
case 0x06:
if (set_cc) {
@@ -8007,7 +8115,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
} else {
gen_sub_carry(tmp, tmp, tmp2);
}
- store_reg_bx(env, s, rd, tmp);
+ store_reg_bx(s, rd, tmp);
break;
case 0x07:
if (set_cc) {
@@ -8015,7 +8123,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
} else {
gen_sub_carry(tmp, tmp2, tmp);
}
- store_reg_bx(env, s, rd, tmp);
+ store_reg_bx(s, rd, tmp);
break;
case 0x08:
if (set_cc) {
@@ -8048,7 +8156,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
if (logic_cc) {
gen_logic_CC(tmp);
}
- store_reg_bx(env, s, rd, tmp);
+ store_reg_bx(s, rd, tmp);
break;
case 0x0d:
if (logic_cc && rd == 15) {
@@ -8061,7 +8169,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
if (logic_cc) {
gen_logic_CC(tmp2);
}
- store_reg_bx(env, s, rd, tmp2);
+ store_reg_bx(s, rd, tmp2);
}
break;
case 0x0e:
@@ -8069,7 +8177,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
if (logic_cc) {
gen_logic_CC(tmp);
}
- store_reg_bx(env, s, rd, tmp);
+ store_reg_bx(s, rd, tmp);
break;
default:
case 0x0f:
@@ -8077,7 +8185,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
if (logic_cc) {
gen_logic_CC(tmp2);
}
- store_reg_bx(env, s, rd, tmp2);
+ store_reg_bx(s, rd, tmp2);
break;
}
if (op1 != 0x0f && op1 != 0x0d) {
@@ -8586,7 +8694,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
case 1:
case 3:
/* SDIV, UDIV */
- if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
goto illegal_op;
}
if (((insn >> 5) & 7) || (rd != 15)) {
@@ -8715,7 +8823,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
}
if (insn & (1 << 20)) {
/* Complete the load. */
- store_reg_from_load(env, s, rd, tmp);
+ store_reg_from_load(s, rd, tmp);
}
break;
case 0x08:
@@ -8778,7 +8886,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
loaded_var = tmp;
loaded_base = 1;
} else {
- store_reg_from_load(env, s, i, tmp);
+ store_reg_from_load(s, i, tmp);
}
} else {
/* store */
@@ -8833,7 +8941,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
if ((insn & (1 << 22)) && !user) {
/* Restore CPSR from SPSR. */
tmp = load_cpu_field(spsr);
- gen_set_cpsr(tmp, 0xffffffff);
+ gen_set_cpsr(tmp, CPSR_ERET_MASK);
tcg_temp_free_i32(tmp);
s->is_jmp = DISAS_UPDATE;
}
@@ -8861,10 +8969,10 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
case 0xe:
if (((insn >> 8) & 0xe) == 10) {
/* VFP. */
- if (disas_vfp_insn(env, s, insn)) {
+ if (disas_vfp_insn(s, insn)) {
goto illegal_op;
}
- } else if (disas_coproc_insn(env, s, insn)) {
+ } else if (disas_coproc_insn(s, insn)) {
/* Coprocessor. */
goto illegal_op;
}
@@ -8982,8 +9090,8 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
int conds;
int logic_cc;
- if (!(arm_feature(env, ARM_FEATURE_THUMB2)
- || arm_feature (env, ARM_FEATURE_M))) {
+ if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
+ || arm_dc_feature(s, ARM_FEATURE_M))) {
/* Thumb-1 cores may need to treat bl and blx as a pair of
16-bit instructions to get correct prefetch abort behavior. */
insn = insn_hw1;
@@ -9193,7 +9301,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
/* Load/store multiple, RFE, SRS. */
if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
/* RFE, SRS: not available in user mode or on M profile */
- if (IS_USER(s) || IS_M(env)) {
+ if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
goto illegal_op;
}
if (insn & (1 << 20)) {
@@ -9345,7 +9453,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
if (logic_cc)
gen_logic_CC(tmp);
- store_reg_bx(env, s, rd, tmp);
+ store_reg_bx(s, rd, tmp);
break;
case 1: /* Sign/zero extend. */
tmp = load_reg(s, rm);
@@ -9436,7 +9544,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
uint32_t sz = op & 0x3;
uint32_t c = op & 0x8;
- if (!arm_feature(env, ARM_FEATURE_CRC)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
goto illegal_op;
}
@@ -9564,7 +9672,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tmp2 = load_reg(s, rm);
if ((op & 0x50) == 0x10) {
/* sdiv, udiv */
- if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
goto illegal_op;
}
if (op & 0x20)
@@ -9627,17 +9735,19 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
if (((insn >> 24) & 3) == 3) {
/* Translate into the equivalent ARM encoding. */
insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
- if (disas_neon_data_insn(env, s, insn))
+ if (disas_neon_data_insn(s, insn)) {
goto illegal_op;
+ }
} else if (((insn >> 8) & 0xe) == 10) {
- if (disas_vfp_insn(env, s, insn)) {
+ if (disas_vfp_insn(s, insn)) {
goto illegal_op;
}
} else {
if (insn & (1 << 28))
goto illegal_op;
- if (disas_coproc_insn (env, s, insn))
+ if (disas_coproc_insn(s, insn)) {
goto illegal_op;
+ }
}
break;
case 8: case 9: case 10: case 11:
@@ -9676,15 +9786,28 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
goto illegal_op;
if (insn & (1 << 26)) {
- /* Secure monitor call (v6Z) */
- qemu_log_mask(LOG_UNIMP,
- "arm: unimplemented secure monitor call\n");
- goto illegal_op; /* not implemented. */
+ if (!(insn & (1 << 20))) {
+ /* Hypervisor call (v7) */
+ int imm16 = extract32(insn, 16, 4) << 12
+ | extract32(insn, 0, 12);
+ ARCH(7);
+ if (IS_USER(s)) {
+ goto illegal_op;
+ }
+ gen_hvc(s, imm16);
+ } else {
+ /* Secure monitor call (v6+) */
+ ARCH(6K);
+ if (IS_USER(s)) {
+ goto illegal_op;
+ }
+ gen_smc(s);
+ }
} else {
op = (insn >> 20) & 7;
switch (op) {
case 0: /* msr cpsr. */
- if (IS_M(env)) {
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
tmp = load_reg(s, rn);
addr = tcg_const_i32(insn & 0xff);
gen_helper_v7m_msr(cpu_env, addr, tmp);
@@ -9695,11 +9818,12 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
}
/* fall through */
case 1: /* msr spsr. */
- if (IS_M(env))
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
goto illegal_op;
+ }
tmp = load_reg(s, rn);
if (gen_set_psr(s,
- msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
+ msr_mask(s, (insn >> 8) & 0xf, op == 1),
op == 1, tmp))
goto illegal_op;
break;
@@ -9764,7 +9888,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
break;
case 6: /* mrs cpsr. */
tmp = tcg_temp_new_i32();
- if (IS_M(env)) {
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
addr = tcg_const_i32(insn & 0xff);
gen_helper_v7m_mrs(tmp, cpu_env, addr);
tcg_temp_free_i32(addr);
@@ -9775,8 +9899,9 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
break;
case 7: /* mrs spsr. */
/* Not accessible in user mode. */
- if (IS_USER(s) || IS_M(env))
+ if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
goto illegal_op;
+ }
tmp = load_cpu_field(spsr);
store_reg(s, rd, tmp);
break;
@@ -9964,8 +10089,9 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
int writeback = 0;
int memidx;
if ((insn & 0x01100000) == 0x01000000) {
- if (disas_neon_ls_insn(env, s, insn))
+ if (disas_neon_ls_insn(s, insn)) {
goto illegal_op;
+ }
break;
}
op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
@@ -10661,7 +10787,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
store_reg(s, 13, addr);
/* set the new PC value */
if ((insn & 0x0900) == 0x0900) {
- store_reg_from_load(env, s, 15, tmp);
+ store_reg_from_load(s, 15, tmp);
}
break;
@@ -10731,7 +10857,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (IS_USER(s)) {
break;
}
- if (IS_M(env)) {
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
tmp = tcg_const_i32((insn & (1 << 4)) != 0);
/* FAULTMASK */
if (insn & 1) {
@@ -10909,10 +11035,31 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
+ dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
dc->cp_regs = cpu->cp_regs;
- dc->current_pl = arm_current_pl(env);
+ dc->current_el = arm_current_el(env);
dc->features = env->features;
+ /* Single step state. The code-generation logic here is:
+ * SS_ACTIVE == 0:
+ * generate code with no special handling for single-stepping (except
+ * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
+ * this happens anyway because those changes are all system register or
+ * PSTATE writes).
+ * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
+ * emit code for one insn
+ * emit code to clear PSTATE.SS
+ * emit code to generate software step exception for completed step
+ * end TB (as usual for having generated an exception)
+ * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
+ * emit code to generate a software step exception
+ * end the TB
+ */
+ dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
+ dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
+ dc->is_ldex = false;
+ dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
+
cpu_F0s = tcg_temp_new_i32();
cpu_F1s = tcg_temp_new_i32();
cpu_F0d = tcg_temp_new_i64();
@@ -10982,7 +11129,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
break;
}
#else
- if (dc->pc >= 0xfffffff0 && IS_M(env)) {
+ if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
/* We always get here via a jump, so know we are not in a
conditional execution block. */
gen_exception_internal(EXCP_EXCEPTION_EXIT);
@@ -11022,6 +11169,22 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
tcg_gen_debug_insn_start(dc->pc);
}
+ if (dc->ss_active && !dc->pstate_ss) {
+ /* Singlestep state is Active-pending.
+ * If we're in this state at the start of a TB then either
+ * a) we just took an exception to an EL which is being debugged
+ * and this is the first insn in the exception handler
+ * b) debug exceptions were masked and we just unmasked them
+ * without changing EL (eg by clearing PSTATE.D)
+ * In either case we're going to take a swstep exception in the
+ * "did not step an insn" case, and so the syndrome ISV and EX
+ * bits should be zero.
+ */
+ assert(num_insns == 0);
+ gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0));
+ goto done_generating;
+ }
+
if (dc->thumb) {
disas_thumb_insn(env, dc);
if (dc->condexec_mask) {
@@ -11033,7 +11196,9 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
}
}
} else {
- disas_arm_insn(env, dc);
+ unsigned int insn = arm_ldl_code(env, dc->pc, dc->bswap_code);
+ dc->pc += 4;
+ disas_arm_insn(dc, insn);
}
if (dc->condjmp && !dc->is_jmp) {
@@ -11054,6 +11219,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
} while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
!cs->singlestep_enabled &&
!singlestep &&
+ !dc->ss_active &&
dc->pc < next_page_start &&
num_insns < max_insns);
@@ -11069,12 +11235,21 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
/* At this stage dc->condjmp will only be set when the skipped
instruction was a conditional branch or trap, and the PC has
already been written. */
- if (unlikely(cs->singlestep_enabled)) {
+ if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
/* Make sure the pc is updated, and raise a debug exception. */
if (dc->condjmp) {
gen_set_condexec(dc);
if (dc->is_jmp == DISAS_SWI) {
+ gen_ss_advance(dc);
gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
+ } else if (dc->is_jmp == DISAS_HVC) {
+ gen_ss_advance(dc);
+ gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
+ } else if (dc->is_jmp == DISAS_SMC) {
+ gen_ss_advance(dc);
+ gen_exception(EXCP_SMC, syn_aa32_smc());
+ } else if (dc->ss_active) {
+ gen_step_complete_exception(dc);
} else {
gen_exception_internal(EXCP_DEBUG);
}
@@ -11086,7 +11261,16 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
}
gen_set_condexec(dc);
if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
+ gen_ss_advance(dc);
gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
+ } else if (dc->is_jmp == DISAS_HVC && !dc->condjmp) {
+ gen_ss_advance(dc);
+ gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
+ } else if (dc->is_jmp == DISAS_SMC && !dc->condjmp) {
+ gen_ss_advance(dc);
+ gen_exception(EXCP_SMC, syn_aa32_smc());
+ } else if (dc->ss_active) {
+ gen_step_complete_exception(dc);
} else {
/* FIXME: Single stepping a WFI insn will not halt
the CPU. */
@@ -11124,6 +11308,12 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
case DISAS_SWI:
gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
break;
+ case DISAS_HVC:
+ gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
+ break;
+ case DISAS_SMC:
+ gen_exception(EXCP_SMC, syn_aa32_smc());
+ break;
}
if (dc->condjmp) {
gen_set_label(dc->condlabel);
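The syn_aa32_hvc()/syn_aa32_smc() constructors used in these end-of-TB paths are defined in internals.h, outside this diff. A sketch of their shape, assuming the architectural syndrome layout (EC in bits 31:26, HVC immediate in the low 16 bits):

    #include <stdint.h>

    static inline uint32_t syn_aa32_hvc_sketch(uint32_t imm16)
    {
        return (0x12u << 26) | (imm16 & 0xffff);  /* EC 0x12 = HVC */
    }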
diff --git a/target-arm/translate.h b/target-arm/translate.h
index 31a0104b5..41a907157 100644
--- a/target-arm/translate.h
+++ b/target-arm/translate.h
@@ -29,7 +29,7 @@ typedef struct DisasContext {
*/
uint32_t svc_imm;
int aarch64;
- int current_pl;
+ int current_el;
GHashTable *cp_regs;
uint64_t features; /* CPU features bits */
/* Because unallocated encodings generate different exception syndrome
@@ -40,6 +40,20 @@ typedef struct DisasContext {
* that it is set at the point where we actually touch the FP regs.
*/
bool fp_access_checked;
+ /* ARMv8 single-step state (this is distinct from the QEMU gdbstub
+ * single-step support).
+ */
+ bool ss_active;
+ bool pstate_ss;
+ /* True if the insn just emitted was a load-exclusive instruction
+ * (necessary for syndrome information for single step exceptions),
+ * ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*.
+ */
+ bool is_ldex;
+ /* True if a single-step exception will be taken to the current EL */
+ bool ss_same_el;
+ /* Bottom two bits of XScale c15_cpar coprocessor access control reg */
+ int c15_cpar;
#define TMP_A64_MAX 16
int tmp_a64_count;
TCGv_i64 tmp_a64[TMP_A64_MAX];
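ss_active and pstate_ss are consumed from TB flags by both translators (the ARM_TBFLAG_*SS_ACTIVE reads above). The producer sits in cpu_get_tb_cpu_state() in cpu.h, outside this diff; roughly as follows (mask names assumed from the accessor macros, and note that for AArch32 the SS bit is kept in uncached_cpsr, as the cpsr_write() hunk earlier shows):

    if (arm_singlestep_active(env)) {
        *flags |= ARM_TBFLAG_SS_ACTIVE_MASK;
        if (env->pstate & PSTATE_SS) {   /* uncached_cpsr for AArch32 */
            *flags |= ARM_TBFLAG_PSTATE_SS_MASK;
        }
    }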
@@ -54,7 +68,7 @@ static inline int arm_dc_feature(DisasContext *dc, int feature)
static inline int get_mem_index(DisasContext *s)
{
- return s->current_pl;
+ return s->current_el;
}
/* target-specific extra values for is_jmp */
@@ -70,6 +84,8 @@ static inline int get_mem_index(DisasContext *s)
#define DISAS_EXC 6
/* WFE */
#define DISAS_WFE 7
+#define DISAS_HVC 8
+#define DISAS_SMC 9
#ifdef TARGET_AARCH64
void a64_translate_init(void);