path: root/target-ppc
author     SeokYeon Hwang <syeon.hwang@samsung.com>   2017-06-28 16:20:50 +0900
committer  SeokYeon Hwang <syeon.hwang@samsung.com>   2017-06-28 16:21:16 +0900
commit     e6230f92c2a7c924f4092b877ee70a74f76e6841 (patch)
tree       3f6f4a364da163b80c9669f903eda6f8aeb4d19b /target-ppc
parent     e9775ba331166fc2de29a387b1070281c8ec0985 (diff)
parent     6ca9395b240883513b16a1875a7080b081612c57 (diff)
Merge spin into tizen
Change-Id: I00f8d0dbf2d26f3c9c6754b9f1d986355037f5bb
Diffstat (limited to 'target-ppc')
-rw-r--r--  target-ppc/cpu-models.c              |    9
-rw-r--r--  target-ppc/cpu-models.h              |    3
-rw-r--r--  target-ppc/cpu-qom.h                 |    2
-rw-r--r--  target-ppc/cpu.h                     |   25
-rw-r--r--  target-ppc/dfp_helper.c              |   35
-rw-r--r--  target-ppc/excp_helper.c             |  244
-rw-r--r--  target-ppc/fpu_helper.c              |  264
-rw-r--r--  target-ppc/helper.h                  |  127
-rw-r--r--  target-ppc/helper_regs.h             |   36
-rw-r--r--  target-ppc/int_helper.c              |  555
-rw-r--r--  target-ppc/internal.h                |   50
-rw-r--r--  target-ppc/kvm.c                     |   77
-rw-r--r--  target-ppc/kvm_ppc.h                 |   11
-rw-r--r--  target-ppc/machine.c                 |   40
-rw-r--r--  target-ppc/mem_helper.c              |   85
-rw-r--r--  target-ppc/misc_helper.c             |    9
-rw-r--r--  target-ppc/mmu-hash64.c              |   22
-rw-r--r--  target-ppc/mmu-hash64.h              |    1
-rw-r--r--  target-ppc/mmu_helper.c              |   46
-rw-r--r--  target-ppc/timebase_helper.c         |   23
-rw-r--r--  target-ppc/translate.c               | 6122
-rw-r--r--  target-ppc/translate/dfp-impl.inc.c  |  232
-rw-r--r--  target-ppc/translate/dfp-ops.inc.c   |  165
-rw-r--r--  target-ppc/translate/fp-impl.inc.c   | 1070
-rw-r--r--  target-ppc/translate/fp-ops.inc.c    |  111
-rw-r--r--  target-ppc/translate/spe-impl.inc.c  | 1229
-rw-r--r--  target-ppc/translate/spe-ops.inc.c   |  105
-rw-r--r--  target-ppc/translate/vmx-impl.inc.c  | 1113
-rw-r--r--  target-ppc/translate/vmx-ops.inc.c   |  294
-rw-r--r--  target-ppc/translate/vsx-impl.inc.c  | 1009
-rw-r--r--  target-ppc/translate/vsx-ops.inc.c   |  300
-rw-r--r--  target-ppc/translate_init.c          |  235
32 files changed, 7897 insertions(+), 5752 deletions(-)
diff --git a/target-ppc/cpu-models.c b/target-ppc/cpu-models.c
index 5209e63a72..506dee1ee8 100644
--- a/target-ppc/cpu-models.c
+++ b/target-ppc/cpu-models.c
@@ -1130,10 +1130,6 @@
#if defined(TODO)
POWERPC_DEF("POWER6", CPU_POWERPC_POWER6, POWER6,
"POWER6")
- POWERPC_DEF("POWER6_5", CPU_POWERPC_POWER6_5, POWER5,
- "POWER6 running in POWER5 mode")
- POWERPC_DEF("POWER6A", CPU_POWERPC_POWER6A, POWER6,
- "POWER6A")
#endif
POWERPC_DEF("POWER7_v2.3", CPU_POWERPC_POWER7_v23, POWER7,
"POWER7 v2.3")
@@ -1147,6 +1143,10 @@
"POWER8NVL v1.0")
POWERPC_DEF("970_v2.2", CPU_POWERPC_970_v22, 970,
"PowerPC 970 v2.2")
+
+ POWERPC_DEF("POWER9_v1.0", CPU_POWERPC_POWER9_BASE, POWER9,
+ "POWER9 v1.0")
+
POWERPC_DEF("970fx_v1.0", CPU_POWERPC_970FX_v10, 970,
"PowerPC 970FX v1.0 (G5)")
POWERPC_DEF("970fx_v2.0", CPU_POWERPC_970FX_v20, 970,
@@ -1395,6 +1395,7 @@ PowerPCCPUAlias ppc_cpu_aliases[] = {
{ "POWER8E", "POWER8E_v2.1" },
{ "POWER8", "POWER8_v2.0" },
{ "POWER8NVL", "POWER8NVL_v1.0" },
+ { "POWER9", "POWER9_v1.0" },
{ "970", "970_v2.2" },
{ "970fx", "970fx_v3.1" },
{ "970mp", "970mp_v1.1" },
diff --git a/target-ppc/cpu-models.h b/target-ppc/cpu-models.h
index f21a44c830..aafbbd7d5d 100644
--- a/target-ppc/cpu-models.h
+++ b/target-ppc/cpu-models.h
@@ -549,8 +549,6 @@ enum {
CPU_POWERPC_POWER5 = 0x003A0203,
CPU_POWERPC_POWER5P_v21 = 0x003B0201,
CPU_POWERPC_POWER6 = 0x003E0000,
- CPU_POWERPC_POWER6_5 = 0x0F000001, /* POWER6 in POWER5 mode */
- CPU_POWERPC_POWER6A = 0x0F000002,
CPU_POWERPC_POWER_SERVER_MASK = 0xFFFF0000,
CPU_POWERPC_POWER7_BASE = 0x003F0000,
CPU_POWERPC_POWER7_v23 = 0x003F0203,
@@ -562,6 +560,7 @@ enum {
CPU_POWERPC_POWER8_v20 = 0x004D0200,
CPU_POWERPC_POWER8NVL_BASE = 0x004C0000,
CPU_POWERPC_POWER8NVL_v10 = 0x004C0100,
+ CPU_POWERPC_POWER9_BASE = 0x004E0000,
CPU_POWERPC_970_v22 = 0x00390202,
CPU_POWERPC_970FX_v10 = 0x00391100,
CPU_POWERPC_970FX_v20 = 0x003C0200,
diff --git a/target-ppc/cpu-qom.h b/target-ppc/cpu-qom.h
index 286410502f..d46c31a15d 100644
--- a/target-ppc/cpu-qom.h
+++ b/target-ppc/cpu-qom.h
@@ -86,6 +86,7 @@ enum powerpc_mmu_t {
POWERPC_MMU_2_07 = POWERPC_MMU_64 | POWERPC_MMU_1TSEG
| POWERPC_MMU_64K
| POWERPC_MMU_AMR | 0x00000004,
+ /* FIXME Add POWERPC_MMU_3_00 defines */
/* Architecture 2.07 "degraded" (no 1T segments) */
POWERPC_MMU_2_07a = POWERPC_MMU_64 | POWERPC_MMU_AMR
| 0x00000004,
@@ -173,6 +174,7 @@ typedef struct PowerPCCPUClass {
/*< public >*/
DeviceRealize parent_realize;
+ DeviceUnrealize parent_unrealize;
void (*parent_reset)(CPUState *cpu);
uint32_t pvr;
diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
index 786ab5cdfa..2a50c43689 100644
--- a/target-ppc/cpu.h
+++ b/target-ppc/cpu.h
@@ -1009,6 +1009,8 @@ struct CPUPPCState {
bool tlb_dirty; /* Set to non-zero when modifying TLB */
bool kvm_sw_tlb; /* non-zero if KVM SW TLB API is active */
uint32_t tlb_need_flush; /* Delayed flush needed */
+#define TLB_NEED_LOCAL_FLUSH 0x1
+#define TLB_NEED_GLOBAL_FLUSH 0x2
#endif
/* Other registers */
@@ -1164,6 +1166,13 @@ struct PowerPCCPU {
int cpu_dt_id;
uint32_t max_compat;
uint32_t cpu_version;
+
+ /* Fields related to migration compatibility hacks */
+ bool pre_2_8_migration;
+ target_ulong mig_msr_mask;
+ uint64_t mig_insns_flags;
+ uint64_t mig_insns_flags2;
+ uint32_t mig_nb_BATs;
};
static inline PowerPCCPU *ppc_env_get_cpu(CPUPPCState *env)
@@ -1184,8 +1193,6 @@ void ppc_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
int flags);
void ppc_cpu_dump_statistics(CPUState *cpu, FILE *f,
fprintf_function cpu_fprintf, int flags);
-int ppc_cpu_get_monitor_def(CPUState *cs, const char *name,
- uint64_t *pval);
hwaddr ppc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int ppc_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int ppc_cpu_gdb_read_register_apple(CPUState *cpu, uint8_t *buf, int reg);
@@ -1202,7 +1209,6 @@ extern const struct VMStateDescription vmstate_ppc_cpu;
PowerPCCPU *cpu_ppc_init(const char *cpu_model);
void ppc_translate_init(void);
const char *ppc_cpu_lookup_alias(const char *alias);
-void gen_update_current_nip(void *opaque);
/* you can call this signal handler from your SIGBUS and SIGSEGV
signal handlers to inform the virtual CPU of exceptions. non zero
is returned if the signal was handled by the virtual CPU. */
@@ -2095,6 +2101,8 @@ enum {
PPC2_TM = 0x0000000000020000ULL,
/* Server PM instructgions (ISA 2.06, Book III) */
PPC2_PM_ISA206 = 0x0000000000040000ULL,
+ /* POWER ISA 3.0 */
+ PPC2_ISA300 = 0x0000000000080000ULL,
#define PPC_TCG_INSNS2 (PPC2_BOOKE206 | PPC2_VSX | PPC2_PRCNTL | PPC2_DBRX | \
PPC2_ISA205 | PPC2_VSX207 | PPC2_PERM_ISA206 | \
@@ -2102,7 +2110,8 @@ enum {
PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206 | \
PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | \
PPC2_ALTIVEC_207 | PPC2_ISA207S | PPC2_DFP | \
- PPC2_FP_CVT_S64 | PPC2_TM | PPC2_PM_ISA206)
+ PPC2_FP_CVT_S64 | PPC2_TM | PPC2_PM_ISA206 | \
+ PPC2_ISA300)
};
/*****************************************************************************/
@@ -2296,6 +2305,14 @@ static inline void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
*flags = env->hflags;
}
+void QEMU_NORETURN raise_exception(CPUPPCState *env, uint32_t exception);
+void QEMU_NORETURN raise_exception_ra(CPUPPCState *env, uint32_t exception,
+ uintptr_t raddr);
+void QEMU_NORETURN raise_exception_err(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code);
+void QEMU_NORETURN raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code, uintptr_t raddr);
+
#if !defined(CONFIG_USER_ONLY)
static inline int booke206_tlbm_id(CPUPPCState *env, ppcmas_tlb_t *tlbm)
{
diff --git a/target-ppc/dfp_helper.c b/target-ppc/dfp_helper.c
index db0ede698b..9164fe701b 100644
--- a/target-ppc/dfp_helper.c
+++ b/target-ppc/dfp_helper.c
@@ -647,6 +647,41 @@ uint32_t helper_##op(CPUPPCState *env, uint64_t *a, uint64_t *b) \
DFP_HELPER_TSTSF(dtstsf, 64)
DFP_HELPER_TSTSF(dtstsfq, 128)
+#define DFP_HELPER_TSTSFI(op, size) \
+uint32_t helper_##op(CPUPPCState *env, uint32_t a, uint64_t *b) \
+{ \
+ struct PPC_DFP dfp; \
+ unsigned uim; \
+ \
+ dfp_prepare_decimal##size(&dfp, 0, b, env); \
+ \
+ uim = a & 0x3F; \
+ \
+ if (unlikely(decNumberIsSpecial(&dfp.b))) { \
+ dfp.crbf = 1; \
+ } else if (uim == 0) { \
+ dfp.crbf = 4; \
+ } else if (unlikely(decNumberIsZero(&dfp.b))) { \
+ /* Zero has no sig digits */ \
+ dfp.crbf = 4; \
+ } else { \
+ unsigned nsd = dfp.b.digits; \
+ if (uim < nsd) { \
+ dfp.crbf = 8; \
+ } else if (uim > nsd) { \
+ dfp.crbf = 4; \
+ } else { \
+ dfp.crbf = 2; \
+ } \
+ } \
+ \
+ dfp_set_FPCC_from_CRBF(&dfp); \
+ return dfp.crbf; \
+}
+
+DFP_HELPER_TSTSFI(dtstsfi, 64)
+DFP_HELPER_TSTSFI(dtstsfiq, 128)
+
static void QUA_PPs(struct PPC_DFP *dfp)
{
dfp_set_FPRF_from_FRT(dfp);
diff --git a/target-ppc/excp_helper.c b/target-ppc/excp_helper.c
index d6e1678a63..93369d4fe5 100644
--- a/target-ppc/excp_helper.c
+++ b/target-ppc/excp_helper.c
@@ -198,7 +198,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
default:
goto excp_invalid;
}
- goto store_next;
+ break;
case POWERPC_EXCP_MCHECK: /* Machine check exception */
if (msr_me == 0) {
/* Machine check exception is not enabled.
@@ -213,7 +213,12 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
cs->halted = 1;
cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
- new_msr |= (target_ulong)MSR_HVB;
+ if (env->msr_mask & MSR_HVB) {
+ /* ISA specifies HV, but can be delivered to guest with HV clear
+ * (e.g., see FWNMI in PAPR).
+ */
+ new_msr |= (target_ulong)MSR_HVB;
+ }
ail = 0;
/* machine check exceptions don't have ME set */
@@ -235,16 +240,16 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
default:
break;
}
- goto store_next;
+ break;
case POWERPC_EXCP_DSI: /* Data storage exception */
LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx
"\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]);
- goto store_next;
+ break;
case POWERPC_EXCP_ISI: /* Instruction storage exception */
LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx
"\n", msr, env->nip);
msr |= env->error_code;
- goto store_next;
+ break;
case POWERPC_EXCP_EXTERNAL: /* External input */
cs = CPU(cpu);
@@ -258,13 +263,15 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
/* IACK the IRQ on delivery */
env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
}
- goto store_next;
+ break;
case POWERPC_EXCP_ALIGN: /* Alignment exception */
- /* XXX: this is false */
/* Get rS/rD and rA from faulting opcode */
- env->spr[SPR_DSISR] |= (cpu_ldl_code(env, (env->nip - 4))
- & 0x03FF0000) >> 16;
- goto store_next;
+ /* Note: the opcode fields will not be set properly for a direct
+ * store load/store, but nobody cares as nobody actually uses
+ * direct store segments.
+ */
+ env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
+ break;
case POWERPC_EXCP_PROGRAM: /* Program exception */
switch (env->error_code & ~0xF) {
case POWERPC_EXCP_FP:
@@ -274,11 +281,12 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
env->error_code = 0;
return;
}
+
+ /* FP exceptions always have NIP pointing to the faulting
+ * instruction, so always use store_next and claim we are
+ * precise in the MSR.
+ */
msr |= 0x00100000;
- if (msr_fe0 == msr_fe1) {
- goto store_next;
- }
- msr |= 0x00010000;
break;
case POWERPC_EXCP_INVAL:
LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);
@@ -299,19 +307,16 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
env->error_code);
break;
}
- goto store_current;
- case POWERPC_EXCP_HV_EMU:
- srr0 = SPR_HSRR0;
- srr1 = SPR_HSRR1;
- new_msr |= (target_ulong)MSR_HVB;
- new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
- goto store_current;
- case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
- goto store_current;
+ break;
case POWERPC_EXCP_SYSCALL: /* System call exception */
dump_syscall(env);
lev = env->error_code;
+ /* We need to correct the NIP which in this case is supposed
+ * to point to the next instruction
+ */
+ env->nip += 4;
+
/* "PAPR mode" built-in hypercall emulation */
if ((lev == 1) && cpu_ppc_hypercall) {
cpu_ppc_hypercall(cpu);
@@ -320,15 +325,15 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
if (lev == 1) {
new_msr |= (target_ulong)MSR_HVB;
}
- goto store_next;
+ break;
+ case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
- goto store_current;
case POWERPC_EXCP_DECR: /* Decrementer exception */
- goto store_next;
+ break;
case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
/* FIT on 4xx */
LOG_EXCP("FIT exception\n");
- goto store_next;
+ break;
case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
LOG_EXCP("WDT exception\n");
switch (excp_model) {
@@ -339,11 +344,10 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
default:
break;
}
- goto store_next;
+ break;
case POWERPC_EXCP_DTLB: /* Data TLB error */
- goto store_next;
case POWERPC_EXCP_ITLB: /* Instruction TLB error */
- goto store_next;
+ break;
case POWERPC_EXCP_DEBUG: /* Debug interrupt */
switch (excp_model) {
case POWERPC_EXCP_BOOKE:
@@ -358,102 +362,91 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
}
/* XXX: TODO */
cpu_abort(cs, "Debug exception is not implemented yet !\n");
- goto store_next;
+ break;
case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable */
env->spr[SPR_BOOKE_ESR] = ESR_SPV;
- goto store_current;
+ break;
case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */
/* XXX: TODO */
cpu_abort(cs, "Embedded floating point data exception "
"is not implemented yet !\n");
env->spr[SPR_BOOKE_ESR] = ESR_SPV;
- goto store_next;
+ break;
case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */
/* XXX: TODO */
cpu_abort(cs, "Embedded floating point round exception "
"is not implemented yet !\n");
env->spr[SPR_BOOKE_ESR] = ESR_SPV;
- goto store_next;
+ break;
case POWERPC_EXCP_EPERFM: /* Embedded performance monitor interrupt */
/* XXX: TODO */
cpu_abort(cs,
"Performance counter exception is not implemented yet !\n");
- goto store_next;
+ break;
case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
- goto store_next;
+ break;
case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
srr0 = SPR_BOOKE_CSRR0;
srr1 = SPR_BOOKE_CSRR1;
- goto store_next;
+ break;
case POWERPC_EXCP_RESET: /* System reset exception */
+ /* A power-saving exception sets ME, otherwise it is unchanged */
if (msr_pow) {
/* indicate that we resumed from power save mode */
msr |= 0x10000;
+ new_msr |= ((target_ulong)1 << MSR_ME);
+ }
+ if (env->msr_mask & MSR_HVB) {
+ /* ISA specifies HV, but can be delivered to guest with HV clear
+ * (e.g., see FWNMI in PAPR, NMI injection in QEMU).
+ */
+ new_msr |= (target_ulong)MSR_HVB;
} else {
- new_msr &= ~((target_ulong)1 << MSR_ME);
+ if (msr_pow) {
+ cpu_abort(cs, "Trying to deliver power-saving system reset "
+ "exception %d with no HV support\n", excp);
+ }
}
-
- new_msr |= (target_ulong)MSR_HVB;
ail = 0;
- goto store_next;
+ break;
case POWERPC_EXCP_DSEG: /* Data segment exception */
- goto store_next;
case POWERPC_EXCP_ISEG: /* Instruction segment exception */
- goto store_next;
- case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
- srr0 = SPR_HSRR0;
- srr1 = SPR_HSRR1;
- new_msr |= (target_ulong)MSR_HVB;
- new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
- goto store_next;
case POWERPC_EXCP_TRACE: /* Trace exception */
- goto store_next;
+ break;
+ case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
- srr0 = SPR_HSRR0;
- srr1 = SPR_HSRR1;
- new_msr |= (target_ulong)MSR_HVB;
- new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
- goto store_next;
case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */
- srr0 = SPR_HSRR0;
- srr1 = SPR_HSRR1;
- new_msr |= (target_ulong)MSR_HVB;
- new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
- goto store_next;
case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */
- srr0 = SPR_HSRR0;
- srr1 = SPR_HSRR1;
- new_msr |= (target_ulong)MSR_HVB;
- new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
- goto store_next;
case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment exception */
+ case POWERPC_EXCP_HV_EMU:
srr0 = SPR_HSRR0;
srr1 = SPR_HSRR1;
new_msr |= (target_ulong)MSR_HVB;
new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
- goto store_next;
+ break;
case POWERPC_EXCP_VPU: /* Vector unavailable exception */
- goto store_current;
case POWERPC_EXCP_VSXU: /* VSX unavailable exception */
- goto store_current;
case POWERPC_EXCP_FU: /* Facility unavailable exception */
- goto store_current;
+#ifdef TARGET_PPC64
+ env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
+#endif
+ break;
case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */
LOG_EXCP("PIT exception\n");
- goto store_next;
+ break;
case POWERPC_EXCP_IO: /* IO error exception */
/* XXX: TODO */
cpu_abort(cs, "601 IO error exception is not implemented yet !\n");
- goto store_next;
+ break;
case POWERPC_EXCP_RUNM: /* Run mode exception */
/* XXX: TODO */
cpu_abort(cs, "601 run mode exception is not implemented yet !\n");
- goto store_next;
+ break;
case POWERPC_EXCP_EMUL: /* Emulation trap exception */
/* XXX: TODO */
cpu_abort(cs, "602 emulation trap exception "
"is not implemented yet !\n");
- goto store_next;
+ break;
case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
switch (excp_model) {
case POWERPC_EXCP_602:
@@ -568,78 +561,80 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
cpu_abort(cs, "Invalid data store TLB miss exception\n");
break;
}
- goto store_next;
+ break;
case POWERPC_EXCP_FPA: /* Floating-point assist exception */
/* XXX: TODO */
cpu_abort(cs, "Floating point assist exception "
"is not implemented yet !\n");
- goto store_next;
+ break;
case POWERPC_EXCP_DABR: /* Data address breakpoint */
/* XXX: TODO */
cpu_abort(cs, "DABR exception is not implemented yet !\n");
- goto store_next;
+ break;
case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
/* XXX: TODO */
cpu_abort(cs, "IABR exception is not implemented yet !\n");
- goto store_next;
+ break;
case POWERPC_EXCP_SMI: /* System management interrupt */
/* XXX: TODO */
cpu_abort(cs, "SMI exception is not implemented yet !\n");
- goto store_next;
+ break;
case POWERPC_EXCP_THERM: /* Thermal interrupt */
/* XXX: TODO */
cpu_abort(cs, "Thermal management exception "
"is not implemented yet !\n");
- goto store_next;
+ break;
case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */
/* XXX: TODO */
cpu_abort(cs,
"Performance counter exception is not implemented yet !\n");
- goto store_next;
+ break;
case POWERPC_EXCP_VPUA: /* Vector assist exception */
/* XXX: TODO */
cpu_abort(cs, "VPU assist exception is not implemented yet !\n");
- goto store_next;
+ break;
case POWERPC_EXCP_SOFTP: /* Soft patch exception */
/* XXX: TODO */
cpu_abort(cs,
"970 soft-patch exception is not implemented yet !\n");
- goto store_next;
+ break;
case POWERPC_EXCP_MAINT: /* Maintenance exception */
/* XXX: TODO */
cpu_abort(cs,
"970 maintenance exception is not implemented yet !\n");
- goto store_next;
+ break;
case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */
/* XXX: TODO */
cpu_abort(cs, "Maskable external exception "
"is not implemented yet !\n");
- goto store_next;
+ break;
case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */
/* XXX: TODO */
cpu_abort(cs, "Non maskable external exception "
"is not implemented yet !\n");
- goto store_next;
+ break;
default:
excp_invalid:
cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
break;
- store_current:
- /* save current instruction location */
- env->spr[srr0] = env->nip - 4;
- break;
- store_next:
- /* save next instruction location */
- env->spr[srr0] = env->nip;
- break;
}
+
+ /* Save PC */
+ env->spr[srr0] = env->nip;
+
/* Save MSR */
env->spr[srr1] = msr;
/* Sanity check */
- if (!(env->msr_mask & MSR_HVB) && (srr0 == SPR_HSRR0)) {
- cpu_abort(cs, "Trying to deliver HV exception %d with "
- "no HV support\n", excp);
+ if (!(env->msr_mask & MSR_HVB)) {
+ if (new_msr & MSR_HVB) {
+ cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
+ "no HV support\n", excp);
+ }
+ if (srr0 == SPR_HSRR0) {
+ cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
+ "no HV support\n", excp);
+ }
}
/* If any alternate SRR register are defined, duplicate saved values */
@@ -739,7 +734,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
/* Any interrupt is context synchronizing, check if TCG TLB
* needs a delayed flush on ppc64
*/
- check_tlb_flush(env);
+ check_tlb_flush(env, false);
}
void ppc_cpu_do_interrupt(CPUState *cs)
@@ -898,34 +893,53 @@ static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
/*****************************************************************************/
/* Exceptions processing helpers */
-void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
- uint32_t error_code)
+void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code, uintptr_t raddr)
{
CPUState *cs = CPU(ppc_env_get_cpu(env));
-#if 0
- printf("Raise exception %3x code : %d\n", exception, error_code);
-#endif
cs->exception_index = exception;
env->error_code = error_code;
- cpu_loop_exit(cs);
+ cpu_loop_exit_restore(cs, raddr);
+}
+
+void raise_exception_err(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code)
+{
+ raise_exception_err_ra(env, exception, error_code, 0);
+}
+
+void raise_exception(CPUPPCState *env, uint32_t exception)
+{
+ raise_exception_err_ra(env, exception, 0, 0);
+}
+
+void raise_exception_ra(CPUPPCState *env, uint32_t exception,
+ uintptr_t raddr)
+{
+ raise_exception_err_ra(env, exception, 0, raddr);
+}
+
+void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code)
+{
+ raise_exception_err_ra(env, exception, error_code, 0);
}
void helper_raise_exception(CPUPPCState *env, uint32_t exception)
{
- helper_raise_exception_err(env, exception, 0);
+ raise_exception_err_ra(env, exception, 0, 0);
}
#if !defined(CONFIG_USER_ONLY)
void helper_store_msr(CPUPPCState *env, target_ulong val)
{
- CPUState *cs;
+ uint32_t excp = hreg_store_msr(env, val, 0);
- val = hreg_store_msr(env, val, 0);
- if (val != 0) {
- cs = CPU(ppc_env_get_cpu(env));
+ if (excp != 0) {
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
- helper_raise_exception(env, val);
+ raise_exception(env, excp);
}
}
@@ -951,7 +965,7 @@ void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
* but this doesn't seem to be a problem.
*/
env->msr |= (1ull << MSR_EE);
- helper_raise_exception(env, EXCP_HLT);
+ raise_exception(env, EXCP_HLT);
}
#endif /* defined(TARGET_PPC64) */
@@ -982,7 +996,7 @@ static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
/* Context synchronizing: check if TCG TLB needs flush */
- check_tlb_flush(env);
+ check_tlb_flush(env, false);
}
void helper_rfi(CPUPPCState *env)
@@ -1041,8 +1055,8 @@ void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
- helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_TRAP);
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_TRAP, GETPC());
}
}
@@ -1055,8 +1069,8 @@ void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
- helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_TRAP);
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_TRAP, GETPC());
}
}
#endif
diff --git a/target-ppc/fpu_helper.c b/target-ppc/fpu_helper.c
index d9795d04d0..8a389e19af 100644
--- a/target-ppc/fpu_helper.c
+++ b/target-ppc/fpu_helper.c
@@ -19,6 +19,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
@@ -116,8 +117,8 @@ void helper_compute_fprf(CPUPPCState *env, uint64_t arg)
}
/* Floating-point invalid operations exception */
-static inline uint64_t fload_invalid_op_excp(CPUPPCState *env, int op,
- int set_fpcc)
+static inline __attribute__((__always_inline__))
+uint64_t float_invalid_op_excp(CPUPPCState *env, int op, int set_fpcc)
{
CPUState *cs = CPU(ppc_env_get_cpu(env));
uint64_t ret = 0;
@@ -200,14 +201,15 @@ static inline uint64_t fload_invalid_op_excp(CPUPPCState *env, int op,
/* Update the floating-point enabled exception summary */
env->fpscr |= 1 << FPSCR_FEX;
if (msr_fe0 != 0 || msr_fe1 != 0) {
- helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_FP | op);
+ /* GETPC() works here because this is inline */
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_FP | op, GETPC());
}
}
return ret;
}
-static inline void float_zero_divide_excp(CPUPPCState *env)
+static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
{
env->fpscr |= 1 << FPSCR_ZX;
env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
@@ -217,8 +219,9 @@ static inline void float_zero_divide_excp(CPUPPCState *env)
/* Update the floating-point enabled exception summary */
env->fpscr |= 1 << FPSCR_FEX;
if (msr_fe0 != 0 || msr_fe1 != 0) {
- helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
+ raddr);
}
}
}
@@ -491,13 +494,13 @@ void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
helper_store_fpscr(env, arg, mask);
}
-void helper_float_check_status(CPUPPCState *env)
+static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
{
CPUState *cs = CPU(ppc_env_get_cpu(env));
int status = get_float_exception_flags(&env->fp_status);
if (status & float_flag_divbyzero) {
- float_zero_divide_excp(env);
+ float_zero_divide_excp(env, raddr);
} else if (status & float_flag_overflow) {
float_overflow_excp(env);
} else if (status & float_flag_underflow) {
@@ -510,12 +513,24 @@ void helper_float_check_status(CPUPPCState *env)
(env->error_code & POWERPC_EXCP_FP)) {
/* Differred floating-point exception after target FPR update */
if (msr_fe0 != 0 || msr_fe1 != 0) {
- helper_raise_exception_err(env, cs->exception_index,
- env->error_code);
+ raise_exception_err_ra(env, cs->exception_index,
+ env->error_code, raddr);
}
}
}
+static inline __attribute__((__always_inline__))
+void float_check_status(CPUPPCState *env)
+{
+ /* GETPC() works here because this is inline */
+ do_float_check_status(env, GETPC());
+}
+
+void helper_float_check_status(CPUPPCState *env)
+{
+ do_float_check_status(env, GETPC());
+}
+
void helper_reset_fpstatus(CPUPPCState *env)
{
set_float_exception_flags(0, &env->fp_status);
@@ -532,12 +547,12 @@ uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
/* Magnitude subtraction of infinities */
- farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
} else {
if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
float64_is_signaling_nan(farg2.d, &env->fp_status))) {
/* sNaN addition */
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
}
@@ -556,12 +571,12 @@ uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
/* Magnitude subtraction of infinities */
- farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
} else {
if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
float64_is_signaling_nan(farg2.d, &env->fp_status))) {
/* sNaN subtraction */
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
}
@@ -580,12 +595,12 @@ uint64_t helper_fmul(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
(float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
/* Multiplication of zero by infinity */
- farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
} else {
if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
float64_is_signaling_nan(farg2.d, &env->fp_status))) {
/* sNaN multiplication */
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
}
@@ -604,15 +619,15 @@ uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
if (unlikely(float64_is_infinity(farg1.d) &&
float64_is_infinity(farg2.d))) {
/* Division of infinity by infinity */
- farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
} else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
/* Division of zero by zero */
- farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
} else {
if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
float64_is_signaling_nan(farg2.d, &env->fp_status))) {
/* sNaN division */
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
}
@@ -631,16 +646,16 @@ uint64_t helper_##op(CPUPPCState *env, uint64_t arg) \
\
if (unlikely(env->fp_status.float_exception_flags)) { \
if (float64_is_any_nan(arg)) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1); \
if (float64_is_signaling_nan(arg, &env->fp_status)) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); \
} \
farg.ll = nanval; \
} else if (env->fp_status.float_exception_flags & \
float_flag_invalid) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1); \
} \
- helper_float_check_status(env); \
+ float_check_status(env); \
} \
return farg.ll; \
}
@@ -665,7 +680,7 @@ uint64_t helper_##op(CPUPPCState *env, uint64_t arg) \
} else { \
farg.d = cvtr(arg, &env->fp_status); \
} \
- helper_float_check_status(env); \
+ float_check_status(env); \
return farg.ll; \
}
@@ -683,7 +698,7 @@ static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
/* sNaN round */
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
farg.ll = arg | 0x0008000000000000ULL;
} else {
int inexact = get_float_exception_flags(&env->fp_status) &
@@ -698,7 +713,7 @@ static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
env->fp_status.float_exception_flags &= ~float_flag_inexact;
}
}
- helper_float_check_status(env);
+ float_check_status(env);
return farg.ll;
}
@@ -735,13 +750,13 @@ uint64_t helper_fmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
(float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
/* Multiplication of zero by infinity */
- farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
} else {
if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
float64_is_signaling_nan(farg2.d, &env->fp_status) ||
float64_is_signaling_nan(farg3.d, &env->fp_status))) {
/* sNaN operation */
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
/* This is the way the PowerPC specification defines it */
float128 ft0_128, ft1_128;
@@ -753,7 +768,7 @@ uint64_t helper_fmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
float64_is_infinity(farg3.d) &&
float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
/* Magnitude subtraction of infinities */
- farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
} else {
ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
@@ -778,13 +793,13 @@ uint64_t helper_fmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
(float64_is_zero(farg1.d) &&
float64_is_infinity(farg2.d)))) {
/* Multiplication of zero by infinity */
- farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
} else {
if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
float64_is_signaling_nan(farg2.d, &env->fp_status) ||
float64_is_signaling_nan(farg3.d, &env->fp_status))) {
/* sNaN operation */
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
/* This is the way the PowerPC specification defines it */
float128 ft0_128, ft1_128;
@@ -796,7 +811,7 @@ uint64_t helper_fmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
float64_is_infinity(farg3.d) &&
float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
/* Magnitude subtraction of infinities */
- farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
} else {
ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
@@ -819,13 +834,13 @@ uint64_t helper_fnmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
(float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
/* Multiplication of zero by infinity */
- farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
} else {
if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
float64_is_signaling_nan(farg2.d, &env->fp_status) ||
float64_is_signaling_nan(farg3.d, &env->fp_status))) {
/* sNaN operation */
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
/* This is the way the PowerPC specification defines it */
float128 ft0_128, ft1_128;
@@ -837,7 +852,7 @@ uint64_t helper_fnmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
float64_is_infinity(farg3.d) &&
float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
/* Magnitude subtraction of infinities */
- farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
} else {
ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
@@ -864,13 +879,13 @@ uint64_t helper_fnmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
(float64_is_zero(farg1.d) &&
float64_is_infinity(farg2.d)))) {
/* Multiplication of zero by infinity */
- farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
} else {
if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
float64_is_signaling_nan(farg2.d, &env->fp_status) ||
float64_is_signaling_nan(farg3.d, &env->fp_status))) {
/* sNaN operation */
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
/* This is the way the PowerPC specification defines it */
float128 ft0_128, ft1_128;
@@ -882,7 +897,7 @@ uint64_t helper_fnmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
float64_is_infinity(farg3.d) &&
float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
/* Magnitude subtraction of infinities */
- farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
} else {
ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
@@ -905,7 +920,7 @@ uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
/* sNaN square root */
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
f32 = float64_to_float32(farg.d, &env->fp_status);
farg.d = float32_to_float64(f32, &env->fp_status);
@@ -923,12 +938,12 @@ uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg)
if (unlikely(float64_is_any_nan(farg.d))) {
if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
/* sNaN reciprocal square root */
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
farg.ll = float64_snan_to_qnan(farg.ll);
}
} else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
/* Square root of a negative nonzero number */
- farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
+ farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
} else {
farg.d = float64_sqrt(farg.d, &env->fp_status);
}
@@ -944,7 +959,7 @@ uint64_t helper_fre(CPUPPCState *env, uint64_t arg)
if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
/* sNaN reciprocal */
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
farg.d = float64_div(float64_one, farg.d, &env->fp_status);
return farg.d;
@@ -960,7 +975,7 @@ uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
/* sNaN reciprocal */
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
farg.d = float64_div(float64_one, farg.d, &env->fp_status);
f32 = float64_to_float32(farg.d, &env->fp_status);
@@ -979,12 +994,12 @@ uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg)
if (unlikely(float64_is_any_nan(farg.d))) {
if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
/* sNaN reciprocal square root */
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
farg.ll = float64_snan_to_qnan(farg.ll);
}
} else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
/* Reciprocal square root of a negative nonzero number */
- farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
+ farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
} else {
farg.d = float64_sqrt(farg.d, &env->fp_status);
farg.d = float64_div(float64_one, farg.d, &env->fp_status);
@@ -1103,7 +1118,7 @@ void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
&& (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
/* sNaN comparison */
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
}
@@ -1134,11 +1149,11 @@ void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
float64_is_signaling_nan(farg2.d, &env->fp_status)) {
/* sNaN comparison */
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
POWERPC_EXCP_FP_VXVC, 1);
} else {
/* qNaN comparison */
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 1);
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 1);
}
}
}
@@ -1838,10 +1853,10 @@ void helper_##name(CPUPPCState *env, uint32_t opcode) \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf); \
} else if (tp##_is_signaling_nan(xa.fld, &tstat) || \
tp##_is_signaling_nan(xb.fld, &tstat)) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
} \
\
@@ -1854,7 +1869,7 @@ void helper_##name(CPUPPCState *env, uint32_t opcode) \
} \
} \
putVSR(xT(opcode), &xt, env); \
- helper_float_check_status(env); \
+ float_check_status(env); \
}
VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
@@ -1893,10 +1908,10 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) || \
(tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf); \
} else if (tp##_is_signaling_nan(xa.fld, &tstat) || \
tp##_is_signaling_nan(xb.fld, &tstat)) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
} \
\
@@ -1910,7 +1925,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
} \
\
putVSR(xT(opcode), &xt, env); \
- helper_float_check_status(env); \
+ float_check_status(env); \
}
VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
@@ -1944,13 +1959,13 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf); \
} else if (tp##_is_zero(xa.fld) && \
tp##_is_zero(xb.fld)) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf); \
} else if (tp##_is_signaling_nan(xa.fld, &tstat) || \
tp##_is_signaling_nan(xb.fld, &tstat)) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
} \
\
@@ -1964,7 +1979,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
} \
\
putVSR(xT(opcode), &xt, env); \
- helper_float_check_status(env); \
+ float_check_status(env); \
}
VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
@@ -1991,7 +2006,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
\
for (i = 0; i < nels; i++) { \
if (unlikely(tp##_is_signaling_nan(xb.fld, &env->fp_status))) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status); \
\
@@ -2005,7 +2020,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
} \
\
putVSR(xT(opcode), &xt, env); \
- helper_float_check_status(env); \
+ float_check_status(env); \
}
VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
@@ -2038,9 +2053,9 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf); \
} else if (tp##_is_signaling_nan(xb.fld, &tstat)) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
} \
\
@@ -2054,7 +2069,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
} \
\
putVSR(xT(opcode), &xt, env); \
- helper_float_check_status(env); \
+ float_check_status(env); \
}
VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
@@ -2088,9 +2103,9 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf); \
} else if (tp##_is_signaling_nan(xb.fld, &tstat)) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
} \
\
@@ -2104,7 +2119,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
} \
\
putVSR(xT(opcode), &xt, env); \
- helper_float_check_status(env); \
+ float_check_status(env); \
}
VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
@@ -2277,12 +2292,12 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
if (tp##_is_signaling_nan(xa.fld, &tstat) || \
tp##_is_signaling_nan(b->fld, &tstat) || \
tp##_is_signaling_nan(c->fld, &tstat)) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
tstat.float_exception_flags &= ~float_flag_invalid; \
} \
if ((tp##_is_infinity(xa.fld) && tp##_is_zero(b->fld)) || \
(tp##_is_zero(xa.fld) && tp##_is_infinity(b->fld))) { \
- xt_out.fld = float64_to_##tp(fload_invalid_op_excp(env, \
+ xt_out.fld = float64_to_##tp(float_invalid_op_excp(env, \
POWERPC_EXCP_FP_VXIMZ, sfprf), &env->fp_status); \
tstat.float_exception_flags &= ~float_flag_invalid; \
} \
@@ -2290,7 +2305,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
((tp##_is_infinity(xa.fld) || \
tp##_is_infinity(b->fld)) && \
tp##_is_infinity(c->fld))) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf); \
} \
} \
\
@@ -2303,7 +2318,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
} \
} \
putVSR(xT(opcode), &xt_out, env); \
- helper_float_check_status(env); \
+ float_check_status(env); \
}
#define MADD_FLGS 0
@@ -2347,6 +2362,58 @@ VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
+/* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
+ * op - instruction mnemonic
+ * cmp - comparison operation
+ * exp - expected result of comparison
+ * svxvc - set VXVC bit
+ */
+#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc) \
+void helper_##op(CPUPPCState *env, uint32_t opcode) \
+{ \
+ ppc_vsr_t xt, xa, xb; \
+ bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false; \
+ \
+ getVSR(xA(opcode), &xa, env); \
+ getVSR(xB(opcode), &xb, env); \
+ getVSR(xT(opcode), &xt, env); \
+ \
+ if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) || \
+ float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
+ vxsnan_flag = true; \
+ if (fpscr_ve == 0 && svxvc) { \
+ vxvc_flag = true; \
+ } \
+ } else if (svxvc) { \
+ vxvc_flag = float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) || \
+ float64_is_quiet_nan(xb.VsrD(0), &env->fp_status); \
+ } \
+ if (vxsnan_flag) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
+ } \
+ if (vxvc_flag) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0); \
+ } \
+ vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag); \
+ \
+ if (!vex_flag) { \
+ if (float64_##cmp(xb.VsrD(0), xa.VsrD(0), &env->fp_status) == exp) { \
+ xt.VsrD(0) = -1; \
+ xt.VsrD(1) = 0; \
+ } else { \
+ xt.VsrD(0) = 0; \
+ xt.VsrD(1) = 0; \
+ } \
+ } \
+ putVSR(xT(opcode), &xt, env); \
+ helper_float_check_status(env); \
+}
+
+VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
+VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
+VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
+VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
+
#define VSX_SCALAR_CMP(op, ordered) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
@@ -2360,10 +2427,10 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
float64_is_any_nan(xb.VsrD(0)))) { \
if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) || \
float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
} \
if (ordered) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0); \
} \
cc = 1; \
} else { \
@@ -2381,7 +2448,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
env->fpscr |= cc << FPSCR_FPRF; \
env->crf[BF(opcode)] = cc; \
\
- helper_float_check_status(env); \
+ float_check_status(env); \
}
VSX_SCALAR_CMP(xscmpodp, 1)
@@ -2408,12 +2475,12 @@ void helper_##name(CPUPPCState *env, uint32_t opcode) \
xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status); \
if (unlikely(tp##_is_signaling_nan(xa.fld, &env->fp_status) || \
tp##_is_signaling_nan(xb.fld, &env->fp_status))) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
} \
} \
\
putVSR(xT(opcode), &xt, env); \
- helper_float_check_status(env); \
+ float_check_status(env); \
}
VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
@@ -2430,8 +2497,9 @@ VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
* fld - vsr_t field (VsrD(*) or VsrW(*))
* cmp - comparison operation
* svxvc - set VXVC bit
+ * exp - expected result of comparison
*/
-#define VSX_CMP(op, nels, tp, fld, cmp, svxvc) \
+#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
ppc_vsr_t xt, xa, xb; \
@@ -2448,15 +2516,15 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
tp##_is_any_nan(xb.fld))) { \
if (tp##_is_signaling_nan(xa.fld, &env->fp_status) || \
tp##_is_signaling_nan(xb.fld, &env->fp_status)) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
} \
if (svxvc) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0); \
} \
xt.fld = 0; \
all_true = 0; \
} else { \
- if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == 1) { \
+ if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == exp) { \
xt.fld = -1; \
all_false = 0; \
} else { \
@@ -2470,15 +2538,17 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
if ((opcode >> (31-21)) & 1) { \
env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0); \
} \
- helper_float_check_status(env); \
+ float_check_status(env); \
}
-VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0)
-VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1)
-VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1)
-VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0)
-VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1)
-VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1)
+VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
+VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
+VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
+VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
+VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
+VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
+VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
+VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
* op - instruction mnemonic
@@ -2502,7 +2572,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
if (unlikely(stp##_is_signaling_nan(xb.sfld, \
&env->fp_status))) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
xt.tfld = ttp##_snan_to_qnan(xt.tfld); \
} \
if (sfprf) { \
@@ -2512,7 +2582,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
} \
\
putVSR(xT(opcode), &xt, env); \
- helper_float_check_status(env); \
+ float_check_status(env); \
}
VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
@@ -2557,21 +2627,21 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
for (i = 0; i < nels; i++) { \
if (unlikely(stp##_is_any_nan(xb.sfld))) { \
if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
} \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0); \
xt.tfld = rnan; \
} else { \
xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld, \
&env->fp_status); \
if (env->fp_status.float_exception_flags & float_flag_invalid) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0); \
} \
} \
} \
\
putVSR(xT(opcode), &xt, env); \
- helper_float_check_status(env); \
+ float_check_status(env); \
}
VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
@@ -2622,7 +2692,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
} \
\
putVSR(xT(opcode), &xt, env); \
- helper_float_check_status(env); \
+ float_check_status(env); \
}
VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
@@ -2667,7 +2737,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
for (i = 0; i < nels; i++) { \
if (unlikely(tp##_is_signaling_nan(xb.fld, \
&env->fp_status))) { \
- fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
xt.fld = tp##_snan_to_qnan(xb.fld); \
} else { \
xt.fld = tp##_round_to_int(xb.fld, &env->fp_status); \
@@ -2686,7 +2756,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
} \
\
putVSR(xT(opcode), &xt, env); \
- helper_float_check_status(env); \
+ float_check_status(env); \
}
VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
@@ -2714,6 +2784,6 @@ uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
uint64_t xt = helper_frsp(env, xb);
helper_compute_fprf(env, xt);
- helper_float_check_status(env);
+ float_check_status(env);
return xt;
}
diff --git a/target-ppc/helper.h b/target-ppc/helper.h
index 1f5cfd0990..da00f0ab49 100644
--- a/target-ppc/helper.h
+++ b/target-ppc/helper.h
@@ -1,8 +1,8 @@
-DEF_HELPER_3(raise_exception_err, void, env, i32, i32)
-DEF_HELPER_2(raise_exception, void, env, i32)
-DEF_HELPER_4(tw, void, env, tl, tl, i32)
+DEF_HELPER_FLAGS_3(raise_exception_err, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, void, env, i32)
+DEF_HELPER_FLAGS_4(tw, TCG_CALL_NO_WG, void, env, tl, tl, i32)
#if defined(TARGET_PPC64)
-DEF_HELPER_4(td, void, env, tl, tl, i32)
+DEF_HELPER_FLAGS_4(td, TCG_CALL_NO_WG, void, env, tl, tl, i32)
#endif
#if !defined(CONFIG_USER_ONLY)
DEF_HELPER_2(store_msr, void, env, tl)
@@ -18,16 +18,17 @@ DEF_HELPER_1(rfid, void, env)
DEF_HELPER_1(hrfid, void, env)
DEF_HELPER_2(store_lpcr, void, env, tl)
#endif
-DEF_HELPER_1(check_tlb_flush, void, env)
+DEF_HELPER_1(check_tlb_flush_local, void, env)
+DEF_HELPER_1(check_tlb_flush_global, void, env)
#endif
DEF_HELPER_3(lmw, void, env, tl, i32)
-DEF_HELPER_3(stmw, void, env, tl, i32)
+DEF_HELPER_FLAGS_3(stmw, TCG_CALL_NO_WG, void, env, tl, i32)
DEF_HELPER_4(lsw, void, env, tl, i32, i32)
DEF_HELPER_5(lswx, void, env, tl, i32, i32, i32)
-DEF_HELPER_4(stsw, void, env, tl, i32, i32)
-DEF_HELPER_3(dcbz, void, env, tl, i32)
-DEF_HELPER_2(icbi, void, env, tl)
+DEF_HELPER_FLAGS_4(stsw, TCG_CALL_NO_WG, void, env, tl, i32, i32)
+DEF_HELPER_FLAGS_3(dcbz, TCG_CALL_NO_WG, void, env, tl, i32)
+DEF_HELPER_FLAGS_2(icbi, TCG_CALL_NO_WG, void, env, tl)
DEF_HELPER_5(lscbx, tl, env, tl, i32, i32, i32)
#if defined(TARGET_PPC64)
@@ -38,15 +39,20 @@ DEF_HELPER_4(divweu, tl, env, tl, tl, i32)
DEF_HELPER_4(divwe, tl, env, tl, tl, i32)
DEF_HELPER_FLAGS_1(cntlzw, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(cnttzw, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_1(popcntb, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_1(popcntw, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_2(cmpb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
DEF_HELPER_3(sraw, tl, env, tl, tl)
#if defined(TARGET_PPC64)
+DEF_HELPER_FLAGS_2(cmpeqb, TCG_CALL_NO_RWG_SE, i32, tl, tl)
DEF_HELPER_FLAGS_1(cntlzd, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(cnttzd, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_1(popcntd, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_2(bpermd, TCG_CALL_NO_RWG_SE, i64, i64, i64)
DEF_HELPER_3(srad, tl, env, tl, tl)
+DEF_HELPER_0(darn32, tl)
+DEF_HELPER_0(darn64, tl)
#endif
DEF_HELPER_FLAGS_1(cntlsw32, TCG_CALL_NO_RWG_SE, i32, i32)
@@ -115,6 +121,9 @@ DEF_HELPER_3(vsubudm, void, avr, avr, avr)
DEF_HELPER_3(vavgub, void, avr, avr, avr)
DEF_HELPER_3(vavguh, void, avr, avr, avr)
DEF_HELPER_3(vavguw, void, avr, avr, avr)
+DEF_HELPER_3(vabsdub, void, avr, avr, avr)
+DEF_HELPER_3(vabsduh, void, avr, avr, avr)
+DEF_HELPER_3(vabsduw, void, avr, avr, avr)
DEF_HELPER_3(vavgsb, void, avr, avr, avr)
DEF_HELPER_3(vavgsh, void, avr, avr, avr)
DEF_HELPER_3(vavgsw, void, avr, avr, avr)
@@ -138,6 +147,12 @@ DEF_HELPER_4(vcmpequb, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpequh, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpequw, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpequd, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpneb, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpneh, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnew, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnezb, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnezh, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnezw, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtub, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtuh, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtuw, void, env, avr, avr, avr)
@@ -154,6 +169,12 @@ DEF_HELPER_4(vcmpequb_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpequh_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpequw_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpequd_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpneb_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpneh_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnew_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnezb_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnezh_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnezw_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtub_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtuh_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtuw_dot, void, env, avr, avr, avr)
@@ -199,7 +220,12 @@ DEF_HELPER_3(vslw, void, avr, avr, avr)
DEF_HELPER_3(vsld, void, avr, avr, avr)
DEF_HELPER_3(vslo, void, avr, avr, avr)
DEF_HELPER_3(vsro, void, avr, avr, avr)
+DEF_HELPER_3(vsrv, void, avr, avr, avr)
+DEF_HELPER_3(vslv, void, avr, avr, avr)
DEF_HELPER_3(vaddcuw, void, avr, avr, avr)
+DEF_HELPER_2(vprtybw, void, avr, avr)
+DEF_HELPER_2(vprtybd, void, avr, avr)
+DEF_HELPER_2(vprtybq, void, avr, avr)
DEF_HELPER_3(vsubcuw, void, avr, avr, avr)
DEF_HELPER_2(lvsl, void, avr, tl)
DEF_HELPER_2(lvsr, void, avr, tl)
@@ -236,6 +262,21 @@ DEF_HELPER_2(vspltisw, void, avr, i32)
DEF_HELPER_3(vspltb, void, avr, avr, i32)
DEF_HELPER_3(vsplth, void, avr, avr, i32)
DEF_HELPER_3(vspltw, void, avr, avr, i32)
+DEF_HELPER_3(vextractub, void, avr, avr, i32)
+DEF_HELPER_3(vextractuh, void, avr, avr, i32)
+DEF_HELPER_3(vextractuw, void, avr, avr, i32)
+DEF_HELPER_3(vextractd, void, avr, avr, i32)
+DEF_HELPER_3(vinsertb, void, avr, avr, i32)
+DEF_HELPER_3(vinserth, void, avr, avr, i32)
+DEF_HELPER_3(vinsertw, void, avr, avr, i32)
+DEF_HELPER_3(vinsertd, void, avr, avr, i32)
+DEF_HELPER_2(vextsb2w, void, avr, avr)
+DEF_HELPER_2(vextsh2w, void, avr, avr)
+DEF_HELPER_2(vextsb2d, void, avr, avr)
+DEF_HELPER_2(vextsh2d, void, avr, avr)
+DEF_HELPER_2(vextsw2d, void, avr, avr)
+DEF_HELPER_2(vnegw, void, avr, avr)
+DEF_HELPER_2(vnegd, void, avr, avr)
DEF_HELPER_2(vupkhpx, void, avr, avr)
DEF_HELPER_2(vupklpx, void, avr, avr)
DEF_HELPER_2(vupkhsb, void, avr, avr)
@@ -248,6 +289,7 @@ DEF_HELPER_5(vmsumubm, void, env, avr, avr, avr, avr)
DEF_HELPER_5(vmsummbm, void, env, avr, avr, avr, avr)
DEF_HELPER_5(vsel, void, env, avr, avr, avr, avr)
DEF_HELPER_5(vperm, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(vpermr, void, env, avr, avr, avr, avr)
DEF_HELPER_4(vpkshss, void, env, avr, avr, avr)
DEF_HELPER_4(vpkshus, void, env, avr, avr, avr)
DEF_HELPER_4(vpkswss, void, env, avr, avr, avr)
@@ -286,6 +328,10 @@ DEF_HELPER_4(vmaxfp, void, env, avr, avr, avr)
DEF_HELPER_4(vminfp, void, env, avr, avr, avr)
DEF_HELPER_3(vrefp, void, env, avr, avr)
DEF_HELPER_3(vrsqrtefp, void, env, avr, avr)
+DEF_HELPER_3(vrlwmi, void, avr, avr, avr)
+DEF_HELPER_3(vrldmi, void, avr, avr, avr)
+DEF_HELPER_3(vrldnm, void, avr, avr, avr)
+DEF_HELPER_3(vrlwnm, void, avr, avr, avr)
DEF_HELPER_5(vmaddfp, void, env, avr, avr, avr, avr)
DEF_HELPER_5(vnmsubfp, void, env, avr, avr, avr, avr)
DEF_HELPER_3(vexptefp, void, env, avr, avr)
@@ -303,10 +349,17 @@ DEF_HELPER_2(vclzb, void, avr, avr)
DEF_HELPER_2(vclzh, void, avr, avr)
DEF_HELPER_2(vclzw, void, avr, avr)
DEF_HELPER_2(vclzd, void, avr, avr)
+DEF_HELPER_2(vctzb, void, avr, avr)
+DEF_HELPER_2(vctzh, void, avr, avr)
+DEF_HELPER_2(vctzw, void, avr, avr)
+DEF_HELPER_2(vctzd, void, avr, avr)
DEF_HELPER_2(vpopcntb, void, avr, avr)
DEF_HELPER_2(vpopcnth, void, avr, avr)
DEF_HELPER_2(vpopcntw, void, avr, avr)
DEF_HELPER_2(vpopcntd, void, avr, avr)
+DEF_HELPER_1(vclzlsbb, tl, avr)
+DEF_HELPER_1(vctzlsbb, tl, avr)
+DEF_HELPER_3(vbpermd, void, avr, avr, avr)
DEF_HELPER_3(vbpermq, void, avr, avr, avr)
DEF_HELPER_2(vgbbd, void, avr, avr)
DEF_HELPER_3(vpmsumb, void, avr, avr, avr)
@@ -325,6 +378,10 @@ DEF_HELPER_4(vpermxor, void, avr, avr, avr, avr)
DEF_HELPER_4(bcdadd, i32, avr, avr, avr, i32)
DEF_HELPER_4(bcdsub, i32, avr, avr, avr, i32)
+DEF_HELPER_3(bcdcfn, i32, avr, avr, i32)
+DEF_HELPER_3(bcdctn, i32, avr, avr, i32)
+DEF_HELPER_3(bcdcfz, i32, avr, avr, i32)
+DEF_HELPER_3(bcdctz, i32, avr, avr, i32)
DEF_HELPER_2(xsadddp, void, env, i32)
DEF_HELPER_2(xssubdp, void, env, i32)
@@ -343,6 +400,10 @@ DEF_HELPER_2(xsnmaddadp, void, env, i32)
DEF_HELPER_2(xsnmaddmdp, void, env, i32)
DEF_HELPER_2(xsnmsubadp, void, env, i32)
DEF_HELPER_2(xsnmsubmdp, void, env, i32)
+DEF_HELPER_2(xscmpeqdp, void, env, i32)
+DEF_HELPER_2(xscmpgtdp, void, env, i32)
+DEF_HELPER_2(xscmpgedp, void, env, i32)
+DEF_HELPER_2(xscmpnedp, void, env, i32)
DEF_HELPER_2(xscmpodp, void, env, i32)
DEF_HELPER_2(xscmpudp, void, env, i32)
DEF_HELPER_2(xsmaxdp, void, env, i32)
@@ -404,6 +465,7 @@ DEF_HELPER_2(xvmindp, void, env, i32)
DEF_HELPER_2(xvcmpeqdp, void, env, i32)
DEF_HELPER_2(xvcmpgedp, void, env, i32)
DEF_HELPER_2(xvcmpgtdp, void, env, i32)
+DEF_HELPER_2(xvcmpnedp, void, env, i32)
DEF_HELPER_2(xvcvdpsp, void, env, i32)
DEF_HELPER_2(xvcvdpsxds, void, env, i32)
DEF_HELPER_2(xvcvdpsxws, void, env, i32)
@@ -441,6 +503,7 @@ DEF_HELPER_2(xvminsp, void, env, i32)
DEF_HELPER_2(xvcmpeqsp, void, env, i32)
DEF_HELPER_2(xvcmpgesp, void, env, i32)
DEF_HELPER_2(xvcmpgtsp, void, env, i32)
+DEF_HELPER_2(xvcmpnesp, void, env, i32)
DEF_HELPER_2(xvcvspdp, void, env, i32)
DEF_HELPER_2(xvcvspsxds, void, env, i32)
DEF_HELPER_2(xvcvspsxws, void, env, i32)
@@ -581,35 +644,35 @@ DEF_HELPER_2(load_dump_spr, void, env, i32)
DEF_HELPER_2(store_dump_spr, void, env, i32)
DEF_HELPER_4(fscr_facility_check, void, env, i32, i32, i32)
DEF_HELPER_4(msr_facility_check, void, env, i32, i32, i32)
-DEF_HELPER_1(load_tbl, tl, env)
-DEF_HELPER_1(load_tbu, tl, env)
-DEF_HELPER_1(load_atbl, tl, env)
-DEF_HELPER_1(load_atbu, tl, env)
-DEF_HELPER_1(load_601_rtcl, tl, env)
-DEF_HELPER_1(load_601_rtcu, tl, env)
+DEF_HELPER_FLAGS_1(load_tbl, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_1(load_tbu, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_1(load_atbl, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_1(load_atbu, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_1(load_601_rtcl, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_1(load_601_rtcu, TCG_CALL_NO_RWG, tl, env)
#if !defined(CONFIG_USER_ONLY)
#if defined(TARGET_PPC64)
-DEF_HELPER_1(load_purr, tl, env)
+DEF_HELPER_FLAGS_1(load_purr, TCG_CALL_NO_RWG, tl, env)
#endif
DEF_HELPER_2(store_sdr1, void, env, tl)
-DEF_HELPER_2(store_tbl, void, env, tl)
-DEF_HELPER_2(store_tbu, void, env, tl)
-DEF_HELPER_2(store_atbl, void, env, tl)
-DEF_HELPER_2(store_atbu, void, env, tl)
-DEF_HELPER_2(store_601_rtcl, void, env, tl)
-DEF_HELPER_2(store_601_rtcu, void, env, tl)
-DEF_HELPER_1(load_decr, tl, env)
-DEF_HELPER_2(store_decr, void, env, tl)
-DEF_HELPER_1(load_hdecr, tl, env)
-DEF_HELPER_2(store_hdecr, void, env, tl)
+DEF_HELPER_FLAGS_2(store_tbl, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_tbu, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_atbl, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_atbu, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_601_rtcl, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_601_rtcu, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_1(load_decr, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_2(store_decr, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_1(load_hdecr, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_2(store_hdecr, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_2(store_hid0_601, void, env, tl)
DEF_HELPER_3(store_403_pbr, void, env, i32, tl)
-DEF_HELPER_1(load_40x_pit, tl, env)
-DEF_HELPER_2(store_40x_pit, void, env, tl)
+DEF_HELPER_FLAGS_1(load_40x_pit, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_2(store_40x_pit, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_2(store_40x_dbcr0, void, env, tl)
DEF_HELPER_2(store_40x_sler, void, env, tl)
-DEF_HELPER_2(store_booke_tcr, void, env, tl)
-DEF_HELPER_2(store_booke_tsr, void, env, tl)
+DEF_HELPER_FLAGS_2(store_booke_tcr, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_booke_tsr, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_3(store_ibatl, void, env, i32, tl)
DEF_HELPER_3(store_ibatu, void, env, i32, tl)
DEF_HELPER_3(store_dbatl, void, env, i32, tl)
@@ -642,6 +705,8 @@ DEF_HELPER_3(dtstex, i32, env, fprp, fprp)
DEF_HELPER_3(dtstexq, i32, env, fprp, fprp)
DEF_HELPER_3(dtstsf, i32, env, fprp, fprp)
DEF_HELPER_3(dtstsfq, i32, env, fprp, fprp)
+DEF_HELPER_3(dtstsfi, i32, env, i32, fprp)
+DEF_HELPER_3(dtstsfiq, i32, env, i32, fprp)
DEF_HELPER_5(dquai, void, env, fprp, fprp, i32, i32)
DEF_HELPER_5(dquaiq, void, env, fprp, fprp, i32, i32)
DEF_HELPER_5(dqua, void, env, fprp, fprp, fprp, i32)
@@ -674,4 +739,4 @@ DEF_HELPER_4(dscli, void, env, fprp, fprp, i32)
DEF_HELPER_4(dscliq, void, env, fprp, fprp, i32)
DEF_HELPER_1(tbegin, void, env)
-DEF_HELPER_1(fixup_thrm, void, env)
+DEF_HELPER_FLAGS_1(fixup_thrm, TCG_CALL_NO_RWG, void, env)
diff --git a/target-ppc/helper_regs.h b/target-ppc/helper_regs.h
index 3d279f1d8a..62138163a5 100644
--- a/target-ppc/helper_regs.h
+++ b/target-ppc/helper_regs.h
@@ -131,11 +131,14 @@ static inline int hreg_store_msr(CPUPPCState *env, target_ulong value,
}
/* If PR=1 then EE, IR and DR must be 1
*
- * Note: We only enforce this on 64-bit processors. It appears that
- * 32-bit implementations supports PR=1 and EE/DR/IR=0 and MacOS
- * exploits it.
+ * Note: We only enforce this on 64-bit server processors.
+ * It appears that:
+ *       - 32-bit implementations support PR=1 and EE/DR/IR=0, and MacOS
+ * exploits it.
+ * - 64-bit embedded implementations do not need any operation to be
+ * performed when PR is set.
*/
- if ((env->insns_flags & PPC_64B) && ((value >> MSR_PR) & 1)) {
+ if ((env->insns_flags & PPC_SEGMENT_64B) && ((value >> MSR_PR) & 1)) {
value |= (1 << MSR_EE) | (1 << MSR_DR) | (1 << MSR_IR);
}
#endif
@@ -154,16 +157,33 @@ static inline int hreg_store_msr(CPUPPCState *env, target_ulong value,
}
#if !defined(CONFIG_USER_ONLY)
-static inline void check_tlb_flush(CPUPPCState *env)
+static inline void check_tlb_flush(CPUPPCState *env, bool global)
{
CPUState *cs = CPU(ppc_env_get_cpu(env));
- if (env->tlb_need_flush) {
- env->tlb_need_flush = 0;
+ if (env->tlb_need_flush & TLB_NEED_LOCAL_FLUSH) {
tlb_flush(cs, 1);
+ env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
+ }
+
+ /* Propagate TLB invalidations to other CPUs when the guest uses broadcast
+ * TLB invalidation instructions.
+ */
+ if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) {
+ CPUState *other_cs;
+ CPU_FOREACH(other_cs) {
+ if (other_cs != cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(other_cs);
+ CPUPPCState *other_env = &cpu->env;
+
+ other_env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
+ tlb_flush(other_cs, 1);
+ }
+ }
+ env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH;
}
}
#else
-static inline void check_tlb_flush(CPUPPCState *env) { }
+static inline void check_tlb_flush(CPUPPCState *env, bool global) { }
#endif
#endif /* HELPER_REGS_H */
diff --git a/target-ppc/int_helper.c b/target-ppc/int_helper.c
index 74453763d6..2d57c9a1c2 100644
--- a/target-ppc/int_helper.c
+++ b/target-ppc/int_helper.c
@@ -18,6 +18,7 @@
*/
#include "qemu/osdep.h"
#include "cpu.h"
+#include "internal.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
@@ -145,11 +146,59 @@ target_ulong helper_cntlzw(target_ulong t)
return clz32(t);
}
+target_ulong helper_cnttzw(target_ulong t)
+{
+ return ctz32(t);
+}
+
#if defined(TARGET_PPC64)
+/* if x = 0xab, returns 0xabababababababab */
+#define pattern(x) (((x) & 0xff) * (~(target_ulong)0 / 0xff))
+
+/* subtract 1 from each byte, AND with the inverse of the value, and check
+ * whether the MSB is set in each byte.
+ * i.e. ((0x00 - 0x01) & ~(0x00)) & 0x80
+ * (0xFF & 0xFF) & 0x80 = 0x80 (zero found)
+ */
+#define haszero(v) (((v) - pattern(0x01)) & ~(v) & pattern(0x80))
+
+/* When you XOR the pattern and there is a match, that byte will be zero */
+#define hasvalue(x, n) (haszero((x) ^ pattern(n)))
+
+uint32_t helper_cmpeqb(target_ulong ra, target_ulong rb)
+{
+ return hasvalue(rb, ra) ? 1 << CRF_GT : 0;
+}
+
+#undef pattern
+#undef haszero
+#undef hasvalue
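
As an aside, here is a minimal standalone sketch (not part of the patch; the harness is invented for illustration) of the byte-match trick used by helper_cmpeqb above: pattern() replicates a byte across the word, and hasvalue() reports whether any byte of rb equals that value.

    #include <stdint.h>
    #include <stdio.h>

    #define pattern(x)     (((x) & 0xffULL) * (~0ULL / 0xffULL))
    #define haszero(v)     (((v) - pattern(0x01)) & ~(v) & pattern(0x80))
    #define hasvalue(x, n) (haszero((x) ^ pattern(n)))

    int main(void)
    {
        uint64_t rb = 0x1122334455667788ULL;
        /* 0x66 is one of the bytes of rb, 0x99 is not */
        printf("%d %d\n", hasvalue(rb, 0x66) != 0, hasvalue(rb, 0x99) != 0);
        return 0;   /* prints "1 0" */
    }
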
+
target_ulong helper_cntlzd(target_ulong t)
{
return clz64(t);
}
+
+target_ulong helper_cnttzd(target_ulong t)
+{
+ return ctz64(t);
+}
+
+/* Return an invalid random number.
+ *
+ * FIXME: Add an RNG backend or other mechanism to get a cryptographically
+ * suitable random number.
+ */
+target_ulong helper_darn32(void)
+{
+ return -1;
+}
+
+target_ulong helper_darn64(void)
+{
+ return -1;
+}
+
#endif
#if defined(TARGET_PPC64)
@@ -479,6 +528,40 @@ void helper_vaddcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
}
}
+/* vprtybw */
+void helper_vprtybw(ppc_avr_t *r, ppc_avr_t *b)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
+ uint64_t res = b->u32[i] ^ (b->u32[i] >> 16);
+ res ^= res >> 8;
+ r->u32[i] = res & 1;
+ }
+}
+
+/* vprtybd */
+void helper_vprtybd(ppc_avr_t *r, ppc_avr_t *b)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
+ uint64_t res = b->u64[i] ^ (b->u64[i] >> 32);
+ res ^= res >> 16;
+ res ^= res >> 8;
+ r->u64[i] = res & 1;
+ }
+}
+
+/* vprtybq */
+void helper_vprtybq(ppc_avr_t *r, ppc_avr_t *b)
+{
+ uint64_t res = b->u64[0] ^ b->u64[1];
+ res ^= res >> 32;
+ res ^= res >> 16;
+ res ^= res >> 8;
+ r->u64[LO_IDX] = res & 1;
+ r->u64[HI_IDX] = 0;
+}
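
For reference, the XOR folds above compute the parity of the low-order bit of each byte (not of all bits). A standalone sketch of the 32-bit case (illustration only), using the same folding as helper_vprtybw:

    #include <stdint.h>
    #include <assert.h>

    /* Parity of the least-significant bit of each byte of a 32-bit lane,
     * folded exactly as helper_vprtybw() does. */
    static uint32_t prtyb32(uint32_t w)
    {
        uint32_t res = w ^ (w >> 16);
        res ^= res >> 8;
        return res & 1;
    }

    int main(void)
    {
        assert(prtyb32(0x01010101) == 0);  /* four set LSBs -> even parity */
        assert(prtyb32(0x01000000) == 1);  /* one set LSB   -> odd parity  */
        assert(prtyb32(0xFEFEFEFE) == 0);  /* byte LSBs all clear          */
        return 0;
    }
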
+
#define VARITH_DO(name, op, element) \
void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \
@@ -597,6 +680,30 @@ VAVG(w, s32, int64_t, u32, uint64_t)
#undef VAVG_DO
#undef VAVG
+#define VABSDU_DO(name, element) \
+void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ r->element[i] = (a->element[i] > b->element[i]) ? \
+ (a->element[i] - b->element[i]) : \
+ (b->element[i] - a->element[i]); \
+ } \
+}
+
+/* VABSDU - Vector absolute difference unsigned
+ * name - instruction mnemonic suffix (b: byte, h: halfword, w: word)
+ * element - element type to access from vector
+ */
+#define VABSDU(type, element) \
+ VABSDU_DO(absdu##type, element)
+VABSDU(b, u8)
+VABSDU(h, u16)
+VABSDU(w, u32)
+#undef VABSDU_DO
+#undef VABSDU
+
#define VCF(suffix, cvt, element) \
void helper_vcf##suffix(CPUPPCState *env, ppc_avr_t *r, \
ppc_avr_t *b, uint32_t uim) \
@@ -663,6 +770,49 @@ VCMP(gtsd, >, s64)
#undef VCMP_DO
#undef VCMP
+#define VCMPNE_DO(suffix, element, etype, cmpzero, record) \
+void helper_vcmpne##suffix(CPUPPCState *env, ppc_avr_t *r, \
+ ppc_avr_t *a, ppc_avr_t *b) \
+{ \
+ etype ones = (etype)-1; \
+ etype all = ones; \
+ etype result, none = 0; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ if (cmpzero) { \
+ result = ((a->element[i] == 0) \
+ || (b->element[i] == 0) \
+ || (a->element[i] != b->element[i]) ? \
+ ones : 0x0); \
+ } else { \
+ result = (a->element[i] != b->element[i]) ? ones : 0x0; \
+ } \
+ r->element[i] = result; \
+ all &= result; \
+ none |= result; \
+ } \
+ if (record) { \
+ env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
+ } \
+}
+
+/* VCMPNEZ - Vector compare not equal to zero
+ * suffix - instruction mnemonic suffix (b: byte, h: halfword, w: word)
+ * element - element type to access from vector
+ */
+#define VCMPNE(suffix, element, etype, cmpzero) \
+ VCMPNE_DO(suffix, element, etype, cmpzero, 0) \
+ VCMPNE_DO(suffix##_dot, element, etype, cmpzero, 1)
+VCMPNE(zb, u8, uint8_t, 1)
+VCMPNE(zh, u16, uint16_t, 1)
+VCMPNE(zw, u32, uint32_t, 1)
+VCMPNE(b, u8, uint8_t, 0)
+VCMPNE(h, u16, uint16_t, 0)
+VCMPNE(w, u32, uint32_t, 0)
+#undef VCMPNE_DO
+#undef VCMPNE
+
#define VCMPFP_DO(suffix, compare, order, record) \
void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r, \
ppc_avr_t *a, ppc_avr_t *b) \
@@ -766,6 +916,36 @@ VCT(uxs, cvtsduw, u32)
VCT(sxs, cvtsdsw, s32)
#undef VCT
+target_ulong helper_vclzlsbb(ppc_avr_t *r)
+{
+ target_ulong count = 0;
+ int i;
+ VECTOR_FOR_INORDER_I(i, u8) {
+ if (r->u8[i] & 0x01) {
+ break;
+ }
+ count++;
+ }
+ return count;
+}
+
+target_ulong helper_vctzlsbb(ppc_avr_t *r)
+{
+ target_ulong count = 0;
+ int i;
+#if defined(HOST_WORDS_BIGENDIAN)
+ for (i = ARRAY_SIZE(r->u8) - 1; i >= 0; i--) {
+#else
+ for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
+#endif
+ if (r->u8[i] & 0x01) {
+ break;
+ }
+ count++;
+ }
+ return count;
+}
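
Illustrative sketch (not from the patch): vclzlsbb counts how many leading bytes, walking the vector in element order, have bit 0 clear. An equivalent scalar loop over a plain byte array:

    #include <stdint.h>
    #include <assert.h>

    /* Count leading bytes whose low bit is clear, stopping at the first
     * byte with bit 0 set, as helper_vclzlsbb() does on the vector bytes. */
    static unsigned clzlsbb(const uint8_t *b, unsigned len)
    {
        unsigned count = 0;
        while (count < len && !(b[count] & 1)) {
            count++;
        }
        return count;
    }

    int main(void)
    {
        const uint8_t v[16] = { 2, 4, 6, 1 };   /* remaining bytes are 0 */
        assert(clzlsbb(v, 16) == 3);            /* first odd byte is v[3] */
        return 0;
    }
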
+
void helper_vmhaddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
ppc_avr_t *b, ppc_avr_t *c)
{
@@ -1034,14 +1214,57 @@ void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
*r = result;
}
+void helper_vpermr(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
+ ppc_avr_t *c)
+{
+ ppc_avr_t result;
+ int i;
+
+ VECTOR_FOR_INORDER_I(i, u8) {
+ int s = c->u8[i] & 0x1f;
+#if defined(HOST_WORDS_BIGENDIAN)
+ int index = 15 - (s & 0xf);
+#else
+ int index = s & 0xf;
+#endif
+
+ if (s & 0x10) {
+ result.u8[i] = a->u8[index];
+ } else {
+ result.u8[i] = b->u8[index];
+ }
+ }
+ *r = result;
+}
+
#if defined(HOST_WORDS_BIGENDIAN)
#define VBPERMQ_INDEX(avr, i) ((avr)->u8[(i)])
+#define VBPERMD_INDEX(i) (i)
#define VBPERMQ_DW(index) (((index) & 0x40) != 0)
+#define EXTRACT_BIT(avr, i, index) (extract64((avr)->u64[i], index, 1))
#else
#define VBPERMQ_INDEX(avr, i) ((avr)->u8[15-(i)])
+#define VBPERMD_INDEX(i) (1 - i)
#define VBPERMQ_DW(index) (((index) & 0x40) == 0)
+#define EXTRACT_BIT(avr, i, index) \
+ (extract64((avr)->u64[1 - i], 63 - index, 1))
#endif
+void helper_vbpermd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i, j;
+ ppc_avr_t result = { .u64 = { 0, 0 } };
+ VECTOR_FOR_INORDER_I(i, u64) {
+ for (j = 0; j < 8; j++) {
+ int index = VBPERMQ_INDEX(b, (i * 8) + j);
+ if (index < 64 && EXTRACT_BIT(a, i, index)) {
+ result.u64[VBPERMD_INDEX(i)] |= (0x80 >> j);
+ }
+ }
+ }
+ *r = result;
+}
+
void helper_vbpermq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
int i;
@@ -1529,6 +1752,34 @@ void helper_vrsqrtefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
}
}
+#define VRLMI(name, size, element, insert) \
+void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+{ \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ uint##size##_t src1 = a->element[i]; \
+ uint##size##_t src2 = b->element[i]; \
+ uint##size##_t src3 = r->element[i]; \
+ uint##size##_t begin, end, shift, mask, rot_val; \
+ \
+ shift = extract##size(src2, 0, 6); \
+ end = extract##size(src2, 8, 6); \
+ begin = extract##size(src2, 16, 6); \
+ rot_val = rol##size(src1, shift); \
+ mask = mask_u##size(begin, end); \
+ if (insert) { \
+ r->element[i] = (rot_val & mask) | (src3 & ~mask); \
+ } else { \
+ r->element[i] = (rot_val & mask); \
+ } \
+ } \
+}
+
+VRLMI(vrldmi, 64, u64, 1);
+VRLMI(vrlwmi, 32, u32, 1);
+VRLMI(vrldnm, 64, u64, 0);
+VRLMI(vrlwnm, 32, u32, 0);
+
void helper_vsel(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
ppc_avr_t *c)
{
@@ -1604,6 +1855,37 @@ VSL(w, u32, 0x1F)
VSL(d, u64, 0x3F)
#undef VSL
+void helper_vslv(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i;
+ unsigned int shift, bytes, size;
+
+ size = ARRAY_SIZE(r->u8);
+ for (i = 0; i < size; i++) {
+ shift = b->u8[i] & 0x7; /* extract shift value */
+ bytes = (a->u8[i] << 8) + /* extract adjacent bytes */
+ (((i + 1) < size) ? a->u8[i + 1] : 0);
+ r->u8[i] = (bytes << shift) >> 8; /* shift and store result */
+ }
+}
+
+void helper_vsrv(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i;
+ unsigned int shift, bytes;
+
+    /* Use reverse order, as the destination and source registers can be the
+     * same; the register is modified in place (saving a temporary), and
+     * reverse order guarantees that a computed result is not fed back.
+     */
+ for (i = ARRAY_SIZE(r->u8) - 1; i >= 0; i--) {
+ shift = b->u8[i] & 0x7; /* extract shift value */
+ bytes = ((i ? a->u8[i - 1] : 0) << 8) + a->u8[i];
+ /* extract adjacent bytes */
+ r->u8[i] = (bytes >> shift) & 0xFF; /* shift and store result */
+ }
+}
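
A small standalone sketch (illustration only) of one lane of the vsrv shift above: each result byte comes from the 16-bit window formed by the byte and its left-hand neighbour, shifted right by that byte's shift count.

    #include <stdint.h>
    #include <assert.h>

    /* One lane of vsrv: shift the 16-bit window (prev:cur) right by sh
     * (0..7) and keep the low byte, as in helper_vsrv() above. */
    static uint8_t vsrv_byte(uint8_t prev, uint8_t cur, unsigned sh)
    {
        unsigned bytes = ((unsigned)prev << 8) | cur;
        return (bytes >> sh) & 0xFF;
    }

    int main(void)
    {
        /* Shifting 0xAB right by 4 pulls the low nibble of the previous
         * byte (0xC) into the top nibble of the result. */
        assert(vsrv_byte(0x0C, 0xAB, 4) == 0xCA);
        assert(vsrv_byte(0x00, 0xAB, 0) == 0xAB);
        return 0;
    }
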
+
void helper_vsldoi(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
int sh = shift & 0xf;
@@ -1669,6 +1951,78 @@ VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED
+#if defined(HOST_WORDS_BIGENDIAN)
+#define VINSERT(suffix, element) \
+ void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
+ { \
+        memmove(&r->u8[index], &b->u8[8 - sizeof(r->element[0])],          \
+ sizeof(r->element[0])); \
+ }
+#else
+#define VINSERT(suffix, element) \
+ void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
+ { \
+ uint32_t d = (16 - index) - sizeof(r->element[0]); \
+ memmove(&r->u8[d], &b->u8[8], sizeof(r->element[0])); \
+ }
+#endif
+VINSERT(b, u8)
+VINSERT(h, u16)
+VINSERT(w, u32)
+VINSERT(d, u64)
+#undef VINSERT
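
Sketch of the intended data movement (illustration only, assuming the big-endian-host layout the macro above encodes): the element to insert is taken right-justified from the first doubleword of the source vector, so e.g. vinsertw with UIM=2 copies source bytes 4..7 into destination bytes 2..5.

    #include <stdint.h>
    #include <string.h>
    #include <assert.h>

    int main(void)
    {
        uint8_t dst[16] = { 0 };
        uint8_t src[16] = { 0, 1, 2, 3, 0xAA, 0xBB, 0xCC, 0xDD,
                            8, 9, 10, 11, 12, 13, 14, 15 };
        uint32_t uim = 2, es = 4;              /* vinsertw with UIM = 2 */

        /* the 4-byte element sits in bytes 8 - es .. 7 of the source */
        memmove(&dst[uim], &src[8 - es], es);
        assert(dst[2] == 0xAA && dst[5] == 0xDD);
        return 0;
    }
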
+#if defined(HOST_WORDS_BIGENDIAN)
+#define VEXTRACT(suffix, element) \
+ void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
+ { \
+ uint32_t es = sizeof(r->element[0]); \
+ memmove(&r->u8[8 - es], &b->u8[index], es); \
+ memset(&r->u8[8], 0, 8); \
+ memset(&r->u8[0], 0, 8 - es); \
+ }
+#else
+#define VEXTRACT(suffix, element) \
+ void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
+ { \
+ uint32_t es = sizeof(r->element[0]); \
+ uint32_t s = (16 - index) - es; \
+ memmove(&r->u8[8], &b->u8[s], es); \
+ memset(&r->u8[0], 0, 8); \
+ memset(&r->u8[8 + es], 0, 8 - es); \
+ }
+#endif
+VEXTRACT(ub, u8)
+VEXTRACT(uh, u16)
+VEXTRACT(uw, u32)
+VEXTRACT(d, u64)
+#undef VEXTRACT
+
+#define VEXT_SIGNED(name, element, mask, cast, recast) \
+void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \
+{ \
+ int i; \
+ VECTOR_FOR_INORDER_I(i, element) { \
+ r->element[i] = (recast)((cast)(b->element[i] & mask)); \
+ } \
+}
+VEXT_SIGNED(vextsb2w, s32, UINT8_MAX, int8_t, int32_t)
+VEXT_SIGNED(vextsb2d, s64, UINT8_MAX, int8_t, int64_t)
+VEXT_SIGNED(vextsh2w, s32, UINT16_MAX, int16_t, int32_t)
+VEXT_SIGNED(vextsh2d, s64, UINT16_MAX, int16_t, int64_t)
+VEXT_SIGNED(vextsw2d, s64, UINT32_MAX, int32_t, int64_t)
+#undef VEXT_SIGNED
+
+#define VNEG(name, element) \
+void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \
+{ \
+ int i; \
+ VECTOR_FOR_INORDER_I(i, element) { \
+ r->element[i] = -b->element[i]; \
+ } \
+}
+VNEG(vnegw, s32)
+VNEG(vnegd, s64)
+#undef VNEG
#define VSPLTI(suffix, element, splat_type) \
void helper_vspltis##suffix(ppc_avr_t *r, uint32_t splat) \
@@ -1915,6 +2269,21 @@ VGENERIC_DO(clzd, u64)
#undef clzw
#undef clzd
+#define ctzb(v) ((v) ? ctz32(v) : 8)
+#define ctzh(v) ((v) ? ctz32(v) : 16)
+#define ctzw(v) ctz32((v))
+#define ctzd(v) ctz64((v))
+
+VGENERIC_DO(ctzb, u8)
+VGENERIC_DO(ctzh, u16)
+VGENERIC_DO(ctzw, u32)
+VGENERIC_DO(ctzd, u64)
+
+#undef ctzb
+#undef ctzh
+#undef ctzw
+#undef ctzd
+
#define popcntb(v) ctpop8(v)
#define popcnth(v) ctpop16(v)
#define popcntw(v) ctpop32(v)
@@ -2123,6 +2492,8 @@ void helper_vsubecuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
#define BCD_NEG_PREF 0xD
#define BCD_NEG_ALT 0xB
#define BCD_PLUS_ALT_2 0xE
+#define NATIONAL_PLUS 0x2B
+#define NATIONAL_NEG 0x2D
#if defined(HOST_WORDS_BIGENDIAN)
#define BCD_DIG_BYTE(n) (15 - (n/2))
@@ -2189,6 +2560,33 @@ static void bcd_put_digit(ppc_avr_t *bcd, uint8_t digit, int n)
}
}
+static int bcd_cmp_zero(ppc_avr_t *bcd)
+{
+ if (bcd->u64[HI_IDX] == 0 && (bcd->u64[LO_IDX] >> 4) == 0) {
+ return 1 << CRF_EQ;
+ } else {
+ return (bcd_get_sgn(bcd) == 1) ? 1 << CRF_GT : 1 << CRF_LT;
+ }
+}
+
+static uint16_t get_national_digit(ppc_avr_t *reg, int n)
+{
+#if defined(HOST_WORDS_BIGENDIAN)
+ return reg->u16[7 - n];
+#else
+ return reg->u16[n];
+#endif
+}
+
+static void set_national_digit(ppc_avr_t *reg, uint8_t val, int n)
+{
+#if defined(HOST_WORDS_BIGENDIAN)
+ reg->u16[7 - n] = val;
+#else
+ reg->u16[n] = val;
+#endif
+}
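
For orientation (not part of the patch), the national decimal layout consumed and produced by the bcdcfn/bcdctn helpers below stores one character per halfword, most significant digit first, with the sign character in the rightmost halfword, so at most seven digits fit:

    #include <stdint.h>

    /* "+1234567" in national decimal form: digits are 0x0030..0x0039,
     * the sign is 0x002B ('+') or 0x002D ('-') in the last halfword. */
    static const uint16_t national_plus_1234567[8] = {
        0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x002B
    };

    int main(void)
    {
        /* every digit halfword must be 0x0030..0x0039, as bcdcfn checks */
        for (int i = 0; i < 7; i++) {
            if (national_plus_1234567[i] < 0x0030 ||
                national_plus_1234567[i] > 0x0039) {
                return 1;
            }
        }
        return 0;
    }
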
+
static int bcd_cmp_mag(ppc_avr_t *a, ppc_avr_t *b)
{
int i;
@@ -2319,6 +2717,163 @@ uint32_t helper_bcdsub(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
return helper_bcdadd(r, a, &bcopy, ps);
}
+uint32_t helper_bcdcfn(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
+{
+ int i;
+ int cr = 0;
+ uint16_t national = 0;
+ uint16_t sgnb = get_national_digit(b, 0);
+ ppc_avr_t ret = { .u64 = { 0, 0 } };
+ int invalid = (sgnb != NATIONAL_PLUS && sgnb != NATIONAL_NEG);
+
+ for (i = 1; i < 8; i++) {
+ national = get_national_digit(b, i);
+ if (unlikely(national < 0x30 || national > 0x39)) {
+ invalid = 1;
+ break;
+ }
+
+ bcd_put_digit(&ret, national & 0xf, i);
+ }
+
+ if (sgnb == NATIONAL_PLUS) {
+ bcd_put_digit(&ret, (ps == 0) ? BCD_PLUS_PREF_1 : BCD_PLUS_PREF_2, 0);
+ } else {
+ bcd_put_digit(&ret, BCD_NEG_PREF, 0);
+ }
+
+ cr = bcd_cmp_zero(&ret);
+
+ if (unlikely(invalid)) {
+ cr = 1 << CRF_SO;
+ }
+
+ *r = ret;
+
+ return cr;
+}
+
+uint32_t helper_bcdctn(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
+{
+ int i;
+ int cr = 0;
+ int sgnb = bcd_get_sgn(b);
+ int invalid = (sgnb == 0);
+ ppc_avr_t ret = { .u64 = { 0, 0 } };
+
+ int ox_flag = (b->u64[HI_IDX] != 0) || ((b->u64[LO_IDX] >> 32) != 0);
+
+ for (i = 1; i < 8; i++) {
+ set_national_digit(&ret, 0x30 + bcd_get_digit(b, i, &invalid), i);
+
+ if (unlikely(invalid)) {
+ break;
+ }
+ }
+ set_national_digit(&ret, (sgnb == -1) ? NATIONAL_NEG : NATIONAL_PLUS, 0);
+
+ cr = bcd_cmp_zero(b);
+
+ if (ox_flag) {
+ cr |= 1 << CRF_SO;
+ }
+
+ if (unlikely(invalid)) {
+ cr = 1 << CRF_SO;
+ }
+
+ *r = ret;
+
+ return cr;
+}
+
+uint32_t helper_bcdcfz(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
+{
+ int i;
+ int cr = 0;
+ int invalid = 0;
+ int zone_digit = 0;
+ int zone_lead = ps ? 0xF : 0x3;
+ int digit = 0;
+ ppc_avr_t ret = { .u64 = { 0, 0 } };
+ int sgnb = b->u8[BCD_DIG_BYTE(0)] >> 4;
+
+ if (unlikely((sgnb < 0xA) && ps)) {
+ invalid = 1;
+ }
+
+ for (i = 0; i < 16; i++) {
+ zone_digit = (i * 2) ? b->u8[BCD_DIG_BYTE(i * 2)] >> 4 : zone_lead;
+ digit = b->u8[BCD_DIG_BYTE(i * 2)] & 0xF;
+ if (unlikely(zone_digit != zone_lead || digit > 0x9)) {
+ invalid = 1;
+ break;
+ }
+
+ bcd_put_digit(&ret, digit, i + 1);
+ }
+
+ if ((ps && (sgnb == 0xB || sgnb == 0xD)) ||
+ (!ps && (sgnb & 0x4))) {
+ bcd_put_digit(&ret, BCD_NEG_PREF, 0);
+ } else {
+ bcd_put_digit(&ret, BCD_PLUS_PREF_1, 0);
+ }
+
+ cr = bcd_cmp_zero(&ret);
+
+ if (unlikely(invalid)) {
+ cr = 1 << CRF_SO;
+ }
+
+ *r = ret;
+
+ return cr;
+}
+
+uint32_t helper_bcdctz(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
+{
+ int i;
+ int cr = 0;
+ uint8_t digit = 0;
+ int sgnb = bcd_get_sgn(b);
+ int zone_lead = (ps) ? 0xF0 : 0x30;
+ int invalid = (sgnb == 0);
+ ppc_avr_t ret = { .u64 = { 0, 0 } };
+
+ int ox_flag = ((b->u64[HI_IDX] >> 4) != 0);
+
+ for (i = 0; i < 16; i++) {
+ digit = bcd_get_digit(b, i + 1, &invalid);
+
+ if (unlikely(invalid)) {
+ break;
+ }
+
+ ret.u8[BCD_DIG_BYTE(i * 2)] = zone_lead + digit;
+ }
+
+ if (ps) {
+ bcd_put_digit(&ret, (sgnb == 1) ? 0xC : 0xD, 1);
+ } else {
+ bcd_put_digit(&ret, (sgnb == 1) ? 0x3 : 0x7, 1);
+ }
+
+ cr = bcd_cmp_zero(b);
+
+ if (ox_flag) {
+ cr |= 1 << CRF_SO;
+ }
+
+ if (unlikely(invalid)) {
+ cr = 1 << CRF_SO;
+ }
+
+ *r = ret;
+
+ return cr;
+}
+
void helper_vsbox(ppc_avr_t *r, ppc_avr_t *a)
{
int i;
diff --git a/target-ppc/internal.h b/target-ppc/internal.h
new file mode 100644
index 0000000000..1ff4896c45
--- /dev/null
+++ b/target-ppc/internal.h
@@ -0,0 +1,50 @@
+/*
+ * PowerPC internal definitions for QEMU.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef PPC_INTERNAL_H
+#define PPC_INTERNAL_H
+
+#define FUNC_MASK(name, ret_type, size, max_val) \
+static inline ret_type name(uint##size##_t start, \
+ uint##size##_t end) \
+{ \
+ ret_type ret, max_bit = size - 1; \
+ \
+ if (likely(start == 0)) { \
+ ret = max_val << (max_bit - end); \
+ } else if (likely(end == max_bit)) { \
+ ret = max_val >> start; \
+ } else { \
+ ret = (((uint##size##_t)(-1ULL)) >> (start)) ^ \
+ (((uint##size##_t)(-1ULL) >> (end)) >> 1); \
+ if (unlikely(start > end)) { \
+ return ~ret; \
+ } \
+ } \
+ \
+ return ret; \
+}
+
+#if defined(TARGET_PPC64)
+FUNC_MASK(MASK, target_ulong, 64, UINT64_MAX);
+#else
+FUNC_MASK(MASK, target_ulong, 32, UINT32_MAX);
+#endif
+FUNC_MASK(mask_u32, uint32_t, 32, UINT32_MAX);
+FUNC_MASK(mask_u64, uint64_t, 64, UINT64_MAX);
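
Worked example (illustration only, a standalone copy of the 32-bit generator above): the masks use PowerPC bit numbering, where bit 0 is the most significant bit, and wrap around when start > end, as rotate-and-mask instructions require.

    #include <stdint.h>
    #include <assert.h>

    static uint32_t mask_u32(uint32_t start, uint32_t end)
    {
        uint32_t ret, max_bit = 31;

        if (start == 0) {
            ret = UINT32_MAX << (max_bit - end);
        } else if (end == max_bit) {
            ret = UINT32_MAX >> start;
        } else {
            ret = (UINT32_MAX >> start) ^ ((UINT32_MAX >> end) >> 1);
            if (start > end) {
                return ~ret;
            }
        }
        return ret;
    }

    int main(void)
    {
        assert(mask_u32(0, 7)  == 0xFF000000);  /* bits 0..7, MSB first */
        assert(mask_u32(8, 15) == 0x00FF0000);
        assert(mask_u32(28, 3) == 0xF000000F);  /* start > end: wraps   */
        return 0;
    }
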
+
+#endif /* PPC_INTERNAL_H */
diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
index dcb68b9081..9c4834c4fc 100644
--- a/target-ppc/kvm.c
+++ b/target-ppc/kvm.c
@@ -36,6 +36,7 @@
#include "hw/sysbus.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
+#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/ppc.h"
#include "sysemu/watchdog.h"
#include "trace.h"
@@ -79,6 +80,7 @@ static int cap_ppc_watchdog;
static int cap_papr;
static int cap_htab_fd;
static int cap_fixup_hcalls;
+static int cap_htm; /* Hardware transactional memory support */
static uint32_t debug_inst_opcode;
@@ -100,6 +102,16 @@ static void kvm_kick_cpu(void *opaque)
qemu_cpu_kick(CPU(cpu));
}
+/* Check whether we are running with KVM-PR (instead of KVM-HV). This
+ * should only be used for fallback tests - generally we should use
+ * explicit capabilities for the features we want, rather than
+ * assuming what is/isn't available depending on the KVM variant. */
+static bool kvmppc_is_pr(KVMState *ks)
+{
+ /* Assume KVM-PR if the GET_PVINFO capability is available */
+ return kvm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
+}
+
static int kvm_ppc_register_host_cpu_type(void);
int kvm_arch_init(MachineState *ms, KVMState *s)
@@ -121,6 +133,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
* only activated after this by kvmppc_set_papr() */
cap_htab_fd = kvm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
+ cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);
if (!cap_interrupt_level) {
fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
@@ -220,10 +233,9 @@ static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
*
* For that to work we make a few assumptions:
*
- * - If KVM_CAP_PPC_GET_PVINFO is supported we are running "PR"
- * KVM which only supports 4K and 16M pages, but supports them
- * regardless of the backing store characteritics. We also don't
- * support 1T segments.
+ * - Check whether we are running "PR" KVM which only supports 4K
+ * and 16M pages, but supports them regardless of the backing
+ *   store characteristics. We also don't support 1T segments.
*
* This is safe as if HV KVM ever supports that capability or PR
* KVM grows supports for more page/segment sizes, those versions
@@ -238,7 +250,7 @@ static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
* implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit
* this fallback.
*/
- if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO)) {
+ if (kvmppc_is_pr(cs->kvm_state)) {
/* No flags */
info->flags = 0;
info->slb_size = 64;
@@ -427,6 +439,7 @@ static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
CPUPPCState *env = &cpu->env;
long rampagesize;
int iq, ik, jq, jk;
+ bool has_64k_pages = false;
/* We only handle page sizes for 64-bit server guests for now */
if (!(env->mmu_model & POWERPC_MMU_64)) {
@@ -470,6 +483,9 @@ static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
ksps->enc[jk].page_shift)) {
continue;
}
+ if (ksps->enc[jk].page_shift == 16) {
+ has_64k_pages = true;
+ }
qsps->enc[jq].page_shift = ksps->enc[jk].page_shift;
qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc;
if (++jq >= PPC_PAGE_SIZES_MAX_SZ) {
@@ -484,6 +500,9 @@ static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
if (!(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
env->mmu_model &= ~POWERPC_MMU_1TSEG;
}
+ if (!has_64k_pages) {
+ env->mmu_model &= ~POWERPC_MMU_64K;
+ }
}
#else /* defined (TARGET_PPC64) */
@@ -551,11 +570,18 @@ int kvm_arch_init_vcpu(CPUState *cs)
idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kvm_kick_cpu, cpu);
- /* Some targets support access to KVM's guest TLB. */
switch (cenv->mmu_model) {
case POWERPC_MMU_BOOKE206:
+ /* This target supports access to KVM's guest TLB */
ret = kvm_booke206_tlb_init(cpu);
break;
+ case POWERPC_MMU_2_07:
+ if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
+            /* KVM-HV has transactional memory on POWER8 even without the
+             * KVM_CAP_PPC_HTM extension, so enable it here instead. */
+ cap_htm = true;
+ }
+ break;
default:
break;
}
@@ -2055,6 +2081,12 @@ void kvmppc_enable_set_mode_hcall(void)
kvmppc_enable_hcall(kvm_state, H_SET_MODE);
}
+void kvmppc_enable_clear_ref_mod_hcalls(void)
+{
+ kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
+ kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
+}
+
void kvmppc_set_papr(PowerPCCPU *cpu)
{
CPUState *cs = CPU(cpu);
@@ -2254,11 +2286,8 @@ int kvmppc_reset_htab(int shift_hint)
/* We have a kernel that predates the htab reset calls. For PR
* KVM, we need to allocate the htab ourselves, for an HV KVM of
- * this era, it has allocated a 16MB fixed size hash table
- * already. Kernels of this era have the GET_PVINFO capability
- * only on PR, so we use this hack to determine the right
- * answer */
- if (kvm_check_extension(kvm_state, KVM_CAP_PPC_GET_PVINFO)) {
+ * this era, it has allocated a 16MB fixed size hash table already. */
+ if (kvmppc_is_pr(kvm_state)) {
/* PR - tell caller to allocate htab */
return 0;
} else {
@@ -2339,6 +2368,11 @@ bool kvmppc_has_cap_fixup_hcalls(void)
return cap_fixup_hcalls;
}
+bool kvmppc_has_cap_htm(void)
+{
+ return cap_htm;
+}
+
static PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc)
{
ObjectClass *oc = OBJECT_CLASS(pcc);
@@ -2364,19 +2398,6 @@ PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
return pvr_pcc;
}
-#if defined(TARGET_PPC64)
-static void spapr_cpu_core_host_initfn(Object *obj)
-{
- sPAPRCPUCore *core = SPAPR_CPU_CORE(obj);
- char *name = g_strdup_printf("%s-" TYPE_POWERPC_CPU, "host");
- ObjectClass *oc = object_class_by_name(name);
-
- g_assert(oc);
- g_free((void *)name);
- core->cpu_class = oc;
-}
-#endif
-
static int kvm_ppc_register_host_cpu_type(void)
{
TypeInfo type_info = {
@@ -2404,14 +2425,16 @@ static int kvm_ppc_register_host_cpu_type(void)
#if defined(TARGET_PPC64)
type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, "host");
type_info.parent = TYPE_SPAPR_CPU_CORE,
- type_info.instance_size = sizeof(sPAPRCPUCore),
- type_info.instance_init = spapr_cpu_core_host_initfn,
- type_info.class_init = NULL;
+ type_info.instance_size = sizeof(sPAPRCPUCore);
+ type_info.instance_init = NULL;
+ type_info.class_init = spapr_cpu_core_class_init;
+ type_info.class_data = (void *) "host";
type_register(&type_info);
g_free((void *)type_info.name);
/* Register generic spapr CPU family class for current host CPU type */
type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, dc->desc);
+ type_info.class_data = (void *) dc->desc;
type_register(&type_info);
g_free((void *)type_info.name);
#endif
diff --git a/target-ppc/kvm_ppc.h b/target-ppc/kvm_ppc.h
index 5461d1082c..bd1d78bfbe 100644
--- a/target-ppc/kvm_ppc.h
+++ b/target-ppc/kvm_ppc.h
@@ -24,6 +24,7 @@ int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len);
int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level);
void kvmppc_enable_logical_ci_hcalls(void);
void kvmppc_enable_set_mode_hcall(void);
+void kvmppc_enable_clear_ref_mod_hcalls(void);
void kvmppc_set_papr(PowerPCCPU *cpu);
int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version);
void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy);
@@ -54,6 +55,7 @@ void kvmppc_hash64_free_pteg(uint64_t token);
void kvmppc_hash64_write_pte(CPUPPCState *env, target_ulong pte_index,
target_ulong pte0, target_ulong pte1);
bool kvmppc_has_cap_fixup_hcalls(void);
+bool kvmppc_has_cap_htm(void);
int kvmppc_enable_hwrng(void);
int kvmppc_put_books_sregs(PowerPCCPU *cpu);
PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void);
@@ -113,6 +115,10 @@ static inline void kvmppc_enable_set_mode_hcall(void)
{
}
+static inline void kvmppc_enable_clear_ref_mod_hcalls(void)
+{
+}
+
static inline void kvmppc_set_papr(PowerPCCPU *cpu)
{
}
@@ -244,6 +250,11 @@ static inline bool kvmppc_has_cap_fixup_hcalls(void)
abort();
}
+static inline bool kvmppc_has_cap_htm(void)
+{
+ return false;
+}
+
static inline int kvmppc_enable_hwrng(void)
{
return -1;
diff --git a/target-ppc/machine.c b/target-ppc/machine.c
index 4820f22377..18c16d2512 100644
--- a/target-ppc/machine.c
+++ b/target-ppc/machine.c
@@ -8,7 +8,6 @@
#include "helper_regs.h"
#include "mmu-hash64.h"
#include "migration/cpu.h"
-#include "exec/exec-all.h"
static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
{
@@ -136,11 +135,33 @@ static const VMStateInfo vmstate_info_avr = {
#define VMSTATE_AVR_ARRAY(_f, _s, _n) \
VMSTATE_AVR_ARRAY_V(_f, _s, _n, 0)
+static bool cpu_pre_2_8_migration(void *opaque, int version_id)
+{
+ PowerPCCPU *cpu = opaque;
+
+ return cpu->pre_2_8_migration;
+}
+
static void cpu_pre_save(void *opaque)
{
PowerPCCPU *cpu = opaque;
CPUPPCState *env = &cpu->env;
int i;
+ uint64_t insns_compat_mask =
+ PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB
+ | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES
+ | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES
+ | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT
+ | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ
+ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC
+ | PPC_64B | PPC_64BX | PPC_ALTIVEC
+ | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD;
+ uint64_t insns_compat_mask2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX
+ | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206
+ | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206
+ | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207
+ | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207
+ | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM;
env->spr[SPR_LR] = env->lr;
env->spr[SPR_CTR] = env->ctr;
@@ -162,6 +183,14 @@ static void cpu_pre_save(void *opaque)
env->spr[SPR_IBAT4U + 2*i] = env->IBAT[0][i+4];
env->spr[SPR_IBAT4U + 2*i + 1] = env->IBAT[1][i+4];
}
+
+ /* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */
+ if (cpu->pre_2_8_migration) {
+ cpu->mig_msr_mask = env->msr_mask;
+ cpu->mig_insns_flags = env->insns_flags & insns_compat_mask;
+ cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2;
+ cpu->mig_nb_BATs = env->nb_BATs;
+ }
}
static int cpu_post_load(void *opaque, int version_id)
@@ -562,10 +591,11 @@ const VMStateDescription vmstate_ppc_cpu = {
/* FIXME: access_type? */
/* Sanity checking */
- VMSTATE_UINTTL_EQUAL(env.msr_mask, PowerPCCPU),
- VMSTATE_UINT64_EQUAL(env.insns_flags, PowerPCCPU),
- VMSTATE_UINT64_EQUAL(env.insns_flags2, PowerPCCPU),
- VMSTATE_UINT32_EQUAL(env.nb_BATs, PowerPCCPU),
+ VMSTATE_UINTTL_TEST(mig_msr_mask, PowerPCCPU, cpu_pre_2_8_migration),
+ VMSTATE_UINT64_TEST(mig_insns_flags, PowerPCCPU, cpu_pre_2_8_migration),
+ VMSTATE_UINT64_TEST(mig_insns_flags2, PowerPCCPU,
+ cpu_pre_2_8_migration),
+ VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration),
VMSTATE_END_OF_LIST()
},
.subsections = (const VMStateDescription*[]) {
diff --git a/target-ppc/mem_helper.c b/target-ppc/mem_helper.c
index e4ed3773e8..1ab8a6eab4 100644
--- a/target-ppc/mem_helper.c
+++ b/target-ppc/mem_helper.c
@@ -23,7 +23,6 @@
#include "exec/helper-proto.h"
#include "helper_regs.h"
-#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
//#define DEBUG_OP
@@ -57,9 +56,9 @@ void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
for (; reg < 32; reg++) {
if (needs_byteswap(env)) {
- env->gpr[reg] = bswap32(cpu_ldl_data(env, addr));
+ env->gpr[reg] = bswap32(cpu_ldl_data_ra(env, addr, GETPC()));
} else {
- env->gpr[reg] = cpu_ldl_data(env, addr);
+ env->gpr[reg] = cpu_ldl_data_ra(env, addr, GETPC());
}
addr = addr_add(env, addr, 4);
}
@@ -69,31 +68,39 @@ void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
for (; reg < 32; reg++) {
if (needs_byteswap(env)) {
- cpu_stl_data(env, addr, bswap32((uint32_t)env->gpr[reg]));
+ cpu_stl_data_ra(env, addr, bswap32((uint32_t)env->gpr[reg]),
+ GETPC());
} else {
- cpu_stl_data(env, addr, (uint32_t)env->gpr[reg]);
+ cpu_stl_data_ra(env, addr, (uint32_t)env->gpr[reg], GETPC());
}
addr = addr_add(env, addr, 4);
}
}
-void helper_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, uint32_t reg)
+static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
+ uint32_t reg, uintptr_t raddr)
{
int sh;
for (; nb > 3; nb -= 4) {
- env->gpr[reg] = cpu_ldl_data(env, addr);
+ env->gpr[reg] = cpu_ldl_data_ra(env, addr, raddr);
reg = (reg + 1) % 32;
addr = addr_add(env, addr, 4);
}
if (unlikely(nb > 0)) {
env->gpr[reg] = 0;
for (sh = 24; nb > 0; nb--, sh -= 8) {
- env->gpr[reg] |= cpu_ldub_data(env, addr) << sh;
+ env->gpr[reg] |= cpu_ldub_data_ra(env, addr, raddr) << sh;
addr = addr_add(env, addr, 1);
}
}
}
+
+void helper_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, uint32_t reg)
+{
+ do_lsw(env, addr, nb, reg, GETPC());
+}
+
/* PPC32 specification says we must generate an exception if
* rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
@@ -106,12 +113,11 @@ void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
int num_used_regs = (xer_bc + 3) / 4;
if (unlikely((ra != 0 && lsw_reg_in_range(reg, num_used_regs, ra)) ||
lsw_reg_in_range(reg, num_used_regs, rb))) {
- env->nip += 4; /* Compensate the "nip - 4" from gen_lswx() */
- helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_INVAL |
- POWERPC_EXCP_INVAL_LSWX);
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_INVAL_LSWX, GETPC());
} else {
- helper_lsw(env, addr, xer_bc, reg);
+ do_lsw(env, addr, xer_bc, reg, GETPC());
}
}
}
@@ -122,46 +128,51 @@ void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
int sh;
for (; nb > 3; nb -= 4) {
- cpu_stl_data(env, addr, env->gpr[reg]);
+ cpu_stl_data_ra(env, addr, env->gpr[reg], GETPC());
reg = (reg + 1) % 32;
addr = addr_add(env, addr, 4);
}
if (unlikely(nb > 0)) {
for (sh = 24; nb > 0; nb--, sh -= 8) {
- cpu_stb_data(env, addr, (env->gpr[reg] >> sh) & 0xFF);
+ cpu_stb_data_ra(env, addr, (env->gpr[reg] >> sh) & 0xFF, GETPC());
addr = addr_add(env, addr, 1);
}
}
}
-static void do_dcbz(CPUPPCState *env, target_ulong addr, int dcache_line_size)
+void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
- int i;
-
- addr &= ~(dcache_line_size - 1);
- for (i = 0; i < dcache_line_size; i += 4) {
- cpu_stl_data(env, addr + i, 0);
- }
- if (env->reserve_addr == addr) {
- env->reserve_addr = (target_ulong)-1ULL;
- }
-}
-
-void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t is_dcbzl)
-{
- int dcbz_size = env->dcache_line_size;
+ target_ulong mask, dcbz_size = env->dcache_line_size;
+ uint32_t i;
+ void *haddr;
#if defined(TARGET_PPC64)
- if (!is_dcbzl &&
- (env->excp_model == POWERPC_EXCP_970) &&
- ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
+ /* Check for dcbz vs dcbzl on 970 */
+ if (env->excp_model == POWERPC_EXCP_970 &&
+ !(opcode & 0x00200000) && ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
dcbz_size = 32;
}
#endif
- /* XXX add e500mc support */
+ /* Align address */
+ mask = ~(dcbz_size - 1);
+ addr &= mask;
- do_dcbz(env, addr, dcbz_size);
+ /* Check reservation */
+ if ((env->reserve_addr & mask) == (addr & mask)) {
+ env->reserve_addr = (target_ulong)-1ULL;
+ }
+
+ /* Try fast path translate */
+ haddr = tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, env->dmmu_idx);
+ if (haddr) {
+ memset(haddr, 0, dcbz_size);
+ } else {
+ /* Slow path */
+ for (i = 0; i < dcbz_size; i += 8) {
+ cpu_stq_data_ra(env, addr + i, 0, GETPC());
+ }
+ }
}
void helper_icbi(CPUPPCState *env, target_ulong addr)
@@ -172,7 +183,7 @@ void helper_icbi(CPUPPCState *env, target_ulong addr)
* (not a fetch) by the MMU. To be sure it will be so,
* do the load "by hand".
*/
- cpu_ldl_data(env, addr);
+ cpu_ldl_data_ra(env, addr, GETPC());
}
/* XXX: to be tested */
@@ -183,7 +194,7 @@ target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
d = 24;
for (i = 0; i < xer_bc; i++) {
- c = cpu_ldub_data(env, addr);
+ c = cpu_ldub_data_ra(env, addr, GETPC());
addr = addr_add(env, addr, 1);
/* ra (if not 0) and rb are never modified */
if (likely(reg != rb && (ra == 0 || reg != ra))) {
diff --git a/target-ppc/misc_helper.c b/target-ppc/misc_helper.c
index cb5ebf56cf..1e6e705a4e 100644
--- a/target-ppc/misc_helper.c
+++ b/target-ppc/misc_helper.c
@@ -39,7 +39,8 @@ void helper_store_dump_spr(CPUPPCState *env, uint32_t sprn)
#ifdef TARGET_PPC64
static void raise_fu_exception(CPUPPCState *env, uint32_t bit,
- uint32_t sprn, uint32_t cause)
+ uint32_t sprn, uint32_t cause,
+ uintptr_t raddr)
{
qemu_log("Facility SPR %d is unavailable (SPR FSCR:%d)\n", sprn, bit);
@@ -47,7 +48,7 @@ static void raise_fu_exception(CPUPPCState *env, uint32_t bit,
cause &= FSCR_IC_MASK;
env->spr[SPR_FSCR] |= (target_ulong)cause << FSCR_IC_POS;
- helper_raise_exception_err(env, POWERPC_EXCP_FU, 0);
+ raise_exception_err_ra(env, POWERPC_EXCP_FU, 0, raddr);
}
#endif
@@ -59,7 +60,7 @@ void helper_fscr_facility_check(CPUPPCState *env, uint32_t bit,
/* Facility is enabled, continue */
return;
}
- raise_fu_exception(env, bit, sprn, cause);
+ raise_fu_exception(env, bit, sprn, cause, GETPC());
#endif
}
@@ -71,7 +72,7 @@ void helper_msr_facility_check(CPUPPCState *env, uint32_t bit,
/* Facility is enabled, continue */
return;
}
- raise_fu_exception(env, bit, sprn, cause);
+ raise_fu_exception(env, bit, sprn, cause, GETPC());
#endif
}
diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index 5de1358d1c..fdb7a787bf 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -110,7 +110,7 @@ void helper_slbia(CPUPPCState *env)
* and we still don't have a tlb_flush_mask(env, n, mask)
* in QEMU, we just invalidate all TLBs
*/
- env->tlb_need_flush = 1;
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
}
}
}
@@ -132,7 +132,7 @@ void helper_slbie(CPUPPCState *env, target_ulong addr)
* and we still don't have a tlb_flush_mask(env, n, mask)
* in QEMU, we just invalidate all TLBs
*/
- env->tlb_need_flush = 1;
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
}
}
@@ -241,8 +241,8 @@ void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
PowerPCCPU *cpu = ppc_env_get_cpu(env);
if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
- helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_INVAL);
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL, GETPC());
}
}
@@ -252,8 +252,8 @@ target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
target_ulong rt = 0;
if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
- helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_INVAL);
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL, GETPC());
}
return rt;
}
@@ -264,8 +264,8 @@ target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb)
target_ulong rt = 0;
if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
- helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_INVAL);
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL, GETPC());
}
return rt;
}
@@ -276,8 +276,8 @@ target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
target_ulong rt = 0;
if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
- helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_INVAL);
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL, GETPC());
}
return rt;
}
@@ -912,7 +912,7 @@ void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
* invalidate, and we still don't have a tlb_flush_mask(env, n,
* mask) in QEMU, we just invalidate all TLBs
*/
- tlb_flush(CPU(cpu), 1);
+ cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
}
void ppc_hash64_update_rmls(CPUPPCState *env)
diff --git a/target-ppc/mmu-hash64.h b/target-ppc/mmu-hash64.h
index db265e30b2..ab5d347a53 100644
--- a/target-ppc/mmu-hash64.h
+++ b/target-ppc/mmu-hash64.h
@@ -4,7 +4,6 @@
#ifndef CONFIG_USER_ONLY
#ifdef TARGET_PPC64
-void ppc_hash64_check_page_sizes(PowerPCCPU *cpu, Error **errp);
void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu);
int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
target_ulong esid, target_ulong vsid);
diff --git a/target-ppc/mmu_helper.c b/target-ppc/mmu_helper.c
index 3eb3cd78e2..d09fc0a85f 100644
--- a/target-ppc/mmu_helper.c
+++ b/target-ppc/mmu_helper.c
@@ -1941,7 +1941,7 @@ void ppc_tlb_invalidate_all(CPUPPCState *env)
break;
default:
/* XXX: TODO */
- cpu_abort(CPU(cpu), "Unknown MMU model\n");
+ cpu_abort(CPU(cpu), "Unknown MMU model %d\n", env->mmu_model);
break;
}
}
@@ -1965,7 +1965,7 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
* we just mark the TLB to be flushed later (context synchronizing
* event or sync instruction on 32-bit).
*/
- env->tlb_need_flush = 1;
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
break;
#if defined(TARGET_PPC64)
case POWERPC_MMU_64B:
@@ -1979,7 +1979,7 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
* and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
* we just invalidate all TLBs
*/
- env->tlb_need_flush = 1;
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
break;
#endif /* defined(TARGET_PPC64) */
default:
@@ -2065,7 +2065,7 @@ void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
}
}
#else
- env->tlb_need_flush = 1;
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
#endif
}
}
@@ -2598,9 +2598,9 @@ void helper_booke206_tlbwe(CPUPPCState *env)
tlb = booke206_cur_tlb(env);
if (!tlb) {
- helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_INVAL |
- POWERPC_EXCP_INVAL_INVAL);
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_INVAL_INVAL, GETPC());
}
/* check that we support the targeted size */
@@ -2608,9 +2608,9 @@ void helper_booke206_tlbwe(CPUPPCState *env)
size_ps = booke206_tlbnps(env, tlbn);
if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
!(size_ps & (1 << size_tlb))) {
- helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_INVAL |
- POWERPC_EXCP_INVAL_INVAL);
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_INVAL_INVAL, GETPC());
}
if (msr_gs) {
@@ -2757,7 +2757,7 @@ static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ CPUState *cs;
if (address & 0x4) {
/* flush all entries */
@@ -2774,11 +2774,15 @@ void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
if (address & 0x8) {
/* flush TLB1 entries */
booke206_invalidate_ea_tlb(env, 1, address);
- tlb_flush(CPU(cpu), 1);
+ CPU_FOREACH(cs) {
+ tlb_flush(cs, 1);
+ }
} else {
/* flush TLB0 entries */
booke206_invalidate_ea_tlb(env, 0, address);
- tlb_flush_page(CPU(cpu), address & MAS2_EPN_MASK);
+ CPU_FOREACH(cs) {
+ tlb_flush_page(cs, address & MAS2_EPN_MASK);
+ }
}
}
@@ -2867,9 +2871,14 @@ void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
}
-void helper_check_tlb_flush(CPUPPCState *env)
+void helper_check_tlb_flush_local(CPUPPCState *env)
{
- check_tlb_flush(env);
+ check_tlb_flush(env, false);
+}
+
+void helper_check_tlb_flush_global(CPUPPCState *env)
+{
+ check_tlb_flush(env, true);
}
/*****************************************************************************/
@@ -2892,10 +2901,7 @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
ret = cpu_ppc_handle_mmu_fault(env, addr, access_type, mmu_idx);
}
if (unlikely(ret != 0)) {
- if (likely(retaddr)) {
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr);
- }
- helper_raise_exception_err(env, cs->exception_index, env->error_code);
+ raise_exception_err_ra(env, cs->exception_index, env->error_code,
+ retaddr);
}
}
diff --git a/target-ppc/timebase_helper.c b/target-ppc/timebase_helper.c
index a07faa42cb..73363e08ae 100644
--- a/target-ppc/timebase_helper.c
+++ b/target-ppc/timebase_helper.c
@@ -19,6 +19,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
#include "qemu/log.h"
/*****************************************************************************/
@@ -143,15 +144,16 @@ target_ulong helper_load_dcr(CPUPPCState *env, target_ulong dcrn)
if (unlikely(env->dcr_env == NULL)) {
qemu_log_mask(LOG_GUEST_ERROR, "No DCR environment\n");
- helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_INVAL |
- POWERPC_EXCP_INVAL_INVAL);
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_INVAL_INVAL, GETPC());
} else if (unlikely(ppc_dcr_read(env->dcr_env,
(uint32_t)dcrn, &val) != 0)) {
qemu_log_mask(LOG_GUEST_ERROR, "DCR read error %d %03x\n",
(uint32_t)dcrn, (uint32_t)dcrn);
- helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_PRIV_REG, GETPC());
}
return val;
}
@@ -160,14 +162,15 @@ void helper_store_dcr(CPUPPCState *env, target_ulong dcrn, target_ulong val)
{
if (unlikely(env->dcr_env == NULL)) {
qemu_log_mask(LOG_GUEST_ERROR, "No DCR environment\n");
- helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_INVAL |
- POWERPC_EXCP_INVAL_INVAL);
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_INVAL_INVAL, GETPC());
} else if (unlikely(ppc_dcr_write(env->dcr_env, (uint32_t)dcrn,
(uint32_t)val) != 0)) {
qemu_log_mask(LOG_GUEST_ERROR, "DCR write error %d %03x\n",
(uint32_t)dcrn, (uint32_t)dcrn);
- helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_PRIV_REG, GETPC());
}
}
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index 92030b66a5..59e9552d2b 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -20,6 +20,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
+#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
@@ -195,6 +196,7 @@ struct DisasContext {
/* Routine used to access memory */
bool pr, hv, dr, le_mode;
bool lazy_tlb_flush;
+ bool need_access_type;
int mem_idx;
int access_type;
/* Translation flags */
@@ -250,20 +252,9 @@ struct opc_handler_t {
#endif
};
-static inline void gen_reset_fpstatus(void)
-{
- gen_helper_reset_fpstatus(cpu_env);
-}
-
-static inline void gen_compute_fprf(TCGv_i64 arg)
-{
- gen_helper_compute_fprf(cpu_env, arg);
- gen_helper_float_check_status(cpu_env);
-}
-
static inline void gen_set_access_type(DisasContext *ctx, int access_type)
{
- if (ctx->access_type != access_type) {
+ if (ctx->need_access_type && ctx->access_type != access_type) {
tcg_gen_movi_i32(cpu_access_type, access_type);
ctx->access_type = access_type;
}
@@ -277,18 +268,15 @@ static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
tcg_gen_movi_tl(cpu_nip, nip);
}
-void gen_update_current_nip(void *opaque)
-{
- DisasContext *ctx = opaque;
-
- tcg_gen_movi_tl(cpu_nip, ctx->nip);
-}
-
static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
{
TCGv_i32 t0, t1;
+
+    /* These are all synchronous exceptions; we set the PC back to
+     * the faulting instruction.
+     */
if (ctx->exception == POWERPC_EXCP_NONE) {
- gen_update_nip(ctx, ctx->nip);
+ gen_update_nip(ctx, ctx->nip - 4);
}
t0 = tcg_const_i32(excp);
t1 = tcg_const_i32(error);
@@ -301,8 +289,12 @@ static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
static void gen_exception(DisasContext *ctx, uint32_t excp)
{
TCGv_i32 t0;
+
+    /* These are all synchronous exceptions; we set the PC back to
+     * the faulting instruction.
+     */
if (ctx->exception == POWERPC_EXCP_NONE) {
- gen_update_nip(ctx, ctx->nip);
+ gen_update_nip(ctx, ctx->nip - 4);
}
t0 = tcg_const_i32(excp);
gen_helper_raise_exception(cpu_env, t0);
@@ -310,10 +302,25 @@ static void gen_exception(DisasContext *ctx, uint32_t excp)
ctx->exception = (excp);
}
+static void gen_exception_nip(DisasContext *ctx, uint32_t excp,
+ target_ulong nip)
+{
+ TCGv_i32 t0;
+
+ gen_update_nip(ctx, nip);
+ t0 = tcg_const_i32(excp);
+ gen_helper_raise_exception(cpu_env, t0);
+ tcg_temp_free_i32(t0);
+ ctx->exception = (excp);
+}
+
static void gen_debug_exception(DisasContext *ctx)
{
TCGv_i32 t0;
+    /* These are all synchronous exceptions; we set the PC back to
+     * the faulting instruction.
+     */
if ((ctx->exception != POWERPC_EXCP_BRANCH) &&
(ctx->exception != POWERPC_EXCP_SYNC)) {
gen_update_nip(ctx, ctx->nip);
@@ -367,12 +374,16 @@ GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)
#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2) \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)
+#define GEN_HANDLER_E_2(name, opc1, opc2, opc3, opc4, inval, type, type2) \
+GEN_OPCODE3(name, opc1, opc2, opc3, opc4, inval, type, type2)
+
+#define GEN_HANDLER2_E_2(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) \
+GEN_OPCODE4(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2)
+
typedef struct opcode_t {
- unsigned char opc1, opc2, opc3;
+ unsigned char opc1, opc2, opc3, opc4;
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
- unsigned char pad[5];
-#else
- unsigned char pad[1];
+ unsigned char pad[4];
#endif
opc_handler_t handler;
const char *oname;
@@ -432,12 +443,28 @@ static inline uint32_t name(uint32_t opcode) \
return (((opcode >> (shift1)) & ((1 << (nb1)) - 1)) << nb2) | \
((opcode >> (shift2)) & ((1 << (nb2)) - 1)); \
}
+
+#define EXTRACT_HELPER_DXFORM(name, \
+ d0_bits, shift_op_d0, shift_d0, \
+ d1_bits, shift_op_d1, shift_d1, \
+ d2_bits, shift_op_d2, shift_d2) \
+static inline int16_t name(uint32_t opcode) \
+{ \
+ return \
+ (((opcode >> (shift_op_d0)) & ((1 << (d0_bits)) - 1)) << (shift_d0)) | \
+ (((opcode >> (shift_op_d1)) & ((1 << (d1_bits)) - 1)) << (shift_d1)) | \
+ (((opcode >> (shift_op_d2)) & ((1 << (d2_bits)) - 1)) << (shift_d2)); \
+}
+
+
/* Opcode part 1 */
EXTRACT_HELPER(opc1, 26, 6);
/* Opcode part 2 */
EXTRACT_HELPER(opc2, 1, 5);
/* Opcode part 3 */
EXTRACT_HELPER(opc3, 6, 5);
+/* Opcode part 4 */
+EXTRACT_HELPER(opc4, 16, 5);
/* Update Cr0 flags */
EXTRACT_HELPER(Rc, 0, 1);
/* Update Cr6 flags (Altivec) */
@@ -475,6 +502,8 @@ EXTRACT_HELPER(UIMM, 0, 16);
EXTRACT_HELPER(SIMM5, 16, 5);
/* 5 bits signed immediate value */
EXTRACT_HELPER(UIMM5, 16, 5);
+/* 4 bits unsigned immediate value */
+EXTRACT_HELPER(UIMM4, 16, 4);
/* Bit count */
EXTRACT_HELPER(NB, 11, 5);
/* Shift count */
@@ -501,6 +530,13 @@ EXTRACT_HELPER(FPL, 25, 1);
EXTRACT_HELPER(FPFLM, 17, 8);
EXTRACT_HELPER(FPW, 16, 1);
+/* addpcis */
+EXTRACT_HELPER_DXFORM(DX, 10, 6, 6, 5, 16, 1, 1, 0, 0)
+#if defined(TARGET_PPC64)
+/* darn */
+EXTRACT_HELPER(L, 16, 2);
+#endif
+
/*** Jump target decoding ***/
/* Immediate address */
static inline target_ulong LI(uint32_t opcode)
@@ -526,34 +562,6 @@ EXTRACT_HELPER(DCM, 10, 6)
/* DFP Z23-form */
EXTRACT_HELPER(RMC, 9, 2)
-/* Create a mask between <start> and <end> bits */
-static inline target_ulong MASK(uint32_t start, uint32_t end)
-{
- target_ulong ret;
-
-#if defined(TARGET_PPC64)
- if (likely(start == 0)) {
- ret = UINT64_MAX << (63 - end);
- } else if (likely(end == 63)) {
- ret = UINT64_MAX >> start;
- }
-#else
- if (likely(start == 0)) {
- ret = UINT32_MAX << (31 - end);
- } else if (likely(end == 31)) {
- ret = UINT32_MAX >> start;
- }
-#endif
- else {
- ret = (((target_ulong)(-1ULL)) >> (start)) ^
- (((target_ulong)(-1ULL) >> (end)) >> 1);
- if (unlikely(start > end))
- return ~ret;
- }
-
- return ret;
-}
-
EXTRACT_HELPER_SPLIT(xT, 0, 1, 21, 5);
EXTRACT_HELPER_SPLIT(xS, 0, 1, 21, 5);
EXTRACT_HELPER_SPLIT(xA, 2, 1, 16, 5);
@@ -563,6 +571,8 @@ EXTRACT_HELPER(DM, 8, 2);
EXTRACT_HELPER(UIM, 16, 2);
EXTRACT_HELPER(SHW, 8, 2);
EXTRACT_HELPER(SP, 19, 2);
+EXTRACT_HELPER(IMM8, 11, 8);
+
/*****************************************************************************/
/* PowerPC instructions table */
@@ -572,7 +582,7 @@ EXTRACT_HELPER(SP, 19, 2);
.opc1 = op1, \
.opc2 = op2, \
.opc3 = op3, \
- .pad = { 0, }, \
+ .opc4 = 0xff, \
.handler = { \
.inval1 = invl, \
.type = _typ, \
@@ -587,7 +597,7 @@ EXTRACT_HELPER(SP, 19, 2);
.opc1 = op1, \
.opc2 = op2, \
.opc3 = op3, \
- .pad = { 0, }, \
+ .opc4 = 0xff, \
.handler = { \
.inval1 = invl1, \
.inval2 = invl2, \
@@ -603,7 +613,37 @@ EXTRACT_HELPER(SP, 19, 2);
.opc1 = op1, \
.opc2 = op2, \
.opc3 = op3, \
- .pad = { 0, }, \
+ .opc4 = 0xff, \
+ .handler = { \
+ .inval1 = invl, \
+ .type = _typ, \
+ .type2 = _typ2, \
+ .handler = &gen_##name, \
+ .oname = onam, \
+ }, \
+ .oname = onam, \
+}
+#define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .opc4 = op4, \
+ .handler = { \
+ .inval1 = invl, \
+ .type = _typ, \
+ .type2 = _typ2, \
+ .handler = &gen_##name, \
+ .oname = stringify(name), \
+ }, \
+ .oname = stringify(name), \
+}
+#define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .opc4 = op4, \
.handler = { \
.inval1 = invl, \
.type = _typ, \
@@ -619,7 +659,7 @@ EXTRACT_HELPER(SP, 19, 2);
.opc1 = op1, \
.opc2 = op2, \
.opc3 = op3, \
- .pad = { 0, }, \
+ .opc4 = 0xff, \
.handler = { \
.inval1 = invl, \
.type = _typ, \
@@ -633,7 +673,7 @@ EXTRACT_HELPER(SP, 19, 2);
.opc1 = op1, \
.opc2 = op2, \
.opc3 = op3, \
- .pad = { 0, }, \
+ .opc4 = 0xff, \
.handler = { \
.inval1 = invl1, \
.inval2 = invl2, \
@@ -648,7 +688,35 @@ EXTRACT_HELPER(SP, 19, 2);
.opc1 = op1, \
.opc2 = op2, \
.opc3 = op3, \
- .pad = { 0, }, \
+ .opc4 = 0xff, \
+ .handler = { \
+ .inval1 = invl, \
+ .type = _typ, \
+ .type2 = _typ2, \
+ .handler = &gen_##name, \
+ }, \
+ .oname = onam, \
+}
+#define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .opc4 = op4, \
+ .handler = { \
+ .inval1 = invl, \
+ .type = _typ, \
+ .type2 = _typ2, \
+ .handler = &gen_##name, \
+ }, \
+ .oname = stringify(name), \
+}
+#define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .opc4 = op4, \
.handler = { \
.inval1 = invl, \
.type = _typ, \
@@ -800,6 +868,53 @@ static void gen_cmpli(DisasContext *ctx)
}
}
+/* cmprb - range comparison: isupper, isalpha, islower */
+static void gen_cmprb(DisasContext *ctx)
+{
+ TCGv_i32 src1 = tcg_temp_new_i32();
+ TCGv_i32 src2 = tcg_temp_new_i32();
+ TCGv_i32 src2lo = tcg_temp_new_i32();
+ TCGv_i32 src2hi = tcg_temp_new_i32();
+ TCGv_i32 crf = cpu_crf[crfD(ctx->opcode)];
+
+ tcg_gen_trunc_tl_i32(src1, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_trunc_tl_i32(src2, cpu_gpr[rB(ctx->opcode)]);
+
+ tcg_gen_andi_i32(src1, src1, 0xFF);
+ tcg_gen_ext8u_i32(src2lo, src2);
+ tcg_gen_shri_i32(src2, src2, 8);
+ tcg_gen_ext8u_i32(src2hi, src2);
+
+ tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
+ tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
+ tcg_gen_and_i32(crf, src2lo, src2hi);
+
+ if (ctx->opcode & 0x00200000) {
+ tcg_gen_shri_i32(src2, src2, 8);
+ tcg_gen_ext8u_i32(src2lo, src2);
+ tcg_gen_shri_i32(src2, src2, 8);
+ tcg_gen_ext8u_i32(src2hi, src2);
+ tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
+ tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
+ tcg_gen_and_i32(src2lo, src2lo, src2hi);
+ tcg_gen_or_i32(crf, crf, src2lo);
+ }
+ tcg_gen_shli_i32(crf, crf, CRF_GT);
+ tcg_temp_free_i32(src1);
+ tcg_temp_free_i32(src2);
+ tcg_temp_free_i32(src2lo);
+ tcg_temp_free_i32(src2hi);
+}
+
+#if defined(TARGET_PPC64)
+/* cmpeqb */
+static void gen_cmpeqb(DisasContext *ctx)
+{
+ gen_helper_cmpeqb(cpu_crf[crfD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+}
+#endif
+
/* isel (PowerPC 2.03 specification) */
static void gen_isel(DisasContext *ctx)
{
@@ -984,44 +1099,50 @@ static void gen_addis(DisasContext *ctx)
}
}
+/* addpcis */
+static void gen_addpcis(DisasContext *ctx)
+{
+ target_long d = DX(ctx->opcode);
+
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], ctx->nip + (d << 16));
+}
+
static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
TCGv arg2, int sign, int compute_ov)
{
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- TCGv_i32 t0 = tcg_temp_local_new_i32();
- TCGv_i32 t1 = tcg_temp_local_new_i32();
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(t0, arg1);
tcg_gen_trunc_tl_i32(t1, arg2);
- tcg_gen_brcondi_i32(TCG_COND_EQ, t1, 0, l1);
if (sign) {
- TCGLabel *l3 = gen_new_label();
- tcg_gen_brcondi_i32(TCG_COND_NE, t1, -1, l3);
- tcg_gen_brcondi_i32(TCG_COND_EQ, t0, INT32_MIN, l1);
- gen_set_label(l3);
- tcg_gen_div_i32(t0, t0, t1);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_i32(t2, t2, t3);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_i32(t2, t2, t3);
+ tcg_gen_movi_i32(t3, 0);
+ tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_div_i32(t3, t0, t1);
+ tcg_gen_extu_i32_tl(ret, t3);
} else {
- tcg_gen_divu_i32(t0, t0, t1);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t1, 0);
+ tcg_gen_movi_i32(t3, 0);
+ tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_divu_i32(t3, t0, t1);
+ tcg_gen_extu_i32_tl(ret, t3);
}
if (compute_ov) {
- tcg_gen_movi_tl(cpu_ov, 0);
+ tcg_gen_extu_i32_tl(cpu_ov, t2);
+ tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
- tcg_gen_br(l2);
- gen_set_label(l1);
- if (sign) {
- tcg_gen_sari_i32(t0, t0, 31);
- } else {
- tcg_gen_movi_i32(t0, 0);
- }
- if (compute_ov) {
- tcg_gen_movi_tl(cpu_ov, 1);
- tcg_gen_movi_tl(cpu_so, 1);
- }
- gen_set_label(l2);
- tcg_gen_extu_i32_tl(ret, t0);
tcg_temp_free_i32(t0);
tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+
if (unlikely(Rc(ctx->opcode) != 0))
gen_set_Rc0(ctx, ret);
}
@@ -1062,37 +1183,41 @@ GEN_DIVE(divweo, divwe, 1);
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
TCGv arg2, int sign, int compute_ov)
{
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
- tcg_gen_brcondi_i64(TCG_COND_EQ, arg2, 0, l1);
- if (sign) {
- TCGLabel *l3 = gen_new_label();
- tcg_gen_brcondi_i64(TCG_COND_NE, arg2, -1, l3);
- tcg_gen_brcondi_i64(TCG_COND_EQ, arg1, INT64_MIN, l1);
- gen_set_label(l3);
- tcg_gen_div_i64(ret, arg1, arg2);
- } else {
- tcg_gen_divu_i64(ret, arg1, arg2);
- }
- if (compute_ov) {
- tcg_gen_movi_tl(cpu_ov, 0);
- }
- tcg_gen_br(l2);
- gen_set_label(l1);
+ tcg_gen_mov_i64(t0, arg1);
+ tcg_gen_mov_i64(t1, arg2);
if (sign) {
- tcg_gen_sari_i64(ret, arg1, 63);
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_i64(t2, t2, t3);
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_i64(t2, t2, t3);
+ tcg_gen_movi_i64(t3, 0);
+ tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_div_i64(ret, t0, t1);
} else {
- tcg_gen_movi_i64(ret, 0);
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t1, 0);
+ tcg_gen_movi_i64(t3, 0);
+ tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_divu_i64(ret, t0, t1);
}
if (compute_ov) {
- tcg_gen_movi_tl(cpu_ov, 1);
- tcg_gen_movi_tl(cpu_so, 1);
+ tcg_gen_mov_tl(cpu_ov, t2);
+ tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
- gen_set_label(l2);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+
if (unlikely(Rc(ctx->opcode) != 0))
gen_set_Rc0(ctx, ret);
}
+
#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
@@ -1113,6 +1238,98 @@ GEN_DIVE(divde, divde, 0);
GEN_DIVE(divdeo, divde, 1);
#endif
+static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1,
+ TCGv arg2, int sign)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t0, arg1);
+ tcg_gen_trunc_tl_i32(t1, arg2);
+ if (sign) {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_i32(t2, t2, t3);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_i32(t2, t2, t3);
+ tcg_gen_movi_i32(t3, 0);
+ tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_rem_i32(t3, t0, t1);
+ tcg_gen_ext_i32_tl(ret, t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ } else {
+ TCGv_i32 t2 = tcg_const_i32(1);
+ TCGv_i32 t3 = tcg_const_i32(0);
+ tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1);
+ tcg_gen_remu_i32(t3, t0, t1);
+ tcg_gen_extu_i32_tl(ret, t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+}
+
+#define GEN_INT_ARITH_MODW(name, opc3, sign) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ gen_op_arith_modw(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
+ sign); \
+}
+
+GEN_INT_ARITH_MODW(moduw, 0x08, 0);
+GEN_INT_ARITH_MODW(modsw, 0x18, 1);
+
+#if defined(TARGET_PPC64)
+static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1,
+ TCGv arg2, int sign)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ tcg_gen_mov_i64(t0, arg1);
+ tcg_gen_mov_i64(t1, arg2);
+ if (sign) {
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_i64(t2, t2, t3);
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_i64(t2, t2, t3);
+ tcg_gen_movi_i64(t3, 0);
+ tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_rem_i64(ret, t0, t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+ } else {
+ TCGv_i64 t2 = tcg_const_i64(1);
+ TCGv_i64 t3 = tcg_const_i64(0);
+ tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1);
+ tcg_gen_remu_i64(ret, t0, t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+ }
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+#define GEN_INT_ARITH_MODD(name, opc3, sign) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ gen_op_arith_modd(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
+ sign); \
+}
+
+GEN_INT_ARITH_MODD(modud, 0x08, 0);
+GEN_INT_ARITH_MODD(modsd, 0x18, 1);
+#endif
+
/* mulhw mulhw. */
static void gen_mulhw(DisasContext *ctx)
{
@@ -1428,6 +1645,16 @@ static void gen_cntlzw(DisasContext *ctx)
if (unlikely(Rc(ctx->opcode) != 0))
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
+
+/* cnttzw */
+static void gen_cnttzw(DisasContext *ctx)
+{
+ gen_helper_cnttzw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
/* eqv & eqv. */
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
/* extsb & extsb. */
@@ -1448,7 +1675,7 @@ static void gen_pause(DisasContext *ctx)
tcg_temp_free_i32(t0);
/* Stop translation, this gives other CPUs a chance to run */
- gen_exception_err(ctx, EXCP_HLT, 1);
+ gen_exception_nip(ctx, EXCP_HLT, ctx->nip);
}
#endif /* defined(TARGET_PPC64) */
@@ -1668,6 +1895,30 @@ static void gen_cntlzd(DisasContext *ctx)
if (unlikely(Rc(ctx->opcode) != 0))
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
+
+/* cnttzd */
+static void gen_cnttzd(DisasContext *ctx)
+{
+ gen_helper_cnttzd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* darn */
+static void gen_darn(DisasContext *ctx)
+{
+ int l = L(ctx->opcode);
+
+ if (l == 0) {
+ gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]);
+ } else if (l <= 2) {
+ /* Return 64-bit random for both CRN and RRN */
+ gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]);
+ } else {
+ tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1);
+ }
+}
#endif
/*** Integer rotate ***/
@@ -2106,6 +2357,30 @@ static void gen_sradi1(DisasContext *ctx)
gen_sradi(ctx, 1);
}
+/* extswsli & extswsli. */
+static inline void gen_extswsli(DisasContext *ctx, int n)
+{
+ int sh = SH(ctx->opcode) + (n << 5);
+ TCGv dst = cpu_gpr[rA(ctx->opcode)];
+ TCGv src = cpu_gpr[rS(ctx->opcode)];
+
+ tcg_gen_ext32s_tl(dst, src);
+ tcg_gen_shli_tl(dst, dst, sh);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, dst);
+ }
+}
+
+static void gen_extswsli0(DisasContext *ctx)
+{
+ gen_extswsli(ctx, 0);
+}
+
+static void gen_extswsli1(DisasContext *ctx)
+{
+ gen_extswsli(ctx, 1);
+}
+
/* srd & srd. */
static void gen_srd(DisasContext *ctx)
{
@@ -2126,602 +2401,6 @@ static void gen_srd(DisasContext *ctx)
}
#endif
-#if defined(TARGET_PPC64)
-static void gen_set_cr1_from_fpscr(DisasContext *ctx)
-{
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_trunc_tl_i32(tmp, cpu_fpscr);
- tcg_gen_shri_i32(cpu_crf[1], tmp, 28);
- tcg_temp_free_i32(tmp);
-}
-#else
-static void gen_set_cr1_from_fpscr(DisasContext *ctx)
-{
- tcg_gen_shri_tl(cpu_crf[1], cpu_fpscr, 28);
-}
-#endif
-
-/*** Floating-Point arithmetic ***/
-#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \
-static void gen_f##name(DisasContext *ctx) \
-{ \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- /* NIP cannot be restored if the memory exception comes from an helper */ \
- gen_update_nip(ctx, ctx->nip - 4); \
- gen_reset_fpstatus(); \
- gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
- cpu_fpr[rA(ctx->opcode)], \
- cpu_fpr[rC(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \
- if (isfloat) { \
- gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
- cpu_fpr[rD(ctx->opcode)]); \
- } \
- if (set_fprf) { \
- gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
- } \
- if (unlikely(Rc(ctx->opcode) != 0)) { \
- gen_set_cr1_from_fpscr(ctx); \
- } \
-}
-
-#define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
-_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type); \
-_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type);
-
-#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \
-static void gen_f##name(DisasContext *ctx) \
-{ \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- /* NIP cannot be restored if the memory exception comes from an helper */ \
- gen_update_nip(ctx, ctx->nip - 4); \
- gen_reset_fpstatus(); \
- gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
- cpu_fpr[rA(ctx->opcode)], \
- cpu_fpr[rB(ctx->opcode)]); \
- if (isfloat) { \
- gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
- cpu_fpr[rD(ctx->opcode)]); \
- } \
- if (set_fprf) { \
- gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
- } \
- if (unlikely(Rc(ctx->opcode) != 0)) { \
- gen_set_cr1_from_fpscr(ctx); \
- } \
-}
-#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
-_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
-_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
-
-#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \
-static void gen_f##name(DisasContext *ctx) \
-{ \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- /* NIP cannot be restored if the memory exception comes from an helper */ \
- gen_update_nip(ctx, ctx->nip - 4); \
- gen_reset_fpstatus(); \
- gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
- cpu_fpr[rA(ctx->opcode)], \
- cpu_fpr[rC(ctx->opcode)]); \
- if (isfloat) { \
- gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
- cpu_fpr[rD(ctx->opcode)]); \
- } \
- if (set_fprf) { \
- gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
- } \
- if (unlikely(Rc(ctx->opcode) != 0)) { \
- gen_set_cr1_from_fpscr(ctx); \
- } \
-}
-#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
-_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
-_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
-
-#define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \
-static void gen_f##name(DisasContext *ctx) \
-{ \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- /* NIP cannot be restored if the memory exception comes from an helper */ \
- gen_update_nip(ctx, ctx->nip - 4); \
- gen_reset_fpstatus(); \
- gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env, \
- cpu_fpr[rB(ctx->opcode)]); \
- if (set_fprf) { \
- gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
- } \
- if (unlikely(Rc(ctx->opcode) != 0)) { \
- gen_set_cr1_from_fpscr(ctx); \
- } \
-}
-
-#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
-static void gen_f##name(DisasContext *ctx) \
-{ \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- /* NIP cannot be restored if the memory exception comes from an helper */ \
- gen_update_nip(ctx, ctx->nip - 4); \
- gen_reset_fpstatus(); \
- gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env, \
- cpu_fpr[rB(ctx->opcode)]); \
- if (set_fprf) { \
- gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
- } \
- if (unlikely(Rc(ctx->opcode) != 0)) { \
- gen_set_cr1_from_fpscr(ctx); \
- } \
-}
-
-/* fadd - fadds */
-GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT);
-/* fdiv - fdivs */
-GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT);
-/* fmul - fmuls */
-GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT);
-
-/* fre */
-GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT);
-
-/* fres */
-GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);
-
-/* frsqrte */
-GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);
-
-/* frsqrtes */
-static void gen_frsqrtes(DisasContext *ctx)
-{
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_reset_fpstatus();
- gen_helper_frsqrte(cpu_fpr[rD(ctx->opcode)], cpu_env,
- cpu_fpr[rB(ctx->opcode)]);
- gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,
- cpu_fpr[rD(ctx->opcode)]);
- gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_cr1_from_fpscr(ctx);
- }
-}
-
-/* fsel */
-_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL);
-/* fsub - fsubs */
-GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
-/* Optional: */
-
-/* fsqrt */
-static void gen_fsqrt(DisasContext *ctx)
-{
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_reset_fpstatus();
- gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env,
- cpu_fpr[rB(ctx->opcode)]);
- gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_cr1_from_fpscr(ctx);
- }
-}
-
-static void gen_fsqrts(DisasContext *ctx)
-{
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_reset_fpstatus();
- gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env,
- cpu_fpr[rB(ctx->opcode)]);
- gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,
- cpu_fpr[rD(ctx->opcode)]);
- gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_cr1_from_fpscr(ctx);
- }
-}
-
-/*** Floating-Point multiply-and-add ***/
-/* fmadd - fmadds */
-GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
-/* fmsub - fmsubs */
-GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT);
-/* fnmadd - fnmadds */
-GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT);
-/* fnmsub - fnmsubs */
-GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT);
-
-/*** Floating-Point round & convert ***/
-/* fctiw */
-GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
-/* fctiwu */
-GEN_FLOAT_B(ctiwu, 0x0E, 0x04, 0, PPC2_FP_CVT_ISA206);
-/* fctiwz */
-GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
-/* fctiwuz */
-GEN_FLOAT_B(ctiwuz, 0x0F, 0x04, 0, PPC2_FP_CVT_ISA206);
-/* frsp */
-GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
-/* fcfid */
-GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC2_FP_CVT_S64);
-/* fcfids */
-GEN_FLOAT_B(cfids, 0x0E, 0x1A, 0, PPC2_FP_CVT_ISA206);
-/* fcfidu */
-GEN_FLOAT_B(cfidu, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
-/* fcfidus */
-GEN_FLOAT_B(cfidus, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
-/* fctid */
-GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC2_FP_CVT_S64);
-/* fctidu */
-GEN_FLOAT_B(ctidu, 0x0E, 0x1D, 0, PPC2_FP_CVT_ISA206);
-/* fctidz */
-GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC2_FP_CVT_S64);
-/* fctidu */
-GEN_FLOAT_B(ctiduz, 0x0F, 0x1D, 0, PPC2_FP_CVT_ISA206);
-
-/* frin */
-GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
-/* friz */
-GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
-/* frip */
-GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
-/* frim */
-GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);
-
-static void gen_ftdiv(DisasContext *ctx)
-{
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- gen_helper_ftdiv(cpu_crf[crfD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
- cpu_fpr[rB(ctx->opcode)]);
-}
-
-static void gen_ftsqrt(DisasContext *ctx)
-{
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- gen_helper_ftsqrt(cpu_crf[crfD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
-}
-
-
-
-/*** Floating-Point compare ***/
-
-/* fcmpo */
-static void gen_fcmpo(DisasContext *ctx)
-{
- TCGv_i32 crf;
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_reset_fpstatus();
- crf = tcg_const_i32(crfD(ctx->opcode));
- gen_helper_fcmpo(cpu_env, cpu_fpr[rA(ctx->opcode)],
- cpu_fpr[rB(ctx->opcode)], crf);
- tcg_temp_free_i32(crf);
- gen_helper_float_check_status(cpu_env);
-}
-
-/* fcmpu */
-static void gen_fcmpu(DisasContext *ctx)
-{
- TCGv_i32 crf;
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_reset_fpstatus();
- crf = tcg_const_i32(crfD(ctx->opcode));
- gen_helper_fcmpu(cpu_env, cpu_fpr[rA(ctx->opcode)],
- cpu_fpr[rB(ctx->opcode)], crf);
- tcg_temp_free_i32(crf);
- gen_helper_float_check_status(cpu_env);
-}
-
-/*** Floating-point move ***/
-/* fabs */
-/* XXX: beware that fabs never checks for NaNs nor update FPSCR */
-static void gen_fabs(DisasContext *ctx)
-{
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- tcg_gen_andi_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
- ~(1ULL << 63));
- if (unlikely(Rc(ctx->opcode))) {
- gen_set_cr1_from_fpscr(ctx);
- }
-}
-
-/* fmr - fmr. */
-/* XXX: beware that fmr never checks for NaNs nor update FPSCR */
-static void gen_fmr(DisasContext *ctx)
-{
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
- if (unlikely(Rc(ctx->opcode))) {
- gen_set_cr1_from_fpscr(ctx);
- }
-}
-
-/* fnabs */
-/* XXX: beware that fnabs never checks for NaNs nor update FPSCR */
-static void gen_fnabs(DisasContext *ctx)
-{
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- tcg_gen_ori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
- 1ULL << 63);
- if (unlikely(Rc(ctx->opcode))) {
- gen_set_cr1_from_fpscr(ctx);
- }
-}
-
-/* fneg */
-/* XXX: beware that fneg never checks for NaNs nor update FPSCR */
-static void gen_fneg(DisasContext *ctx)
-{
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- tcg_gen_xori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
- 1ULL << 63);
- if (unlikely(Rc(ctx->opcode))) {
- gen_set_cr1_from_fpscr(ctx);
- }
-}
-
-/* fcpsgn: PowerPC 2.05 specification */
-/* XXX: beware that fcpsgn never checks for NaNs nor update FPSCR */
-static void gen_fcpsgn(DisasContext *ctx)
-{
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
- cpu_fpr[rB(ctx->opcode)], 0, 63);
- if (unlikely(Rc(ctx->opcode))) {
- gen_set_cr1_from_fpscr(ctx);
- }
-}
-
-static void gen_fmrgew(DisasContext *ctx)
-{
- TCGv_i64 b0;
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- b0 = tcg_temp_new_i64();
- tcg_gen_shri_i64(b0, cpu_fpr[rB(ctx->opcode)], 32);
- tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
- b0, 0, 32);
- tcg_temp_free_i64(b0);
-}
-
-static void gen_fmrgow(DisasContext *ctx)
-{
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)],
- cpu_fpr[rB(ctx->opcode)],
- cpu_fpr[rA(ctx->opcode)],
- 32, 32);
-}
-
-/*** Floating-Point status & ctrl register ***/
-
-/* mcrfs */
-static void gen_mcrfs(DisasContext *ctx)
-{
- TCGv tmp = tcg_temp_new();
- TCGv_i32 tmask;
- TCGv_i64 tnew_fpscr = tcg_temp_new_i64();
- int bfa;
- int nibble;
- int shift;
-
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- bfa = crfS(ctx->opcode);
- nibble = 7 - bfa;
- shift = 4 * nibble;
- tcg_gen_shri_tl(tmp, cpu_fpscr, shift);
- tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
- tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
- tcg_temp_free(tmp);
- tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr);
- /* Only the exception bits (including FX) should be cleared if read */
- tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr, ~((0xF << shift) & FP_EX_CLEAR_BITS));
- /* FEX and VX need to be updated, so don't set fpscr directly */
- tmask = tcg_const_i32(1 << nibble);
- gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
- tcg_temp_free_i32(tmask);
- tcg_temp_free_i64(tnew_fpscr);
-}
-
-/* mffs */
-static void gen_mffs(DisasContext *ctx)
-{
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- gen_reset_fpstatus();
- tcg_gen_extu_tl_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpscr);
- if (unlikely(Rc(ctx->opcode))) {
- gen_set_cr1_from_fpscr(ctx);
- }
-}
-
-/* mtfsb0 */
-static void gen_mtfsb0(DisasContext *ctx)
-{
- uint8_t crb;
-
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- crb = 31 - crbD(ctx->opcode);
- gen_reset_fpstatus();
- if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
- TCGv_i32 t0;
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- t0 = tcg_const_i32(crb);
- gen_helper_fpscr_clrbit(cpu_env, t0);
- tcg_temp_free_i32(t0);
- }
- if (unlikely(Rc(ctx->opcode) != 0)) {
- tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
- tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
- }
-}
-
-/* mtfsb1 */
-static void gen_mtfsb1(DisasContext *ctx)
-{
- uint8_t crb;
-
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- crb = 31 - crbD(ctx->opcode);
- gen_reset_fpstatus();
- /* XXX: we pretend we can only do IEEE floating-point computations */
- if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
- TCGv_i32 t0;
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- t0 = tcg_const_i32(crb);
- gen_helper_fpscr_setbit(cpu_env, t0);
- tcg_temp_free_i32(t0);
- }
- if (unlikely(Rc(ctx->opcode) != 0)) {
- tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
- tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
- }
- /* We can raise a differed exception */
- gen_helper_float_check_status(cpu_env);
-}
-
-/* mtfsf */
-static void gen_mtfsf(DisasContext *ctx)
-{
- TCGv_i32 t0;
- int flm, l, w;
-
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- flm = FPFLM(ctx->opcode);
- l = FPL(ctx->opcode);
- w = FPW(ctx->opcode);
- if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
- return;
- }
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_reset_fpstatus();
- if (l) {
- t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff);
- } else {
- t0 = tcg_const_i32(flm << (w * 8));
- }
- gen_helper_store_fpscr(cpu_env, cpu_fpr[rB(ctx->opcode)], t0);
- tcg_temp_free_i32(t0);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
- tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
- }
- /* We can raise a differed exception */
- gen_helper_float_check_status(cpu_env);
-}
-
-/* mtfsfi */
-static void gen_mtfsfi(DisasContext *ctx)
-{
- int bf, sh, w;
- TCGv_i64 t0;
- TCGv_i32 t1;
-
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- w = FPW(ctx->opcode);
- bf = FPBF(ctx->opcode);
- if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
- return;
- }
- sh = (8 * w) + 7 - bf;
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_reset_fpstatus();
- t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
- t1 = tcg_const_i32(1 << sh);
- gen_helper_store_fpscr(cpu_env, t0, t1);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i32(t1);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
- tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
- }
- /* We can raise a differed exception */
- gen_helper_float_check_status(cpu_env);
-}
-
/*** Addressing modes ***/
/* Register indirect with immediate index : EA = (rA|0) + SIMM */
static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
@@ -2790,12 +2469,11 @@ static inline void gen_check_align(DisasContext *ctx, TCGv EA, int mask)
TCGLabel *l1 = gen_new_label();
TCGv t0 = tcg_temp_new();
TCGv_i32 t1, t2;
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
tcg_gen_andi_tl(t0, EA, mask);
tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
t1 = tcg_const_i32(POWERPC_EXCP_ALIGN);
- t2 = tcg_const_i32(0);
+ t2 = tcg_const_i32(ctx->opcode & 0x03FF0000);
+ gen_update_nip(ctx, ctx->nip - 4);
gen_helper_raise_exception_err(cpu_env, t1, t2);
tcg_temp_free_i32(t1);
tcg_temp_free_i32(t2);
@@ -2803,88 +2481,82 @@ static inline void gen_check_align(DisasContext *ctx, TCGv EA, int mask)
tcg_temp_free(t0);
}
-/*** Integer load ***/
-static inline void gen_qemu_ld8u(DisasContext *ctx, TCGv arg1, TCGv arg2)
+static inline void gen_align_no_le(DisasContext *ctx)
{
- tcg_gen_qemu_ld8u(arg1, arg2, ctx->mem_idx);
+ gen_exception_err(ctx, POWERPC_EXCP_ALIGN,
+ (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE);
}
-static inline void gen_qemu_ld16u(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
-}
+/*** Integer load ***/
+#define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)
+#define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP))
-static inline void gen_qemu_ld16s(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_SW | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
+#define GEN_QEMU_LOAD_TL(ldop, op) \
+static void glue(gen_qemu_, ldop)(DisasContext *ctx, \
+ TCGv val, \
+ TCGv addr) \
+{ \
+ tcg_gen_qemu_ld_tl(val, addr, ctx->mem_idx, op); \
}
-static inline void gen_qemu_ld32u(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
-}
+GEN_QEMU_LOAD_TL(ld8u, DEF_MEMOP(MO_UB))
+GEN_QEMU_LOAD_TL(ld16u, DEF_MEMOP(MO_UW))
+GEN_QEMU_LOAD_TL(ld16s, DEF_MEMOP(MO_SW))
+GEN_QEMU_LOAD_TL(ld32u, DEF_MEMOP(MO_UL))
+GEN_QEMU_LOAD_TL(ld32s, DEF_MEMOP(MO_SL))
-static void gen_qemu_ld32u_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
-{
- TCGv tmp = tcg_temp_new();
- gen_qemu_ld32u(ctx, tmp, addr);
- tcg_gen_extu_tl_i64(val, tmp);
- tcg_temp_free(tmp);
-}
+GEN_QEMU_LOAD_TL(ld16ur, BSWAP_MEMOP(MO_UW))
+GEN_QEMU_LOAD_TL(ld32ur, BSWAP_MEMOP(MO_UL))
-static inline void gen_qemu_ld32s(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_SL | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
+#define GEN_QEMU_LOAD_64(ldop, op) \
+static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx, \
+ TCGv_i64 val, \
+ TCGv addr) \
+{ \
+ tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, op); \
}
-static void gen_qemu_ld32s_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
-{
- TCGv tmp = tcg_temp_new();
- gen_qemu_ld32s(ctx, tmp, addr);
- tcg_gen_ext_tl_i64(val, tmp);
- tcg_temp_free(tmp);
-}
+GEN_QEMU_LOAD_64(ld8u, DEF_MEMOP(MO_UB))
+GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW))
+GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
+GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
+GEN_QEMU_LOAD_64(ld64, DEF_MEMOP(MO_Q))
-static inline void gen_qemu_ld64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
-{
- TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op);
-}
+#if defined(TARGET_PPC64)
+GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_Q))
+#endif
-static inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- tcg_gen_qemu_st8(arg1, arg2, ctx->mem_idx);
+#define GEN_QEMU_STORE_TL(stop, op) \
+static void glue(gen_qemu_, stop)(DisasContext *ctx, \
+ TCGv val, \
+ TCGv addr) \
+{ \
+ tcg_gen_qemu_st_tl(val, addr, ctx->mem_idx, op); \
}
-static inline void gen_qemu_st16(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
-}
+GEN_QEMU_STORE_TL(st8, DEF_MEMOP(MO_UB))
+GEN_QEMU_STORE_TL(st16, DEF_MEMOP(MO_UW))
+GEN_QEMU_STORE_TL(st32, DEF_MEMOP(MO_UL))
-static inline void gen_qemu_st32(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
-}
+GEN_QEMU_STORE_TL(st16r, BSWAP_MEMOP(MO_UW))
+GEN_QEMU_STORE_TL(st32r, BSWAP_MEMOP(MO_UL))
-static void gen_qemu_st32_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
-{
- TCGv tmp = tcg_temp_new();
- tcg_gen_trunc_i64_tl(tmp, val);
- gen_qemu_st32(ctx, tmp, addr);
- tcg_temp_free(tmp);
+#define GEN_QEMU_STORE_64(stop, op) \
+static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx, \
+ TCGv_i64 val, \
+ TCGv addr) \
+{ \
+ tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, op); \
}
-static inline void gen_qemu_st64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
-{
- TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_st_i64(arg1, arg2, ctx->mem_idx, op);
-}
+GEN_QEMU_STORE_64(st8, DEF_MEMOP(MO_UB))
+GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW))
+GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
+GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_Q))
+
+#if defined(TARGET_PPC64)
+GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_Q))
+#endif
#define GEN_LD(name, ldop, opc, type) \
static void glue(gen_, name)(DisasContext *ctx) \
@@ -2972,12 +2644,12 @@ GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B);
/* lwax */
GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B);
/* ldux */
-GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B);
+GEN_LDUX(ld, ld64_i64, 0x15, 0x01, PPC_64B);
/* ldx */
-GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B);
+GEN_LDX(ld, ld64_i64, 0x15, 0x00, PPC_64B);
/* CI load/store variants */
-GEN_LDX_HVRM(ldcix, ld64, 0x15, 0x1b, PPC_CILDST)
+GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
@@ -3000,7 +2672,7 @@ static void gen_ld(DisasContext *ctx)
gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
} else {
/* ld - ldu */
- gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
+ gen_qemu_ld64_i64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
}
if (Rc(ctx->opcode))
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
@@ -3023,10 +2695,9 @@ static void gen_lq(DisasContext *ctx)
}
if (!le_is_supported && ctx->le_mode) {
- gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
+ gen_align_no_le(ctx);
return;
}
-
ra = rA(ctx->opcode);
rd = rD(ctx->opcode);
if (unlikely((rd & 1) || rd == ra)) {
@@ -3038,16 +2709,16 @@ static void gen_lq(DisasContext *ctx)
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x0F);
- /* We only need to swap high and low halves. gen_qemu_ld64 does necessary
- 64-bit byteswap already. */
+ /* We only need to swap high and low halves. gen_qemu_ld64_i64 does
+ necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) {
- gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
+ gen_qemu_ld64_i64(ctx, cpu_gpr[rd + 1], EA);
gen_addr_add(ctx, EA, EA, 8);
- gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
+ gen_qemu_ld64_i64(ctx, cpu_gpr[rd], EA);
} else {
- gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
+ gen_qemu_ld64_i64(ctx, cpu_gpr[rd], EA);
gen_addr_add(ctx, EA, EA, 8);
- gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
+ gen_qemu_ld64_i64(ctx, cpu_gpr[rd + 1], EA);
}
tcg_temp_free(EA);
}
@@ -3130,9 +2801,9 @@ GEN_STS(sth, st16, 0x0C, PPC_INTEGER);
/* stw stwu stwux stwx */
GEN_STS(stw, st32, 0x04, PPC_INTEGER);
#if defined(TARGET_PPC64)
-GEN_STUX(std, st64, 0x15, 0x05, PPC_64B);
-GEN_STX(std, st64, 0x15, 0x04, PPC_64B);
-GEN_STX_HVRM(stdcix, st64, 0x15, 0x1f, PPC_CILDST)
+GEN_STUX(std, st64_i64, 0x15, 0x05, PPC_64B);
+GEN_STX(std, st64_i64, 0x15, 0x04, PPC_64B);
+GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
@@ -3157,7 +2828,7 @@ static void gen_std(DisasContext *ctx)
}
if (!le_is_supported && ctx->le_mode) {
- gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
+ gen_align_no_le(ctx);
return;
}
@@ -3169,16 +2840,16 @@ static void gen_std(DisasContext *ctx)
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x03);
- /* We only need to swap high and low halves. gen_qemu_st64 does
+ /* We only need to swap high and low halves. gen_qemu_st64_i64 does
necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) {
- gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs + 1], EA);
gen_addr_add(ctx, EA, EA, 8);
- gen_qemu_st64(ctx, cpu_gpr[rs], EA);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
} else {
- gen_qemu_st64(ctx, cpu_gpr[rs], EA);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
gen_addr_add(ctx, EA, EA, 8);
- gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs + 1], EA);
}
tcg_temp_free(EA);
} else {
@@ -3192,7 +2863,7 @@ static void gen_std(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x03);
- gen_qemu_st64(ctx, cpu_gpr[rs], EA);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
if (Rc(ctx->opcode))
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
tcg_temp_free(EA);
@@ -3202,57 +2873,23 @@ static void gen_std(DisasContext *ctx)
/*** Integer load and store with byte reverse ***/
/* lhbrx */
-static inline void gen_qemu_ld16ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UW | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
- tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
-}
GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
/* lwbrx */
-static inline void gen_qemu_ld32ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UL | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
- tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
-}
GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
#if defined(TARGET_PPC64)
/* ldbrx */
-static inline void gen_qemu_ld64ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_Q | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
- tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op);
-}
-GEN_LDX_E(ldbr, ld64ur, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
+GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
+/* stdbrx */
+GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
#endif /* TARGET_PPC64 */
/* sthbrx */
-static inline void gen_qemu_st16r(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UW | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
- tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
-}
GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
-
/* stwbrx */
-static inline void gen_qemu_st32r(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UL | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
- tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
-}
GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
-#if defined(TARGET_PPC64)
-/* stdbrx */
-static inline void gen_qemu_st64r(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_Q | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
- tcg_gen_qemu_st_i64(arg1, arg2, ctx->mem_idx, op);
-}
-GEN_STX_E(stdbr, st64r, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
-#endif /* TARGET_PPC64 */
-
/*** Integer load and store multiple ***/
/* lmw */
@@ -3260,9 +2897,12 @@ static void gen_lmw(DisasContext *ctx)
{
TCGv t0;
TCGv_i32 t1;
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
gen_set_access_type(ctx, ACCESS_INT);
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
t0 = tcg_temp_new();
t1 = tcg_const_i32(rD(ctx->opcode));
gen_addr_imm_index(ctx, t0, 0);
@@ -3276,9 +2916,12 @@ static void gen_stmw(DisasContext *ctx)
{
TCGv t0;
TCGv_i32 t1;
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
gen_set_access_type(ctx, ACCESS_INT);
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
t0 = tcg_temp_new();
t1 = tcg_const_i32(rS(ctx->opcode));
gen_addr_imm_index(ctx, t0, 0);
@@ -3304,6 +2947,10 @@ static void gen_lswi(DisasContext *ctx)
int ra = rA(ctx->opcode);
int nr;
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
if (nb == 0)
nb = 32;
nr = (nb + 3) / 4;
@@ -3312,8 +2959,6 @@ static void gen_lswi(DisasContext *ctx)
return;
}
gen_set_access_type(ctx, ACCESS_INT);
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
t0 = tcg_temp_new();
gen_addr_register(ctx, t0);
t1 = tcg_const_i32(nb);
@@ -3329,9 +2974,12 @@ static void gen_lswx(DisasContext *ctx)
{
TCGv t0;
TCGv_i32 t1, t2, t3;
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
gen_set_access_type(ctx, ACCESS_INT);
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
t1 = tcg_const_i32(rD(ctx->opcode));
@@ -3350,9 +2998,12 @@ static void gen_stswi(DisasContext *ctx)
TCGv t0;
TCGv_i32 t1, t2;
int nb = NB(ctx->opcode);
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
gen_set_access_type(ctx, ACCESS_INT);
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
t0 = tcg_temp_new();
gen_addr_register(ctx, t0);
if (nb == 0)
@@ -3370,9 +3021,12 @@ static void gen_stswx(DisasContext *ctx)
{
TCGv t0;
TCGv_i32 t1, t2;
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
gen_set_access_type(ctx, ACCESS_INT);
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
t1 = tcg_temp_new_i32();
@@ -3392,7 +3046,7 @@ static void gen_eieio(DisasContext *ctx)
}
#if !defined(CONFIG_USER_ONLY)
-static inline void gen_check_tlb_flush(DisasContext *ctx)
+static inline void gen_check_tlb_flush(DisasContext *ctx, bool global)
{
TCGv_i32 t;
TCGLabel *l;
@@ -3404,12 +3058,16 @@ static inline void gen_check_tlb_flush(DisasContext *ctx)
t = tcg_temp_new_i32();
tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l);
- gen_helper_check_tlb_flush(cpu_env);
+ if (global) {
+ gen_helper_check_tlb_flush_global(cpu_env);
+ } else {
+ gen_helper_check_tlb_flush_local(cpu_env);
+ }
gen_set_label(l);
tcg_temp_free_i32(t);
}
#else
-static inline void gen_check_tlb_flush(DisasContext *ctx) { }
+static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { }
#endif
/* isync */
@@ -3420,53 +3078,51 @@ static void gen_isync(DisasContext *ctx)
* kernel mode however so check MSR_PR
*/
if (!ctx->pr) {
- gen_check_tlb_flush(ctx);
+ gen_check_tlb_flush(ctx, false);
}
gen_stop_exception(ctx);
}
-#define LARX(name, len, loadop) \
+#define MEMOP_GET_SIZE(x) (1 << ((x) & MO_SIZE))
+
+#define LARX(name, memop) \
static void gen_##name(DisasContext *ctx) \
{ \
TCGv t0; \
TCGv gpr = cpu_gpr[rD(ctx->opcode)]; \
+ int len = MEMOP_GET_SIZE(memop); \
gen_set_access_type(ctx, ACCESS_RES); \
t0 = tcg_temp_local_new(); \
gen_addr_reg_index(ctx, t0); \
if ((len) > 1) { \
gen_check_align(ctx, t0, (len)-1); \
} \
- gen_qemu_##loadop(ctx, gpr, t0); \
+ tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop); \
tcg_gen_mov_tl(cpu_reserve, t0); \
tcg_gen_st_tl(gpr, cpu_env, offsetof(CPUPPCState, reserve_val)); \
tcg_temp_free(t0); \
}
/* lwarx */
-LARX(lbarx, 1, ld8u);
-LARX(lharx, 2, ld16u);
-LARX(lwarx, 4, ld32u);
-
+LARX(lbarx, DEF_MEMOP(MO_UB))
+LARX(lharx, DEF_MEMOP(MO_UW))
+LARX(lwarx, DEF_MEMOP(MO_UL))
#if defined(CONFIG_USER_ONLY)
static void gen_conditional_store(DisasContext *ctx, TCGv EA,
- int reg, int size)
+ int reg, int memop)
{
TCGv t0 = tcg_temp_new();
- uint32_t save_exception = ctx->exception;
tcg_gen_st_tl(EA, cpu_env, offsetof(CPUPPCState, reserve_ea));
- tcg_gen_movi_tl(t0, (size << 5) | reg);
+ tcg_gen_movi_tl(t0, (MEMOP_GET_SIZE(memop) << 5) | reg);
tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, reserve_info));
tcg_temp_free(t0);
- gen_update_nip(ctx, ctx->nip-4);
- ctx->exception = POWERPC_EXCP_BRANCH;
- gen_exception(ctx, POWERPC_EXCP_STCX);
- ctx->exception = save_exception;
+ gen_exception_err(ctx, POWERPC_EXCP_STCX, 0);
}
#else
static void gen_conditional_store(DisasContext *ctx, TCGv EA,
- int reg, int size)
+ int reg, int memop)
{
TCGLabel *l1;
@@ -3474,65 +3130,36 @@ static void gen_conditional_store(DisasContext *ctx, TCGv EA,
l1 = gen_new_label();
tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, l1);
tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
-#if defined(TARGET_PPC64)
- if (size == 8) {
- gen_qemu_st64(ctx, cpu_gpr[reg], EA);
- } else
-#endif
- if (size == 4) {
- gen_qemu_st32(ctx, cpu_gpr[reg], EA);
- } else if (size == 2) {
- gen_qemu_st16(ctx, cpu_gpr[reg], EA);
-#if defined(TARGET_PPC64)
- } else if (size == 16) {
- TCGv gpr1, gpr2 , EA8;
- if (unlikely(ctx->le_mode)) {
- gpr1 = cpu_gpr[reg+1];
- gpr2 = cpu_gpr[reg];
- } else {
- gpr1 = cpu_gpr[reg];
- gpr2 = cpu_gpr[reg+1];
- }
- gen_qemu_st64(ctx, gpr1, EA);
- EA8 = tcg_temp_local_new();
- gen_addr_add(ctx, EA8, EA, 8);
- gen_qemu_st64(ctx, gpr2, EA8);
- tcg_temp_free(EA8);
-#endif
- } else {
- gen_qemu_st8(ctx, cpu_gpr[reg], EA);
- }
+ tcg_gen_qemu_st_tl(cpu_gpr[reg], EA, ctx->mem_idx, memop);
gen_set_label(l1);
tcg_gen_movi_tl(cpu_reserve, -1);
}
#endif
-#define STCX(name, len) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv t0; \
- if (unlikely((len == 16) && (rD(ctx->opcode) & 1))) { \
- gen_inval_exception(ctx, \
- POWERPC_EXCP_INVAL_INVAL); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_RES); \
- t0 = tcg_temp_local_new(); \
- gen_addr_reg_index(ctx, t0); \
- if (len > 1) { \
- gen_check_align(ctx, t0, (len)-1); \
- } \
- gen_conditional_store(ctx, t0, rS(ctx->opcode), len); \
- tcg_temp_free(t0); \
-}
-
-STCX(stbcx_, 1);
-STCX(sthcx_, 2);
-STCX(stwcx_, 4);
+#define STCX(name, memop) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv t0; \
+ int len = MEMOP_GET_SIZE(memop); \
+ gen_set_access_type(ctx, ACCESS_RES); \
+ t0 = tcg_temp_local_new(); \
+ gen_addr_reg_index(ctx, t0); \
+ if (len > 1) { \
+ gen_check_align(ctx, t0, (len) - 1); \
+ } \
+ gen_conditional_store(ctx, t0, rS(ctx->opcode), memop); \
+ tcg_temp_free(t0); \
+}
+
+STCX(stbcx_, DEF_MEMOP(MO_UB))
+STCX(sthcx_, DEF_MEMOP(MO_UW))
+STCX(stwcx_, DEF_MEMOP(MO_UL))
#if defined(TARGET_PPC64)
/* ldarx */
-LARX(ldarx, 8, ld64);
+LARX(ldarx, DEF_MEMOP(MO_Q))
+/* stdcx. */
+STCX(stdcx_, DEF_MEMOP(MO_Q))
/* lqarx */
static void gen_lqarx(DisasContext *ctx)
@@ -3558,21 +3185,63 @@ static void gen_lqarx(DisasContext *ctx)
gpr1 = cpu_gpr[rd];
gpr2 = cpu_gpr[rd+1];
}
- gen_qemu_ld64(ctx, gpr1, EA);
+ tcg_gen_qemu_ld_i64(gpr1, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
tcg_gen_mov_tl(cpu_reserve, EA);
-
gen_addr_add(ctx, EA, EA, 8);
- gen_qemu_ld64(ctx, gpr2, EA);
+ tcg_gen_qemu_ld_i64(gpr2, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
tcg_gen_st_tl(gpr1, cpu_env, offsetof(CPUPPCState, reserve_val));
tcg_gen_st_tl(gpr2, cpu_env, offsetof(CPUPPCState, reserve_val2));
+ tcg_temp_free(EA);
+}
+
+/* stqcx. */
+static void gen_stqcx_(DisasContext *ctx)
+{
+ TCGv EA;
+ int reg = rS(ctx->opcode);
+ int len = 16;
+#if !defined(CONFIG_USER_ONLY)
+ TCGLabel *l1;
+ TCGv gpr1, gpr2;
+#endif
+
+ if (unlikely((rD(ctx->opcode) & 1))) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_RES);
+ EA = tcg_temp_local_new();
+ gen_addr_reg_index(ctx, EA);
+ if (len > 1) {
+ gen_check_align(ctx, EA, (len) - 1);
+ }
+
+#if defined(CONFIG_USER_ONLY)
+ gen_conditional_store(ctx, EA, reg, 16);
+#else
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+ l1 = gen_new_label();
+ tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
+
+ if (unlikely(ctx->le_mode)) {
+ gpr1 = cpu_gpr[reg + 1];
+ gpr2 = cpu_gpr[reg];
+ } else {
+ gpr1 = cpu_gpr[reg];
+ gpr2 = cpu_gpr[reg + 1];
+ }
+ tcg_gen_qemu_st_tl(gpr1, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
+ gen_addr_add(ctx, EA, EA, 8);
+ tcg_gen_qemu_st_tl(gpr2, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_reserve, -1);
+#endif
tcg_temp_free(EA);
}
-/* stdcx. */
-STCX(stdcx_, 8);
-STCX(stqcx_, 16);
#endif /* defined(TARGET_PPC64) */
/* sync */
@@ -3589,7 +3258,7 @@ static void gen_sync(DisasContext *ctx)
* check MSR_PR as well.
*/
if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) {
- gen_check_tlb_flush(ctx);
+ gen_check_tlb_flush(ctx, true);
}
}
@@ -3601,7 +3270,7 @@ static void gen_wait(DisasContext *ctx)
-offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
tcg_temp_free_i32(t0);
/* Stop translation, as the CPU is supposed to sleep from now */
- gen_exception_err(ctx, EXCP_HLT, 1);
+ gen_exception_nip(ctx, EXCP_HLT, ctx->nip);
}
#if defined(TARGET_PPC64)
@@ -3666,336 +3335,6 @@ static void gen_rvwinkle(DisasContext *ctx)
}
#endif /* #if defined(TARGET_PPC64) */
-/*** Floating-point load ***/
-#define GEN_LDF(name, ldop, opc, type) \
-static void glue(gen_, name)(DisasContext *ctx) \
-{ \
- TCGv EA; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_FLOAT); \
- EA = tcg_temp_new(); \
- gen_addr_imm_index(ctx, EA, 0); \
- gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
- tcg_temp_free(EA); \
-}
-
-#define GEN_LDUF(name, ldop, opc, type) \
-static void glue(gen_, name##u)(DisasContext *ctx) \
-{ \
- TCGv EA; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- if (unlikely(rA(ctx->opcode) == 0)) { \
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_FLOAT); \
- EA = tcg_temp_new(); \
- gen_addr_imm_index(ctx, EA, 0); \
- gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
- tcg_temp_free(EA); \
-}
-
-#define GEN_LDUXF(name, ldop, opc, type) \
-static void glue(gen_, name##ux)(DisasContext *ctx) \
-{ \
- TCGv EA; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- if (unlikely(rA(ctx->opcode) == 0)) { \
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_FLOAT); \
- EA = tcg_temp_new(); \
- gen_addr_reg_index(ctx, EA); \
- gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
- tcg_temp_free(EA); \
-}
-
-#define GEN_LDXF(name, ldop, opc2, opc3, type) \
-static void glue(gen_, name##x)(DisasContext *ctx) \
-{ \
- TCGv EA; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_FLOAT); \
- EA = tcg_temp_new(); \
- gen_addr_reg_index(ctx, EA); \
- gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
- tcg_temp_free(EA); \
-}
-
-#define GEN_LDFS(name, ldop, op, type) \
-GEN_LDF(name, ldop, op | 0x20, type); \
-GEN_LDUF(name, ldop, op | 0x21, type); \
-GEN_LDUXF(name, ldop, op | 0x01, type); \
-GEN_LDXF(name, ldop, 0x17, op | 0x00, type)
-
-static inline void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
-{
- TCGv t0 = tcg_temp_new();
- TCGv_i32 t1 = tcg_temp_new_i32();
- gen_qemu_ld32u(ctx, t0, arg2);
- tcg_gen_trunc_tl_i32(t1, t0);
- tcg_temp_free(t0);
- gen_helper_float32_to_float64(arg1, cpu_env, t1);
- tcg_temp_free_i32(t1);
-}
-
- /* lfd lfdu lfdux lfdx */
-GEN_LDFS(lfd, ld64, 0x12, PPC_FLOAT);
- /* lfs lfsu lfsux lfsx */
-GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT);
-
-/* lfdp */
-static void gen_lfdp(DisasContext *ctx)
-{
- TCGv EA;
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- gen_set_access_type(ctx, ACCESS_FLOAT);
- EA = tcg_temp_new();
- gen_addr_imm_index(ctx, EA, 0);
- /* We only need to swap high and low halves. gen_qemu_ld64 does necessary
- 64-bit byteswap already. */
- if (unlikely(ctx->le_mode)) {
- gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
- tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
- } else {
- gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
- tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
- }
- tcg_temp_free(EA);
-}
-
-/* lfdpx */
-static void gen_lfdpx(DisasContext *ctx)
-{
- TCGv EA;
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- gen_set_access_type(ctx, ACCESS_FLOAT);
- EA = tcg_temp_new();
- gen_addr_reg_index(ctx, EA);
- /* We only need to swap high and low halves. gen_qemu_ld64 does necessary
- 64-bit byteswap already. */
- if (unlikely(ctx->le_mode)) {
- gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
- tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
- } else {
- gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
- tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
- }
- tcg_temp_free(EA);
-}
-
-/* lfiwax */
-static void gen_lfiwax(DisasContext *ctx)
-{
- TCGv EA;
- TCGv t0;
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- gen_set_access_type(ctx, ACCESS_FLOAT);
- EA = tcg_temp_new();
- t0 = tcg_temp_new();
- gen_addr_reg_index(ctx, EA);
- gen_qemu_ld32s(ctx, t0, EA);
- tcg_gen_ext_tl_i64(cpu_fpr[rD(ctx->opcode)], t0);
- tcg_temp_free(EA);
- tcg_temp_free(t0);
-}
-
-/* lfiwzx */
-static void gen_lfiwzx(DisasContext *ctx)
-{
- TCGv EA;
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- gen_set_access_type(ctx, ACCESS_FLOAT);
- EA = tcg_temp_new();
- gen_addr_reg_index(ctx, EA);
- gen_qemu_ld32u_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
- tcg_temp_free(EA);
-}
-/*** Floating-point store ***/
-#define GEN_STF(name, stop, opc, type) \
-static void glue(gen_, name)(DisasContext *ctx) \
-{ \
- TCGv EA; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_FLOAT); \
- EA = tcg_temp_new(); \
- gen_addr_imm_index(ctx, EA, 0); \
- gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
- tcg_temp_free(EA); \
-}
-
-#define GEN_STUF(name, stop, opc, type) \
-static void glue(gen_, name##u)(DisasContext *ctx) \
-{ \
- TCGv EA; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- if (unlikely(rA(ctx->opcode) == 0)) { \
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_FLOAT); \
- EA = tcg_temp_new(); \
- gen_addr_imm_index(ctx, EA, 0); \
- gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
- tcg_temp_free(EA); \
-}
-
-#define GEN_STUXF(name, stop, opc, type) \
-static void glue(gen_, name##ux)(DisasContext *ctx) \
-{ \
- TCGv EA; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- if (unlikely(rA(ctx->opcode) == 0)) { \
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_FLOAT); \
- EA = tcg_temp_new(); \
- gen_addr_reg_index(ctx, EA); \
- gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
- tcg_temp_free(EA); \
-}
-
-#define GEN_STXF(name, stop, opc2, opc3, type) \
-static void glue(gen_, name##x)(DisasContext *ctx) \
-{ \
- TCGv EA; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_FLOAT); \
- EA = tcg_temp_new(); \
- gen_addr_reg_index(ctx, EA); \
- gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
- tcg_temp_free(EA); \
-}
-
-#define GEN_STFS(name, stop, op, type) \
-GEN_STF(name, stop, op | 0x20, type); \
-GEN_STUF(name, stop, op | 0x21, type); \
-GEN_STUXF(name, stop, op | 0x01, type); \
-GEN_STXF(name, stop, 0x17, op | 0x00, type)
-
-static inline void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
-{
- TCGv_i32 t0 = tcg_temp_new_i32();
- TCGv t1 = tcg_temp_new();
- gen_helper_float64_to_float32(t0, cpu_env, arg1);
- tcg_gen_extu_i32_tl(t1, t0);
- tcg_temp_free_i32(t0);
- gen_qemu_st32(ctx, t1, arg2);
- tcg_temp_free(t1);
-}
-
-/* stfd stfdu stfdux stfdx */
-GEN_STFS(stfd, st64, 0x16, PPC_FLOAT);
-/* stfs stfsu stfsux stfsx */
-GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT);
-
-/* stfdp */
-static void gen_stfdp(DisasContext *ctx)
-{
- TCGv EA;
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- gen_set_access_type(ctx, ACCESS_FLOAT);
- EA = tcg_temp_new();
- gen_addr_imm_index(ctx, EA, 0);
- /* We only need to swap high and low halves. gen_qemu_st64 does necessary
- 64-bit byteswap already. */
- if (unlikely(ctx->le_mode)) {
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
- tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
- } else {
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
- tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
- }
- tcg_temp_free(EA);
-}
-
-/* stfdpx */
-static void gen_stfdpx(DisasContext *ctx)
-{
- TCGv EA;
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- gen_set_access_type(ctx, ACCESS_FLOAT);
- EA = tcg_temp_new();
- gen_addr_reg_index(ctx, EA);
- /* We only need to swap high and low halves. gen_qemu_st64 does necessary
- 64-bit byteswap already. */
- if (unlikely(ctx->le_mode)) {
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
- tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
- } else {
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
- tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
- }
- tcg_temp_free(EA);
-}
-
-/* Optional: */
-static inline void gen_qemu_st32fiw(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
-{
- TCGv t0 = tcg_temp_new();
- tcg_gen_trunc_i64_tl(t0, arg1),
- gen_qemu_st32(ctx, t0, arg2);
- tcg_temp_free(t0);
-}
-/* stfiwx */
-GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX);
-
static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip)
{
#if defined(TARGET_PPC64)
@@ -4034,10 +3373,7 @@ static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
(CPU_BRANCH_STEP | CPU_SINGLE_STEP)) &&
(ctx->exception == POWERPC_EXCP_BRANCH ||
ctx->exception == POWERPC_EXCP_TRACE)) {
- target_ulong tmp = ctx->nip;
- ctx->nip = dest;
- gen_exception(ctx, POWERPC_EXCP_TRACE);
- ctx->nip = tmp;
+ gen_exception_nip(ctx, POWERPC_EXCP_TRACE, dest);
}
if (ctx->singlestep_enabled & GDBSTUB_SINGLE_STEP) {
gen_debug_exception(ctx);
@@ -4072,7 +3408,7 @@ static void gen_b(DisasContext *ctx)
if (LK(ctx->opcode)) {
gen_setlr(ctx, ctx->nip);
}
- gen_update_cfar(ctx, ctx->nip);
+ gen_update_cfar(ctx, ctx->nip - 4);
gen_goto_tb(ctx, 0, target);
}
@@ -4137,7 +3473,7 @@ static inline void gen_bcond(DisasContext *ctx, int type)
}
tcg_temp_free_i32(temp);
}
- gen_update_cfar(ctx, ctx->nip);
+ gen_update_cfar(ctx, ctx->nip - 4);
if (type == BCOND_IM) {
target_ulong li = (target_long)((int16_t)(BD(ctx->opcode)));
if (likely(AA(ctx->opcode) == 0)) {
@@ -4145,8 +3481,10 @@ static inline void gen_bcond(DisasContext *ctx, int type)
} else {
gen_goto_tb(ctx, 0, li);
}
- gen_set_label(l1);
- gen_goto_tb(ctx, 1, ctx->nip);
+ if ((bo & 0x14) != 0x14) {
+ gen_set_label(l1);
+ gen_goto_tb(ctx, 1, ctx->nip);
+ }
} else {
if (NARROW_MODE(ctx)) {
tcg_gen_andi_tl(cpu_nip, target, (uint32_t)~3);
@@ -4154,9 +3492,11 @@ static inline void gen_bcond(DisasContext *ctx, int type)
tcg_gen_andi_tl(cpu_nip, target, ~3);
}
tcg_gen_exit_tb(0);
- gen_set_label(l1);
- gen_update_nip(ctx, ctx->nip);
- tcg_gen_exit_tb(0);
+ if ((bo & 0x14) != 0x14) {
+ gen_set_label(l1);
+ gen_update_nip(ctx, ctx->nip);
+ tcg_gen_exit_tb(0);
+ }
}
if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
tcg_temp_free(target);
@@ -4246,13 +3586,16 @@ static void gen_rfi(DisasContext *ctx)
#if defined(CONFIG_USER_ONLY)
GEN_PRIV;
#else
- /* FIXME: This instruction doesn't exist anymore on 64-bit server
- * processors compliant with arch 2.x, we should remove it there,
- * but we need to fix OpenBIOS not to use it on 970 first
+ /* This instruction doesn't exist anymore on 64-bit server
+ * processors compliant with arch 2.x
*/
+ if (ctx->insns_flags & PPC_SEGMENT_64B) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
/* Restore CPU state */
CHK_SV;
- gen_update_cfar(ctx, ctx->nip);
+ gen_update_cfar(ctx, ctx->nip - 4);
gen_helper_rfi(cpu_env);
gen_sync_exception(ctx);
#endif
@@ -4266,7 +3609,7 @@ static void gen_rfid(DisasContext *ctx)
#else
/* Restore CPU state */
CHK_SV;
- gen_update_cfar(ctx, ctx->nip);
+ gen_update_cfar(ctx, ctx->nip - 4);
gen_helper_rfid(cpu_env);
gen_sync_exception(ctx);
#endif
@@ -4301,12 +3644,30 @@ static void gen_sc(DisasContext *ctx)
/*** Trap ***/
+/* Check for unconditional traps (always or never) */
+static bool check_unconditional_trap(DisasContext *ctx)
+{
+ /* Trap never */
+ if (TO(ctx->opcode) == 0) {
+ return true;
+ }
+ /* Trap always */
+ if (TO(ctx->opcode) == 31) {
+ gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
+ return true;
+ }
+ return false;
+}
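For reference, a minimal standalone sketch of the trap-condition semantics the
new check relies on; the TO bit assignments follow the tw/td description in the
ISA, and the helper name here is only illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    /* TO selects which comparisons trap: 0x10 signed <, 0x08 signed >,
     * 0x04 equal, 0x02 unsigned <, 0x01 unsigned >. */
    static bool trap_taken(unsigned to, int64_t a, int64_t b)
    {
        return ((to & 0x10) && a < b)  ||
               ((to & 0x08) && a > b)  ||
               ((to & 0x04) && a == b) ||
               ((to & 0x02) && (uint64_t)a < (uint64_t)b) ||
               ((to & 0x01) && (uint64_t)a > (uint64_t)b);
    }

TO == 0 selects no condition and TO == 31 selects all of them (one of <, == and
> always holds), so both cases can be resolved at translation time without
emitting a runtime comparison.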
+
/* tw */
static void gen_tw(DisasContext *ctx)
{
- TCGv_i32 t0 = tcg_const_i32(TO(ctx->opcode));
- /* Update the nip since this might generate a trap exception */
- gen_update_nip(ctx, ctx->nip);
+ TCGv_i32 t0;
+
+ if (check_unconditional_trap(ctx)) {
+ return;
+ }
+ t0 = tcg_const_i32(TO(ctx->opcode));
gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
t0);
tcg_temp_free_i32(t0);
@@ -4315,10 +3676,14 @@ static void gen_tw(DisasContext *ctx)
/* twi */
static void gen_twi(DisasContext *ctx)
{
- TCGv t0 = tcg_const_tl(SIMM(ctx->opcode));
- TCGv_i32 t1 = tcg_const_i32(TO(ctx->opcode));
- /* Update the nip since this might generate a trap exception */
- gen_update_nip(ctx, ctx->nip);
+ TCGv t0;
+ TCGv_i32 t1;
+
+ if (check_unconditional_trap(ctx)) {
+ return;
+ }
+ t0 = tcg_const_tl(SIMM(ctx->opcode));
+ t1 = tcg_const_i32(TO(ctx->opcode));
gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
tcg_temp_free(t0);
tcg_temp_free_i32(t1);
@@ -4328,9 +3693,12 @@ static void gen_twi(DisasContext *ctx)
/* td */
static void gen_td(DisasContext *ctx)
{
- TCGv_i32 t0 = tcg_const_i32(TO(ctx->opcode));
- /* Update the nip since this might generate a trap exception */
- gen_update_nip(ctx, ctx->nip);
+ TCGv_i32 t0;
+
+ if (check_unconditional_trap(ctx)) {
+ return;
+ }
+ t0 = tcg_const_i32(TO(ctx->opcode));
gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
t0);
tcg_temp_free_i32(t0);
@@ -4339,10 +3707,14 @@ static void gen_td(DisasContext *ctx)
/* tdi */
static void gen_tdi(DisasContext *ctx)
{
- TCGv t0 = tcg_const_tl(SIMM(ctx->opcode));
- TCGv_i32 t1 = tcg_const_i32(TO(ctx->opcode));
- /* Update the nip since this might generate a trap exception */
- gen_update_nip(ctx, ctx->nip);
+ TCGv t0;
+ TCGv_i32 t1;
+
+ if (check_unconditional_trap(ctx)) {
+ return;
+ }
+ t0 = tcg_const_tl(SIMM(ctx->opcode));
+ t1 = tcg_const_i32(TO(ctx->opcode));
gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
tcg_temp_free(t0);
tcg_temp_free_i32(t1);
@@ -4684,6 +4056,27 @@ static void gen_mtspr(DisasContext *ctx)
}
}
+#if defined(TARGET_PPC64)
+/* setb */
+static void gen_setb(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t8 = tcg_temp_new_i32();
+ TCGv_i32 tm1 = tcg_temp_new_i32();
+ int crf = crfS(ctx->opcode);
+
+ tcg_gen_setcondi_i32(TCG_COND_GEU, t0, cpu_crf[crf], 4);
+ tcg_gen_movi_i32(t8, 8);
+ tcg_gen_movi_i32(tm1, -1);
+ tcg_gen_movcond_i32(TCG_COND_GEU, t0, cpu_crf[crf], t8, tm1, t0);
+ tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
+
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t8);
+ tcg_temp_free_i32(tm1);
+}
+#endif
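For reference, a minimal sketch of what the two unsigned comparisons above
compute, assuming the CR field is held as a 4-bit value with LT = 8, GT = 4,
EQ = 2 and SO = 1 (the function name is illustrative):

    static long setb_result(unsigned crf)
    {
        if (crf & 8) {          /* LT set, i.e. crf >= 8       */
            return -1;
        } else if (crf & 4) {   /* GT set with LT clear: 4..7  */
            return 1;
        }
        return 0;               /* neither LT nor GT set       */
    }

Expressing the tests as crf >= 8 and crf >= 4 lets the whole selection be done
with a single setcond plus a single movcond, as above.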
+
/*** Cache management ***/
/* dcbf */
@@ -4764,27 +4157,22 @@ static void gen_dcbtls(DisasContext *ctx)
static void gen_dcbz(DisasContext *ctx)
{
TCGv tcgv_addr;
- TCGv_i32 tcgv_is_dcbzl;
- int is_dcbzl = ctx->opcode & 0x00200000 ? 1 : 0;
+ TCGv_i32 tcgv_op;
gen_set_access_type(ctx, ACCESS_CACHE);
- /* NIP cannot be restored if the memory exception comes from a helper */
- gen_update_nip(ctx, ctx->nip - 4);
tcgv_addr = tcg_temp_new();
- tcgv_is_dcbzl = tcg_const_i32(is_dcbzl);
-
+ tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000);
gen_addr_reg_index(ctx, tcgv_addr);
- gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_is_dcbzl);
-
+ gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_op);
tcg_temp_free(tcgv_addr);
- tcg_temp_free_i32(tcgv_is_dcbzl);
+ tcg_temp_free_i32(tcgv_op);
}
/* dst / dstt */
static void gen_dst(DisasContext *ctx)
{
if (rA(ctx->opcode) == 0) {
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
} else {
/* interpreted as no-op */
}
@@ -4794,7 +4182,7 @@ static void gen_dst(DisasContext *ctx)
static void gen_dstst(DisasContext *ctx)
{
if (rA(ctx->opcode) == 0) {
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
} else {
/* interpreted as no-op */
}
@@ -4812,8 +4200,6 @@ static void gen_icbi(DisasContext *ctx)
{
TCGv t0;
gen_set_access_type(ctx, ACCESS_CACHE);
- /* NIP cannot be restored if the memory exception comes from a helper */
- gen_update_nip(ctx, ctx->nip - 4);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
gen_helper_icbi(cpu_env, t0);
@@ -5060,6 +4446,7 @@ static void gen_tlbie(DisasContext *ctx)
#if defined(CONFIG_USER_ONLY)
GEN_PRIV;
#else
+ TCGv_i32 t1;
CHK_HV;
if (NARROW_MODE(ctx)) {
@@ -5070,6 +4457,11 @@ static void gen_tlbie(DisasContext *ctx)
} else {
gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
}
+ t1 = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
+ tcg_gen_ori_i32(t1, t1, TLB_NEED_GLOBAL_FLUSH);
+ tcg_gen_st_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
+ tcg_temp_free_i32(t1);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5081,11 +4473,10 @@ static void gen_tlbsync(DisasContext *ctx)
#else
CHK_HV;
- /* tlbsync is a nop for server, ptesync handles delayed tlb flush,
- * embedded however needs to deal with tlbsync. We don't try to be
- * fancy and swallow the overhead of checking for both.
- */
- gen_check_tlb_flush(ctx);
+ /* BookS handles the delayed TLB flush at ptesync, so tlbsync is a nop for server */
+ if (ctx->insns_flags & PPC_BOOKE) {
+ gen_check_tlb_flush(ctx, true);
+ }
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5299,8 +4690,6 @@ static void gen_lscbx(DisasContext *ctx)
TCGv_i32 t3 = tcg_const_i32(rB(ctx->opcode));
gen_addr_reg_index(ctx, t0);
- /* NIP cannot be restored if the memory exception comes from a helper */
- gen_update_nip(ctx, ctx->nip - 4);
gen_helper_lscbx(t0, cpu_env, t0, t1, t2, t3);
tcg_temp_free_i32(t1);
tcg_temp_free_i32(t2);
@@ -5942,141 +5331,6 @@ static void gen_rfsvc(DisasContext *ctx)
/* svc is not implemented for now */
-/* POWER2 specific instructions */
-/* Quad manipulation (load/store two floats at a time) */
-
-/* lfq */
-static void gen_lfq(DisasContext *ctx)
-{
- int rd = rD(ctx->opcode);
- TCGv t0;
- gen_set_access_type(ctx, ACCESS_FLOAT);
- t0 = tcg_temp_new();
- gen_addr_imm_index(ctx, t0, 0);
- gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
- gen_addr_add(ctx, t0, t0, 8);
- gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t0);
- tcg_temp_free(t0);
-}
-
-/* lfqu */
-static void gen_lfqu(DisasContext *ctx)
-{
- int ra = rA(ctx->opcode);
- int rd = rD(ctx->opcode);
- TCGv t0, t1;
- gen_set_access_type(ctx, ACCESS_FLOAT);
- t0 = tcg_temp_new();
- t1 = tcg_temp_new();
- gen_addr_imm_index(ctx, t0, 0);
- gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
- gen_addr_add(ctx, t1, t0, 8);
- gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t1);
- if (ra != 0)
- tcg_gen_mov_tl(cpu_gpr[ra], t0);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
-}
-
-/* lfqux */
-static void gen_lfqux(DisasContext *ctx)
-{
- int ra = rA(ctx->opcode);
- int rd = rD(ctx->opcode);
- gen_set_access_type(ctx, ACCESS_FLOAT);
- TCGv t0, t1;
- t0 = tcg_temp_new();
- gen_addr_reg_index(ctx, t0);
- gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
- t1 = tcg_temp_new();
- gen_addr_add(ctx, t1, t0, 8);
- gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t1);
- tcg_temp_free(t1);
- if (ra != 0)
- tcg_gen_mov_tl(cpu_gpr[ra], t0);
- tcg_temp_free(t0);
-}
-
-/* lfqx */
-static void gen_lfqx(DisasContext *ctx)
-{
- int rd = rD(ctx->opcode);
- TCGv t0;
- gen_set_access_type(ctx, ACCESS_FLOAT);
- t0 = tcg_temp_new();
- gen_addr_reg_index(ctx, t0);
- gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
- gen_addr_add(ctx, t0, t0, 8);
- gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t0);
- tcg_temp_free(t0);
-}
-
-/* stfq */
-static void gen_stfq(DisasContext *ctx)
-{
- int rd = rD(ctx->opcode);
- TCGv t0;
- gen_set_access_type(ctx, ACCESS_FLOAT);
- t0 = tcg_temp_new();
- gen_addr_imm_index(ctx, t0, 0);
- gen_qemu_st64(ctx, cpu_fpr[rd], t0);
- gen_addr_add(ctx, t0, t0, 8);
- gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t0);
- tcg_temp_free(t0);
-}
-
-/* stfqu */
-static void gen_stfqu(DisasContext *ctx)
-{
- int ra = rA(ctx->opcode);
- int rd = rD(ctx->opcode);
- TCGv t0, t1;
- gen_set_access_type(ctx, ACCESS_FLOAT);
- t0 = tcg_temp_new();
- gen_addr_imm_index(ctx, t0, 0);
- gen_qemu_st64(ctx, cpu_fpr[rd], t0);
- t1 = tcg_temp_new();
- gen_addr_add(ctx, t1, t0, 8);
- gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t1);
- tcg_temp_free(t1);
- if (ra != 0)
- tcg_gen_mov_tl(cpu_gpr[ra], t0);
- tcg_temp_free(t0);
-}
-
-/* stfqux */
-static void gen_stfqux(DisasContext *ctx)
-{
- int ra = rA(ctx->opcode);
- int rd = rD(ctx->opcode);
- TCGv t0, t1;
- gen_set_access_type(ctx, ACCESS_FLOAT);
- t0 = tcg_temp_new();
- gen_addr_reg_index(ctx, t0);
- gen_qemu_st64(ctx, cpu_fpr[rd], t0);
- t1 = tcg_temp_new();
- gen_addr_add(ctx, t1, t0, 8);
- gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t1);
- tcg_temp_free(t1);
- if (ra != 0)
- tcg_gen_mov_tl(cpu_gpr[ra], t0);
- tcg_temp_free(t0);
-}
-
-/* stfqx */
-static void gen_stfqx(DisasContext *ctx)
-{
- int rd = rD(ctx->opcode);
- TCGv t0;
- gen_set_access_type(ctx, ACCESS_FLOAT);
- t0 = tcg_temp_new();
- gen_addr_reg_index(ctx, t0);
- gen_qemu_st64(ctx, cpu_fpr[rd], t0);
- gen_addr_add(ctx, t0, t0, 8);
- gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t0);
- tcg_temp_free(t0);
-}
-
/* BookE specific instructions */
/* XXX: not implemented on 440 ? */
@@ -6326,8 +5580,6 @@ static void gen_mfdcr(DisasContext *ctx)
TCGv dcrn;
CHK_SV;
- /* NIP cannot be restored if the memory exception comes from a helper */
- gen_update_nip(ctx, ctx->nip - 4);
dcrn = tcg_const_tl(SPR(ctx->opcode));
gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, dcrn);
tcg_temp_free(dcrn);
@@ -6343,8 +5595,6 @@ static void gen_mtdcr(DisasContext *ctx)
TCGv dcrn;
CHK_SV;
- /* NIP cannot be restored if the memory exception comes from a helper */
- gen_update_nip(ctx, ctx->nip - 4);
dcrn = tcg_const_tl(SPR(ctx->opcode));
gen_helper_store_dcr(cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]);
tcg_temp_free(dcrn);
@@ -6359,8 +5609,6 @@ static void gen_mfdcrx(DisasContext *ctx)
GEN_PRIV;
#else
CHK_SV;
- /* NIP cannot be restored if the memory exception comes from a helper */
- gen_update_nip(ctx, ctx->nip - 4);
gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env,
cpu_gpr[rA(ctx->opcode)]);
/* Note: Rc update flag set leads to undefined state of Rc0 */
@@ -6375,8 +5623,6 @@ static void gen_mtdcrx(DisasContext *ctx)
GEN_PRIV;
#else
CHK_SV;
- /* NIP cannot be restored if the memory exception comes from a helper */
- gen_update_nip(ctx, ctx->nip - 4);
gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)],
cpu_gpr[rS(ctx->opcode)]);
/* Note: Rc update flag set leads to undefined state of Rc0 */
@@ -6386,8 +5632,6 @@ static void gen_mtdcrx(DisasContext *ctx)
/* mfdcrux (PPC 460) : user-mode access to DCR */
static void gen_mfdcrux(DisasContext *ctx)
{
- /* NIP cannot be restored if the memory exception comes from a helper */
- gen_update_nip(ctx, ctx->nip - 4);
gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env,
cpu_gpr[rA(ctx->opcode)]);
/* Note: Rc update flag set leads to undefined state of Rc0 */
@@ -6396,8 +5640,6 @@ static void gen_mfdcrux(DisasContext *ctx)
/* mtdcrux (PPC 460) : user-mode access to DCR */
static void gen_mtdcrux(DisasContext *ctx)
{
- /* NIP cannot be restored if the memory exception comes from a helper */
- gen_update_nip(ctx, ctx->nip - 4);
gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)],
cpu_gpr[rS(ctx->opcode)]);
/* Note: Rc update flag set leads to undefined state of Rc0 */
@@ -6696,7 +5938,6 @@ static void gen_tlbwe_booke206(DisasContext *ctx)
GEN_PRIV;
#else
CHK_SV;
- gen_update_nip(ctx, ctx->nip - 4);
gen_helper_booke206_tlbwe(cpu_env);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -6839,2952 +6080,40 @@ static void gen_msgsnd(DisasContext *ctx)
#endif /* defined(CONFIG_USER_ONLY) */
}
-/*** Altivec vector extension ***/
-/* Altivec registers moves */
-
-static inline TCGv_ptr gen_avr_ptr(int reg)
-{
- TCGv_ptr r = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(r, cpu_env, offsetof(CPUPPCState, avr[reg]));
- return r;
-}
-
-#define GEN_VR_LDX(name, opc2, opc3) \
-static void glue(gen_, name)(DisasContext *ctx) \
-{ \
- TCGv EA; \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_INT); \
- EA = tcg_temp_new(); \
- gen_addr_reg_index(ctx, EA); \
- tcg_gen_andi_tl(EA, EA, ~0xf); \
- /* We only need to swap high and low halves. gen_qemu_ld64 does necessary \
- 64-bit byteswap already. */ \
- if (ctx->le_mode) { \
- gen_qemu_ld64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
- tcg_gen_addi_tl(EA, EA, 8); \
- gen_qemu_ld64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
- } else { \
- gen_qemu_ld64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
- tcg_gen_addi_tl(EA, EA, 8); \
- gen_qemu_ld64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
- } \
- tcg_temp_free(EA); \
-}
-
-#define GEN_VR_STX(name, opc2, opc3) \
-static void gen_st##name(DisasContext *ctx) \
-{ \
- TCGv EA; \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_INT); \
- EA = tcg_temp_new(); \
- gen_addr_reg_index(ctx, EA); \
- tcg_gen_andi_tl(EA, EA, ~0xf); \
- /* We only need to swap high and low halves. gen_qemu_st64 does necessary \
- 64-bit byteswap already. */ \
- if (ctx->le_mode) { \
- gen_qemu_st64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
- tcg_gen_addi_tl(EA, EA, 8); \
- gen_qemu_st64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
- } else { \
- gen_qemu_st64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
- tcg_gen_addi_tl(EA, EA, 8); \
- gen_qemu_st64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
- } \
- tcg_temp_free(EA); \
-}
-
-#define GEN_VR_LVE(name, opc2, opc3, size) \
-static void gen_lve##name(DisasContext *ctx) \
- { \
- TCGv EA; \
- TCGv_ptr rs; \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_INT); \
- EA = tcg_temp_new(); \
- gen_addr_reg_index(ctx, EA); \
- if (size > 1) { \
- tcg_gen_andi_tl(EA, EA, ~(size - 1)); \
- } \
- rs = gen_avr_ptr(rS(ctx->opcode)); \
- gen_helper_lve##name(cpu_env, rs, EA); \
- tcg_temp_free(EA); \
- tcg_temp_free_ptr(rs); \
- }
-
-#define GEN_VR_STVE(name, opc2, opc3, size) \
-static void gen_stve##name(DisasContext *ctx) \
- { \
- TCGv EA; \
- TCGv_ptr rs; \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_INT); \
- EA = tcg_temp_new(); \
- gen_addr_reg_index(ctx, EA); \
- if (size > 1) { \
- tcg_gen_andi_tl(EA, EA, ~(size - 1)); \
- } \
- rs = gen_avr_ptr(rS(ctx->opcode)); \
- gen_helper_stve##name(cpu_env, rs, EA); \
- tcg_temp_free(EA); \
- tcg_temp_free_ptr(rs); \
- }
-
-GEN_VR_LDX(lvx, 0x07, 0x03);
-/* As we don't emulate the cache, lvxl is strictly equivalent to lvx */
-GEN_VR_LDX(lvxl, 0x07, 0x0B);
-
-GEN_VR_LVE(bx, 0x07, 0x00, 1);
-GEN_VR_LVE(hx, 0x07, 0x01, 2);
-GEN_VR_LVE(wx, 0x07, 0x02, 4);
-
-GEN_VR_STX(svx, 0x07, 0x07);
-/* As we don't emulate the cache, stvxl is strictly equivalent to stvx */
-GEN_VR_STX(svxl, 0x07, 0x0F);
-
-GEN_VR_STVE(bx, 0x07, 0x04, 1);
-GEN_VR_STVE(hx, 0x07, 0x05, 2);
-GEN_VR_STVE(wx, 0x07, 0x06, 4);
-
-static void gen_lvsl(DisasContext *ctx)
-{
- TCGv_ptr rd;
- TCGv EA;
- if (unlikely(!ctx->altivec_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VPU);
- return;
- }
- EA = tcg_temp_new();
- gen_addr_reg_index(ctx, EA);
- rd = gen_avr_ptr(rD(ctx->opcode));
- gen_helper_lvsl(rd, EA);
- tcg_temp_free(EA);
- tcg_temp_free_ptr(rd);
-}
-
-static void gen_lvsr(DisasContext *ctx)
-{
- TCGv_ptr rd;
- TCGv EA;
- if (unlikely(!ctx->altivec_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VPU);
- return;
- }
- EA = tcg_temp_new();
- gen_addr_reg_index(ctx, EA);
- rd = gen_avr_ptr(rD(ctx->opcode));
- gen_helper_lvsr(rd, EA);
- tcg_temp_free(EA);
- tcg_temp_free_ptr(rd);
-}
-
-static void gen_mfvscr(DisasContext *ctx)
-{
- TCGv_i32 t;
- if (unlikely(!ctx->altivec_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VPU);
- return;
- }
- tcg_gen_movi_i64(cpu_avrh[rD(ctx->opcode)], 0);
- t = tcg_temp_new_i32();
- tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, vscr));
- tcg_gen_extu_i32_i64(cpu_avrl[rD(ctx->opcode)], t);
- tcg_temp_free_i32(t);
-}
-
-static void gen_mtvscr(DisasContext *ctx)
-{
- TCGv_ptr p;
- if (unlikely(!ctx->altivec_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VPU);
- return;
- }
- p = gen_avr_ptr(rB(ctx->opcode));
- gen_helper_mtvscr(cpu_env, p);
- tcg_temp_free_ptr(p);
-}
-
-/* Logical operations */
-#define GEN_VX_LOGICAL(name, tcg_op, opc2, opc3) \
-static void glue(gen_, name)(DisasContext *ctx) \
-{ \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- tcg_op(cpu_avrh[rD(ctx->opcode)], cpu_avrh[rA(ctx->opcode)], cpu_avrh[rB(ctx->opcode)]); \
- tcg_op(cpu_avrl[rD(ctx->opcode)], cpu_avrl[rA(ctx->opcode)], cpu_avrl[rB(ctx->opcode)]); \
-}
-
-GEN_VX_LOGICAL(vand, tcg_gen_and_i64, 2, 16);
-GEN_VX_LOGICAL(vandc, tcg_gen_andc_i64, 2, 17);
-GEN_VX_LOGICAL(vor, tcg_gen_or_i64, 2, 18);
-GEN_VX_LOGICAL(vxor, tcg_gen_xor_i64, 2, 19);
-GEN_VX_LOGICAL(vnor, tcg_gen_nor_i64, 2, 20);
-GEN_VX_LOGICAL(veqv, tcg_gen_eqv_i64, 2, 26);
-GEN_VX_LOGICAL(vnand, tcg_gen_nand_i64, 2, 22);
-GEN_VX_LOGICAL(vorc, tcg_gen_orc_i64, 2, 21);
-
-#define GEN_VXFORM(name, opc2, opc3) \
-static void glue(gen_, name)(DisasContext *ctx) \
-{ \
- TCGv_ptr ra, rb, rd; \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- ra = gen_avr_ptr(rA(ctx->opcode)); \
- rb = gen_avr_ptr(rB(ctx->opcode)); \
- rd = gen_avr_ptr(rD(ctx->opcode)); \
- gen_helper_##name (rd, ra, rb); \
- tcg_temp_free_ptr(ra); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rd); \
-}
-
-#define GEN_VXFORM_ENV(name, opc2, opc3) \
-static void glue(gen_, name)(DisasContext *ctx) \
-{ \
- TCGv_ptr ra, rb, rd; \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- ra = gen_avr_ptr(rA(ctx->opcode)); \
- rb = gen_avr_ptr(rB(ctx->opcode)); \
- rd = gen_avr_ptr(rD(ctx->opcode)); \
- gen_helper_##name(cpu_env, rd, ra, rb); \
- tcg_temp_free_ptr(ra); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rd); \
-}
-
-#define GEN_VXFORM3(name, opc2, opc3) \
-static void glue(gen_, name)(DisasContext *ctx) \
-{ \
- TCGv_ptr ra, rb, rc, rd; \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- ra = gen_avr_ptr(rA(ctx->opcode)); \
- rb = gen_avr_ptr(rB(ctx->opcode)); \
- rc = gen_avr_ptr(rC(ctx->opcode)); \
- rd = gen_avr_ptr(rD(ctx->opcode)); \
- gen_helper_##name(rd, ra, rb, rc); \
- tcg_temp_free_ptr(ra); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rc); \
- tcg_temp_free_ptr(rd); \
-}
-
-/*
- * Support for Altivec instruction pairs that use bit 31 (Rc) as
- * an opcode bit. In general, these pairs come from different
- * versions of the ISA, so we must also support a pair of flags for
- * each instruction.
- */
-#define GEN_VXFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1) \
-static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
-{ \
- if ((Rc(ctx->opcode) == 0) && \
- ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
- gen_##name0(ctx); \
- } else if ((Rc(ctx->opcode) == 1) && \
- ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
- gen_##name1(ctx); \
- } else { \
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
- } \
-}
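As a concrete example, the vmulouw/vmuluwm instantiation further below reduces,
once the PPC_NONE terms (value 0) drop out, to roughly the following hand
expansion:

    /* Hand expansion for illustration only. */
    static void gen_vmulouw_vmuluwm(DisasContext *ctx)
    {
        if (Rc(ctx->opcode) == 0 && (ctx->insns_flags & PPC_ALTIVEC)) {
            gen_vmulouw(ctx);               /* original AltiVec encoding */
        } else if (Rc(ctx->opcode) == 1 &&
                   (ctx->insns_flags2 & PPC2_ALTIVEC_207)) {
            gen_vmuluwm(ctx);               /* added in ISA 2.07 */
        } else {
            gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        }
    }

Bit 31 therefore acts as an extra opcode bit here rather than as a record bit.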
-
-GEN_VXFORM(vaddubm, 0, 0);
-GEN_VXFORM(vadduhm, 0, 1);
-GEN_VXFORM(vadduwm, 0, 2);
-GEN_VXFORM(vaddudm, 0, 3);
-GEN_VXFORM(vsububm, 0, 16);
-GEN_VXFORM(vsubuhm, 0, 17);
-GEN_VXFORM(vsubuwm, 0, 18);
-GEN_VXFORM(vsubudm, 0, 19);
-GEN_VXFORM(vmaxub, 1, 0);
-GEN_VXFORM(vmaxuh, 1, 1);
-GEN_VXFORM(vmaxuw, 1, 2);
-GEN_VXFORM(vmaxud, 1, 3);
-GEN_VXFORM(vmaxsb, 1, 4);
-GEN_VXFORM(vmaxsh, 1, 5);
-GEN_VXFORM(vmaxsw, 1, 6);
-GEN_VXFORM(vmaxsd, 1, 7);
-GEN_VXFORM(vminub, 1, 8);
-GEN_VXFORM(vminuh, 1, 9);
-GEN_VXFORM(vminuw, 1, 10);
-GEN_VXFORM(vminud, 1, 11);
-GEN_VXFORM(vminsb, 1, 12);
-GEN_VXFORM(vminsh, 1, 13);
-GEN_VXFORM(vminsw, 1, 14);
-GEN_VXFORM(vminsd, 1, 15);
-GEN_VXFORM(vavgub, 1, 16);
-GEN_VXFORM(vavguh, 1, 17);
-GEN_VXFORM(vavguw, 1, 18);
-GEN_VXFORM(vavgsb, 1, 20);
-GEN_VXFORM(vavgsh, 1, 21);
-GEN_VXFORM(vavgsw, 1, 22);
-GEN_VXFORM(vmrghb, 6, 0);
-GEN_VXFORM(vmrghh, 6, 1);
-GEN_VXFORM(vmrghw, 6, 2);
-GEN_VXFORM(vmrglb, 6, 4);
-GEN_VXFORM(vmrglh, 6, 5);
-GEN_VXFORM(vmrglw, 6, 6);
-
-static void gen_vmrgew(DisasContext *ctx)
-{
- TCGv_i64 tmp;
- int VT, VA, VB;
- if (unlikely(!ctx->altivec_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VPU);
- return;
- }
- VT = rD(ctx->opcode);
- VA = rA(ctx->opcode);
- VB = rB(ctx->opcode);
- tmp = tcg_temp_new_i64();
- tcg_gen_shri_i64(tmp, cpu_avrh[VB], 32);
- tcg_gen_deposit_i64(cpu_avrh[VT], cpu_avrh[VA], tmp, 0, 32);
- tcg_gen_shri_i64(tmp, cpu_avrl[VB], 32);
- tcg_gen_deposit_i64(cpu_avrl[VT], cpu_avrl[VA], tmp, 0, 32);
- tcg_temp_free_i64(tmp);
-}
-
-static void gen_vmrgow(DisasContext *ctx)
-{
- int VT, VA, VB;
- if (unlikely(!ctx->altivec_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VPU);
- return;
- }
- VT = rD(ctx->opcode);
- VA = rA(ctx->opcode);
- VB = rB(ctx->opcode);
-
- tcg_gen_deposit_i64(cpu_avrh[VT], cpu_avrh[VB], cpu_avrh[VA], 32, 32);
- tcg_gen_deposit_i64(cpu_avrl[VT], cpu_avrl[VB], cpu_avrl[VA], 32, 32);
-}
-
-GEN_VXFORM(vmuloub, 4, 0);
-GEN_VXFORM(vmulouh, 4, 1);
-GEN_VXFORM(vmulouw, 4, 2);
-GEN_VXFORM(vmuluwm, 4, 2);
-GEN_VXFORM_DUAL(vmulouw, PPC_ALTIVEC, PPC_NONE,
- vmuluwm, PPC_NONE, PPC2_ALTIVEC_207)
-GEN_VXFORM(vmulosb, 4, 4);
-GEN_VXFORM(vmulosh, 4, 5);
-GEN_VXFORM(vmulosw, 4, 6);
-GEN_VXFORM(vmuleub, 4, 8);
-GEN_VXFORM(vmuleuh, 4, 9);
-GEN_VXFORM(vmuleuw, 4, 10);
-GEN_VXFORM(vmulesb, 4, 12);
-GEN_VXFORM(vmulesh, 4, 13);
-GEN_VXFORM(vmulesw, 4, 14);
-GEN_VXFORM(vslb, 2, 4);
-GEN_VXFORM(vslh, 2, 5);
-GEN_VXFORM(vslw, 2, 6);
-GEN_VXFORM(vsld, 2, 23);
-GEN_VXFORM(vsrb, 2, 8);
-GEN_VXFORM(vsrh, 2, 9);
-GEN_VXFORM(vsrw, 2, 10);
-GEN_VXFORM(vsrd, 2, 27);
-GEN_VXFORM(vsrab, 2, 12);
-GEN_VXFORM(vsrah, 2, 13);
-GEN_VXFORM(vsraw, 2, 14);
-GEN_VXFORM(vsrad, 2, 15);
-GEN_VXFORM(vslo, 6, 16);
-GEN_VXFORM(vsro, 6, 17);
-GEN_VXFORM(vaddcuw, 0, 6);
-GEN_VXFORM(vsubcuw, 0, 22);
-GEN_VXFORM_ENV(vaddubs, 0, 8);
-GEN_VXFORM_ENV(vadduhs, 0, 9);
-GEN_VXFORM_ENV(vadduws, 0, 10);
-GEN_VXFORM_ENV(vaddsbs, 0, 12);
-GEN_VXFORM_ENV(vaddshs, 0, 13);
-GEN_VXFORM_ENV(vaddsws, 0, 14);
-GEN_VXFORM_ENV(vsububs, 0, 24);
-GEN_VXFORM_ENV(vsubuhs, 0, 25);
-GEN_VXFORM_ENV(vsubuws, 0, 26);
-GEN_VXFORM_ENV(vsubsbs, 0, 28);
-GEN_VXFORM_ENV(vsubshs, 0, 29);
-GEN_VXFORM_ENV(vsubsws, 0, 30);
-GEN_VXFORM(vadduqm, 0, 4);
-GEN_VXFORM(vaddcuq, 0, 5);
-GEN_VXFORM3(vaddeuqm, 30, 0);
-GEN_VXFORM3(vaddecuq, 30, 0);
-GEN_VXFORM_DUAL(vaddeuqm, PPC_NONE, PPC2_ALTIVEC_207, \
- vaddecuq, PPC_NONE, PPC2_ALTIVEC_207)
-GEN_VXFORM(vsubuqm, 0, 20);
-GEN_VXFORM(vsubcuq, 0, 21);
-GEN_VXFORM3(vsubeuqm, 31, 0);
-GEN_VXFORM3(vsubecuq, 31, 0);
-GEN_VXFORM_DUAL(vsubeuqm, PPC_NONE, PPC2_ALTIVEC_207, \
- vsubecuq, PPC_NONE, PPC2_ALTIVEC_207)
-GEN_VXFORM(vrlb, 2, 0);
-GEN_VXFORM(vrlh, 2, 1);
-GEN_VXFORM(vrlw, 2, 2);
-GEN_VXFORM(vrld, 2, 3);
-GEN_VXFORM(vsl, 2, 7);
-GEN_VXFORM(vsr, 2, 11);
-GEN_VXFORM_ENV(vpkuhum, 7, 0);
-GEN_VXFORM_ENV(vpkuwum, 7, 1);
-GEN_VXFORM_ENV(vpkudum, 7, 17);
-GEN_VXFORM_ENV(vpkuhus, 7, 2);
-GEN_VXFORM_ENV(vpkuwus, 7, 3);
-GEN_VXFORM_ENV(vpkudus, 7, 19);
-GEN_VXFORM_ENV(vpkshus, 7, 4);
-GEN_VXFORM_ENV(vpkswus, 7, 5);
-GEN_VXFORM_ENV(vpksdus, 7, 21);
-GEN_VXFORM_ENV(vpkshss, 7, 6);
-GEN_VXFORM_ENV(vpkswss, 7, 7);
-GEN_VXFORM_ENV(vpksdss, 7, 23);
-GEN_VXFORM(vpkpx, 7, 12);
-GEN_VXFORM_ENV(vsum4ubs, 4, 24);
-GEN_VXFORM_ENV(vsum4sbs, 4, 28);
-GEN_VXFORM_ENV(vsum4shs, 4, 25);
-GEN_VXFORM_ENV(vsum2sws, 4, 26);
-GEN_VXFORM_ENV(vsumsws, 4, 30);
-GEN_VXFORM_ENV(vaddfp, 5, 0);
-GEN_VXFORM_ENV(vsubfp, 5, 1);
-GEN_VXFORM_ENV(vmaxfp, 5, 16);
-GEN_VXFORM_ENV(vminfp, 5, 17);
-
-#define GEN_VXRFORM1(opname, name, str, opc2, opc3) \
-static void glue(gen_, name)(DisasContext *ctx) \
- { \
- TCGv_ptr ra, rb, rd; \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- ra = gen_avr_ptr(rA(ctx->opcode)); \
- rb = gen_avr_ptr(rB(ctx->opcode)); \
- rd = gen_avr_ptr(rD(ctx->opcode)); \
- gen_helper_##opname(cpu_env, rd, ra, rb); \
- tcg_temp_free_ptr(ra); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rd); \
- }
-
-#define GEN_VXRFORM(name, opc2, opc3) \
- GEN_VXRFORM1(name, name, #name, opc2, opc3) \
- GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))
-
-/*
- * Support for Altivec instructions that use bit 31 (Rc) as an opcode
- * bit but also use bit 21 as an actual Rc bit. In general, these pairs
- * come from different versions of the ISA, so we must also support a
- * pair of flags for each instruction.
- */
-#define GEN_VXRFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1) \
-static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
-{ \
- if ((Rc(ctx->opcode) == 0) && \
- ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
- if (Rc21(ctx->opcode) == 0) { \
- gen_##name0(ctx); \
- } else { \
- gen_##name0##_(ctx); \
- } \
- } else if ((Rc(ctx->opcode) == 1) && \
- ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
- if (Rc21(ctx->opcode) == 0) { \
- gen_##name1(ctx); \
- } else { \
- gen_##name1##_(ctx); \
- } \
- } else { \
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
- } \
-}
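Unlike the plain dual form, these pairs dispatch on two bits: Rc (bit 31) picks
the instruction and Rc21 (bit 21) remains the record bit of whichever one was
picked. Expanded by hand for illustration, the vcmpeqfp/vcmpequd instantiation
below reduces to roughly:

    /* Hand expansion for illustration only. */
    static void gen_vcmpeqfp_vcmpequd(DisasContext *ctx)
    {
        if (Rc(ctx->opcode) == 0 && (ctx->insns_flags & PPC_ALTIVEC)) {
            if (Rc21(ctx->opcode) == 0) {
                gen_vcmpeqfp(ctx);          /* vcmpeqfp  */
            } else {
                gen_vcmpeqfp_(ctx);         /* vcmpeqfp. */
            }
        } else if (Rc(ctx->opcode) == 1 &&
                   (ctx->insns_flags2 & PPC2_ALTIVEC_207)) {
            if (Rc21(ctx->opcode) == 0) {
                gen_vcmpequd(ctx);          /* vcmpequd  */
            } else {
                gen_vcmpequd_(ctx);         /* vcmpequd. */
            }
        } else {
            gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        }
    }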
-
-GEN_VXRFORM(vcmpequb, 3, 0)
-GEN_VXRFORM(vcmpequh, 3, 1)
-GEN_VXRFORM(vcmpequw, 3, 2)
-GEN_VXRFORM(vcmpequd, 3, 3)
-GEN_VXRFORM(vcmpgtsb, 3, 12)
-GEN_VXRFORM(vcmpgtsh, 3, 13)
-GEN_VXRFORM(vcmpgtsw, 3, 14)
-GEN_VXRFORM(vcmpgtsd, 3, 15)
-GEN_VXRFORM(vcmpgtub, 3, 8)
-GEN_VXRFORM(vcmpgtuh, 3, 9)
-GEN_VXRFORM(vcmpgtuw, 3, 10)
-GEN_VXRFORM(vcmpgtud, 3, 11)
-GEN_VXRFORM(vcmpeqfp, 3, 3)
-GEN_VXRFORM(vcmpgefp, 3, 7)
-GEN_VXRFORM(vcmpgtfp, 3, 11)
-GEN_VXRFORM(vcmpbfp, 3, 15)
-
-GEN_VXRFORM_DUAL(vcmpeqfp, PPC_ALTIVEC, PPC_NONE, \
- vcmpequd, PPC_NONE, PPC2_ALTIVEC_207)
-GEN_VXRFORM_DUAL(vcmpbfp, PPC_ALTIVEC, PPC_NONE, \
- vcmpgtsd, PPC_NONE, PPC2_ALTIVEC_207)
-GEN_VXRFORM_DUAL(vcmpgtfp, PPC_ALTIVEC, PPC_NONE, \
- vcmpgtud, PPC_NONE, PPC2_ALTIVEC_207)
-
-#define GEN_VXFORM_SIMM(name, opc2, opc3) \
-static void glue(gen_, name)(DisasContext *ctx) \
- { \
- TCGv_ptr rd; \
- TCGv_i32 simm; \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- simm = tcg_const_i32(SIMM5(ctx->opcode)); \
- rd = gen_avr_ptr(rD(ctx->opcode)); \
- gen_helper_##name (rd, simm); \
- tcg_temp_free_i32(simm); \
- tcg_temp_free_ptr(rd); \
- }
-
-GEN_VXFORM_SIMM(vspltisb, 6, 12);
-GEN_VXFORM_SIMM(vspltish, 6, 13);
-GEN_VXFORM_SIMM(vspltisw, 6, 14);
-
-#define GEN_VXFORM_NOA(name, opc2, opc3) \
-static void glue(gen_, name)(DisasContext *ctx) \
- { \
- TCGv_ptr rb, rd; \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- rb = gen_avr_ptr(rB(ctx->opcode)); \
- rd = gen_avr_ptr(rD(ctx->opcode)); \
- gen_helper_##name (rd, rb); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rd); \
- }
-
-#define GEN_VXFORM_NOA_ENV(name, opc2, opc3) \
-static void glue(gen_, name)(DisasContext *ctx) \
- { \
- TCGv_ptr rb, rd; \
- \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- rb = gen_avr_ptr(rB(ctx->opcode)); \
- rd = gen_avr_ptr(rD(ctx->opcode)); \
- gen_helper_##name(cpu_env, rd, rb); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rd); \
- }
-
-GEN_VXFORM_NOA(vupkhsb, 7, 8);
-GEN_VXFORM_NOA(vupkhsh, 7, 9);
-GEN_VXFORM_NOA(vupkhsw, 7, 25);
-GEN_VXFORM_NOA(vupklsb, 7, 10);
-GEN_VXFORM_NOA(vupklsh, 7, 11);
-GEN_VXFORM_NOA(vupklsw, 7, 27);
-GEN_VXFORM_NOA(vupkhpx, 7, 13);
-GEN_VXFORM_NOA(vupklpx, 7, 15);
-GEN_VXFORM_NOA_ENV(vrefp, 5, 4);
-GEN_VXFORM_NOA_ENV(vrsqrtefp, 5, 5);
-GEN_VXFORM_NOA_ENV(vexptefp, 5, 6);
-GEN_VXFORM_NOA_ENV(vlogefp, 5, 7);
-GEN_VXFORM_NOA_ENV(vrfim, 5, 11);
-GEN_VXFORM_NOA_ENV(vrfin, 5, 8);
-GEN_VXFORM_NOA_ENV(vrfip, 5, 10);
-GEN_VXFORM_NOA_ENV(vrfiz, 5, 9);
-
-#define GEN_VXFORM_SIMM(name, opc2, opc3) \
-static void glue(gen_, name)(DisasContext *ctx) \
- { \
- TCGv_ptr rd; \
- TCGv_i32 simm; \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- simm = tcg_const_i32(SIMM5(ctx->opcode)); \
- rd = gen_avr_ptr(rD(ctx->opcode)); \
- gen_helper_##name (rd, simm); \
- tcg_temp_free_i32(simm); \
- tcg_temp_free_ptr(rd); \
- }
-
-#define GEN_VXFORM_UIMM(name, opc2, opc3) \
-static void glue(gen_, name)(DisasContext *ctx) \
- { \
- TCGv_ptr rb, rd; \
- TCGv_i32 uimm; \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- uimm = tcg_const_i32(UIMM5(ctx->opcode)); \
- rb = gen_avr_ptr(rB(ctx->opcode)); \
- rd = gen_avr_ptr(rD(ctx->opcode)); \
- gen_helper_##name (rd, rb, uimm); \
- tcg_temp_free_i32(uimm); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rd); \
- }
-
-#define GEN_VXFORM_UIMM_ENV(name, opc2, opc3) \
-static void glue(gen_, name)(DisasContext *ctx) \
- { \
- TCGv_ptr rb, rd; \
- TCGv_i32 uimm; \
- \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- uimm = tcg_const_i32(UIMM5(ctx->opcode)); \
- rb = gen_avr_ptr(rB(ctx->opcode)); \
- rd = gen_avr_ptr(rD(ctx->opcode)); \
- gen_helper_##name(cpu_env, rd, rb, uimm); \
- tcg_temp_free_i32(uimm); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rd); \
- }
-
-GEN_VXFORM_UIMM(vspltb, 6, 8);
-GEN_VXFORM_UIMM(vsplth, 6, 9);
-GEN_VXFORM_UIMM(vspltw, 6, 10);
-GEN_VXFORM_UIMM_ENV(vcfux, 5, 12);
-GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13);
-GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14);
-GEN_VXFORM_UIMM_ENV(vctsxs, 5, 15);
-
-static void gen_vsldoi(DisasContext *ctx)
-{
- TCGv_ptr ra, rb, rd;
- TCGv_i32 sh;
- if (unlikely(!ctx->altivec_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VPU);
- return;
- }
- ra = gen_avr_ptr(rA(ctx->opcode));
- rb = gen_avr_ptr(rB(ctx->opcode));
- rd = gen_avr_ptr(rD(ctx->opcode));
- sh = tcg_const_i32(VSH(ctx->opcode));
- gen_helper_vsldoi (rd, ra, rb, sh);
- tcg_temp_free_ptr(ra);
- tcg_temp_free_ptr(rb);
- tcg_temp_free_ptr(rd);
- tcg_temp_free_i32(sh);
-}
-
-#define GEN_VAFORM_PAIRED(name0, name1, opc2) \
-static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
- { \
- TCGv_ptr ra, rb, rc, rd; \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- ra = gen_avr_ptr(rA(ctx->opcode)); \
- rb = gen_avr_ptr(rB(ctx->opcode)); \
- rc = gen_avr_ptr(rC(ctx->opcode)); \
- rd = gen_avr_ptr(rD(ctx->opcode)); \
- if (Rc(ctx->opcode)) { \
- gen_helper_##name1(cpu_env, rd, ra, rb, rc); \
- } else { \
- gen_helper_##name0(cpu_env, rd, ra, rb, rc); \
- } \
- tcg_temp_free_ptr(ra); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rc); \
- tcg_temp_free_ptr(rd); \
- }
-
-GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16)
-
-static void gen_vmladduhm(DisasContext *ctx)
-{
- TCGv_ptr ra, rb, rc, rd;
- if (unlikely(!ctx->altivec_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VPU);
- return;
- }
- ra = gen_avr_ptr(rA(ctx->opcode));
- rb = gen_avr_ptr(rB(ctx->opcode));
- rc = gen_avr_ptr(rC(ctx->opcode));
- rd = gen_avr_ptr(rD(ctx->opcode));
- gen_helper_vmladduhm(rd, ra, rb, rc);
- tcg_temp_free_ptr(ra);
- tcg_temp_free_ptr(rb);
- tcg_temp_free_ptr(rc);
- tcg_temp_free_ptr(rd);
-}
-
-GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18)
-GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19)
-GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20)
-GEN_VAFORM_PAIRED(vsel, vperm, 21)
-GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23)
-
-GEN_VXFORM_NOA(vclzb, 1, 28)
-GEN_VXFORM_NOA(vclzh, 1, 29)
-GEN_VXFORM_NOA(vclzw, 1, 30)
-GEN_VXFORM_NOA(vclzd, 1, 31)
-GEN_VXFORM_NOA(vpopcntb, 1, 28)
-GEN_VXFORM_NOA(vpopcnth, 1, 29)
-GEN_VXFORM_NOA(vpopcntw, 1, 30)
-GEN_VXFORM_NOA(vpopcntd, 1, 31)
-GEN_VXFORM_DUAL(vclzb, PPC_NONE, PPC2_ALTIVEC_207, \
- vpopcntb, PPC_NONE, PPC2_ALTIVEC_207)
-GEN_VXFORM_DUAL(vclzh, PPC_NONE, PPC2_ALTIVEC_207, \
- vpopcnth, PPC_NONE, PPC2_ALTIVEC_207)
-GEN_VXFORM_DUAL(vclzw, PPC_NONE, PPC2_ALTIVEC_207, \
- vpopcntw, PPC_NONE, PPC2_ALTIVEC_207)
-GEN_VXFORM_DUAL(vclzd, PPC_NONE, PPC2_ALTIVEC_207, \
- vpopcntd, PPC_NONE, PPC2_ALTIVEC_207)
-GEN_VXFORM(vbpermq, 6, 21);
-GEN_VXFORM_NOA(vgbbd, 6, 20);
-GEN_VXFORM(vpmsumb, 4, 16)
-GEN_VXFORM(vpmsumh, 4, 17)
-GEN_VXFORM(vpmsumw, 4, 18)
-GEN_VXFORM(vpmsumd, 4, 19)
-
-#define GEN_BCD(op) \
-static void gen_##op(DisasContext *ctx) \
-{ \
- TCGv_ptr ra, rb, rd; \
- TCGv_i32 ps; \
- \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- \
- ra = gen_avr_ptr(rA(ctx->opcode)); \
- rb = gen_avr_ptr(rB(ctx->opcode)); \
- rd = gen_avr_ptr(rD(ctx->opcode)); \
- \
- ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
- \
- gen_helper_##op(cpu_crf[6], rd, ra, rb, ps); \
- \
- tcg_temp_free_ptr(ra); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rd); \
- tcg_temp_free_i32(ps); \
-}
-
-GEN_BCD(bcdadd)
-GEN_BCD(bcdsub)
-
-GEN_VXFORM_DUAL(vsububm, PPC_ALTIVEC, PPC_NONE, \
- bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
-GEN_VXFORM_DUAL(vsububs, PPC_ALTIVEC, PPC_NONE, \
- bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
-GEN_VXFORM_DUAL(vsubuhm, PPC_ALTIVEC, PPC_NONE, \
- bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
-GEN_VXFORM_DUAL(vsubuhs, PPC_ALTIVEC, PPC_NONE, \
- bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
-
-static void gen_vsbox(DisasContext *ctx)
-{
- TCGv_ptr ra, rd;
- if (unlikely(!ctx->altivec_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VPU);
- return;
- }
- ra = gen_avr_ptr(rA(ctx->opcode));
- rd = gen_avr_ptr(rD(ctx->opcode));
- gen_helper_vsbox(rd, ra);
- tcg_temp_free_ptr(ra);
- tcg_temp_free_ptr(rd);
-}
-
-GEN_VXFORM(vcipher, 4, 20)
-GEN_VXFORM(vcipherlast, 4, 20)
-GEN_VXFORM(vncipher, 4, 21)
-GEN_VXFORM(vncipherlast, 4, 21)
-
-GEN_VXFORM_DUAL(vcipher, PPC_NONE, PPC2_ALTIVEC_207,
- vcipherlast, PPC_NONE, PPC2_ALTIVEC_207)
-GEN_VXFORM_DUAL(vncipher, PPC_NONE, PPC2_ALTIVEC_207,
- vncipherlast, PPC_NONE, PPC2_ALTIVEC_207)
-
-#define VSHASIGMA(op) \
-static void gen_##op(DisasContext *ctx) \
-{ \
- TCGv_ptr ra, rd; \
- TCGv_i32 st_six; \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- ra = gen_avr_ptr(rA(ctx->opcode)); \
- rd = gen_avr_ptr(rD(ctx->opcode)); \
- st_six = tcg_const_i32(rB(ctx->opcode)); \
- gen_helper_##op(rd, ra, st_six); \
- tcg_temp_free_ptr(ra); \
- tcg_temp_free_ptr(rd); \
- tcg_temp_free_i32(st_six); \
-}
-
-VSHASIGMA(vshasigmaw)
-VSHASIGMA(vshasigmad)
-
-GEN_VXFORM3(vpermxor, 22, 0xFF)
-GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE,
- vpermxor, PPC_NONE, PPC2_ALTIVEC_207)
-
-/*** VSX extension ***/
-
-static inline TCGv_i64 cpu_vsrh(int n)
-{
- if (n < 32) {
- return cpu_fpr[n];
- } else {
- return cpu_avrh[n-32];
- }
-}
-
-static inline TCGv_i64 cpu_vsrl(int n)
-{
- if (n < 32) {
- return cpu_vsr[n];
- } else {
- return cpu_avrl[n-32];
- }
-}
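A short summary of the aliasing these two accessors implement, in terms of the
register arrays used in this file:

    /*
     * VSR[ 0..31]: doubleword 0 aliases FPR[n]        (cpu_fpr[n]),
     *              doubleword 1 is separate storage   (cpu_vsr[n]).
     * VSR[32..63]: alias the AltiVec registers VR[n - 32]
     *              (cpu_avrh[n - 32] high, cpu_avrl[n - 32] low).
     */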
-
-#define VSX_LOAD_SCALAR(name, operation) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv EA; \
- if (unlikely(!ctx->vsx_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VSXU); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_INT); \
- EA = tcg_temp_new(); \
- gen_addr_reg_index(ctx, EA); \
- gen_qemu_##operation(ctx, cpu_vsrh(xT(ctx->opcode)), EA); \
- /* NOTE: cpu_vsrl is undefined */ \
- tcg_temp_free(EA); \
-}
-
-VSX_LOAD_SCALAR(lxsdx, ld64)
-VSX_LOAD_SCALAR(lxsiwax, ld32s_i64)
-VSX_LOAD_SCALAR(lxsiwzx, ld32u_i64)
-VSX_LOAD_SCALAR(lxsspx, ld32fs)
-
-static void gen_lxvd2x(DisasContext *ctx)
-{
- TCGv EA;
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
- gen_set_access_type(ctx, ACCESS_INT);
- EA = tcg_temp_new();
- gen_addr_reg_index(ctx, EA);
- gen_qemu_ld64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
- tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_ld64(ctx, cpu_vsrl(xT(ctx->opcode)), EA);
- tcg_temp_free(EA);
-}
-
-static void gen_lxvdsx(DisasContext *ctx)
-{
- TCGv EA;
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
- gen_set_access_type(ctx, ACCESS_INT);
- EA = tcg_temp_new();
- gen_addr_reg_index(ctx, EA);
- gen_qemu_ld64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
- tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode)));
- tcg_temp_free(EA);
-}
-
-static void gen_lxvw4x(DisasContext *ctx)
-{
- TCGv EA;
- TCGv_i64 tmp;
- TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
- TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
- gen_set_access_type(ctx, ACCESS_INT);
- EA = tcg_temp_new();
- tmp = tcg_temp_new_i64();
-
- gen_addr_reg_index(ctx, EA);
- gen_qemu_ld32u_i64(ctx, tmp, EA);
- tcg_gen_addi_tl(EA, EA, 4);
- gen_qemu_ld32u_i64(ctx, xth, EA);
- tcg_gen_deposit_i64(xth, xth, tmp, 32, 32);
-
- tcg_gen_addi_tl(EA, EA, 4);
- gen_qemu_ld32u_i64(ctx, tmp, EA);
- tcg_gen_addi_tl(EA, EA, 4);
- gen_qemu_ld32u_i64(ctx, xtl, EA);
- tcg_gen_deposit_i64(xtl, xtl, tmp, 32, 32);
-
- tcg_temp_free(EA);
- tcg_temp_free_i64(tmp);
-}
-
-#define VSX_STORE_SCALAR(name, operation) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv EA; \
- if (unlikely(!ctx->vsx_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VSXU); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_INT); \
- EA = tcg_temp_new(); \
- gen_addr_reg_index(ctx, EA); \
- gen_qemu_##operation(ctx, cpu_vsrh(xS(ctx->opcode)), EA); \
- tcg_temp_free(EA); \
-}
-
-VSX_STORE_SCALAR(stxsdx, st64)
-VSX_STORE_SCALAR(stxsiwx, st32_i64)
-VSX_STORE_SCALAR(stxsspx, st32fs)
-
-static void gen_stxvd2x(DisasContext *ctx)
-{
- TCGv EA;
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
- gen_set_access_type(ctx, ACCESS_INT);
- EA = tcg_temp_new();
- gen_addr_reg_index(ctx, EA);
- gen_qemu_st64(ctx, cpu_vsrh(xS(ctx->opcode)), EA);
- tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_st64(ctx, cpu_vsrl(xS(ctx->opcode)), EA);
- tcg_temp_free(EA);
-}
-
-static void gen_stxvw4x(DisasContext *ctx)
-{
- TCGv_i64 tmp;
- TCGv EA;
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
- gen_set_access_type(ctx, ACCESS_INT);
- EA = tcg_temp_new();
- gen_addr_reg_index(ctx, EA);
- tmp = tcg_temp_new_i64();
-
- tcg_gen_shri_i64(tmp, cpu_vsrh(xS(ctx->opcode)), 32);
- gen_qemu_st32_i64(ctx, tmp, EA);
- tcg_gen_addi_tl(EA, EA, 4);
- gen_qemu_st32_i64(ctx, cpu_vsrh(xS(ctx->opcode)), EA);
-
- tcg_gen_shri_i64(tmp, cpu_vsrl(xS(ctx->opcode)), 32);
- tcg_gen_addi_tl(EA, EA, 4);
- gen_qemu_st32_i64(ctx, tmp, EA);
- tcg_gen_addi_tl(EA, EA, 4);
- gen_qemu_st32_i64(ctx, cpu_vsrl(xS(ctx->opcode)), EA);
-
- tcg_temp_free(EA);
- tcg_temp_free_i64(tmp);
-}
-
-#define MV_VSRW(name, tcgop1, tcgop2, target, source) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- if (xS(ctx->opcode) < 32) { \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- } else { \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- } \
- TCGv_i64 tmp = tcg_temp_new_i64(); \
- tcg_gen_##tcgop1(tmp, source); \
- tcg_gen_##tcgop2(target, tmp); \
- tcg_temp_free_i64(tmp); \
-}
-
-
-MV_VSRW(mfvsrwz, ext32u_i64, trunc_i64_tl, cpu_gpr[rA(ctx->opcode)], \
- cpu_vsrh(xS(ctx->opcode)))
-MV_VSRW(mtvsrwa, extu_tl_i64, ext32s_i64, cpu_vsrh(xT(ctx->opcode)), \
- cpu_gpr[rA(ctx->opcode)])
-MV_VSRW(mtvsrwz, extu_tl_i64, ext32u_i64, cpu_vsrh(xT(ctx->opcode)), \
- cpu_gpr[rA(ctx->opcode)])
#if defined(TARGET_PPC64)
-#define MV_VSRD(name, target, source) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- if (xS(ctx->opcode) < 32) { \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- } else { \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- } \
- tcg_gen_mov_i64(target, source); \
-}
-
-MV_VSRD(mfvsrd, cpu_gpr[rA(ctx->opcode)], cpu_vsrh(xS(ctx->opcode)))
-MV_VSRD(mtvsrd, cpu_vsrh(xT(ctx->opcode)), cpu_gpr[rA(ctx->opcode)])
-
-#endif
-
-static void gen_xxpermdi(DisasContext *ctx)
-{
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
-
- if (unlikely((xT(ctx->opcode) == xA(ctx->opcode)) ||
- (xT(ctx->opcode) == xB(ctx->opcode)))) {
- TCGv_i64 xh, xl;
-
- xh = tcg_temp_new_i64();
- xl = tcg_temp_new_i64();
-
- if ((DM(ctx->opcode) & 2) == 0) {
- tcg_gen_mov_i64(xh, cpu_vsrh(xA(ctx->opcode)));
- } else {
- tcg_gen_mov_i64(xh, cpu_vsrl(xA(ctx->opcode)));
- }
- if ((DM(ctx->opcode) & 1) == 0) {
- tcg_gen_mov_i64(xl, cpu_vsrh(xB(ctx->opcode)));
- } else {
- tcg_gen_mov_i64(xl, cpu_vsrl(xB(ctx->opcode)));
- }
-
- tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xh);
- tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xl);
-
- tcg_temp_free_i64(xh);
- tcg_temp_free_i64(xl);
- } else {
- if ((DM(ctx->opcode) & 2) == 0) {
- tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrh(xA(ctx->opcode)));
- } else {
- tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrl(xA(ctx->opcode)));
- }
- if ((DM(ctx->opcode) & 1) == 0) {
- tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xB(ctx->opcode)));
- } else {
- tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrl(xB(ctx->opcode)));
- }
- }
-}
-
-#define OP_ABS 1
-#define OP_NABS 2
-#define OP_NEG 3
-#define OP_CPSGN 4
-#define SGN_MASK_DP 0x8000000000000000ull
-#define SGN_MASK_SP 0x8000000080000000ull
-
-#define VSX_SCALAR_MOVE(name, op, sgn_mask) \
-static void glue(gen_, name)(DisasContext * ctx) \
- { \
- TCGv_i64 xb, sgm; \
- if (unlikely(!ctx->vsx_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VSXU); \
- return; \
- } \
- xb = tcg_temp_new_i64(); \
- sgm = tcg_temp_new_i64(); \
- tcg_gen_mov_i64(xb, cpu_vsrh(xB(ctx->opcode))); \
- tcg_gen_movi_i64(sgm, sgn_mask); \
- switch (op) { \
- case OP_ABS: { \
- tcg_gen_andc_i64(xb, xb, sgm); \
- break; \
- } \
- case OP_NABS: { \
- tcg_gen_or_i64(xb, xb, sgm); \
- break; \
- } \
- case OP_NEG: { \
- tcg_gen_xor_i64(xb, xb, sgm); \
- break; \
- } \
- case OP_CPSGN: { \
- TCGv_i64 xa = tcg_temp_new_i64(); \
- tcg_gen_mov_i64(xa, cpu_vsrh(xA(ctx->opcode))); \
- tcg_gen_and_i64(xa, xa, sgm); \
- tcg_gen_andc_i64(xb, xb, sgm); \
- tcg_gen_or_i64(xb, xb, xa); \
- tcg_temp_free_i64(xa); \
- break; \
- } \
- } \
- tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xb); \
- tcg_temp_free_i64(xb); \
- tcg_temp_free_i64(sgm); \
- }
-
-VSX_SCALAR_MOVE(xsabsdp, OP_ABS, SGN_MASK_DP)
-VSX_SCALAR_MOVE(xsnabsdp, OP_NABS, SGN_MASK_DP)
-VSX_SCALAR_MOVE(xsnegdp, OP_NEG, SGN_MASK_DP)
-VSX_SCALAR_MOVE(xscpsgndp, OP_CPSGN, SGN_MASK_DP)
-
-#define VSX_VECTOR_MOVE(name, op, sgn_mask) \
-static void glue(gen_, name)(DisasContext * ctx) \
- { \
- TCGv_i64 xbh, xbl, sgm; \
- if (unlikely(!ctx->vsx_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VSXU); \
- return; \
- } \
- xbh = tcg_temp_new_i64(); \
- xbl = tcg_temp_new_i64(); \
- sgm = tcg_temp_new_i64(); \
- tcg_gen_mov_i64(xbh, cpu_vsrh(xB(ctx->opcode))); \
- tcg_gen_mov_i64(xbl, cpu_vsrl(xB(ctx->opcode))); \
- tcg_gen_movi_i64(sgm, sgn_mask); \
- switch (op) { \
- case OP_ABS: { \
- tcg_gen_andc_i64(xbh, xbh, sgm); \
- tcg_gen_andc_i64(xbl, xbl, sgm); \
- break; \
- } \
- case OP_NABS: { \
- tcg_gen_or_i64(xbh, xbh, sgm); \
- tcg_gen_or_i64(xbl, xbl, sgm); \
- break; \
- } \
- case OP_NEG: { \
- tcg_gen_xor_i64(xbh, xbh, sgm); \
- tcg_gen_xor_i64(xbl, xbl, sgm); \
- break; \
- } \
- case OP_CPSGN: { \
- TCGv_i64 xah = tcg_temp_new_i64(); \
- TCGv_i64 xal = tcg_temp_new_i64(); \
- tcg_gen_mov_i64(xah, cpu_vsrh(xA(ctx->opcode))); \
- tcg_gen_mov_i64(xal, cpu_vsrl(xA(ctx->opcode))); \
- tcg_gen_and_i64(xah, xah, sgm); \
- tcg_gen_and_i64(xal, xal, sgm); \
- tcg_gen_andc_i64(xbh, xbh, sgm); \
- tcg_gen_andc_i64(xbl, xbl, sgm); \
- tcg_gen_or_i64(xbh, xbh, xah); \
- tcg_gen_or_i64(xbl, xbl, xal); \
- tcg_temp_free_i64(xah); \
- tcg_temp_free_i64(xal); \
- break; \
- } \
- } \
- tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xbh); \
- tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xbl); \
- tcg_temp_free_i64(xbh); \
- tcg_temp_free_i64(xbl); \
- tcg_temp_free_i64(sgm); \
- }
-
-VSX_VECTOR_MOVE(xvabsdp, OP_ABS, SGN_MASK_DP)
-VSX_VECTOR_MOVE(xvnabsdp, OP_NABS, SGN_MASK_DP)
-VSX_VECTOR_MOVE(xvnegdp, OP_NEG, SGN_MASK_DP)
-VSX_VECTOR_MOVE(xvcpsgndp, OP_CPSGN, SGN_MASK_DP)
-VSX_VECTOR_MOVE(xvabssp, OP_ABS, SGN_MASK_SP)
-VSX_VECTOR_MOVE(xvnabssp, OP_NABS, SGN_MASK_SP)
-VSX_VECTOR_MOVE(xvnegsp, OP_NEG, SGN_MASK_SP)
-VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP)
-
-#define GEN_VSX_HELPER_2(name, op1, op2, inval, type) \
-static void gen_##name(DisasContext * ctx) \
-{ \
- TCGv_i32 opc; \
- if (unlikely(!ctx->vsx_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VSXU); \
- return; \
- } \
- /* NIP cannot be restored if the memory exception comes from a helper */ \
- gen_update_nip(ctx, ctx->nip - 4); \
- opc = tcg_const_i32(ctx->opcode); \
- gen_helper_##name(cpu_env, opc); \
- tcg_temp_free_i32(opc); \
-}
-
-#define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \
-static void gen_##name(DisasContext * ctx) \
-{ \
- if (unlikely(!ctx->vsx_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VSXU); \
- return; \
- } \
- /* NIP cannot be restored if the exception comes */ \
- /* from a helper. */ \
- gen_update_nip(ctx, ctx->nip - 4); \
- \
- gen_helper_##name(cpu_vsrh(xT(ctx->opcode)), cpu_env, \
- cpu_vsrh(xB(ctx->opcode))); \
-}
-
-GEN_VSX_HELPER_2(xsadddp, 0x00, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xssubdp, 0x00, 0x05, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmuldp, 0x00, 0x06, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsdivdp, 0x00, 0x07, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsredp, 0x14, 0x05, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xstdivdp, 0x14, 0x07, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmaddadp, 0x04, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmaddmdp, 0x04, 0x05, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmsubadp, 0x04, 0x06, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmsubmdp, 0x04, 0x07, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsnmaddadp, 0x04, 0x14, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsnmaddmdp, 0x04, 0x15, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsnmsubadp, 0x04, 0x16, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsnmsubmdp, 0x04, 0x17, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscmpudp, 0x0C, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmindp, 0x00, 0x15, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
-GEN_VSX_HELPER_XT_XB_ENV(xscvdpspn, 0x16, 0x10, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX)
-GEN_VSX_HELPER_XT_XB_ENV(xscvspdpn, 0x16, 0x14, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xscvdpsxds, 0x10, 0x15, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscvdpsxws, 0x10, 0x05, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscvdpuxds, 0x10, 0x14, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscvdpuxws, 0x10, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscvsxddp, 0x10, 0x17, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscvuxddp, 0x10, 0x16, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsrdpi, 0x12, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsrdpic, 0x16, 0x06, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsrdpim, 0x12, 0x07, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX)
-GEN_VSX_HELPER_XT_XB_ENV(xsrsp, 0x12, 0x11, 0, PPC2_VSX207)
-
-GEN_VSX_HELPER_2(xsaddsp, 0x00, 0x00, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xssubsp, 0x00, 0x01, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsmulsp, 0x00, 0x02, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsdivsp, 0x00, 0x03, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsresp, 0x14, 0x01, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsmaddasp, 0x04, 0x00, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsmaddmsp, 0x04, 0x01, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsmsubasp, 0x04, 0x02, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsmsubmsp, 0x04, 0x03, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsnmaddasp, 0x04, 0x10, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsnmaddmsp, 0x04, 0x11, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsnmsubasp, 0x04, 0x12, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsnmsubmsp, 0x04, 0x13, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207)
-
-GEN_VSX_HELPER_2(xvadddp, 0x00, 0x0C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvsubdp, 0x00, 0x0D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmuldp, 0x00, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvdivdp, 0x00, 0x0F, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvredp, 0x14, 0x0D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvsqrtdp, 0x16, 0x0C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrsqrtedp, 0x14, 0x0C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvtdivdp, 0x14, 0x0F, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvtsqrtdp, 0x14, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmaddadp, 0x04, 0x0C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmaddmdp, 0x04, 0x0D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmsubadp, 0x04, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmsubmdp, 0x04, 0x0F, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmaddadp, 0x04, 0x1C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmaddmdp, 0x04, 0x1D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmsubadp, 0x04, 0x1E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmsubmdp, 0x04, 0x1F, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmaxdp, 0x00, 0x1C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmindp, 0x00, 0x1D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcmpeqdp, 0x0C, 0x0C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcmpgtdp, 0x0C, 0x0D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcmpgedp, 0x0C, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvdpsp, 0x12, 0x18, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvdpsxds, 0x10, 0x1D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvdpsxws, 0x10, 0x0D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvdpuxds, 0x10, 0x1C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvdpuxws, 0x10, 0x0C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvsxddp, 0x10, 0x1F, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvuxddp, 0x10, 0x1E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvsxwdp, 0x10, 0x0F, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvuxwdp, 0x10, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrdpi, 0x12, 0x0C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrdpic, 0x16, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrdpim, 0x12, 0x0F, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrdpip, 0x12, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrdpiz, 0x12, 0x0D, 0, PPC2_VSX)
-
-GEN_VSX_HELPER_2(xvaddsp, 0x00, 0x08, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvsubsp, 0x00, 0x09, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmulsp, 0x00, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvdivsp, 0x00, 0x0B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvresp, 0x14, 0x09, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvsqrtsp, 0x16, 0x08, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrsqrtesp, 0x14, 0x08, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvtdivsp, 0x14, 0x0B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvtsqrtsp, 0x14, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmaddasp, 0x04, 0x08, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmaddmsp, 0x04, 0x09, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmsubasp, 0x04, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmsubmsp, 0x04, 0x0B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmaddasp, 0x04, 0x18, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmaddmsp, 0x04, 0x19, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmsubasp, 0x04, 0x1A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmsubmsp, 0x04, 0x1B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmaxsp, 0x00, 0x18, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvminsp, 0x00, 0x19, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcmpeqsp, 0x0C, 0x08, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcmpgtsp, 0x0C, 0x09, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcmpgesp, 0x0C, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvspdp, 0x12, 0x1C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvspsxds, 0x10, 0x19, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvspsxws, 0x10, 0x09, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvspuxds, 0x10, 0x18, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvspuxws, 0x10, 0x08, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvsxdsp, 0x10, 0x1B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvuxdsp, 0x10, 0x1A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvsxwsp, 0x10, 0x0B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvuxwsp, 0x10, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrspi, 0x12, 0x08, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrspic, 0x16, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrspim, 0x12, 0x0B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX)
-
-#define VSX_LOGICAL(name, tcg_op) \
-static void glue(gen_, name)(DisasContext * ctx) \
- { \
- if (unlikely(!ctx->vsx_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VSXU); \
- return; \
- } \
- tcg_op(cpu_vsrh(xT(ctx->opcode)), cpu_vsrh(xA(ctx->opcode)), \
- cpu_vsrh(xB(ctx->opcode))); \
- tcg_op(cpu_vsrl(xT(ctx->opcode)), cpu_vsrl(xA(ctx->opcode)), \
- cpu_vsrl(xB(ctx->opcode))); \
- }
-
-VSX_LOGICAL(xxland, tcg_gen_and_i64)
-VSX_LOGICAL(xxlandc, tcg_gen_andc_i64)
-VSX_LOGICAL(xxlor, tcg_gen_or_i64)
-VSX_LOGICAL(xxlxor, tcg_gen_xor_i64)
-VSX_LOGICAL(xxlnor, tcg_gen_nor_i64)
-VSX_LOGICAL(xxleqv, tcg_gen_eqv_i64)
-VSX_LOGICAL(xxlnand, tcg_gen_nand_i64)
-VSX_LOGICAL(xxlorc, tcg_gen_orc_i64)
-
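For readers skimming the removed block: each VSX_LOGICAL entry expands to a small generator that applies the given TCG op to both 64-bit halves of the source VSRs. Written out, the xxland instantiation comes to roughly the following (expansion shown for illustration only):

static void gen_xxland(DisasContext *ctx)
{
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    /* AND the high and low doublewords of VSR[XA] and VSR[XB] into VSR[XT]. */
    tcg_gen_and_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrh(xA(ctx->opcode)),
                    cpu_vsrh(xB(ctx->opcode)));
    tcg_gen_and_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrl(xA(ctx->opcode)),
                    cpu_vsrl(xB(ctx->opcode)));
}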
-#define VSX_XXMRG(name, high) \
-static void glue(gen_, name)(DisasContext * ctx) \
- { \
- TCGv_i64 a0, a1, b0, b1; \
- if (unlikely(!ctx->vsx_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VSXU); \
- return; \
- } \
- a0 = tcg_temp_new_i64(); \
- a1 = tcg_temp_new_i64(); \
- b0 = tcg_temp_new_i64(); \
- b1 = tcg_temp_new_i64(); \
- if (high) { \
- tcg_gen_mov_i64(a0, cpu_vsrh(xA(ctx->opcode))); \
- tcg_gen_mov_i64(a1, cpu_vsrh(xA(ctx->opcode))); \
- tcg_gen_mov_i64(b0, cpu_vsrh(xB(ctx->opcode))); \
- tcg_gen_mov_i64(b1, cpu_vsrh(xB(ctx->opcode))); \
- } else { \
- tcg_gen_mov_i64(a0, cpu_vsrl(xA(ctx->opcode))); \
- tcg_gen_mov_i64(a1, cpu_vsrl(xA(ctx->opcode))); \
- tcg_gen_mov_i64(b0, cpu_vsrl(xB(ctx->opcode))); \
- tcg_gen_mov_i64(b1, cpu_vsrl(xB(ctx->opcode))); \
- } \
- tcg_gen_shri_i64(a0, a0, 32); \
- tcg_gen_shri_i64(b0, b0, 32); \
- tcg_gen_deposit_i64(cpu_vsrh(xT(ctx->opcode)), \
- b0, a0, 32, 32); \
- tcg_gen_deposit_i64(cpu_vsrl(xT(ctx->opcode)), \
- b1, a1, 32, 32); \
- tcg_temp_free_i64(a0); \
- tcg_temp_free_i64(a1); \
- tcg_temp_free_i64(b0); \
- tcg_temp_free_i64(b1); \
- }
-
-VSX_XXMRG(xxmrghw, 1)
-VSX_XXMRG(xxmrglw, 0)
-
-static void gen_xxsel(DisasContext * ctx)
-{
- TCGv_i64 a, b, c;
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
- a = tcg_temp_new_i64();
- b = tcg_temp_new_i64();
- c = tcg_temp_new_i64();
-
- tcg_gen_mov_i64(a, cpu_vsrh(xA(ctx->opcode)));
- tcg_gen_mov_i64(b, cpu_vsrh(xB(ctx->opcode)));
- tcg_gen_mov_i64(c, cpu_vsrh(xC(ctx->opcode)));
-
- tcg_gen_and_i64(b, b, c);
- tcg_gen_andc_i64(a, a, c);
- tcg_gen_or_i64(cpu_vsrh(xT(ctx->opcode)), a, b);
-
- tcg_gen_mov_i64(a, cpu_vsrl(xA(ctx->opcode)));
- tcg_gen_mov_i64(b, cpu_vsrl(xB(ctx->opcode)));
- tcg_gen_mov_i64(c, cpu_vsrl(xC(ctx->opcode)));
-
- tcg_gen_and_i64(b, b, c);
- tcg_gen_andc_i64(a, a, c);
- tcg_gen_or_i64(cpu_vsrl(xT(ctx->opcode)), a, b);
-
- tcg_temp_free_i64(a);
- tcg_temp_free_i64(b);
- tcg_temp_free_i64(c);
-}
-
-static void gen_xxspltw(DisasContext *ctx)
-{
- TCGv_i64 b, b2;
- TCGv_i64 vsr = (UIM(ctx->opcode) & 2) ?
- cpu_vsrl(xB(ctx->opcode)) :
- cpu_vsrh(xB(ctx->opcode));
-
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
-
- b = tcg_temp_new_i64();
- b2 = tcg_temp_new_i64();
-
- if (UIM(ctx->opcode) & 1) {
- tcg_gen_ext32u_i64(b, vsr);
- } else {
- tcg_gen_shri_i64(b, vsr, 32);
- }
-
- tcg_gen_shli_i64(b2, b, 32);
- tcg_gen_or_i64(cpu_vsrh(xT(ctx->opcode)), b, b2);
- tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode)));
-
- tcg_temp_free_i64(b);
- tcg_temp_free_i64(b2);
-}
-
-static void gen_xxsldwi(DisasContext *ctx)
-{
- TCGv_i64 xth, xtl;
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
- xth = tcg_temp_new_i64();
- xtl = tcg_temp_new_i64();
-
- switch (SHW(ctx->opcode)) {
- case 0: {
- tcg_gen_mov_i64(xth, cpu_vsrh(xA(ctx->opcode)));
- tcg_gen_mov_i64(xtl, cpu_vsrl(xA(ctx->opcode)));
- break;
- }
- case 1: {
- TCGv_i64 t0 = tcg_temp_new_i64();
- tcg_gen_mov_i64(xth, cpu_vsrh(xA(ctx->opcode)));
- tcg_gen_shli_i64(xth, xth, 32);
- tcg_gen_mov_i64(t0, cpu_vsrl(xA(ctx->opcode)));
- tcg_gen_shri_i64(t0, t0, 32);
- tcg_gen_or_i64(xth, xth, t0);
- tcg_gen_mov_i64(xtl, cpu_vsrl(xA(ctx->opcode)));
- tcg_gen_shli_i64(xtl, xtl, 32);
- tcg_gen_mov_i64(t0, cpu_vsrh(xB(ctx->opcode)));
- tcg_gen_shri_i64(t0, t0, 32);
- tcg_gen_or_i64(xtl, xtl, t0);
- tcg_temp_free_i64(t0);
- break;
- }
- case 2: {
- tcg_gen_mov_i64(xth, cpu_vsrl(xA(ctx->opcode)));
- tcg_gen_mov_i64(xtl, cpu_vsrh(xB(ctx->opcode)));
- break;
- }
- case 3: {
- TCGv_i64 t0 = tcg_temp_new_i64();
- tcg_gen_mov_i64(xth, cpu_vsrl(xA(ctx->opcode)));
- tcg_gen_shli_i64(xth, xth, 32);
- tcg_gen_mov_i64(t0, cpu_vsrh(xB(ctx->opcode)));
- tcg_gen_shri_i64(t0, t0, 32);
- tcg_gen_or_i64(xth, xth, t0);
- tcg_gen_mov_i64(xtl, cpu_vsrh(xB(ctx->opcode)));
- tcg_gen_shli_i64(xtl, xtl, 32);
- tcg_gen_mov_i64(t0, cpu_vsrl(xB(ctx->opcode)));
- tcg_gen_shri_i64(t0, t0, 32);
- tcg_gen_or_i64(xtl, xtl, t0);
- tcg_temp_free_i64(t0);
- break;
- }
- }
-
- tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xth);
- tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xtl);
-
- tcg_temp_free_i64(xth);
- tcg_temp_free_i64(xtl);
-}
-
-/*** Decimal Floating Point ***/
-
-static inline TCGv_ptr gen_fprp_ptr(int reg)
-{
- TCGv_ptr r = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(r, cpu_env, offsetof(CPUPPCState, fpr[reg]));
- return r;
-}
-
-#define GEN_DFP_T_A_B_Rc(name) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_ptr rd, ra, rb; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- gen_update_nip(ctx, ctx->nip - 4); \
- rd = gen_fprp_ptr(rD(ctx->opcode)); \
- ra = gen_fprp_ptr(rA(ctx->opcode)); \
- rb = gen_fprp_ptr(rB(ctx->opcode)); \
- gen_helper_##name(cpu_env, rd, ra, rb); \
- if (unlikely(Rc(ctx->opcode) != 0)) { \
- gen_set_cr1_from_fpscr(ctx); \
- } \
- tcg_temp_free_ptr(rd); \
- tcg_temp_free_ptr(ra); \
- tcg_temp_free_ptr(rb); \
-}
-
-#define GEN_DFP_BF_A_B(name) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_ptr ra, rb; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- gen_update_nip(ctx, ctx->nip - 4); \
- ra = gen_fprp_ptr(rA(ctx->opcode)); \
- rb = gen_fprp_ptr(rB(ctx->opcode)); \
- gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \
- cpu_env, ra, rb); \
- tcg_temp_free_ptr(ra); \
- tcg_temp_free_ptr(rb); \
-}
-
-#define GEN_DFP_BF_A_DCM(name) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_ptr ra; \
- TCGv_i32 dcm; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- gen_update_nip(ctx, ctx->nip - 4); \
- ra = gen_fprp_ptr(rA(ctx->opcode)); \
- dcm = tcg_const_i32(DCM(ctx->opcode)); \
- gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \
- cpu_env, ra, dcm); \
- tcg_temp_free_ptr(ra); \
- tcg_temp_free_i32(dcm); \
-}
-
-#define GEN_DFP_T_B_U32_U32_Rc(name, u32f1, u32f2) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_ptr rt, rb; \
- TCGv_i32 u32_1, u32_2; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- gen_update_nip(ctx, ctx->nip - 4); \
- rt = gen_fprp_ptr(rD(ctx->opcode)); \
- rb = gen_fprp_ptr(rB(ctx->opcode)); \
- u32_1 = tcg_const_i32(u32f1(ctx->opcode)); \
- u32_2 = tcg_const_i32(u32f2(ctx->opcode)); \
- gen_helper_##name(cpu_env, rt, rb, u32_1, u32_2); \
- if (unlikely(Rc(ctx->opcode) != 0)) { \
- gen_set_cr1_from_fpscr(ctx); \
- } \
- tcg_temp_free_ptr(rt); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_i32(u32_1); \
- tcg_temp_free_i32(u32_2); \
-}
-
-#define GEN_DFP_T_A_B_I32_Rc(name, i32fld) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_ptr rt, ra, rb; \
- TCGv_i32 i32; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- gen_update_nip(ctx, ctx->nip - 4); \
- rt = gen_fprp_ptr(rD(ctx->opcode)); \
- ra = gen_fprp_ptr(rA(ctx->opcode)); \
- rb = gen_fprp_ptr(rB(ctx->opcode)); \
- i32 = tcg_const_i32(i32fld(ctx->opcode)); \
- gen_helper_##name(cpu_env, rt, ra, rb, i32); \
- if (unlikely(Rc(ctx->opcode) != 0)) { \
- gen_set_cr1_from_fpscr(ctx); \
- } \
- tcg_temp_free_ptr(rt); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(ra); \
- tcg_temp_free_i32(i32); \
- }
-
-#define GEN_DFP_T_B_Rc(name) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_ptr rt, rb; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- gen_update_nip(ctx, ctx->nip - 4); \
- rt = gen_fprp_ptr(rD(ctx->opcode)); \
- rb = gen_fprp_ptr(rB(ctx->opcode)); \
- gen_helper_##name(cpu_env, rt, rb); \
- if (unlikely(Rc(ctx->opcode) != 0)) { \
- gen_set_cr1_from_fpscr(ctx); \
- } \
- tcg_temp_free_ptr(rt); \
- tcg_temp_free_ptr(rb); \
- }
-
-#define GEN_DFP_T_FPR_I32_Rc(name, fprfld, i32fld) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_ptr rt, rs; \
- TCGv_i32 i32; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- gen_update_nip(ctx, ctx->nip - 4); \
- rt = gen_fprp_ptr(rD(ctx->opcode)); \
- rs = gen_fprp_ptr(fprfld(ctx->opcode)); \
- i32 = tcg_const_i32(i32fld(ctx->opcode)); \
- gen_helper_##name(cpu_env, rt, rs, i32); \
- if (unlikely(Rc(ctx->opcode) != 0)) { \
- gen_set_cr1_from_fpscr(ctx); \
- } \
- tcg_temp_free_ptr(rt); \
- tcg_temp_free_ptr(rs); \
- tcg_temp_free_i32(i32); \
-}
-
-GEN_DFP_T_A_B_Rc(dadd)
-GEN_DFP_T_A_B_Rc(daddq)
-GEN_DFP_T_A_B_Rc(dsub)
-GEN_DFP_T_A_B_Rc(dsubq)
-GEN_DFP_T_A_B_Rc(dmul)
-GEN_DFP_T_A_B_Rc(dmulq)
-GEN_DFP_T_A_B_Rc(ddiv)
-GEN_DFP_T_A_B_Rc(ddivq)
-GEN_DFP_BF_A_B(dcmpu)
-GEN_DFP_BF_A_B(dcmpuq)
-GEN_DFP_BF_A_B(dcmpo)
-GEN_DFP_BF_A_B(dcmpoq)
-GEN_DFP_BF_A_DCM(dtstdc)
-GEN_DFP_BF_A_DCM(dtstdcq)
-GEN_DFP_BF_A_DCM(dtstdg)
-GEN_DFP_BF_A_DCM(dtstdgq)
-GEN_DFP_BF_A_B(dtstex)
-GEN_DFP_BF_A_B(dtstexq)
-GEN_DFP_BF_A_B(dtstsf)
-GEN_DFP_BF_A_B(dtstsfq)
-GEN_DFP_T_B_U32_U32_Rc(dquai, SIMM5, RMC)
-GEN_DFP_T_B_U32_U32_Rc(dquaiq, SIMM5, RMC)
-GEN_DFP_T_A_B_I32_Rc(dqua, RMC)
-GEN_DFP_T_A_B_I32_Rc(dquaq, RMC)
-GEN_DFP_T_A_B_I32_Rc(drrnd, RMC)
-GEN_DFP_T_A_B_I32_Rc(drrndq, RMC)
-GEN_DFP_T_B_U32_U32_Rc(drintx, FPW, RMC)
-GEN_DFP_T_B_U32_U32_Rc(drintxq, FPW, RMC)
-GEN_DFP_T_B_U32_U32_Rc(drintn, FPW, RMC)
-GEN_DFP_T_B_U32_U32_Rc(drintnq, FPW, RMC)
-GEN_DFP_T_B_Rc(dctdp)
-GEN_DFP_T_B_Rc(dctqpq)
-GEN_DFP_T_B_Rc(drsp)
-GEN_DFP_T_B_Rc(drdpq)
-GEN_DFP_T_B_Rc(dcffix)
-GEN_DFP_T_B_Rc(dcffixq)
-GEN_DFP_T_B_Rc(dctfix)
-GEN_DFP_T_B_Rc(dctfixq)
-GEN_DFP_T_FPR_I32_Rc(ddedpd, rB, SP)
-GEN_DFP_T_FPR_I32_Rc(ddedpdq, rB, SP)
-GEN_DFP_T_FPR_I32_Rc(denbcd, rB, SP)
-GEN_DFP_T_FPR_I32_Rc(denbcdq, rB, SP)
-GEN_DFP_T_B_Rc(dxex)
-GEN_DFP_T_B_Rc(dxexq)
-GEN_DFP_T_A_B_Rc(diex)
-GEN_DFP_T_A_B_Rc(diexq)
-GEN_DFP_T_FPR_I32_Rc(dscli, rA, DCM)
-GEN_DFP_T_FPR_I32_Rc(dscliq, rA, DCM)
-GEN_DFP_T_FPR_I32_Rc(dscri, rA, DCM)
-GEN_DFP_T_FPR_I32_Rc(dscriq, rA, DCM)
-
-/*** SPE extension ***/
-/* Register moves */
-
-static inline void gen_evmra(DisasContext *ctx)
-{
-
- if (unlikely(!ctx->spe_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_SPEU);
- return;
- }
-
- TCGv_i64 tmp = tcg_temp_new_i64();
-
- /* tmp := rA_lo + rA_hi << 32 */
- tcg_gen_concat_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
-
- /* spe_acc := tmp */
- tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc));
- tcg_temp_free_i64(tmp);
-
- /* rD := rA */
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
-}
-
-static inline void gen_load_gpr64(TCGv_i64 t, int reg)
-{
- tcg_gen_concat_tl_i64(t, cpu_gpr[reg], cpu_gprh[reg]);
-}
-
-static inline void gen_store_gpr64(int reg, TCGv_i64 t)
-{
- tcg_gen_extr_i64_tl(cpu_gpr[reg], cpu_gprh[reg], t);
-}
-
-#define GEN_SPE(name0, name1, opc2, opc3, inval0, inval1, type) \
-static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
-{ \
- if (Rc(ctx->opcode)) \
- gen_##name1(ctx); \
- else \
- gen_##name0(ctx); \
-}
-
-/* Handler for undefined SPE opcodes */
-static inline void gen_speundef(DisasContext *ctx)
-{
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
-}
-
-/* SPE logic */
-#define GEN_SPEOP_LOGIC2(name, tcg_op) \
-static inline void gen_##name(DisasContext *ctx) \
-{ \
- if (unlikely(!ctx->spe_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_SPEU); \
- return; \
- } \
- tcg_op(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], \
- cpu_gpr[rB(ctx->opcode)]); \
- tcg_op(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], \
- cpu_gprh[rB(ctx->opcode)]); \
-}
-
-GEN_SPEOP_LOGIC2(evand, tcg_gen_and_tl);
-GEN_SPEOP_LOGIC2(evandc, tcg_gen_andc_tl);
-GEN_SPEOP_LOGIC2(evxor, tcg_gen_xor_tl);
-GEN_SPEOP_LOGIC2(evor, tcg_gen_or_tl);
-GEN_SPEOP_LOGIC2(evnor, tcg_gen_nor_tl);
-GEN_SPEOP_LOGIC2(eveqv, tcg_gen_eqv_tl);
-GEN_SPEOP_LOGIC2(evorc, tcg_gen_orc_tl);
-GEN_SPEOP_LOGIC2(evnand, tcg_gen_nand_tl);
-
-/* SPE logic immediate */
-#define GEN_SPEOP_TCG_LOGIC_IMM2(name, tcg_opi) \
-static inline void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_i32 t0; \
- if (unlikely(!ctx->spe_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_SPEU); \
- return; \
- } \
- t0 = tcg_temp_new_i32(); \
- \
- tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
- tcg_opi(t0, t0, rB(ctx->opcode)); \
- tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
- \
- tcg_gen_trunc_tl_i32(t0, cpu_gprh[rA(ctx->opcode)]); \
- tcg_opi(t0, t0, rB(ctx->opcode)); \
- tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \
- \
- tcg_temp_free_i32(t0); \
-}
-GEN_SPEOP_TCG_LOGIC_IMM2(evslwi, tcg_gen_shli_i32);
-GEN_SPEOP_TCG_LOGIC_IMM2(evsrwiu, tcg_gen_shri_i32);
-GEN_SPEOP_TCG_LOGIC_IMM2(evsrwis, tcg_gen_sari_i32);
-GEN_SPEOP_TCG_LOGIC_IMM2(evrlwi, tcg_gen_rotli_i32);
-
-/* SPE arithmetic */
-#define GEN_SPEOP_ARITH1(name, tcg_op) \
-static inline void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_i32 t0; \
- if (unlikely(!ctx->spe_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_SPEU); \
- return; \
- } \
- t0 = tcg_temp_new_i32(); \
- \
- tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
- tcg_op(t0, t0); \
- tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
- \
- tcg_gen_trunc_tl_i32(t0, cpu_gprh[rA(ctx->opcode)]); \
- tcg_op(t0, t0); \
- tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \
- \
- tcg_temp_free_i32(t0); \
-}
-
-static inline void gen_op_evabs(TCGv_i32 ret, TCGv_i32 arg1)
-{
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
-
- tcg_gen_brcondi_i32(TCG_COND_GE, arg1, 0, l1);
- tcg_gen_neg_i32(ret, arg1);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_mov_i32(ret, arg1);
- gen_set_label(l2);
-}
-GEN_SPEOP_ARITH1(evabs, gen_op_evabs);
-GEN_SPEOP_ARITH1(evneg, tcg_gen_neg_i32);
-GEN_SPEOP_ARITH1(evextsb, tcg_gen_ext8s_i32);
-GEN_SPEOP_ARITH1(evextsh, tcg_gen_ext16s_i32);
-static inline void gen_op_evrndw(TCGv_i32 ret, TCGv_i32 arg1)
-{
- tcg_gen_addi_i32(ret, arg1, 0x8000);
- tcg_gen_ext16u_i32(ret, ret);
-}
-GEN_SPEOP_ARITH1(evrndw, gen_op_evrndw);
-GEN_SPEOP_ARITH1(evcntlsw, gen_helper_cntlsw32);
-GEN_SPEOP_ARITH1(evcntlzw, gen_helper_cntlzw32);
-
-#define GEN_SPEOP_ARITH2(name, tcg_op) \
-static inline void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_i32 t0, t1; \
- if (unlikely(!ctx->spe_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_SPEU); \
- return; \
- } \
- t0 = tcg_temp_new_i32(); \
- t1 = tcg_temp_new_i32(); \
- \
- tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
- tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
- tcg_op(t0, t0, t1); \
- tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
- \
- tcg_gen_trunc_tl_i32(t0, cpu_gprh[rA(ctx->opcode)]); \
- tcg_gen_trunc_tl_i32(t1, cpu_gprh[rB(ctx->opcode)]); \
- tcg_op(t0, t0, t1); \
- tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \
- \
- tcg_temp_free_i32(t0); \
- tcg_temp_free_i32(t1); \
-}
-
-static inline void gen_op_evsrwu(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- TCGv_i32 t0 = tcg_temp_local_new_i32();
-
- /* No error here: 6 bits are used */
- tcg_gen_andi_i32(t0, arg2, 0x3F);
- tcg_gen_brcondi_i32(TCG_COND_GE, t0, 32, l1);
- tcg_gen_shr_i32(ret, arg1, t0);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_movi_i32(ret, 0);
- gen_set_label(l2);
- tcg_temp_free_i32(t0);
-}
-GEN_SPEOP_ARITH2(evsrwu, gen_op_evsrwu);
-static inline void gen_op_evsrws(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- TCGv_i32 t0 = tcg_temp_local_new_i32();
-
- /* No error here: 6 bits are used */
- tcg_gen_andi_i32(t0, arg2, 0x3F);
- tcg_gen_brcondi_i32(TCG_COND_GE, t0, 32, l1);
- tcg_gen_sar_i32(ret, arg1, t0);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_movi_i32(ret, 0);
- gen_set_label(l2);
- tcg_temp_free_i32(t0);
-}
-GEN_SPEOP_ARITH2(evsrws, gen_op_evsrws);
-static inline void gen_op_evslw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- TCGv_i32 t0 = tcg_temp_local_new_i32();
-
- /* No error here: 6 bits are used */
- tcg_gen_andi_i32(t0, arg2, 0x3F);
- tcg_gen_brcondi_i32(TCG_COND_GE, t0, 32, l1);
- tcg_gen_shl_i32(ret, arg1, t0);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_movi_i32(ret, 0);
- gen_set_label(l2);
- tcg_temp_free_i32(t0);
-}
-GEN_SPEOP_ARITH2(evslw, gen_op_evslw);
-static inline void gen_op_evrlw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_andi_i32(t0, arg2, 0x1F);
- tcg_gen_rotl_i32(ret, arg1, t0);
- tcg_temp_free_i32(t0);
-}
-GEN_SPEOP_ARITH2(evrlw, gen_op_evrlw);
-static inline void gen_evmergehi(DisasContext *ctx)
-{
- if (unlikely(!ctx->spe_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_SPEU);
- return;
- }
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
-}
-GEN_SPEOP_ARITH2(evaddw, tcg_gen_add_i32);
-static inline void gen_op_evsubf(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- tcg_gen_sub_i32(ret, arg2, arg1);
-}
-GEN_SPEOP_ARITH2(evsubfw, gen_op_evsubf);
-
-/* SPE arithmetic immediate */
-#define GEN_SPEOP_ARITH_IMM2(name, tcg_op) \
-static inline void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_i32 t0; \
- if (unlikely(!ctx->spe_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_SPEU); \
- return; \
- } \
- t0 = tcg_temp_new_i32(); \
- \
- tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]); \
- tcg_op(t0, t0, rA(ctx->opcode)); \
- tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
- \
- tcg_gen_trunc_tl_i32(t0, cpu_gprh[rB(ctx->opcode)]); \
- tcg_op(t0, t0, rA(ctx->opcode)); \
- tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \
- \
- tcg_temp_free_i32(t0); \
-}
-GEN_SPEOP_ARITH_IMM2(evaddiw, tcg_gen_addi_i32);
-GEN_SPEOP_ARITH_IMM2(evsubifw, tcg_gen_subi_i32);
-
-/* SPE comparison */
-#define GEN_SPEOP_COMP(name, tcg_cond) \
-static inline void gen_##name(DisasContext *ctx) \
-{ \
- if (unlikely(!ctx->spe_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_SPEU); \
- return; \
- } \
- TCGLabel *l1 = gen_new_label(); \
- TCGLabel *l2 = gen_new_label(); \
- TCGLabel *l3 = gen_new_label(); \
- TCGLabel *l4 = gen_new_label(); \
- \
- tcg_gen_ext32s_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); \
- tcg_gen_ext32s_tl(cpu_gpr[rB(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \
- tcg_gen_ext32s_tl(cpu_gprh[rA(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]); \
- tcg_gen_ext32s_tl(cpu_gprh[rB(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]); \
- \
- tcg_gen_brcond_tl(tcg_cond, cpu_gpr[rA(ctx->opcode)], \
- cpu_gpr[rB(ctx->opcode)], l1); \
- tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], 0); \
- tcg_gen_br(l2); \
- gen_set_label(l1); \
- tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], \
- CRF_CL | CRF_CH_OR_CL | CRF_CH_AND_CL); \
- gen_set_label(l2); \
- tcg_gen_brcond_tl(tcg_cond, cpu_gprh[rA(ctx->opcode)], \
- cpu_gprh[rB(ctx->opcode)], l3); \
- tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], \
- ~(CRF_CH | CRF_CH_AND_CL)); \
- tcg_gen_br(l4); \
- gen_set_label(l3); \
- tcg_gen_ori_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], \
- CRF_CH | CRF_CH_OR_CL); \
- gen_set_label(l4); \
-}
-GEN_SPEOP_COMP(evcmpgtu, TCG_COND_GTU);
-GEN_SPEOP_COMP(evcmpgts, TCG_COND_GT);
-GEN_SPEOP_COMP(evcmpltu, TCG_COND_LTU);
-GEN_SPEOP_COMP(evcmplts, TCG_COND_LT);
-GEN_SPEOP_COMP(evcmpeq, TCG_COND_EQ);
-
-/* SPE misc */
-static inline void gen_brinc(DisasContext *ctx)
-{
- /* Note: brinc is usable even if SPE is disabled */
- gen_helper_brinc(cpu_gpr[rD(ctx->opcode)],
- cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
-}
-static inline void gen_evmergelo(DisasContext *ctx)
-{
- if (unlikely(!ctx->spe_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_SPEU);
- return;
- }
- tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
-}
-static inline void gen_evmergehilo(DisasContext *ctx)
-{
- if (unlikely(!ctx->spe_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_SPEU);
- return;
- }
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
-}
-static inline void gen_evmergelohi(DisasContext *ctx)
-{
- if (unlikely(!ctx->spe_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_SPEU);
- return;
- }
- if (rD(ctx->opcode) == rA(ctx->opcode)) {
- TCGv tmp = tcg_temp_new();
- tcg_gen_mov_tl(tmp, cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], tmp);
- tcg_temp_free(tmp);
- } else {
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
- }
-}
-static inline void gen_evsplati(DisasContext *ctx)
-{
- uint64_t imm = ((int32_t)(rA(ctx->opcode) << 27)) >> 27;
-
- tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], imm);
- tcg_gen_movi_tl(cpu_gprh[rD(ctx->opcode)], imm);
-}
-static inline void gen_evsplatfi(DisasContext *ctx)
-{
- uint64_t imm = rA(ctx->opcode) << 27;
-
- tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], imm);
- tcg_gen_movi_tl(cpu_gprh[rD(ctx->opcode)], imm);
-}
-
-static inline void gen_evsel(DisasContext *ctx)
-{
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- TCGLabel *l3 = gen_new_label();
- TCGLabel *l4 = gen_new_label();
- TCGv_i32 t0 = tcg_temp_local_new_i32();
-
- tcg_gen_andi_i32(t0, cpu_crf[ctx->opcode & 0x07], 1 << 3);
- tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);
- tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
- gen_set_label(l2);
- tcg_gen_andi_i32(t0, cpu_crf[ctx->opcode & 0x07], 1 << 2);
- tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l3);
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_br(l4);
- gen_set_label(l3);
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
- gen_set_label(l4);
- tcg_temp_free_i32(t0);
-}
-
-static void gen_evsel0(DisasContext *ctx)
-{
- gen_evsel(ctx);
-}
-
-static void gen_evsel1(DisasContext *ctx)
-{
- gen_evsel(ctx);
-}
-
-static void gen_evsel2(DisasContext *ctx)
-{
- gen_evsel(ctx);
-}
-
-static void gen_evsel3(DisasContext *ctx)
-{
- gen_evsel(ctx);
-}
-
-/* Multiply */
-
-static inline void gen_evmwumi(DisasContext *ctx)
-{
- TCGv_i64 t0, t1;
-
- if (unlikely(!ctx->spe_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_SPEU);
- return;
- }
-
- t0 = tcg_temp_new_i64();
- t1 = tcg_temp_new_i64();
-
- /* t0 := rA; t1 := rB */
- tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_ext32u_i64(t0, t0);
- tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
- tcg_gen_ext32u_i64(t1, t1);
-
- tcg_gen_mul_i64(t0, t0, t1); /* t0 := rA * rB */
-
- gen_store_gpr64(rD(ctx->opcode), t0); /* rD := t0 */
-
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
-}
-
-static inline void gen_evmwumia(DisasContext *ctx)
-{
- TCGv_i64 tmp;
-
- if (unlikely(!ctx->spe_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_SPEU);
- return;
- }
-
- gen_evmwumi(ctx); /* rD := rA * rB */
-
- tmp = tcg_temp_new_i64();
-
- /* acc := rD */
- gen_load_gpr64(tmp, rD(ctx->opcode));
- tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc));
- tcg_temp_free_i64(tmp);
-}
-
-static inline void gen_evmwumiaa(DisasContext *ctx)
-{
- TCGv_i64 acc;
- TCGv_i64 tmp;
-
- if (unlikely(!ctx->spe_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_SPEU);
- return;
- }
-
- gen_evmwumi(ctx); /* rD := rA * rB */
-
- acc = tcg_temp_new_i64();
- tmp = tcg_temp_new_i64();
-
- /* tmp := rD */
- gen_load_gpr64(tmp, rD(ctx->opcode));
-
- /* Load acc */
- tcg_gen_ld_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
-
- /* acc := tmp + acc */
- tcg_gen_add_i64(acc, acc, tmp);
-
- /* Store acc */
- tcg_gen_st_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
-
- /* rD := acc */
- gen_store_gpr64(rD(ctx->opcode), acc);
-
- tcg_temp_free_i64(acc);
- tcg_temp_free_i64(tmp);
-}
-
-static inline void gen_evmwsmi(DisasContext *ctx)
+static void gen_maddld(DisasContext *ctx)
{
- TCGv_i64 t0, t1;
-
- if (unlikely(!ctx->spe_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_SPEU);
- return;
- }
-
- t0 = tcg_temp_new_i64();
- t1 = tcg_temp_new_i64();
-
- /* t0 := rA; t1 := rB */
- tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_ext32s_i64(t0, t0);
- tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
- tcg_gen_ext32s_i64(t1, t1);
-
- tcg_gen_mul_i64(t0, t0, t1); /* t0 := rA * rB */
-
- gen_store_gpr64(rD(ctx->opcode), t0); /* rD := t0 */
- tcg_temp_free_i64(t0);
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ tcg_gen_mul_i64(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_add_i64(cpu_gpr[rD(ctx->opcode)], t1, cpu_gpr[rC(ctx->opcode)]);
tcg_temp_free_i64(t1);
}
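The new gen_maddld above implements maddld (multiply-add low doubleword, new in ISA 3.0): rD receives the low 64 bits of rA * rB plus rC, so plain modulo-2^64 arithmetic is enough. A minimal host-side sketch of that semantics (maddld_ref is a hypothetical name used only for illustration; the TCG ops above are the actual implementation):

#include <stdint.h>

/* Low doubleword of (ra * rb) + rc, i.e. the result modulo 2^64. */
static uint64_t maddld_ref(uint64_t ra, uint64_t rb, uint64_t rc)
{
    return ra * rb + rc;
}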
-static inline void gen_evmwsmia(DisasContext *ctx)
-{
- TCGv_i64 tmp;
-
- gen_evmwsmi(ctx); /* rD := rA * rB */
-
- tmp = tcg_temp_new_i64();
-
- /* acc := rD */
- gen_load_gpr64(tmp, rD(ctx->opcode));
- tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc));
-
- tcg_temp_free_i64(tmp);
-}
-
-static inline void gen_evmwsmiaa(DisasContext *ctx)
-{
- TCGv_i64 acc = tcg_temp_new_i64();
- TCGv_i64 tmp = tcg_temp_new_i64();
-
- gen_evmwsmi(ctx); /* rD := rA * rB */
-
- acc = tcg_temp_new_i64();
- tmp = tcg_temp_new_i64();
-
- /* tmp := rD */
- gen_load_gpr64(tmp, rD(ctx->opcode));
-
- /* Load acc */
- tcg_gen_ld_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
-
- /* acc := tmp + acc */
- tcg_gen_add_i64(acc, acc, tmp);
-
- /* Store acc */
- tcg_gen_st_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
-
- /* rD := acc */
- gen_store_gpr64(rD(ctx->opcode), acc);
-
- tcg_temp_free_i64(acc);
- tcg_temp_free_i64(tmp);
-}
-
-GEN_SPE(evaddw, speundef, 0x00, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); ////
-GEN_SPE(evaddiw, speundef, 0x01, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE);
-GEN_SPE(evsubfw, speundef, 0x02, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); ////
-GEN_SPE(evsubifw, speundef, 0x03, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE);
-GEN_SPE(evabs, evneg, 0x04, 0x08, 0x0000F800, 0x0000F800, PPC_SPE); ////
-GEN_SPE(evextsb, evextsh, 0x05, 0x08, 0x0000F800, 0x0000F800, PPC_SPE); ////
-GEN_SPE(evrndw, evcntlzw, 0x06, 0x08, 0x0000F800, 0x0000F800, PPC_SPE); ////
-GEN_SPE(evcntlsw, brinc, 0x07, 0x08, 0x0000F800, 0x00000000, PPC_SPE); //
-GEN_SPE(evmra, speundef, 0x02, 0x13, 0x0000F800, 0xFFFFFFFF, PPC_SPE);
-GEN_SPE(speundef, evand, 0x08, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE); ////
-GEN_SPE(evandc, speundef, 0x09, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); ////
-GEN_SPE(evxor, evor, 0x0B, 0x08, 0x00000000, 0x00000000, PPC_SPE); ////
-GEN_SPE(evnor, eveqv, 0x0C, 0x08, 0x00000000, 0x00000000, PPC_SPE); ////
-GEN_SPE(evmwumi, evmwsmi, 0x0C, 0x11, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(evmwumia, evmwsmia, 0x1C, 0x11, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(evmwumiaa, evmwsmiaa, 0x0C, 0x15, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evorc, 0x0D, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE); ////
-GEN_SPE(evnand, speundef, 0x0F, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); ////
-GEN_SPE(evsrwu, evsrws, 0x10, 0x08, 0x00000000, 0x00000000, PPC_SPE); ////
-GEN_SPE(evsrwiu, evsrwis, 0x11, 0x08, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(evslw, speundef, 0x12, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); ////
-GEN_SPE(evslwi, speundef, 0x13, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE);
-GEN_SPE(evrlw, evsplati, 0x14, 0x08, 0x00000000, 0x0000F800, PPC_SPE); //
-GEN_SPE(evrlwi, evsplatfi, 0x15, 0x08, 0x00000000, 0x0000F800, PPC_SPE);
-GEN_SPE(evmergehi, evmergelo, 0x16, 0x08, 0x00000000, 0x00000000, PPC_SPE); ////
-GEN_SPE(evmergehilo, evmergelohi, 0x17, 0x08, 0x00000000, 0x00000000, PPC_SPE); ////
-GEN_SPE(evcmpgtu, evcmpgts, 0x18, 0x08, 0x00600000, 0x00600000, PPC_SPE); ////
-GEN_SPE(evcmpltu, evcmplts, 0x19, 0x08, 0x00600000, 0x00600000, PPC_SPE); ////
-GEN_SPE(evcmpeq, speundef, 0x1A, 0x08, 0x00600000, 0xFFFFFFFF, PPC_SPE); ////
-
-/* SPE load and stores */
-static inline void gen_addr_spe_imm_index(DisasContext *ctx, TCGv EA, int sh)
+/* maddhd maddhdu */
+static void gen_maddhd_maddhdu(DisasContext *ctx)
{
- target_ulong uimm = rB(ctx->opcode);
- if (rA(ctx->opcode) == 0) {
- tcg_gen_movi_tl(EA, uimm << sh);
+ TCGv_i64 lo = tcg_temp_new_i64();
+ TCGv_i64 hi = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ if (Rc(ctx->opcode)) {
+ tcg_gen_mulu2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_movi_i64(t1, 0);
} else {
- tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], uimm << sh);
- if (NARROW_MODE(ctx)) {
- tcg_gen_ext32u_tl(EA, EA);
- }
- }
-}
-
-static inline void gen_op_evldd(DisasContext *ctx, TCGv addr)
-{
- TCGv_i64 t0 = tcg_temp_new_i64();
- gen_qemu_ld64(ctx, t0, addr);
- gen_store_gpr64(rD(ctx->opcode), t0);
- tcg_temp_free_i64(t0);
-}
-
-static inline void gen_op_evldw(DisasContext *ctx, TCGv addr)
-{
- gen_qemu_ld32u(ctx, cpu_gprh[rD(ctx->opcode)], addr);
- gen_addr_add(ctx, addr, addr, 4);
- gen_qemu_ld32u(ctx, cpu_gpr[rD(ctx->opcode)], addr);
-}
-
-static inline void gen_op_evldh(DisasContext *ctx, TCGv addr)
-{
- TCGv t0 = tcg_temp_new();
- gen_qemu_ld16u(ctx, t0, addr);
- tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
- gen_addr_add(ctx, addr, addr, 2);
- gen_qemu_ld16u(ctx, t0, addr);
- tcg_gen_or_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0);
- gen_addr_add(ctx, addr, addr, 2);
- gen_qemu_ld16u(ctx, t0, addr);
- tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
- gen_addr_add(ctx, addr, addr, 2);
- gen_qemu_ld16u(ctx, t0, addr);
- tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
- tcg_temp_free(t0);
-}
-
-static inline void gen_op_evlhhesplat(DisasContext *ctx, TCGv addr)
-{
- TCGv t0 = tcg_temp_new();
- gen_qemu_ld16u(ctx, t0, addr);
- tcg_gen_shli_tl(t0, t0, 16);
- tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
- tcg_temp_free(t0);
-}
-
-static inline void gen_op_evlhhousplat(DisasContext *ctx, TCGv addr)
-{
- TCGv t0 = tcg_temp_new();
- gen_qemu_ld16u(ctx, t0, addr);
- tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
- tcg_temp_free(t0);
-}
-
-static inline void gen_op_evlhhossplat(DisasContext *ctx, TCGv addr)
-{
- TCGv t0 = tcg_temp_new();
- gen_qemu_ld16s(ctx, t0, addr);
- tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
- tcg_temp_free(t0);
-}
-
-static inline void gen_op_evlwhe(DisasContext *ctx, TCGv addr)
-{
- TCGv t0 = tcg_temp_new();
- gen_qemu_ld16u(ctx, t0, addr);
- tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
- gen_addr_add(ctx, addr, addr, 2);
- gen_qemu_ld16u(ctx, t0, addr);
- tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
- tcg_temp_free(t0);
-}
-
-static inline void gen_op_evlwhou(DisasContext *ctx, TCGv addr)
-{
- gen_qemu_ld16u(ctx, cpu_gprh[rD(ctx->opcode)], addr);
- gen_addr_add(ctx, addr, addr, 2);
- gen_qemu_ld16u(ctx, cpu_gpr[rD(ctx->opcode)], addr);
-}
-
-static inline void gen_op_evlwhos(DisasContext *ctx, TCGv addr)
-{
- gen_qemu_ld16s(ctx, cpu_gprh[rD(ctx->opcode)], addr);
- gen_addr_add(ctx, addr, addr, 2);
- gen_qemu_ld16s(ctx, cpu_gpr[rD(ctx->opcode)], addr);
-}
-
-static inline void gen_op_evlwwsplat(DisasContext *ctx, TCGv addr)
-{
- TCGv t0 = tcg_temp_new();
- gen_qemu_ld32u(ctx, t0, addr);
- tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
- tcg_temp_free(t0);
-}
-
-static inline void gen_op_evlwhsplat(DisasContext *ctx, TCGv addr)
-{
- TCGv t0 = tcg_temp_new();
- gen_qemu_ld16u(ctx, t0, addr);
- tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
- tcg_gen_or_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0);
- gen_addr_add(ctx, addr, addr, 2);
- gen_qemu_ld16u(ctx, t0, addr);
- tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
- tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0);
- tcg_temp_free(t0);
-}
-
-static inline void gen_op_evstdd(DisasContext *ctx, TCGv addr)
-{
- TCGv_i64 t0 = tcg_temp_new_i64();
- gen_load_gpr64(t0, rS(ctx->opcode));
- gen_qemu_st64(ctx, t0, addr);
- tcg_temp_free_i64(t0);
-}
-
-static inline void gen_op_evstdw(DisasContext *ctx, TCGv addr)
-{
- gen_qemu_st32(ctx, cpu_gprh[rS(ctx->opcode)], addr);
- gen_addr_add(ctx, addr, addr, 4);
- gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], addr);
-}
-
-static inline void gen_op_evstdh(DisasContext *ctx, TCGv addr)
-{
- TCGv t0 = tcg_temp_new();
- tcg_gen_shri_tl(t0, cpu_gprh[rS(ctx->opcode)], 16);
- gen_qemu_st16(ctx, t0, addr);
- gen_addr_add(ctx, addr, addr, 2);
- gen_qemu_st16(ctx, cpu_gprh[rS(ctx->opcode)], addr);
- gen_addr_add(ctx, addr, addr, 2);
- tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16);
- gen_qemu_st16(ctx, t0, addr);
- tcg_temp_free(t0);
- gen_addr_add(ctx, addr, addr, 2);
- gen_qemu_st16(ctx, cpu_gpr[rS(ctx->opcode)], addr);
-}
-
-static inline void gen_op_evstwhe(DisasContext *ctx, TCGv addr)
-{
- TCGv t0 = tcg_temp_new();
- tcg_gen_shri_tl(t0, cpu_gprh[rS(ctx->opcode)], 16);
- gen_qemu_st16(ctx, t0, addr);
- gen_addr_add(ctx, addr, addr, 2);
- tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16);
- gen_qemu_st16(ctx, t0, addr);
- tcg_temp_free(t0);
-}
-
-static inline void gen_op_evstwho(DisasContext *ctx, TCGv addr)
-{
- gen_qemu_st16(ctx, cpu_gprh[rS(ctx->opcode)], addr);
- gen_addr_add(ctx, addr, addr, 2);
- gen_qemu_st16(ctx, cpu_gpr[rS(ctx->opcode)], addr);
-}
-
-static inline void gen_op_evstwwe(DisasContext *ctx, TCGv addr)
-{
- gen_qemu_st32(ctx, cpu_gprh[rS(ctx->opcode)], addr);
-}
-
-static inline void gen_op_evstwwo(DisasContext *ctx, TCGv addr)
-{
- gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], addr);
-}
-
-#define GEN_SPEOP_LDST(name, opc2, sh) \
-static void glue(gen_, name)(DisasContext *ctx) \
-{ \
- TCGv t0; \
- if (unlikely(!ctx->spe_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_SPEU); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_INT); \
- t0 = tcg_temp_new(); \
- if (Rc(ctx->opcode)) { \
- gen_addr_spe_imm_index(ctx, t0, sh); \
- } else { \
- gen_addr_reg_index(ctx, t0); \
- } \
- gen_op_##name(ctx, t0); \
- tcg_temp_free(t0); \
-}
-
-GEN_SPEOP_LDST(evldd, 0x00, 3);
-GEN_SPEOP_LDST(evldw, 0x01, 3);
-GEN_SPEOP_LDST(evldh, 0x02, 3);
-GEN_SPEOP_LDST(evlhhesplat, 0x04, 1);
-GEN_SPEOP_LDST(evlhhousplat, 0x06, 1);
-GEN_SPEOP_LDST(evlhhossplat, 0x07, 1);
-GEN_SPEOP_LDST(evlwhe, 0x08, 2);
-GEN_SPEOP_LDST(evlwhou, 0x0A, 2);
-GEN_SPEOP_LDST(evlwhos, 0x0B, 2);
-GEN_SPEOP_LDST(evlwwsplat, 0x0C, 2);
-GEN_SPEOP_LDST(evlwhsplat, 0x0E, 2);
-
-GEN_SPEOP_LDST(evstdd, 0x10, 3);
-GEN_SPEOP_LDST(evstdw, 0x11, 3);
-GEN_SPEOP_LDST(evstdh, 0x12, 3);
-GEN_SPEOP_LDST(evstwhe, 0x18, 2);
-GEN_SPEOP_LDST(evstwho, 0x1A, 2);
-GEN_SPEOP_LDST(evstwwe, 0x1C, 2);
-GEN_SPEOP_LDST(evstwwo, 0x1E, 2);
-
-/* Multiply and add - TODO */
-#if 0
-GEN_SPE(speundef, evmhessf, 0x01, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);//
-GEN_SPE(speundef, evmhossf, 0x03, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(evmheumi, evmhesmi, 0x04, 0x10, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmhesmf, 0x05, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(evmhoumi, evmhosmi, 0x06, 0x10, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmhosmf, 0x07, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmhessfa, 0x11, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmhossfa, 0x13, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(evmheumia, evmhesmia, 0x14, 0x10, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmhesmfa, 0x15, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(evmhoumia, evmhosmia, 0x16, 0x10, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmhosmfa, 0x17, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-
-GEN_SPE(speundef, evmwhssf, 0x03, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(evmwlumi, speundef, 0x04, 0x11, 0x00000000, 0xFFFFFFFF, PPC_SPE);
-GEN_SPE(evmwhumi, evmwhsmi, 0x06, 0x11, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmwhsmf, 0x07, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmwssf, 0x09, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmwsmf, 0x0D, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmwhssfa, 0x13, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(evmwlumia, speundef, 0x14, 0x11, 0x00000000, 0xFFFFFFFF, PPC_SPE);
-GEN_SPE(evmwhumia, evmwhsmia, 0x16, 0x11, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmwhsmfa, 0x17, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmwssfa, 0x19, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmwsmfa, 0x1D, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-
-GEN_SPE(evadduiaaw, evaddsiaaw, 0x00, 0x13, 0x0000F800, 0x0000F800, PPC_SPE);
-GEN_SPE(evsubfusiaaw, evsubfssiaaw, 0x01, 0x13, 0x0000F800, 0x0000F800, PPC_SPE);
-GEN_SPE(evaddumiaaw, evaddsmiaaw, 0x04, 0x13, 0x0000F800, 0x0000F800, PPC_SPE);
-GEN_SPE(evsubfumiaaw, evsubfsmiaaw, 0x05, 0x13, 0x0000F800, 0x0000F800, PPC_SPE);
-GEN_SPE(evdivws, evdivwu, 0x06, 0x13, 0x00000000, 0x00000000, PPC_SPE);
-
-GEN_SPE(evmheusiaaw, evmhessiaaw, 0x00, 0x14, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmhessfaaw, 0x01, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(evmhousiaaw, evmhossiaaw, 0x02, 0x14, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmhossfaaw, 0x03, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(evmheumiaaw, evmhesmiaaw, 0x04, 0x14, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmhesmfaaw, 0x05, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(evmhoumiaaw, evmhosmiaaw, 0x06, 0x14, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmhosmfaaw, 0x07, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(evmhegumiaa, evmhegsmiaa, 0x14, 0x14, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmhegsmfaa, 0x15, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(evmhogumiaa, evmhogsmiaa, 0x16, 0x14, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmhogsmfaa, 0x17, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-
-GEN_SPE(evmwlusiaaw, evmwlssiaaw, 0x00, 0x15, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(evmwlumiaaw, evmwlsmiaaw, 0x04, 0x15, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmwssfaa, 0x09, 0x15, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmwsmfaa, 0x0D, 0x15, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-
-GEN_SPE(evmheusianw, evmhessianw, 0x00, 0x16, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmhessfanw, 0x01, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(evmhousianw, evmhossianw, 0x02, 0x16, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmhossfanw, 0x03, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(evmheumianw, evmhesmianw, 0x04, 0x16, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmhesmfanw, 0x05, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(evmhoumianw, evmhosmianw, 0x06, 0x16, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmhosmfanw, 0x07, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(evmhegumian, evmhegsmian, 0x14, 0x16, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmhegsmfan, 0x15, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(evmhigumian, evmhigsmian, 0x16, 0x16, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmhogsmfan, 0x17, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-
-GEN_SPE(evmwlusianw, evmwlssianw, 0x00, 0x17, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(evmwlumianw, evmwlsmianw, 0x04, 0x17, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmwssfan, 0x09, 0x17, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-GEN_SPE(evmwumian, evmwsmian, 0x0C, 0x17, 0x00000000, 0x00000000, PPC_SPE);
-GEN_SPE(speundef, evmwsmfan, 0x0D, 0x17, 0xFFFFFFFF, 0x00000000, PPC_SPE);
-#endif
-
-/*** SPE floating-point extension ***/
-#define GEN_SPEFPUOP_CONV_32_32(name) \
-static inline void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_i32 t0 = tcg_temp_new_i32(); \
- tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]); \
- gen_helper_##name(t0, cpu_env, t0); \
- tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
- tcg_temp_free_i32(t0); \
-}
-#define GEN_SPEFPUOP_CONV_32_64(name) \
-static inline void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_i64 t0 = tcg_temp_new_i64(); \
- TCGv_i32 t1 = tcg_temp_new_i32(); \
- gen_load_gpr64(t0, rB(ctx->opcode)); \
- gen_helper_##name(t1, cpu_env, t0); \
- tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); \
- tcg_temp_free_i64(t0); \
- tcg_temp_free_i32(t1); \
-}
-#define GEN_SPEFPUOP_CONV_64_32(name) \
-static inline void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_i64 t0 = tcg_temp_new_i64(); \
- TCGv_i32 t1 = tcg_temp_new_i32(); \
- tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
- gen_helper_##name(t0, cpu_env, t1); \
- gen_store_gpr64(rD(ctx->opcode), t0); \
- tcg_temp_free_i64(t0); \
- tcg_temp_free_i32(t1); \
-}
-#define GEN_SPEFPUOP_CONV_64_64(name) \
-static inline void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_i64 t0 = tcg_temp_new_i64(); \
- gen_load_gpr64(t0, rB(ctx->opcode)); \
- gen_helper_##name(t0, cpu_env, t0); \
- gen_store_gpr64(rD(ctx->opcode), t0); \
- tcg_temp_free_i64(t0); \
-}
-#define GEN_SPEFPUOP_ARITH2_32_32(name) \
-static inline void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_i32 t0, t1; \
- if (unlikely(!ctx->spe_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_SPEU); \
- return; \
- } \
- t0 = tcg_temp_new_i32(); \
- t1 = tcg_temp_new_i32(); \
- tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
- tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
- gen_helper_##name(t0, cpu_env, t0, t1); \
- tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
- \
- tcg_temp_free_i32(t0); \
- tcg_temp_free_i32(t1); \
-}
-#define GEN_SPEFPUOP_ARITH2_64_64(name) \
-static inline void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_i64 t0, t1; \
- if (unlikely(!ctx->spe_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_SPEU); \
- return; \
- } \
- t0 = tcg_temp_new_i64(); \
- t1 = tcg_temp_new_i64(); \
- gen_load_gpr64(t0, rA(ctx->opcode)); \
- gen_load_gpr64(t1, rB(ctx->opcode)); \
- gen_helper_##name(t0, cpu_env, t0, t1); \
- gen_store_gpr64(rD(ctx->opcode), t0); \
- tcg_temp_free_i64(t0); \
- tcg_temp_free_i64(t1); \
-}
-#define GEN_SPEFPUOP_COMP_32(name) \
-static inline void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_i32 t0, t1; \
- if (unlikely(!ctx->spe_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_SPEU); \
- return; \
- } \
- t0 = tcg_temp_new_i32(); \
- t1 = tcg_temp_new_i32(); \
- \
- tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
- tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
- gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, t0, t1); \
- \
- tcg_temp_free_i32(t0); \
- tcg_temp_free_i32(t1); \
-}
-#define GEN_SPEFPUOP_COMP_64(name) \
-static inline void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_i64 t0, t1; \
- if (unlikely(!ctx->spe_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_SPEU); \
- return; \
- } \
- t0 = tcg_temp_new_i64(); \
- t1 = tcg_temp_new_i64(); \
- gen_load_gpr64(t0, rA(ctx->opcode)); \
- gen_load_gpr64(t1, rB(ctx->opcode)); \
- gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, t0, t1); \
- tcg_temp_free_i64(t0); \
- tcg_temp_free_i64(t1); \
-}
-
-/* Single precision floating-point vectors operations */
-/* Arithmetic */
-GEN_SPEFPUOP_ARITH2_64_64(evfsadd);
-GEN_SPEFPUOP_ARITH2_64_64(evfssub);
-GEN_SPEFPUOP_ARITH2_64_64(evfsmul);
-GEN_SPEFPUOP_ARITH2_64_64(evfsdiv);
-static inline void gen_evfsabs(DisasContext *ctx)
-{
- if (unlikely(!ctx->spe_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_SPEU);
- return;
- }
- tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
- ~0x80000000);
- tcg_gen_andi_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
- ~0x80000000);
-}
-static inline void gen_evfsnabs(DisasContext *ctx)
-{
- if (unlikely(!ctx->spe_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_SPEU);
- return;
- }
- tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
- 0x80000000);
- tcg_gen_ori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
- 0x80000000);
-}
-static inline void gen_evfsneg(DisasContext *ctx)
-{
- if (unlikely(!ctx->spe_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_SPEU);
- return;
- }
- tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
- 0x80000000);
- tcg_gen_xori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
- 0x80000000);
-}
-
-/* Conversion */
-GEN_SPEFPUOP_CONV_64_64(evfscfui);
-GEN_SPEFPUOP_CONV_64_64(evfscfsi);
-GEN_SPEFPUOP_CONV_64_64(evfscfuf);
-GEN_SPEFPUOP_CONV_64_64(evfscfsf);
-GEN_SPEFPUOP_CONV_64_64(evfsctui);
-GEN_SPEFPUOP_CONV_64_64(evfsctsi);
-GEN_SPEFPUOP_CONV_64_64(evfsctuf);
-GEN_SPEFPUOP_CONV_64_64(evfsctsf);
-GEN_SPEFPUOP_CONV_64_64(evfsctuiz);
-GEN_SPEFPUOP_CONV_64_64(evfsctsiz);
-
-/* Comparison */
-GEN_SPEFPUOP_COMP_64(evfscmpgt);
-GEN_SPEFPUOP_COMP_64(evfscmplt);
-GEN_SPEFPUOP_COMP_64(evfscmpeq);
-GEN_SPEFPUOP_COMP_64(evfststgt);
-GEN_SPEFPUOP_COMP_64(evfststlt);
-GEN_SPEFPUOP_COMP_64(evfststeq);
-
-/* Opcodes definitions */
-GEN_SPE(evfsadd, evfssub, 0x00, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE); //
-GEN_SPE(evfsabs, evfsnabs, 0x02, 0x0A, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE); //
-GEN_SPE(evfsneg, speundef, 0x03, 0x0A, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE); //
-GEN_SPE(evfsmul, evfsdiv, 0x04, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE); //
-GEN_SPE(evfscmpgt, evfscmplt, 0x06, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE); //
-GEN_SPE(evfscmpeq, speundef, 0x07, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
-GEN_SPE(evfscfui, evfscfsi, 0x08, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
-GEN_SPE(evfscfuf, evfscfsf, 0x09, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
-GEN_SPE(evfsctui, evfsctsi, 0x0A, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
-GEN_SPE(evfsctuf, evfsctsf, 0x0B, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
-GEN_SPE(evfsctuiz, speundef, 0x0C, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
-GEN_SPE(evfsctsiz, speundef, 0x0D, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
-GEN_SPE(evfststgt, evfststlt, 0x0E, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE); //
-GEN_SPE(evfststeq, speundef, 0x0F, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
-
-/* Single precision floating-point operations */
-/* Arithmetic */
-GEN_SPEFPUOP_ARITH2_32_32(efsadd);
-GEN_SPEFPUOP_ARITH2_32_32(efssub);
-GEN_SPEFPUOP_ARITH2_32_32(efsmul);
-GEN_SPEFPUOP_ARITH2_32_32(efsdiv);
-static inline void gen_efsabs(DisasContext *ctx)
-{
- if (unlikely(!ctx->spe_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_SPEU);
- return;
- }
- tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], (target_long)~0x80000000LL);
-}
-static inline void gen_efsnabs(DisasContext *ctx)
-{
- if (unlikely(!ctx->spe_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_SPEU);
- return;
- }
- tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000);
-}
-static inline void gen_efsneg(DisasContext *ctx)
-{
- if (unlikely(!ctx->spe_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_SPEU);
- return;
- }
- tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000);
-}
-
-/* Conversion */
-GEN_SPEFPUOP_CONV_32_32(efscfui);
-GEN_SPEFPUOP_CONV_32_32(efscfsi);
-GEN_SPEFPUOP_CONV_32_32(efscfuf);
-GEN_SPEFPUOP_CONV_32_32(efscfsf);
-GEN_SPEFPUOP_CONV_32_32(efsctui);
-GEN_SPEFPUOP_CONV_32_32(efsctsi);
-GEN_SPEFPUOP_CONV_32_32(efsctuf);
-GEN_SPEFPUOP_CONV_32_32(efsctsf);
-GEN_SPEFPUOP_CONV_32_32(efsctuiz);
-GEN_SPEFPUOP_CONV_32_32(efsctsiz);
-GEN_SPEFPUOP_CONV_32_64(efscfd);
-
-/* Comparison */
-GEN_SPEFPUOP_COMP_32(efscmpgt);
-GEN_SPEFPUOP_COMP_32(efscmplt);
-GEN_SPEFPUOP_COMP_32(efscmpeq);
-GEN_SPEFPUOP_COMP_32(efststgt);
-GEN_SPEFPUOP_COMP_32(efststlt);
-GEN_SPEFPUOP_COMP_32(efststeq);
-
-/* Opcodes definitions */
-GEN_SPE(efsadd, efssub, 0x00, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE); //
-GEN_SPE(efsabs, efsnabs, 0x02, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE); //
-GEN_SPE(efsneg, speundef, 0x03, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE); //
-GEN_SPE(efsmul, efsdiv, 0x04, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE); //
-GEN_SPE(efscmpgt, efscmplt, 0x06, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE); //
-GEN_SPE(efscmpeq, efscfd, 0x07, 0x0B, 0x00600000, 0x00180000, PPC_SPE_SINGLE); //
-GEN_SPE(efscfui, efscfsi, 0x08, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
-GEN_SPE(efscfuf, efscfsf, 0x09, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
-GEN_SPE(efsctui, efsctsi, 0x0A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
-GEN_SPE(efsctuf, efsctsf, 0x0B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
-GEN_SPE(efsctuiz, speundef, 0x0C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
-GEN_SPE(efsctsiz, speundef, 0x0D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
-GEN_SPE(efststgt, efststlt, 0x0E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE); //
-GEN_SPE(efststeq, speundef, 0x0F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
-
-/* Double precision floating-point operations */
-/* Arithmetic */
-GEN_SPEFPUOP_ARITH2_64_64(efdadd);
-GEN_SPEFPUOP_ARITH2_64_64(efdsub);
-GEN_SPEFPUOP_ARITH2_64_64(efdmul);
-GEN_SPEFPUOP_ARITH2_64_64(efddiv);
-static inline void gen_efdabs(DisasContext *ctx)
-{
- if (unlikely(!ctx->spe_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_SPEU);
- return;
- }
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_andi_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
- ~0x80000000);
-}
-static inline void gen_efdnabs(DisasContext *ctx)
-{
- if (unlikely(!ctx->spe_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_SPEU);
- return;
- }
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_ori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
- 0x80000000);
+ tcg_gen_muls2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_sari_i64(t1, cpu_gpr[rC(ctx->opcode)], 63);
+ }
+ tcg_gen_add2_i64(t1, cpu_gpr[rD(ctx->opcode)], lo, hi,
+ cpu_gpr[rC(ctx->opcode)], t1);
+ tcg_temp_free_i64(lo);
+ tcg_temp_free_i64(hi);
+ tcg_temp_free_i64(t1);
}
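gen_maddhd_maddhdu above handles both maddhd and maddhdu, distinguished by the Rc bit of the opcode (set selects the unsigned form): the 128-bit product of rA and rB is added to rC, zero-extended for maddhdu and sign-extended for maddhd, and rD receives the high 64 bits, which is exactly what the mulu2/muls2 plus add2 sequence computes. A host-side sketch of the same arithmetic, assuming a compiler that provides __int128 (the *_ref names are hypothetical):

#include <stdint.h>

/* maddhd: high 64 bits of (signed) ra * rb + sign-extended rc. */
static uint64_t maddhd_ref(int64_t ra, int64_t rb, int64_t rc)
{
    __int128 sum = (__int128)ra * rb + rc;   /* 128-bit signed product + rc */
    return (uint64_t)(sum >> 64);            /* high doubleword */
}

/* maddhdu: high 64 bits of (unsigned) ra * rb + zero-extended rc. */
static uint64_t maddhdu_ref(uint64_t ra, uint64_t rb, uint64_t rc)
{
    unsigned __int128 sum = (unsigned __int128)ra * rb + rc;
    return (uint64_t)(sum >> 64);
}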
-static inline void gen_efdneg(DisasContext *ctx)
-{
- if (unlikely(!ctx->spe_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_SPEU);
- return;
- }
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_xori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
- 0x80000000);
-}
-
-/* Conversion */
-GEN_SPEFPUOP_CONV_64_32(efdcfui);
-GEN_SPEFPUOP_CONV_64_32(efdcfsi);
-GEN_SPEFPUOP_CONV_64_32(efdcfuf);
-GEN_SPEFPUOP_CONV_64_32(efdcfsf);
-GEN_SPEFPUOP_CONV_32_64(efdctui);
-GEN_SPEFPUOP_CONV_32_64(efdctsi);
-GEN_SPEFPUOP_CONV_32_64(efdctuf);
-GEN_SPEFPUOP_CONV_32_64(efdctsf);
-GEN_SPEFPUOP_CONV_32_64(efdctuiz);
-GEN_SPEFPUOP_CONV_32_64(efdctsiz);
-GEN_SPEFPUOP_CONV_64_32(efdcfs);
-GEN_SPEFPUOP_CONV_64_64(efdcfuid);
-GEN_SPEFPUOP_CONV_64_64(efdcfsid);
-GEN_SPEFPUOP_CONV_64_64(efdctuidz);
-GEN_SPEFPUOP_CONV_64_64(efdctsidz);
-
-/* Comparison */
-GEN_SPEFPUOP_COMP_64(efdcmpgt);
-GEN_SPEFPUOP_COMP_64(efdcmplt);
-GEN_SPEFPUOP_COMP_64(efdcmpeq);
-GEN_SPEFPUOP_COMP_64(efdtstgt);
-GEN_SPEFPUOP_COMP_64(efdtstlt);
-GEN_SPEFPUOP_COMP_64(efdtsteq);
-
-/* Opcodes definitions */
-GEN_SPE(efdadd, efdsub, 0x10, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE); //
-GEN_SPE(efdcfuid, efdcfsid, 0x11, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
-GEN_SPE(efdabs, efdnabs, 0x12, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_DOUBLE); //
-GEN_SPE(efdneg, speundef, 0x13, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_DOUBLE); //
-GEN_SPE(efdmul, efddiv, 0x14, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE); //
-GEN_SPE(efdctuidz, efdctsidz, 0x15, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
-GEN_SPE(efdcmpgt, efdcmplt, 0x16, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE); //
-GEN_SPE(efdcmpeq, efdcfs, 0x17, 0x0B, 0x00600000, 0x00180000, PPC_SPE_DOUBLE); //
-GEN_SPE(efdcfui, efdcfsi, 0x18, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
-GEN_SPE(efdcfuf, efdcfsf, 0x19, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
-GEN_SPE(efdctui, efdctsi, 0x1A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
-GEN_SPE(efdctuf, efdctsf, 0x1B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
-GEN_SPE(efdctuiz, speundef, 0x1C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE); //
-GEN_SPE(efdctsiz, speundef, 0x1D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE); //
-GEN_SPE(efdtstgt, efdtstlt, 0x1E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE); //
-GEN_SPE(efdtsteq, speundef, 0x1F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_DOUBLE); //
+#endif /* defined(TARGET_PPC64) */
static void gen_tbegin(DisasContext *ctx)
{
@@ -9865,18 +6194,33 @@ static inline void gen_##name(DisasContext *ctx) \
GEN_TM_PRIV_NOOP(treclaim);
GEN_TM_PRIV_NOOP(trechkpt);
+#include "translate/fp-impl.inc.c"
+
+#include "translate/vmx-impl.inc.c"
+
+#include "translate/vsx-impl.inc.c"
+
+#include "translate/dfp-impl.inc.c"
+
+#include "translate/spe-impl.inc.c"
+
static opcode_t opcodes[] = {
GEN_HANDLER(invalid, 0x00, 0x00, 0x00, 0xFFFFFFFF, PPC_NONE),
GEN_HANDLER(cmp, 0x1F, 0x00, 0x00, 0x00400000, PPC_INTEGER),
GEN_HANDLER(cmpi, 0x0B, 0xFF, 0xFF, 0x00400000, PPC_INTEGER),
-GEN_HANDLER(cmpl, 0x1F, 0x00, 0x01, 0x00400000, PPC_INTEGER),
+GEN_HANDLER(cmpl, 0x1F, 0x00, 0x01, 0x00400001, PPC_INTEGER),
GEN_HANDLER(cmpli, 0x0A, 0xFF, 0xFF, 0x00400000, PPC_INTEGER),
+#if defined(TARGET_PPC64)
+GEN_HANDLER_E(cmpeqb, 0x1F, 0x00, 0x07, 0x00600000, PPC_NONE, PPC2_ISA300),
+#endif
GEN_HANDLER_E(cmpb, 0x1F, 0x1C, 0x0F, 0x00000001, PPC_NONE, PPC2_ISA205),
+GEN_HANDLER_E(cmprb, 0x1F, 0x00, 0x06, 0x00400001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(isel, 0x1F, 0x0F, 0xFF, 0x00000001, PPC_ISEL),
GEN_HANDLER(addi, 0x0E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(addic, 0x0C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER2(addic_, "addic.", 0x0D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(addis, 0x0F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER_E(addpcis, 0x13, 0x2, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(mulhw, 0x1F, 0x0B, 0x02, 0x00000400, PPC_INTEGER),
GEN_HANDLER(mulhwu, 0x1F, 0x0B, 0x00, 0x00000400, PPC_INTEGER),
GEN_HANDLER(mullw, 0x1F, 0x0B, 0x07, 0x00000000, PPC_INTEGER),
@@ -9891,6 +6235,7 @@ GEN_HANDLER(subfic, 0x08, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER2(andi_, "andi.", 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER2(andis_, "andis.", 0x1D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(cntlzw, 0x1F, 0x1A, 0x00, 0x00000000, PPC_INTEGER),
+GEN_HANDLER_E(cnttzw, 0x1F, 0x1A, 0x10, 0x00000000, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(or, 0x1F, 0x1C, 0x0D, 0x00000000, PPC_INTEGER),
GEN_HANDLER(xor, 0x1F, 0x1C, 0x09, 0x00000000, PPC_INTEGER),
GEN_HANDLER(ori, 0x18, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
@@ -9903,6 +6248,8 @@ GEN_HANDLER_E(prtyw, 0x1F, 0x1A, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA205),
#if defined(TARGET_PPC64)
GEN_HANDLER(popcntd, 0x1F, 0x1A, 0x0F, 0x0000F801, PPC_POPCNTWD),
GEN_HANDLER(cntlzd, 0x1F, 0x1A, 0x01, 0x00000000, PPC_64B),
+GEN_HANDLER_E(cnttzd, 0x1F, 0x1A, 0x11, 0x00000000, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(darn, 0x1F, 0x13, 0x17, 0x001CF801, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(prtyd, 0x1F, 0x1A, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA205),
GEN_HANDLER_E(bpermd, 0x1F, 0x1C, 0x07, 0x00000001, PPC_NONE, PPC2_PERM_ISA206),
#endif
@@ -9919,25 +6266,11 @@ GEN_HANDLER(srad, 0x1F, 0x1A, 0x18, 0x00000000, PPC_64B),
GEN_HANDLER2(sradi0, "sradi", 0x1F, 0x1A, 0x19, 0x00000000, PPC_64B),
GEN_HANDLER2(sradi1, "sradi", 0x1F, 0x1B, 0x19, 0x00000000, PPC_64B),
GEN_HANDLER(srd, 0x1F, 0x1B, 0x10, 0x00000000, PPC_64B),
+GEN_HANDLER2_E(extswsli0, "extswsli", 0x1F, 0x1A, 0x1B, 0x00000000,
+ PPC_NONE, PPC2_ISA300),
+GEN_HANDLER2_E(extswsli1, "extswsli", 0x1F, 0x1B, 0x1B, 0x00000000,
+ PPC_NONE, PPC2_ISA300),
#endif
-GEN_HANDLER(frsqrtes, 0x3B, 0x1A, 0xFF, 0x001F07C0, PPC_FLOAT_FRSQRTES),
-GEN_HANDLER(fsqrt, 0x3F, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT),
-GEN_HANDLER(fsqrts, 0x3B, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT),
-GEN_HANDLER(fcmpo, 0x3F, 0x00, 0x01, 0x00600001, PPC_FLOAT),
-GEN_HANDLER(fcmpu, 0x3F, 0x00, 0x00, 0x00600001, PPC_FLOAT),
-GEN_HANDLER(fabs, 0x3F, 0x08, 0x08, 0x001F0000, PPC_FLOAT),
-GEN_HANDLER(fmr, 0x3F, 0x08, 0x02, 0x001F0000, PPC_FLOAT),
-GEN_HANDLER(fnabs, 0x3F, 0x08, 0x04, 0x001F0000, PPC_FLOAT),
-GEN_HANDLER(fneg, 0x3F, 0x08, 0x01, 0x001F0000, PPC_FLOAT),
-GEN_HANDLER_E(fcpsgn, 0x3F, 0x08, 0x00, 0x00000000, PPC_NONE, PPC2_ISA205),
-GEN_HANDLER_E(fmrgew, 0x3F, 0x06, 0x1E, 0x00000001, PPC_NONE, PPC2_VSX207),
-GEN_HANDLER_E(fmrgow, 0x3F, 0x06, 0x1A, 0x00000001, PPC_NONE, PPC2_VSX207),
-GEN_HANDLER(mcrfs, 0x3F, 0x00, 0x02, 0x0063F801, PPC_FLOAT),
-GEN_HANDLER(mffs, 0x3F, 0x07, 0x12, 0x001FF800, PPC_FLOAT),
-GEN_HANDLER(mtfsb0, 0x3F, 0x06, 0x02, 0x001FF800, PPC_FLOAT),
-GEN_HANDLER(mtfsb1, 0x3F, 0x06, 0x01, 0x001FF800, PPC_FLOAT),
-GEN_HANDLER(mtfsf, 0x3F, 0x07, 0x16, 0x00000000, PPC_FLOAT),
-GEN_HANDLER(mtfsfi, 0x3F, 0x06, 0x04, 0x006e0800, PPC_FLOAT),
#if defined(TARGET_PPC64)
GEN_HANDLER(ld, 0x3A, 0xFF, 0xFF, 0x00000000, PPC_64B),
GEN_HANDLER(lq, 0x38, 0xFF, 0xFF, 0x00000000, PPC_64BX),
@@ -9969,7 +6302,7 @@ GEN_HANDLER(b, 0x12, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
GEN_HANDLER(bc, 0x10, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
GEN_HANDLER(bcctr, 0x13, 0x10, 0x10, 0x00000000, PPC_FLOW),
GEN_HANDLER(bclr, 0x13, 0x10, 0x00, 0x00000000, PPC_FLOW),
-GEN_HANDLER_E(bctar, 0x13, 0x10, 0x11, 0, PPC_NONE, PPC2_BCTAR_ISA207),
+GEN_HANDLER_E(bctar, 0x13, 0x10, 0x11, 0x0000E000, PPC_NONE, PPC2_BCTAR_ISA207),
GEN_HANDLER(mcrf, 0x13, 0x00, 0xFF, 0x00000001, PPC_INTEGER),
GEN_HANDLER(rfi, 0x13, 0x12, 0x01, 0x03FF8001, PPC_FLOW),
#if defined(TARGET_PPC64)
@@ -9995,6 +6328,7 @@ GEN_HANDLER(mftb, 0x1F, 0x13, 0x0B, 0x00000001, PPC_MFTB),
GEN_HANDLER(mtcrf, 0x1F, 0x10, 0x04, 0x00000801, PPC_MISC),
#if defined(TARGET_PPC64)
GEN_HANDLER(mtmsrd, 0x1F, 0x12, 0x05, 0x001EF801, PPC_64B),
+GEN_HANDLER_E(setb, 0x1F, 0x00, 0x04, 0x0003F801, PPC_NONE, PPC2_ISA300),
#endif
GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001EF801, PPC_MISC),
GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000000, PPC_MISC),
@@ -10143,10 +6477,11 @@ GEN_HANDLER(lvsr, 0x1f, 0x06, 0x01, 0x00000001, PPC_ALTIVEC),
GEN_HANDLER(mfvscr, 0x04, 0x2, 0x18, 0x001ff800, PPC_ALTIVEC),
GEN_HANDLER(mtvscr, 0x04, 0x2, 0x19, 0x03ff0000, PPC_ALTIVEC),
GEN_HANDLER(vmladduhm, 0x04, 0x11, 0xFF, 0x00000000, PPC_ALTIVEC),
-GEN_HANDLER2(evsel0, "evsel", 0x04, 0x1c, 0x09, 0x00000000, PPC_SPE),
-GEN_HANDLER2(evsel1, "evsel", 0x04, 0x1d, 0x09, 0x00000000, PPC_SPE),
-GEN_HANDLER2(evsel2, "evsel", 0x04, 0x1e, 0x09, 0x00000000, PPC_SPE),
-GEN_HANDLER2(evsel3, "evsel", 0x04, 0x1f, 0x09, 0x00000000, PPC_SPE),
+#if defined(TARGET_PPC64)
+GEN_HANDLER_E(maddhd_maddhdu, 0x04, 0x18, 0xFF, 0x00000000, PPC_NONE,
+ PPC2_ISA300),
+GEN_HANDLER_E(maddld, 0x04, 0x19, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300),
+#endif
#undef GEN_INT_ARITH_ADD
#undef GEN_INT_ARITH_ADD_CONST
@@ -10177,6 +6512,8 @@ GEN_HANDLER_E(divwe, 0x1F, 0x0B, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divweo, 0x1F, 0x0B, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divweu, 0x1F, 0x0B, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divweuo, 0x1F, 0x0B, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206),
+GEN_HANDLER_E(modsw, 0x1F, 0x0B, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(moduw, 0x1F, 0x0B, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300),
#if defined(TARGET_PPC64)
#undef GEN_INT_ARITH_DIVD
@@ -10191,6 +6528,8 @@ GEN_HANDLER_E(divdeu, 0x1F, 0x09, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divdeuo, 0x1F, 0x09, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divde, 0x1F, 0x09, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divdeo, 0x1F, 0x09, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206),
+GEN_HANDLER_E(modsd, 0x1F, 0x09, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(modud, 0x1F, 0x09, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300),
#undef GEN_INT_ARITH_MUL_HELPER
#define GEN_INT_ARITH_MUL_HELPER(name, opc3) \
@@ -10259,66 +6598,6 @@ GEN_PPC64_R2(rldcr, 0x1E, 0x09),
GEN_PPC64_R4(rldimi, 0x1E, 0x06),
#endif
-#undef _GEN_FLOAT_ACB
-#undef GEN_FLOAT_ACB
-#undef _GEN_FLOAT_AB
-#undef GEN_FLOAT_AB
-#undef _GEN_FLOAT_AC
-#undef GEN_FLOAT_AC
-#undef GEN_FLOAT_B
-#undef GEN_FLOAT_BS
-#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \
-GEN_HANDLER(f##name, op1, op2, 0xFF, 0x00000000, type)
-#define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
-_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type), \
-_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type)
-#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \
-GEN_HANDLER(f##name, op1, op2, 0xFF, inval, type)
-#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
-_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type), \
-_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type)
-#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \
-GEN_HANDLER(f##name, op1, op2, 0xFF, inval, type)
-#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
-_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type), \
-_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type)
-#define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \
-GEN_HANDLER(f##name, 0x3F, op2, op3, 0x001F0000, type)
-#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
-GEN_HANDLER(f##name, op1, op2, 0xFF, 0x001F07C0, type)
-
-GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT),
-GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT),
-GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT),
-GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT),
-GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES),
-GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE),
-_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL),
-GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT),
-GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT),
-GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT),
-GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT),
-GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT),
-GEN_HANDLER_E(ftdiv, 0x3F, 0x00, 0x04, 1, PPC_NONE, PPC2_FP_TST_ISA206),
-GEN_HANDLER_E(ftsqrt, 0x3F, 0x00, 0x05, 1, PPC_NONE, PPC2_FP_TST_ISA206),
-GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT),
-GEN_HANDLER_E(fctiwu, 0x3F, 0x0E, 0x04, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
-GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT),
-GEN_HANDLER_E(fctiwuz, 0x3F, 0x0F, 0x04, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
-GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT),
-GEN_HANDLER_E(fcfid, 0x3F, 0x0E, 0x1A, 0x001F0000, PPC_NONE, PPC2_FP_CVT_S64),
-GEN_HANDLER_E(fcfids, 0x3B, 0x0E, 0x1A, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
-GEN_HANDLER_E(fcfidu, 0x3F, 0x0E, 0x1E, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
-GEN_HANDLER_E(fcfidus, 0x3B, 0x0E, 0x1E, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
-GEN_HANDLER_E(fctid, 0x3F, 0x0E, 0x19, 0x001F0000, PPC_NONE, PPC2_FP_CVT_S64),
-GEN_HANDLER_E(fctidu, 0x3F, 0x0E, 0x1D, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
-GEN_HANDLER_E(fctidz, 0x3F, 0x0F, 0x19, 0x001F0000, PPC_NONE, PPC2_FP_CVT_S64),
-GEN_HANDLER_E(fctiduz, 0x3F, 0x0F, 0x1D, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
-GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT),
-GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT),
-GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT),
-GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT),
-
#undef GEN_LD
#undef GEN_LDU
#undef GEN_LDUX
@@ -10345,12 +6624,12 @@ GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER)
#if defined(TARGET_PPC64)
GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B)
GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B)
-GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B)
-GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B)
-GEN_LDX_E(ldbr, ld64ur, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE)
+GEN_LDUX(ld, ld64_i64, 0x15, 0x01, PPC_64B)
+GEN_LDX(ld, ld64_i64, 0x15, 0x00, PPC_64B)
+GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE)
/* HV/P7 and later only */
-GEN_LDX_HVRM(ldcix, ld64, 0x15, 0x1b, PPC_CILDST)
+GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x18, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
@@ -10381,10 +6660,10 @@ GEN_STS(stb, st8, 0x06, PPC_INTEGER)
GEN_STS(sth, st16, 0x0C, PPC_INTEGER)
GEN_STS(stw, st32, 0x04, PPC_INTEGER)
#if defined(TARGET_PPC64)
-GEN_STUX(std, st64, 0x15, 0x05, PPC_64B)
-GEN_STX(std, st64, 0x15, 0x04, PPC_64B)
-GEN_STX_E(stdbr, st64r, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE)
-GEN_STX_HVRM(stdcix, st64, 0x15, 0x1f, PPC_CILDST)
+GEN_STUX(std, st64_i64, 0x15, 0x05, PPC_64B)
+GEN_STX(std, st64_i64, 0x15, 0x04, PPC_64B)
+GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE)
+GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
@@ -10392,57 +6671,6 @@ GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER)
GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER)
-#undef GEN_LDF
-#undef GEN_LDUF
-#undef GEN_LDUXF
-#undef GEN_LDXF
-#undef GEN_LDFS
-#define GEN_LDF(name, ldop, opc, type) \
-GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type),
-#define GEN_LDUF(name, ldop, opc, type) \
-GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type),
-#define GEN_LDUXF(name, ldop, opc, type) \
-GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type),
-#define GEN_LDXF(name, ldop, opc2, opc3, type) \
-GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type),
-#define GEN_LDFS(name, ldop, op, type) \
-GEN_LDF(name, ldop, op | 0x20, type) \
-GEN_LDUF(name, ldop, op | 0x21, type) \
-GEN_LDUXF(name, ldop, op | 0x01, type) \
-GEN_LDXF(name, ldop, 0x17, op | 0x00, type)
-
-GEN_LDFS(lfd, ld64, 0x12, PPC_FLOAT)
-GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT)
-GEN_HANDLER_E(lfiwax, 0x1f, 0x17, 0x1a, 0x00000001, PPC_NONE, PPC2_ISA205),
-GEN_HANDLER_E(lfiwzx, 0x1f, 0x17, 0x1b, 0x1, PPC_NONE, PPC2_FP_CVT_ISA206),
-GEN_HANDLER_E(lfdp, 0x39, 0xFF, 0xFF, 0x00200003, PPC_NONE, PPC2_ISA205),
-GEN_HANDLER_E(lfdpx, 0x1F, 0x17, 0x18, 0x00200001, PPC_NONE, PPC2_ISA205),
-
-#undef GEN_STF
-#undef GEN_STUF
-#undef GEN_STUXF
-#undef GEN_STXF
-#undef GEN_STFS
-#define GEN_STF(name, stop, opc, type) \
-GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type),
-#define GEN_STUF(name, stop, opc, type) \
-GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type),
-#define GEN_STUXF(name, stop, opc, type) \
-GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type),
-#define GEN_STXF(name, stop, opc2, opc3, type) \
-GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type),
-#define GEN_STFS(name, stop, op, type) \
-GEN_STF(name, stop, op | 0x20, type) \
-GEN_STUF(name, stop, op | 0x21, type) \
-GEN_STUXF(name, stop, op | 0x01, type) \
-GEN_STXF(name, stop, 0x17, op | 0x00, type)
-
-GEN_STFS(stfd, st64, 0x16, PPC_FLOAT)
-GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT)
-GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX)
-GEN_HANDLER_E(stfdp, 0x3D, 0xFF, 0xFF, 0x00200003, PPC_NONE, PPC2_ISA205),
-GEN_HANDLER_E(stfdpx, 0x1F, 0x17, 0x1C, 0x00200001, PPC_NONE, PPC2_ISA205),
-
#undef GEN_CRLOGIC
#define GEN_CRLOGIC(name, tcg_op, opc) \
GEN_HANDLER(name, 0x13, 0x01, opc, 0x00000001, PPC_INTEGER)
@@ -10501,807 +6729,6 @@ GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00),
GEN_MAC_HANDLER(mullhw, 0x08, 0x0D),
GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C),
-#undef GEN_VR_LDX
-#undef GEN_VR_STX
-#undef GEN_VR_LVE
-#undef GEN_VR_STVE
-#define GEN_VR_LDX(name, opc2, opc3) \
-GEN_HANDLER(name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)
-#define GEN_VR_STX(name, opc2, opc3) \
-GEN_HANDLER(st##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)
-#define GEN_VR_LVE(name, opc2, opc3) \
- GEN_HANDLER(lve##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)
-#define GEN_VR_STVE(name, opc2, opc3) \
- GEN_HANDLER(stve##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)
-GEN_VR_LDX(lvx, 0x07, 0x03),
-GEN_VR_LDX(lvxl, 0x07, 0x0B),
-GEN_VR_LVE(bx, 0x07, 0x00),
-GEN_VR_LVE(hx, 0x07, 0x01),
-GEN_VR_LVE(wx, 0x07, 0x02),
-GEN_VR_STX(svx, 0x07, 0x07),
-GEN_VR_STX(svxl, 0x07, 0x0F),
-GEN_VR_STVE(bx, 0x07, 0x04),
-GEN_VR_STVE(hx, 0x07, 0x05),
-GEN_VR_STVE(wx, 0x07, 0x06),
-
-#undef GEN_VX_LOGICAL
-#define GEN_VX_LOGICAL(name, tcg_op, opc2, opc3) \
-GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC)
-
-#undef GEN_VX_LOGICAL_207
-#define GEN_VX_LOGICAL_207(name, tcg_op, opc2, opc3) \
-GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ALTIVEC_207)
-
-GEN_VX_LOGICAL(vand, tcg_gen_and_i64, 2, 16),
-GEN_VX_LOGICAL(vandc, tcg_gen_andc_i64, 2, 17),
-GEN_VX_LOGICAL(vor, tcg_gen_or_i64, 2, 18),
-GEN_VX_LOGICAL(vxor, tcg_gen_xor_i64, 2, 19),
-GEN_VX_LOGICAL(vnor, tcg_gen_nor_i64, 2, 20),
-GEN_VX_LOGICAL_207(veqv, tcg_gen_eqv_i64, 2, 26),
-GEN_VX_LOGICAL_207(vnand, tcg_gen_nand_i64, 2, 22),
-GEN_VX_LOGICAL_207(vorc, tcg_gen_orc_i64, 2, 21),
-
-#undef GEN_VXFORM
-#define GEN_VXFORM(name, opc2, opc3) \
-GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC)
-
-#undef GEN_VXFORM_207
-#define GEN_VXFORM_207(name, opc2, opc3) \
-GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ALTIVEC_207)
-
-#undef GEN_VXFORM_DUAL
-#define GEN_VXFORM_DUAL(name0, name1, opc2, opc3, type0, type1) \
-GEN_HANDLER_E(name0##_##name1, 0x4, opc2, opc3, 0x00000000, type0, type1)
-
-#undef GEN_VXRFORM_DUAL
-#define GEN_VXRFORM_DUAL(name0, name1, opc2, opc3, tp0, tp1) \
-GEN_HANDLER_E(name0##_##name1, 0x4, opc2, opc3, 0x00000000, tp0, tp1), \
-GEN_HANDLER_E(name0##_##name1, 0x4, opc2, (opc3 | 0x10), 0x00000000, tp0, tp1),
-
-GEN_VXFORM(vaddubm, 0, 0),
-GEN_VXFORM(vadduhm, 0, 1),
-GEN_VXFORM(vadduwm, 0, 2),
-GEN_VXFORM_207(vaddudm, 0, 3),
-GEN_VXFORM_DUAL(vsububm, bcdadd, 0, 16, PPC_ALTIVEC, PPC_NONE),
-GEN_VXFORM_DUAL(vsubuhm, bcdsub, 0, 17, PPC_ALTIVEC, PPC_NONE),
-GEN_VXFORM(vsubuwm, 0, 18),
-GEN_VXFORM_207(vsubudm, 0, 19),
-GEN_VXFORM(vmaxub, 1, 0),
-GEN_VXFORM(vmaxuh, 1, 1),
-GEN_VXFORM(vmaxuw, 1, 2),
-GEN_VXFORM_207(vmaxud, 1, 3),
-GEN_VXFORM(vmaxsb, 1, 4),
-GEN_VXFORM(vmaxsh, 1, 5),
-GEN_VXFORM(vmaxsw, 1, 6),
-GEN_VXFORM_207(vmaxsd, 1, 7),
-GEN_VXFORM(vminub, 1, 8),
-GEN_VXFORM(vminuh, 1, 9),
-GEN_VXFORM(vminuw, 1, 10),
-GEN_VXFORM_207(vminud, 1, 11),
-GEN_VXFORM(vminsb, 1, 12),
-GEN_VXFORM(vminsh, 1, 13),
-GEN_VXFORM(vminsw, 1, 14),
-GEN_VXFORM_207(vminsd, 1, 15),
-GEN_VXFORM(vavgub, 1, 16),
-GEN_VXFORM(vavguh, 1, 17),
-GEN_VXFORM(vavguw, 1, 18),
-GEN_VXFORM(vavgsb, 1, 20),
-GEN_VXFORM(vavgsh, 1, 21),
-GEN_VXFORM(vavgsw, 1, 22),
-GEN_VXFORM(vmrghb, 6, 0),
-GEN_VXFORM(vmrghh, 6, 1),
-GEN_VXFORM(vmrghw, 6, 2),
-GEN_VXFORM(vmrglb, 6, 4),
-GEN_VXFORM(vmrglh, 6, 5),
-GEN_VXFORM(vmrglw, 6, 6),
-GEN_VXFORM_207(vmrgew, 6, 30),
-GEN_VXFORM_207(vmrgow, 6, 26),
-GEN_VXFORM(vmuloub, 4, 0),
-GEN_VXFORM(vmulouh, 4, 1),
-GEN_VXFORM_DUAL(vmulouw, vmuluwm, 4, 2, PPC_ALTIVEC, PPC_NONE),
-GEN_VXFORM(vmulosb, 4, 4),
-GEN_VXFORM(vmulosh, 4, 5),
-GEN_VXFORM_207(vmulosw, 4, 6),
-GEN_VXFORM(vmuleub, 4, 8),
-GEN_VXFORM(vmuleuh, 4, 9),
-GEN_VXFORM_207(vmuleuw, 4, 10),
-GEN_VXFORM(vmulesb, 4, 12),
-GEN_VXFORM(vmulesh, 4, 13),
-GEN_VXFORM_207(vmulesw, 4, 14),
-GEN_VXFORM(vslb, 2, 4),
-GEN_VXFORM(vslh, 2, 5),
-GEN_VXFORM(vslw, 2, 6),
-GEN_VXFORM_207(vsld, 2, 23),
-GEN_VXFORM(vsrb, 2, 8),
-GEN_VXFORM(vsrh, 2, 9),
-GEN_VXFORM(vsrw, 2, 10),
-GEN_VXFORM_207(vsrd, 2, 27),
-GEN_VXFORM(vsrab, 2, 12),
-GEN_VXFORM(vsrah, 2, 13),
-GEN_VXFORM(vsraw, 2, 14),
-GEN_VXFORM_207(vsrad, 2, 15),
-GEN_VXFORM(vslo, 6, 16),
-GEN_VXFORM(vsro, 6, 17),
-GEN_VXFORM(vaddcuw, 0, 6),
-GEN_VXFORM(vsubcuw, 0, 22),
-GEN_VXFORM(vaddubs, 0, 8),
-GEN_VXFORM(vadduhs, 0, 9),
-GEN_VXFORM(vadduws, 0, 10),
-GEN_VXFORM(vaddsbs, 0, 12),
-GEN_VXFORM(vaddshs, 0, 13),
-GEN_VXFORM(vaddsws, 0, 14),
-GEN_VXFORM_DUAL(vsububs, bcdadd, 0, 24, PPC_ALTIVEC, PPC_NONE),
-GEN_VXFORM_DUAL(vsubuhs, bcdsub, 0, 25, PPC_ALTIVEC, PPC_NONE),
-GEN_VXFORM(vsubuws, 0, 26),
-GEN_VXFORM(vsubsbs, 0, 28),
-GEN_VXFORM(vsubshs, 0, 29),
-GEN_VXFORM(vsubsws, 0, 30),
-GEN_VXFORM_207(vadduqm, 0, 4),
-GEN_VXFORM_207(vaddcuq, 0, 5),
-GEN_VXFORM_DUAL(vaddeuqm, vaddecuq, 30, 0xFF, PPC_NONE, PPC2_ALTIVEC_207),
-GEN_VXFORM_207(vsubuqm, 0, 20),
-GEN_VXFORM_207(vsubcuq, 0, 21),
-GEN_VXFORM_DUAL(vsubeuqm, vsubecuq, 31, 0xFF, PPC_NONE, PPC2_ALTIVEC_207),
-GEN_VXFORM(vrlb, 2, 0),
-GEN_VXFORM(vrlh, 2, 1),
-GEN_VXFORM(vrlw, 2, 2),
-GEN_VXFORM_207(vrld, 2, 3),
-GEN_VXFORM(vsl, 2, 7),
-GEN_VXFORM(vsr, 2, 11),
-GEN_VXFORM(vpkuhum, 7, 0),
-GEN_VXFORM(vpkuwum, 7, 1),
-GEN_VXFORM_207(vpkudum, 7, 17),
-GEN_VXFORM(vpkuhus, 7, 2),
-GEN_VXFORM(vpkuwus, 7, 3),
-GEN_VXFORM_207(vpkudus, 7, 19),
-GEN_VXFORM(vpkshus, 7, 4),
-GEN_VXFORM(vpkswus, 7, 5),
-GEN_VXFORM_207(vpksdus, 7, 21),
-GEN_VXFORM(vpkshss, 7, 6),
-GEN_VXFORM(vpkswss, 7, 7),
-GEN_VXFORM_207(vpksdss, 7, 23),
-GEN_VXFORM(vpkpx, 7, 12),
-GEN_VXFORM(vsum4ubs, 4, 24),
-GEN_VXFORM(vsum4sbs, 4, 28),
-GEN_VXFORM(vsum4shs, 4, 25),
-GEN_VXFORM(vsum2sws, 4, 26),
-GEN_VXFORM(vsumsws, 4, 30),
-GEN_VXFORM(vaddfp, 5, 0),
-GEN_VXFORM(vsubfp, 5, 1),
-GEN_VXFORM(vmaxfp, 5, 16),
-GEN_VXFORM(vminfp, 5, 17),
-
-#undef GEN_VXRFORM1
-#undef GEN_VXRFORM
-#define GEN_VXRFORM1(opname, name, str, opc2, opc3) \
- GEN_HANDLER2(name, str, 0x4, opc2, opc3, 0x00000000, PPC_ALTIVEC),
-#define GEN_VXRFORM(name, opc2, opc3) \
- GEN_VXRFORM1(name, name, #name, opc2, opc3) \
- GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))
-GEN_VXRFORM(vcmpequb, 3, 0)
-GEN_VXRFORM(vcmpequh, 3, 1)
-GEN_VXRFORM(vcmpequw, 3, 2)
-GEN_VXRFORM(vcmpgtsb, 3, 12)
-GEN_VXRFORM(vcmpgtsh, 3, 13)
-GEN_VXRFORM(vcmpgtsw, 3, 14)
-GEN_VXRFORM(vcmpgtub, 3, 8)
-GEN_VXRFORM(vcmpgtuh, 3, 9)
-GEN_VXRFORM(vcmpgtuw, 3, 10)
-GEN_VXRFORM_DUAL(vcmpeqfp, vcmpequd, 3, 3, PPC_ALTIVEC, PPC_NONE)
-GEN_VXRFORM(vcmpgefp, 3, 7)
-GEN_VXRFORM_DUAL(vcmpgtfp, vcmpgtud, 3, 11, PPC_ALTIVEC, PPC_NONE)
-GEN_VXRFORM_DUAL(vcmpbfp, vcmpgtsd, 3, 15, PPC_ALTIVEC, PPC_NONE)
-
-#undef GEN_VXFORM_SIMM
-#define GEN_VXFORM_SIMM(name, opc2, opc3) \
- GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC)
-GEN_VXFORM_SIMM(vspltisb, 6, 12),
-GEN_VXFORM_SIMM(vspltish, 6, 13),
-GEN_VXFORM_SIMM(vspltisw, 6, 14),
-
-#undef GEN_VXFORM_NOA
-#define GEN_VXFORM_NOA(name, opc2, opc3) \
- GEN_HANDLER(name, 0x04, opc2, opc3, 0x001f0000, PPC_ALTIVEC)
-GEN_VXFORM_NOA(vupkhsb, 7, 8),
-GEN_VXFORM_NOA(vupkhsh, 7, 9),
-GEN_VXFORM_207(vupkhsw, 7, 25),
-GEN_VXFORM_NOA(vupklsb, 7, 10),
-GEN_VXFORM_NOA(vupklsh, 7, 11),
-GEN_VXFORM_207(vupklsw, 7, 27),
-GEN_VXFORM_NOA(vupkhpx, 7, 13),
-GEN_VXFORM_NOA(vupklpx, 7, 15),
-GEN_VXFORM_NOA(vrefp, 5, 4),
-GEN_VXFORM_NOA(vrsqrtefp, 5, 5),
-GEN_VXFORM_NOA(vexptefp, 5, 6),
-GEN_VXFORM_NOA(vlogefp, 5, 7),
-GEN_VXFORM_NOA(vrfim, 5, 11),
-GEN_VXFORM_NOA(vrfin, 5, 8),
-GEN_VXFORM_NOA(vrfip, 5, 10),
-GEN_VXFORM_NOA(vrfiz, 5, 9),
-
-#undef GEN_VXFORM_UIMM
-#define GEN_VXFORM_UIMM(name, opc2, opc3) \
- GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC)
-GEN_VXFORM_UIMM(vspltb, 6, 8),
-GEN_VXFORM_UIMM(vsplth, 6, 9),
-GEN_VXFORM_UIMM(vspltw, 6, 10),
-GEN_VXFORM_UIMM(vcfux, 5, 12),
-GEN_VXFORM_UIMM(vcfsx, 5, 13),
-GEN_VXFORM_UIMM(vctuxs, 5, 14),
-GEN_VXFORM_UIMM(vctsxs, 5, 15),
-
-#undef GEN_VAFORM_PAIRED
-#define GEN_VAFORM_PAIRED(name0, name1, opc2) \
- GEN_HANDLER(name0##_##name1, 0x04, opc2, 0xFF, 0x00000000, PPC_ALTIVEC)
-GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16),
-GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18),
-GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19),
-GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20),
-GEN_VAFORM_PAIRED(vsel, vperm, 21),
-GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23),
-
-GEN_VXFORM_DUAL(vclzb, vpopcntb, 1, 28, PPC_NONE, PPC2_ALTIVEC_207),
-GEN_VXFORM_DUAL(vclzh, vpopcnth, 1, 29, PPC_NONE, PPC2_ALTIVEC_207),
-GEN_VXFORM_DUAL(vclzw, vpopcntw, 1, 30, PPC_NONE, PPC2_ALTIVEC_207),
-GEN_VXFORM_DUAL(vclzd, vpopcntd, 1, 31, PPC_NONE, PPC2_ALTIVEC_207),
-
-GEN_VXFORM_207(vbpermq, 6, 21),
-GEN_VXFORM_207(vgbbd, 6, 20),
-GEN_VXFORM_207(vpmsumb, 4, 16),
-GEN_VXFORM_207(vpmsumh, 4, 17),
-GEN_VXFORM_207(vpmsumw, 4, 18),
-GEN_VXFORM_207(vpmsumd, 4, 19),
-
-GEN_VXFORM_207(vsbox, 4, 23),
-
-GEN_VXFORM_DUAL(vcipher, vcipherlast, 4, 20, PPC_NONE, PPC2_ALTIVEC_207),
-GEN_VXFORM_DUAL(vncipher, vncipherlast, 4, 21, PPC_NONE, PPC2_ALTIVEC_207),
-
-GEN_VXFORM_207(vshasigmaw, 1, 26),
-GEN_VXFORM_207(vshasigmad, 1, 27),
-
-GEN_VXFORM_DUAL(vsldoi, vpermxor, 22, 0xFF, PPC_ALTIVEC, PPC_NONE),
-
-GEN_HANDLER_E(lxsdx, 0x1F, 0x0C, 0x12, 0, PPC_NONE, PPC2_VSX),
-GEN_HANDLER_E(lxsiwax, 0x1F, 0x0C, 0x02, 0, PPC_NONE, PPC2_VSX207),
-GEN_HANDLER_E(lxsiwzx, 0x1F, 0x0C, 0x00, 0, PPC_NONE, PPC2_VSX207),
-GEN_HANDLER_E(lxsspx, 0x1F, 0x0C, 0x10, 0, PPC_NONE, PPC2_VSX207),
-GEN_HANDLER_E(lxvd2x, 0x1F, 0x0C, 0x1A, 0, PPC_NONE, PPC2_VSX),
-GEN_HANDLER_E(lxvdsx, 0x1F, 0x0C, 0x0A, 0, PPC_NONE, PPC2_VSX),
-GEN_HANDLER_E(lxvw4x, 0x1F, 0x0C, 0x18, 0, PPC_NONE, PPC2_VSX),
-
-GEN_HANDLER_E(stxsdx, 0x1F, 0xC, 0x16, 0, PPC_NONE, PPC2_VSX),
-GEN_HANDLER_E(stxsiwx, 0x1F, 0xC, 0x04, 0, PPC_NONE, PPC2_VSX207),
-GEN_HANDLER_E(stxsspx, 0x1F, 0xC, 0x14, 0, PPC_NONE, PPC2_VSX207),
-GEN_HANDLER_E(stxvd2x, 0x1F, 0xC, 0x1E, 0, PPC_NONE, PPC2_VSX),
-GEN_HANDLER_E(stxvw4x, 0x1F, 0xC, 0x1C, 0, PPC_NONE, PPC2_VSX),
-
-GEN_HANDLER_E(mfvsrwz, 0x1F, 0x13, 0x03, 0x0000F800, PPC_NONE, PPC2_VSX207),
-GEN_HANDLER_E(mtvsrwa, 0x1F, 0x13, 0x06, 0x0000F800, PPC_NONE, PPC2_VSX207),
-GEN_HANDLER_E(mtvsrwz, 0x1F, 0x13, 0x07, 0x0000F800, PPC_NONE, PPC2_VSX207),
-#if defined(TARGET_PPC64)
-GEN_HANDLER_E(mfvsrd, 0x1F, 0x13, 0x01, 0x0000F800, PPC_NONE, PPC2_VSX207),
-GEN_HANDLER_E(mtvsrd, 0x1F, 0x13, 0x05, 0x0000F800, PPC_NONE, PPC2_VSX207),
-#endif
-
-#undef GEN_XX2FORM
-#define GEN_XX2FORM(name, opc2, opc3, fl2) \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2)
-
-#undef GEN_XX3FORM
-#define GEN_XX3FORM(name, opc2, opc3, fl2) \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2), \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 2, opc3, 0, PPC_NONE, fl2), \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 3, opc3, 0, PPC_NONE, fl2)
-
-#undef GEN_XX2IFORM
-#define GEN_XX2IFORM(name, opc2, opc3, fl2) \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 1, PPC_NONE, fl2), \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 1, PPC_NONE, fl2), \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 2, opc3, 1, PPC_NONE, fl2), \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 3, opc3, 1, PPC_NONE, fl2)
-
-#undef GEN_XX3_RC_FORM
-#define GEN_XX3_RC_FORM(name, opc2, opc3, fl2) \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x00, opc3 | 0x00, 0, PPC_NONE, fl2), \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x01, opc3 | 0x00, 0, PPC_NONE, fl2), \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x02, opc3 | 0x00, 0, PPC_NONE, fl2), \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x03, opc3 | 0x00, 0, PPC_NONE, fl2), \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x00, opc3 | 0x10, 0, PPC_NONE, fl2), \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x01, opc3 | 0x10, 0, PPC_NONE, fl2), \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x02, opc3 | 0x10, 0, PPC_NONE, fl2), \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x03, opc3 | 0x10, 0, PPC_NONE, fl2)
-
-#undef GEN_XX3FORM_DM
-#define GEN_XX3FORM_DM(name, opc2, opc3) \
-GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\
-GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\
-GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\
-GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\
-GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\
-GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\
-GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\
-GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\
-GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\
-GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\
-GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\
-GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\
-GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x0C, 0, PPC_NONE, PPC2_VSX),\
-GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x0C, 0, PPC_NONE, PPC2_VSX),\
-GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x0C, 0, PPC_NONE, PPC2_VSX),\
-GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x0C, 0, PPC_NONE, PPC2_VSX)
-
-GEN_XX2FORM(xsabsdp, 0x12, 0x15, PPC2_VSX),
-GEN_XX2FORM(xsnabsdp, 0x12, 0x16, PPC2_VSX),
-GEN_XX2FORM(xsnegdp, 0x12, 0x17, PPC2_VSX),
-GEN_XX3FORM(xscpsgndp, 0x00, 0x16, PPC2_VSX),
-
-GEN_XX2FORM(xvabsdp, 0x12, 0x1D, PPC2_VSX),
-GEN_XX2FORM(xvnabsdp, 0x12, 0x1E, PPC2_VSX),
-GEN_XX2FORM(xvnegdp, 0x12, 0x1F, PPC2_VSX),
-GEN_XX3FORM(xvcpsgndp, 0x00, 0x1E, PPC2_VSX),
-GEN_XX2FORM(xvabssp, 0x12, 0x19, PPC2_VSX),
-GEN_XX2FORM(xvnabssp, 0x12, 0x1A, PPC2_VSX),
-GEN_XX2FORM(xvnegsp, 0x12, 0x1B, PPC2_VSX),
-GEN_XX3FORM(xvcpsgnsp, 0x00, 0x1A, PPC2_VSX),
-
-GEN_XX3FORM(xsadddp, 0x00, 0x04, PPC2_VSX),
-GEN_XX3FORM(xssubdp, 0x00, 0x05, PPC2_VSX),
-GEN_XX3FORM(xsmuldp, 0x00, 0x06, PPC2_VSX),
-GEN_XX3FORM(xsdivdp, 0x00, 0x07, PPC2_VSX),
-GEN_XX2FORM(xsredp, 0x14, 0x05, PPC2_VSX),
-GEN_XX2FORM(xssqrtdp, 0x16, 0x04, PPC2_VSX),
-GEN_XX2FORM(xsrsqrtedp, 0x14, 0x04, PPC2_VSX),
-GEN_XX3FORM(xstdivdp, 0x14, 0x07, PPC2_VSX),
-GEN_XX2FORM(xstsqrtdp, 0x14, 0x06, PPC2_VSX),
-GEN_XX3FORM(xsmaddadp, 0x04, 0x04, PPC2_VSX),
-GEN_XX3FORM(xsmaddmdp, 0x04, 0x05, PPC2_VSX),
-GEN_XX3FORM(xsmsubadp, 0x04, 0x06, PPC2_VSX),
-GEN_XX3FORM(xsmsubmdp, 0x04, 0x07, PPC2_VSX),
-GEN_XX3FORM(xsnmaddadp, 0x04, 0x14, PPC2_VSX),
-GEN_XX3FORM(xsnmaddmdp, 0x04, 0x15, PPC2_VSX),
-GEN_XX3FORM(xsnmsubadp, 0x04, 0x16, PPC2_VSX),
-GEN_XX3FORM(xsnmsubmdp, 0x04, 0x17, PPC2_VSX),
-GEN_XX2IFORM(xscmpodp, 0x0C, 0x05, PPC2_VSX),
-GEN_XX2IFORM(xscmpudp, 0x0C, 0x04, PPC2_VSX),
-GEN_XX3FORM(xsmaxdp, 0x00, 0x14, PPC2_VSX),
-GEN_XX3FORM(xsmindp, 0x00, 0x15, PPC2_VSX),
-GEN_XX2FORM(xscvdpsp, 0x12, 0x10, PPC2_VSX),
-GEN_XX2FORM(xscvdpspn, 0x16, 0x10, PPC2_VSX207),
-GEN_XX2FORM(xscvspdp, 0x12, 0x14, PPC2_VSX),
-GEN_XX2FORM(xscvspdpn, 0x16, 0x14, PPC2_VSX207),
-GEN_XX2FORM(xscvdpsxds, 0x10, 0x15, PPC2_VSX),
-GEN_XX2FORM(xscvdpsxws, 0x10, 0x05, PPC2_VSX),
-GEN_XX2FORM(xscvdpuxds, 0x10, 0x14, PPC2_VSX),
-GEN_XX2FORM(xscvdpuxws, 0x10, 0x04, PPC2_VSX),
-GEN_XX2FORM(xscvsxddp, 0x10, 0x17, PPC2_VSX),
-GEN_XX2FORM(xscvuxddp, 0x10, 0x16, PPC2_VSX),
-GEN_XX2FORM(xsrdpi, 0x12, 0x04, PPC2_VSX),
-GEN_XX2FORM(xsrdpic, 0x16, 0x06, PPC2_VSX),
-GEN_XX2FORM(xsrdpim, 0x12, 0x07, PPC2_VSX),
-GEN_XX2FORM(xsrdpip, 0x12, 0x06, PPC2_VSX),
-GEN_XX2FORM(xsrdpiz, 0x12, 0x05, PPC2_VSX),
-
-GEN_XX3FORM(xsaddsp, 0x00, 0x00, PPC2_VSX207),
-GEN_XX3FORM(xssubsp, 0x00, 0x01, PPC2_VSX207),
-GEN_XX3FORM(xsmulsp, 0x00, 0x02, PPC2_VSX207),
-GEN_XX3FORM(xsdivsp, 0x00, 0x03, PPC2_VSX207),
-GEN_XX2FORM(xsresp, 0x14, 0x01, PPC2_VSX207),
-GEN_XX2FORM(xsrsp, 0x12, 0x11, PPC2_VSX207),
-GEN_XX2FORM(xssqrtsp, 0x16, 0x00, PPC2_VSX207),
-GEN_XX2FORM(xsrsqrtesp, 0x14, 0x00, PPC2_VSX207),
-GEN_XX3FORM(xsmaddasp, 0x04, 0x00, PPC2_VSX207),
-GEN_XX3FORM(xsmaddmsp, 0x04, 0x01, PPC2_VSX207),
-GEN_XX3FORM(xsmsubasp, 0x04, 0x02, PPC2_VSX207),
-GEN_XX3FORM(xsmsubmsp, 0x04, 0x03, PPC2_VSX207),
-GEN_XX3FORM(xsnmaddasp, 0x04, 0x10, PPC2_VSX207),
-GEN_XX3FORM(xsnmaddmsp, 0x04, 0x11, PPC2_VSX207),
-GEN_XX3FORM(xsnmsubasp, 0x04, 0x12, PPC2_VSX207),
-GEN_XX3FORM(xsnmsubmsp, 0x04, 0x13, PPC2_VSX207),
-GEN_XX2FORM(xscvsxdsp, 0x10, 0x13, PPC2_VSX207),
-GEN_XX2FORM(xscvuxdsp, 0x10, 0x12, PPC2_VSX207),
-
-GEN_XX3FORM(xvadddp, 0x00, 0x0C, PPC2_VSX),
-GEN_XX3FORM(xvsubdp, 0x00, 0x0D, PPC2_VSX),
-GEN_XX3FORM(xvmuldp, 0x00, 0x0E, PPC2_VSX),
-GEN_XX3FORM(xvdivdp, 0x00, 0x0F, PPC2_VSX),
-GEN_XX2FORM(xvredp, 0x14, 0x0D, PPC2_VSX),
-GEN_XX2FORM(xvsqrtdp, 0x16, 0x0C, PPC2_VSX),
-GEN_XX2FORM(xvrsqrtedp, 0x14, 0x0C, PPC2_VSX),
-GEN_XX3FORM(xvtdivdp, 0x14, 0x0F, PPC2_VSX),
-GEN_XX2FORM(xvtsqrtdp, 0x14, 0x0E, PPC2_VSX),
-GEN_XX3FORM(xvmaddadp, 0x04, 0x0C, PPC2_VSX),
-GEN_XX3FORM(xvmaddmdp, 0x04, 0x0D, PPC2_VSX),
-GEN_XX3FORM(xvmsubadp, 0x04, 0x0E, PPC2_VSX),
-GEN_XX3FORM(xvmsubmdp, 0x04, 0x0F, PPC2_VSX),
-GEN_XX3FORM(xvnmaddadp, 0x04, 0x1C, PPC2_VSX),
-GEN_XX3FORM(xvnmaddmdp, 0x04, 0x1D, PPC2_VSX),
-GEN_XX3FORM(xvnmsubadp, 0x04, 0x1E, PPC2_VSX),
-GEN_XX3FORM(xvnmsubmdp, 0x04, 0x1F, PPC2_VSX),
-GEN_XX3FORM(xvmaxdp, 0x00, 0x1C, PPC2_VSX),
-GEN_XX3FORM(xvmindp, 0x00, 0x1D, PPC2_VSX),
-GEN_XX3_RC_FORM(xvcmpeqdp, 0x0C, 0x0C, PPC2_VSX),
-GEN_XX3_RC_FORM(xvcmpgtdp, 0x0C, 0x0D, PPC2_VSX),
-GEN_XX3_RC_FORM(xvcmpgedp, 0x0C, 0x0E, PPC2_VSX),
-GEN_XX2FORM(xvcvdpsp, 0x12, 0x18, PPC2_VSX),
-GEN_XX2FORM(xvcvdpsxds, 0x10, 0x1D, PPC2_VSX),
-GEN_XX2FORM(xvcvdpsxws, 0x10, 0x0D, PPC2_VSX),
-GEN_XX2FORM(xvcvdpuxds, 0x10, 0x1C, PPC2_VSX),
-GEN_XX2FORM(xvcvdpuxws, 0x10, 0x0C, PPC2_VSX),
-GEN_XX2FORM(xvcvsxddp, 0x10, 0x1F, PPC2_VSX),
-GEN_XX2FORM(xvcvuxddp, 0x10, 0x1E, PPC2_VSX),
-GEN_XX2FORM(xvcvsxwdp, 0x10, 0x0F, PPC2_VSX),
-GEN_XX2FORM(xvcvuxwdp, 0x10, 0x0E, PPC2_VSX),
-GEN_XX2FORM(xvrdpi, 0x12, 0x0C, PPC2_VSX),
-GEN_XX2FORM(xvrdpic, 0x16, 0x0E, PPC2_VSX),
-GEN_XX2FORM(xvrdpim, 0x12, 0x0F, PPC2_VSX),
-GEN_XX2FORM(xvrdpip, 0x12, 0x0E, PPC2_VSX),
-GEN_XX2FORM(xvrdpiz, 0x12, 0x0D, PPC2_VSX),
-
-GEN_XX3FORM(xvaddsp, 0x00, 0x08, PPC2_VSX),
-GEN_XX3FORM(xvsubsp, 0x00, 0x09, PPC2_VSX),
-GEN_XX3FORM(xvmulsp, 0x00, 0x0A, PPC2_VSX),
-GEN_XX3FORM(xvdivsp, 0x00, 0x0B, PPC2_VSX),
-GEN_XX2FORM(xvresp, 0x14, 0x09, PPC2_VSX),
-GEN_XX2FORM(xvsqrtsp, 0x16, 0x08, PPC2_VSX),
-GEN_XX2FORM(xvrsqrtesp, 0x14, 0x08, PPC2_VSX),
-GEN_XX3FORM(xvtdivsp, 0x14, 0x0B, PPC2_VSX),
-GEN_XX2FORM(xvtsqrtsp, 0x14, 0x0A, PPC2_VSX),
-GEN_XX3FORM(xvmaddasp, 0x04, 0x08, PPC2_VSX),
-GEN_XX3FORM(xvmaddmsp, 0x04, 0x09, PPC2_VSX),
-GEN_XX3FORM(xvmsubasp, 0x04, 0x0A, PPC2_VSX),
-GEN_XX3FORM(xvmsubmsp, 0x04, 0x0B, PPC2_VSX),
-GEN_XX3FORM(xvnmaddasp, 0x04, 0x18, PPC2_VSX),
-GEN_XX3FORM(xvnmaddmsp, 0x04, 0x19, PPC2_VSX),
-GEN_XX3FORM(xvnmsubasp, 0x04, 0x1A, PPC2_VSX),
-GEN_XX3FORM(xvnmsubmsp, 0x04, 0x1B, PPC2_VSX),
-GEN_XX3FORM(xvmaxsp, 0x00, 0x18, PPC2_VSX),
-GEN_XX3FORM(xvminsp, 0x00, 0x19, PPC2_VSX),
-GEN_XX3_RC_FORM(xvcmpeqsp, 0x0C, 0x08, PPC2_VSX),
-GEN_XX3_RC_FORM(xvcmpgtsp, 0x0C, 0x09, PPC2_VSX),
-GEN_XX3_RC_FORM(xvcmpgesp, 0x0C, 0x0A, PPC2_VSX),
-GEN_XX2FORM(xvcvspdp, 0x12, 0x1C, PPC2_VSX),
-GEN_XX2FORM(xvcvspsxds, 0x10, 0x19, PPC2_VSX),
-GEN_XX2FORM(xvcvspsxws, 0x10, 0x09, PPC2_VSX),
-GEN_XX2FORM(xvcvspuxds, 0x10, 0x18, PPC2_VSX),
-GEN_XX2FORM(xvcvspuxws, 0x10, 0x08, PPC2_VSX),
-GEN_XX2FORM(xvcvsxdsp, 0x10, 0x1B, PPC2_VSX),
-GEN_XX2FORM(xvcvuxdsp, 0x10, 0x1A, PPC2_VSX),
-GEN_XX2FORM(xvcvsxwsp, 0x10, 0x0B, PPC2_VSX),
-GEN_XX2FORM(xvcvuxwsp, 0x10, 0x0A, PPC2_VSX),
-GEN_XX2FORM(xvrspi, 0x12, 0x08, PPC2_VSX),
-GEN_XX2FORM(xvrspic, 0x16, 0x0A, PPC2_VSX),
-GEN_XX2FORM(xvrspim, 0x12, 0x0B, PPC2_VSX),
-GEN_XX2FORM(xvrspip, 0x12, 0x0A, PPC2_VSX),
-GEN_XX2FORM(xvrspiz, 0x12, 0x09, PPC2_VSX),
-
-#undef VSX_LOGICAL
-#define VSX_LOGICAL(name, opc2, opc3, fl2) \
-GEN_XX3FORM(name, opc2, opc3, fl2)
-
-VSX_LOGICAL(xxland, 0x8, 0x10, PPC2_VSX),
-VSX_LOGICAL(xxlandc, 0x8, 0x11, PPC2_VSX),
-VSX_LOGICAL(xxlor, 0x8, 0x12, PPC2_VSX),
-VSX_LOGICAL(xxlxor, 0x8, 0x13, PPC2_VSX),
-VSX_LOGICAL(xxlnor, 0x8, 0x14, PPC2_VSX),
-VSX_LOGICAL(xxleqv, 0x8, 0x17, PPC2_VSX207),
-VSX_LOGICAL(xxlnand, 0x8, 0x16, PPC2_VSX207),
-VSX_LOGICAL(xxlorc, 0x8, 0x15, PPC2_VSX207),
-GEN_XX3FORM(xxmrghw, 0x08, 0x02, PPC2_VSX),
-GEN_XX3FORM(xxmrglw, 0x08, 0x06, PPC2_VSX),
-GEN_XX2FORM(xxspltw, 0x08, 0x0A, PPC2_VSX),
-GEN_XX3FORM_DM(xxsldwi, 0x08, 0x00),
-
-#define GEN_XXSEL_ROW(opc3) \
-GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x18, opc3, 0, PPC_NONE, PPC2_VSX), \
-GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x19, opc3, 0, PPC_NONE, PPC2_VSX), \
-GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1A, opc3, 0, PPC_NONE, PPC2_VSX), \
-GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1B, opc3, 0, PPC_NONE, PPC2_VSX), \
-GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1C, opc3, 0, PPC_NONE, PPC2_VSX), \
-GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1D, opc3, 0, PPC_NONE, PPC2_VSX), \
-GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1E, opc3, 0, PPC_NONE, PPC2_VSX), \
-GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1F, opc3, 0, PPC_NONE, PPC2_VSX), \
-
-GEN_XXSEL_ROW(0x00)
-GEN_XXSEL_ROW(0x01)
-GEN_XXSEL_ROW(0x02)
-GEN_XXSEL_ROW(0x03)
-GEN_XXSEL_ROW(0x04)
-GEN_XXSEL_ROW(0x05)
-GEN_XXSEL_ROW(0x06)
-GEN_XXSEL_ROW(0x07)
-GEN_XXSEL_ROW(0x08)
-GEN_XXSEL_ROW(0x09)
-GEN_XXSEL_ROW(0x0A)
-GEN_XXSEL_ROW(0x0B)
-GEN_XXSEL_ROW(0x0C)
-GEN_XXSEL_ROW(0x0D)
-GEN_XXSEL_ROW(0x0E)
-GEN_XXSEL_ROW(0x0F)
-GEN_XXSEL_ROW(0x10)
-GEN_XXSEL_ROW(0x11)
-GEN_XXSEL_ROW(0x12)
-GEN_XXSEL_ROW(0x13)
-GEN_XXSEL_ROW(0x14)
-GEN_XXSEL_ROW(0x15)
-GEN_XXSEL_ROW(0x16)
-GEN_XXSEL_ROW(0x17)
-GEN_XXSEL_ROW(0x18)
-GEN_XXSEL_ROW(0x19)
-GEN_XXSEL_ROW(0x1A)
-GEN_XXSEL_ROW(0x1B)
-GEN_XXSEL_ROW(0x1C)
-GEN_XXSEL_ROW(0x1D)
-GEN_XXSEL_ROW(0x1E)
-GEN_XXSEL_ROW(0x1F)
-
-GEN_XX3FORM_DM(xxpermdi, 0x08, 0x01),
-
-#undef GEN_DFP_T_A_B_Rc
-#undef GEN_DFP_BF_A_B
-#undef GEN_DFP_BF_A_DCM
-#undef GEN_DFP_T_B_U32_U32_Rc
-#undef GEN_DFP_T_A_B_I32_Rc
-#undef GEN_DFP_T_B_Rc
-#undef GEN_DFP_T_FPR_I32_Rc
-
-#define _GEN_DFP_LONG(name, op1, op2, mask) \
-GEN_HANDLER_E(name, 0x3B, op1, op2, mask, PPC_NONE, PPC2_DFP)
-
-#define _GEN_DFP_LONGx2(name, op1, op2, mask) \
-GEN_HANDLER_E(name, 0x3B, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \
-GEN_HANDLER_E(name, 0x3B, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP)
-
-#define _GEN_DFP_LONGx4(name, op1, op2, mask) \
-GEN_HANDLER_E(name, 0x3B, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \
-GEN_HANDLER_E(name, 0x3B, op1, 0x08 | op2, mask, PPC_NONE, PPC2_DFP), \
-GEN_HANDLER_E(name, 0x3B, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP), \
-GEN_HANDLER_E(name, 0x3B, op1, 0x18 | op2, mask, PPC_NONE, PPC2_DFP)
-
-#define _GEN_DFP_QUAD(name, op1, op2, mask) \
-GEN_HANDLER_E(name, 0x3F, op1, op2, mask, PPC_NONE, PPC2_DFP)
-
-#define _GEN_DFP_QUADx2(name, op1, op2, mask) \
-GEN_HANDLER_E(name, 0x3F, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \
-GEN_HANDLER_E(name, 0x3F, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP)
-
-#define _GEN_DFP_QUADx4(name, op1, op2, mask) \
-GEN_HANDLER_E(name, 0x3F, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \
-GEN_HANDLER_E(name, 0x3F, op1, 0x08 | op2, mask, PPC_NONE, PPC2_DFP), \
-GEN_HANDLER_E(name, 0x3F, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP), \
-GEN_HANDLER_E(name, 0x3F, op1, 0x18 | op2, mask, PPC_NONE, PPC2_DFP)
-
-#define GEN_DFP_T_A_B_Rc(name, op1, op2) \
-_GEN_DFP_LONG(name, op1, op2, 0x00000000)
-
-#define GEN_DFP_Tp_Ap_Bp_Rc(name, op1, op2) \
-_GEN_DFP_QUAD(name, op1, op2, 0x00210800)
-
-#define GEN_DFP_Tp_A_Bp_Rc(name, op1, op2) \
-_GEN_DFP_QUAD(name, op1, op2, 0x00200800)
-
-#define GEN_DFP_T_B_Rc(name, op1, op2) \
-_GEN_DFP_LONG(name, op1, op2, 0x001F0000)
-
-#define GEN_DFP_Tp_Bp_Rc(name, op1, op2) \
-_GEN_DFP_QUAD(name, op1, op2, 0x003F0800)
-
-#define GEN_DFP_Tp_B_Rc(name, op1, op2) \
-_GEN_DFP_QUAD(name, op1, op2, 0x003F0000)
-
-#define GEN_DFP_T_Bp_Rc(name, op1, op2) \
-_GEN_DFP_QUAD(name, op1, op2, 0x001F0800)
-
-#define GEN_DFP_BF_A_B(name, op1, op2) \
-_GEN_DFP_LONG(name, op1, op2, 0x00000001)
-
-#define GEN_DFP_BF_Ap_Bp(name, op1, op2) \
-_GEN_DFP_QUAD(name, op1, op2, 0x00610801)
-
-#define GEN_DFP_BF_A_Bp(name, op1, op2) \
-_GEN_DFP_QUAD(name, op1, op2, 0x00600801)
-
-#define GEN_DFP_BF_A_DCM(name, op1, op2) \
-_GEN_DFP_LONGx2(name, op1, op2, 0x00600001)
-
-#define GEN_DFP_BF_Ap_DCM(name, op1, op2) \
-_GEN_DFP_QUADx2(name, op1, op2, 0x00610001)
-
-#define GEN_DFP_T_A_B_RMC_Rc(name, op1, op2) \
-_GEN_DFP_LONGx4(name, op1, op2, 0x00000000)
-
-#define GEN_DFP_Tp_Ap_Bp_RMC_Rc(name, op1, op2) \
-_GEN_DFP_QUADx4(name, op1, op2, 0x02010800)
-
-#define GEN_DFP_Tp_A_Bp_RMC_Rc(name, op1, op2) \
-_GEN_DFP_QUADx4(name, op1, op2, 0x02000800)
-
-#define GEN_DFP_TE_T_B_RMC_Rc(name, op1, op2) \
-_GEN_DFP_LONGx4(name, op1, op2, 0x00000000)
-
-#define GEN_DFP_TE_Tp_Bp_RMC_Rc(name, op1, op2) \
-_GEN_DFP_QUADx4(name, op1, op2, 0x00200800)
-
-#define GEN_DFP_R_T_B_RMC_Rc(name, op1, op2) \
-_GEN_DFP_LONGx4(name, op1, op2, 0x001E0000)
-
-#define GEN_DFP_R_Tp_Bp_RMC_Rc(name, op1, op2) \
-_GEN_DFP_QUADx4(name, op1, op2, 0x003E0800)
-
-#define GEN_DFP_SP_T_B_Rc(name, op1, op2) \
-_GEN_DFP_LONG(name, op1, op2, 0x00070000)
-
-#define GEN_DFP_SP_Tp_Bp_Rc(name, op1, op2) \
-_GEN_DFP_QUAD(name, op1, op2, 0x00270800)
-
-#define GEN_DFP_S_T_B_Rc(name, op1, op2) \
-_GEN_DFP_LONG(name, op1, op2, 0x000F0000)
-
-#define GEN_DFP_S_Tp_Bp_Rc(name, op1, op2) \
-_GEN_DFP_QUAD(name, op1, op2, 0x002F0800)
-
-#define GEN_DFP_T_A_SH_Rc(name, op1, op2) \
-_GEN_DFP_LONGx2(name, op1, op2, 0x00000000)
-
-#define GEN_DFP_Tp_Ap_SH_Rc(name, op1, op2) \
-_GEN_DFP_QUADx2(name, op1, op2, 0x00210000)
-
-GEN_DFP_T_A_B_Rc(dadd, 0x02, 0x00),
-GEN_DFP_Tp_Ap_Bp_Rc(daddq, 0x02, 0x00),
-GEN_DFP_T_A_B_Rc(dsub, 0x02, 0x10),
-GEN_DFP_Tp_Ap_Bp_Rc(dsubq, 0x02, 0x10),
-GEN_DFP_T_A_B_Rc(dmul, 0x02, 0x01),
-GEN_DFP_Tp_Ap_Bp_Rc(dmulq, 0x02, 0x01),
-GEN_DFP_T_A_B_Rc(ddiv, 0x02, 0x11),
-GEN_DFP_Tp_Ap_Bp_Rc(ddivq, 0x02, 0x11),
-GEN_DFP_BF_A_B(dcmpu, 0x02, 0x14),
-GEN_DFP_BF_Ap_Bp(dcmpuq, 0x02, 0x14),
-GEN_DFP_BF_A_B(dcmpo, 0x02, 0x04),
-GEN_DFP_BF_Ap_Bp(dcmpoq, 0x02, 0x04),
-GEN_DFP_BF_A_DCM(dtstdc, 0x02, 0x06),
-GEN_DFP_BF_Ap_DCM(dtstdcq, 0x02, 0x06),
-GEN_DFP_BF_A_DCM(dtstdg, 0x02, 0x07),
-GEN_DFP_BF_Ap_DCM(dtstdgq, 0x02, 0x07),
-GEN_DFP_BF_A_B(dtstex, 0x02, 0x05),
-GEN_DFP_BF_Ap_Bp(dtstexq, 0x02, 0x05),
-GEN_DFP_BF_A_B(dtstsf, 0x02, 0x15),
-GEN_DFP_BF_A_Bp(dtstsfq, 0x02, 0x15),
-GEN_DFP_TE_T_B_RMC_Rc(dquai, 0x03, 0x02),
-GEN_DFP_TE_Tp_Bp_RMC_Rc(dquaiq, 0x03, 0x02),
-GEN_DFP_T_A_B_RMC_Rc(dqua, 0x03, 0x00),
-GEN_DFP_Tp_Ap_Bp_RMC_Rc(dquaq, 0x03, 0x00),
-GEN_DFP_T_A_B_RMC_Rc(drrnd, 0x03, 0x01),
-GEN_DFP_Tp_A_Bp_RMC_Rc(drrndq, 0x03, 0x01),
-GEN_DFP_R_T_B_RMC_Rc(drintx, 0x03, 0x03),
-GEN_DFP_R_Tp_Bp_RMC_Rc(drintxq, 0x03, 0x03),
-GEN_DFP_R_T_B_RMC_Rc(drintn, 0x03, 0x07),
-GEN_DFP_R_Tp_Bp_RMC_Rc(drintnq, 0x03, 0x07),
-GEN_DFP_T_B_Rc(dctdp, 0x02, 0x08),
-GEN_DFP_Tp_B_Rc(dctqpq, 0x02, 0x08),
-GEN_DFP_T_B_Rc(drsp, 0x02, 0x18),
-GEN_DFP_Tp_Bp_Rc(drdpq, 0x02, 0x18),
-GEN_DFP_T_B_Rc(dcffix, 0x02, 0x19),
-GEN_DFP_Tp_B_Rc(dcffixq, 0x02, 0x19),
-GEN_DFP_T_B_Rc(dctfix, 0x02, 0x09),
-GEN_DFP_T_Bp_Rc(dctfixq, 0x02, 0x09),
-GEN_DFP_SP_T_B_Rc(ddedpd, 0x02, 0x0a),
-GEN_DFP_SP_Tp_Bp_Rc(ddedpdq, 0x02, 0x0a),
-GEN_DFP_S_T_B_Rc(denbcd, 0x02, 0x1a),
-GEN_DFP_S_Tp_Bp_Rc(denbcdq, 0x02, 0x1a),
-GEN_DFP_T_B_Rc(dxex, 0x02, 0x0b),
-GEN_DFP_T_Bp_Rc(dxexq, 0x02, 0x0b),
-GEN_DFP_T_A_B_Rc(diex, 0x02, 0x1b),
-GEN_DFP_Tp_A_Bp_Rc(diexq, 0x02, 0x1b),
-GEN_DFP_T_A_SH_Rc(dscli, 0x02, 0x02),
-GEN_DFP_Tp_Ap_SH_Rc(dscliq, 0x02, 0x02),
-GEN_DFP_T_A_SH_Rc(dscri, 0x02, 0x03),
-GEN_DFP_Tp_Ap_SH_Rc(dscriq, 0x02, 0x03),
-
-#undef GEN_SPE
-#define GEN_SPE(name0, name1, opc2, opc3, inval0, inval1, type) \
- GEN_OPCODE_DUAL(name0##_##name1, 0x04, opc2, opc3, inval0, inval1, type, PPC_NONE)
-GEN_SPE(evaddw, speundef, 0x00, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
-GEN_SPE(evaddiw, speundef, 0x01, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
-GEN_SPE(evsubfw, speundef, 0x02, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
-GEN_SPE(evsubifw, speundef, 0x03, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
-GEN_SPE(evabs, evneg, 0x04, 0x08, 0x0000F800, 0x0000F800, PPC_SPE),
-GEN_SPE(evextsb, evextsh, 0x05, 0x08, 0x0000F800, 0x0000F800, PPC_SPE),
-GEN_SPE(evrndw, evcntlzw, 0x06, 0x08, 0x0000F800, 0x0000F800, PPC_SPE),
-GEN_SPE(evcntlsw, brinc, 0x07, 0x08, 0x0000F800, 0x00000000, PPC_SPE),
-GEN_SPE(evmra, speundef, 0x02, 0x13, 0x0000F800, 0xFFFFFFFF, PPC_SPE),
-GEN_SPE(speundef, evand, 0x08, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE),
-GEN_SPE(evandc, speundef, 0x09, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
-GEN_SPE(evxor, evor, 0x0B, 0x08, 0x00000000, 0x00000000, PPC_SPE),
-GEN_SPE(evnor, eveqv, 0x0C, 0x08, 0x00000000, 0x00000000, PPC_SPE),
-GEN_SPE(evmwumi, evmwsmi, 0x0C, 0x11, 0x00000000, 0x00000000, PPC_SPE),
-GEN_SPE(evmwumia, evmwsmia, 0x1C, 0x11, 0x00000000, 0x00000000, PPC_SPE),
-GEN_SPE(evmwumiaa, evmwsmiaa, 0x0C, 0x15, 0x00000000, 0x00000000, PPC_SPE),
-GEN_SPE(speundef, evorc, 0x0D, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE),
-GEN_SPE(evnand, speundef, 0x0F, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
-GEN_SPE(evsrwu, evsrws, 0x10, 0x08, 0x00000000, 0x00000000, PPC_SPE),
-GEN_SPE(evsrwiu, evsrwis, 0x11, 0x08, 0x00000000, 0x00000000, PPC_SPE),
-GEN_SPE(evslw, speundef, 0x12, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
-GEN_SPE(evslwi, speundef, 0x13, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
-GEN_SPE(evrlw, evsplati, 0x14, 0x08, 0x00000000, 0x0000F800, PPC_SPE),
-GEN_SPE(evrlwi, evsplatfi, 0x15, 0x08, 0x00000000, 0x0000F800, PPC_SPE),
-GEN_SPE(evmergehi, evmergelo, 0x16, 0x08, 0x00000000, 0x00000000, PPC_SPE),
-GEN_SPE(evmergehilo, evmergelohi, 0x17, 0x08, 0x00000000, 0x00000000, PPC_SPE),
-GEN_SPE(evcmpgtu, evcmpgts, 0x18, 0x08, 0x00600000, 0x00600000, PPC_SPE),
-GEN_SPE(evcmpltu, evcmplts, 0x19, 0x08, 0x00600000, 0x00600000, PPC_SPE),
-GEN_SPE(evcmpeq, speundef, 0x1A, 0x08, 0x00600000, 0xFFFFFFFF, PPC_SPE),
-
-GEN_SPE(evfsadd, evfssub, 0x00, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE),
-GEN_SPE(evfsabs, evfsnabs, 0x02, 0x0A, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE),
-GEN_SPE(evfsneg, speundef, 0x03, 0x0A, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE),
-GEN_SPE(evfsmul, evfsdiv, 0x04, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE),
-GEN_SPE(evfscmpgt, evfscmplt, 0x06, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE),
-GEN_SPE(evfscmpeq, speundef, 0x07, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE),
-GEN_SPE(evfscfui, evfscfsi, 0x08, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
-GEN_SPE(evfscfuf, evfscfsf, 0x09, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
-GEN_SPE(evfsctui, evfsctsi, 0x0A, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
-GEN_SPE(evfsctuf, evfsctsf, 0x0B, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
-GEN_SPE(evfsctuiz, speundef, 0x0C, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE),
-GEN_SPE(evfsctsiz, speundef, 0x0D, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE),
-GEN_SPE(evfststgt, evfststlt, 0x0E, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE),
-GEN_SPE(evfststeq, speundef, 0x0F, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE),
-
-GEN_SPE(efsadd, efssub, 0x00, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE),
-GEN_SPE(efsabs, efsnabs, 0x02, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE),
-GEN_SPE(efsneg, speundef, 0x03, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE),
-GEN_SPE(efsmul, efsdiv, 0x04, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE),
-GEN_SPE(efscmpgt, efscmplt, 0x06, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE),
-GEN_SPE(efscmpeq, efscfd, 0x07, 0x0B, 0x00600000, 0x00180000, PPC_SPE_SINGLE),
-GEN_SPE(efscfui, efscfsi, 0x08, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
-GEN_SPE(efscfuf, efscfsf, 0x09, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
-GEN_SPE(efsctui, efsctsi, 0x0A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
-GEN_SPE(efsctuf, efsctsf, 0x0B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
-GEN_SPE(efsctuiz, speundef, 0x0C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE),
-GEN_SPE(efsctsiz, speundef, 0x0D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE),
-GEN_SPE(efststgt, efststlt, 0x0E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE),
-GEN_SPE(efststeq, speundef, 0x0F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE),
-
-GEN_SPE(efdadd, efdsub, 0x10, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE),
-GEN_SPE(efdcfuid, efdcfsid, 0x11, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
-GEN_SPE(efdabs, efdnabs, 0x12, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_DOUBLE),
-GEN_SPE(efdneg, speundef, 0x13, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_DOUBLE),
-GEN_SPE(efdmul, efddiv, 0x14, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE),
-GEN_SPE(efdctuidz, efdctsidz, 0x15, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
-GEN_SPE(efdcmpgt, efdcmplt, 0x16, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE),
-GEN_SPE(efdcmpeq, efdcfs, 0x17, 0x0B, 0x00600000, 0x00180000, PPC_SPE_DOUBLE),
-GEN_SPE(efdcfui, efdcfsi, 0x18, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
-GEN_SPE(efdcfuf, efdcfsf, 0x19, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
-GEN_SPE(efdctui, efdctsi, 0x1A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
-GEN_SPE(efdctuf, efdctsf, 0x1B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
-GEN_SPE(efdctuiz, speundef, 0x1C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE),
-GEN_SPE(efdctsiz, speundef, 0x1D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE),
-GEN_SPE(efdtstgt, efdtstlt, 0x1E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE),
-GEN_SPE(efdtsteq, speundef, 0x1F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_DOUBLE),
-
-#undef GEN_SPEOP_LDST
-#define GEN_SPEOP_LDST(name, opc2, sh) \
-GEN_HANDLER(name, 0x04, opc2, 0x0C, 0x00000000, PPC_SPE)
-GEN_SPEOP_LDST(evldd, 0x00, 3),
-GEN_SPEOP_LDST(evldw, 0x01, 3),
-GEN_SPEOP_LDST(evldh, 0x02, 3),
-GEN_SPEOP_LDST(evlhhesplat, 0x04, 1),
-GEN_SPEOP_LDST(evlhhousplat, 0x06, 1),
-GEN_SPEOP_LDST(evlhhossplat, 0x07, 1),
-GEN_SPEOP_LDST(evlwhe, 0x08, 2),
-GEN_SPEOP_LDST(evlwhou, 0x0A, 2),
-GEN_SPEOP_LDST(evlwhos, 0x0B, 2),
-GEN_SPEOP_LDST(evlwwsplat, 0x0C, 2),
-GEN_SPEOP_LDST(evlwhsplat, 0x0E, 2),
-
-GEN_SPEOP_LDST(evstdd, 0x10, 3),
-GEN_SPEOP_LDST(evstdw, 0x11, 3),
-GEN_SPEOP_LDST(evstdh, 0x12, 3),
-GEN_SPEOP_LDST(evstwhe, 0x18, 2),
-GEN_SPEOP_LDST(evstwho, 0x1A, 2),
-GEN_SPEOP_LDST(evstwwe, 0x1C, 2),
-GEN_SPEOP_LDST(evstwwo, 0x1E, 2),
-
GEN_HANDLER2_E(tbegin, "tbegin", 0x1F, 0x0E, 0x14, 0x01DFF800, \
PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tend, "tend", 0x1F, 0x0E, 0x15, 0x01FFF800, \
@@ -11324,6 +6751,16 @@ GEN_HANDLER2_E(treclaim, "treclaim", 0x1F, 0x0E, 0x1D, 0x03E0F800, \
PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(trechkpt, "trechkpt", 0x1F, 0x0E, 0x1F, 0x03FFF800, \
PPC_NONE, PPC2_TM),
+
+#include "translate/fp-ops.inc.c"
+
+#include "translate/vmx-ops.inc.c"
+
+#include "translate/vsx-ops.inc.c"
+
+#include "translate/dfp-ops.inc.c"
+
+#include "translate/spe-ops.inc.c"
};
#include "helper_regs.h"
@@ -11569,6 +7006,7 @@ void gen_intermediate_code(CPUPPCState *env, struct TranslationBlock *tb)
ctx.insns_flags = env->insns_flags;
ctx.insns_flags2 = env->insns_flags2;
ctx.access_type = -1;
+ ctx.need_access_type = !(env->mmu_model & POWERPC_MMU_64B);
ctx.le_mode = !!(env->hflags & (1 << MSR_LE));
ctx.default_tcg_memop_mask = ctx.le_mode ? MO_LE : MO_BE;
#if defined(TARGET_PPC64)
@@ -11650,9 +7088,10 @@ void gen_intermediate_code(CPUPPCState *env, struct TranslationBlock *tb)
} else {
ctx.opcode = cpu_ldl_code(env, ctx.nip);
}
- LOG_DISAS("translate opcode %08x (%02x %02x %02x) (%s)\n",
- ctx.opcode, opc1(ctx.opcode), opc2(ctx.opcode),
- opc3(ctx.opcode), ctx.le_mode ? "little" : "big");
+ LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n",
+ ctx.opcode, opc1(ctx.opcode), opc2(ctx.opcode),
+ opc3(ctx.opcode), opc4(ctx.opcode),
+ ctx.le_mode ? "little" : "big");
ctx.nip += 4;
table = env->opcodes;
handler = table[opc1(ctx.opcode)];
@@ -11662,14 +7101,20 @@ void gen_intermediate_code(CPUPPCState *env, struct TranslationBlock *tb)
if (is_indirect_opcode(handler)) {
table = ind_table(handler);
handler = table[opc3(ctx.opcode)];
+ if (is_indirect_opcode(handler)) {
+ table = ind_table(handler);
+ handler = table[opc4(ctx.opcode)];
+ }
}
}
/* Is opcode *REALLY* valid ? */
if (unlikely(handler->handler == &gen_invalid)) {
qemu_log_mask(LOG_GUEST_ERROR, "invalid/unsupported opcode: "
- "%02x - %02x - %02x (%08x) " TARGET_FMT_lx " %d\n",
+ "%02x - %02x - %02x - %02x (%08x) "
+ TARGET_FMT_lx " %d\n",
opc1(ctx.opcode), opc2(ctx.opcode),
- opc3(ctx.opcode), ctx.opcode, ctx.nip - 4, (int)msr_ir);
+ opc3(ctx.opcode), opc4(ctx.opcode),
+ ctx.opcode, ctx.nip - 4, (int)msr_ir);
} else {
uint32_t inval;
@@ -11681,9 +7126,10 @@ void gen_intermediate_code(CPUPPCState *env, struct TranslationBlock *tb)
if (unlikely((ctx.opcode & inval) != 0)) {
qemu_log_mask(LOG_GUEST_ERROR, "invalid bits: %08x for opcode: "
- "%02x - %02x - %02x (%08x) " TARGET_FMT_lx "\n",
- ctx.opcode & inval, opc1(ctx.opcode),
- opc2(ctx.opcode), opc3(ctx.opcode),
+ "%02x - %02x - %02x - %02x (%08x) "
+ TARGET_FMT_lx "\n", ctx.opcode & inval,
+ opc1(ctx.opcode), opc2(ctx.opcode),
+ opc3(ctx.opcode), opc4(ctx.opcode),
ctx.opcode, ctx.nip - 4);
gen_inval_exception(ctxp, POWERPC_EXCP_INVAL_INVAL);
break;
@@ -11699,7 +7145,7 @@ void gen_intermediate_code(CPUPPCState *env, struct TranslationBlock *tb)
ctx.exception != POWERPC_SYSCALL &&
ctx.exception != POWERPC_EXCP_TRAP &&
ctx.exception != POWERPC_EXCP_BRANCH)) {
- gen_exception(ctxp, POWERPC_EXCP_TRACE);
+ gen_exception_nip(ctxp, POWERPC_EXCP_TRACE, ctx.nip);
} else if (unlikely(((ctx.nip & (TARGET_PAGE_SIZE - 1)) == 0) ||
(cs->singlestep_enabled) ||
singlestep ||
@@ -11710,9 +7156,9 @@ void gen_intermediate_code(CPUPPCState *env, struct TranslationBlock *tb)
break;
}
if (tcg_check_temp_count()) {
- fprintf(stderr, "Opcode %02x %02x %02x (%08x) leaked temporaries\n",
- opc1(ctx.opcode), opc2(ctx.opcode), opc3(ctx.opcode),
- ctx.opcode);
+ fprintf(stderr, "Opcode %02x %02x %02x %02x (%08x) leaked "
+ "temporaries\n", opc1(ctx.opcode), opc2(ctx.opcode),
+ opc3(ctx.opcode), opc4(ctx.opcode), ctx.opcode);
exit(1);
}
}
@@ -11738,9 +7184,11 @@ void gen_intermediate_code(CPUPPCState *env, struct TranslationBlock *tb)
int flags;
flags = env->bfd_mach;
flags |= ctx.le_mode << 16;
+ qemu_log_lock();
qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(cs, pc_start, ctx.nip - pc_start, flags);
qemu_log("\n");
+ qemu_log_unlock();
}
#endif
}
diff --git a/target-ppc/translate/dfp-impl.inc.c b/target-ppc/translate/dfp-impl.inc.c
new file mode 100644
index 0000000000..178d3044a7
--- /dev/null
+++ b/target-ppc/translate/dfp-impl.inc.c
@@ -0,0 +1,232 @@
+/*** Decimal Floating Point ***/
+
+static inline TCGv_ptr gen_fprp_ptr(int reg)
+{
+ TCGv_ptr r = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(r, cpu_env, offsetof(CPUPPCState, fpr[reg]));
+ return r;
+}
+
+#define GEN_DFP_T_A_B_Rc(name) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr rd, ra, rb; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_update_nip(ctx, ctx->nip - 4); \
+ rd = gen_fprp_ptr(rD(ctx->opcode)); \
+ ra = gen_fprp_ptr(rA(ctx->opcode)); \
+ rb = gen_fprp_ptr(rB(ctx->opcode)); \
+ gen_helper_##name(cpu_env, rd, ra, rb); \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ tcg_temp_free_ptr(rd); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+}
+
+#define GEN_DFP_BF_A_B(name) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr ra, rb; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_update_nip(ctx, ctx->nip - 4); \
+ ra = gen_fprp_ptr(rA(ctx->opcode)); \
+ rb = gen_fprp_ptr(rB(ctx->opcode)); \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \
+ cpu_env, ra, rb); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+}
+
+#define GEN_DFP_BF_I_B(name) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 uim; \
+ TCGv_ptr rb; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_update_nip(ctx, ctx->nip - 4); \
+ uim = tcg_const_i32(UIMM5(ctx->opcode)); \
+ rb = gen_fprp_ptr(rB(ctx->opcode)); \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \
+ cpu_env, uim, rb); \
+ tcg_temp_free_i32(uim); \
+ tcg_temp_free_ptr(rb); \
+}
+
+#define GEN_DFP_BF_A_DCM(name) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr ra; \
+ TCGv_i32 dcm; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_update_nip(ctx, ctx->nip - 4); \
+ ra = gen_fprp_ptr(rA(ctx->opcode)); \
+ dcm = tcg_const_i32(DCM(ctx->opcode)); \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \
+ cpu_env, ra, dcm); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_i32(dcm); \
+}
+
+#define GEN_DFP_T_B_U32_U32_Rc(name, u32f1, u32f2) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr rt, rb; \
+ TCGv_i32 u32_1, u32_2; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_update_nip(ctx, ctx->nip - 4); \
+ rt = gen_fprp_ptr(rD(ctx->opcode)); \
+ rb = gen_fprp_ptr(rB(ctx->opcode)); \
+ u32_1 = tcg_const_i32(u32f1(ctx->opcode)); \
+ u32_2 = tcg_const_i32(u32f2(ctx->opcode)); \
+ gen_helper_##name(cpu_env, rt, rb, u32_1, u32_2); \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ tcg_temp_free_ptr(rt); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_i32(u32_1); \
+ tcg_temp_free_i32(u32_2); \
+}
+
+#define GEN_DFP_T_A_B_I32_Rc(name, i32fld) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr rt, ra, rb; \
+ TCGv_i32 i32; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_update_nip(ctx, ctx->nip - 4); \
+ rt = gen_fprp_ptr(rD(ctx->opcode)); \
+ ra = gen_fprp_ptr(rA(ctx->opcode)); \
+ rb = gen_fprp_ptr(rB(ctx->opcode)); \
+ i32 = tcg_const_i32(i32fld(ctx->opcode)); \
+ gen_helper_##name(cpu_env, rt, ra, rb, i32); \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ tcg_temp_free_ptr(rt); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_i32(i32); \
+}
+
+#define GEN_DFP_T_B_Rc(name) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr rt, rb; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_update_nip(ctx, ctx->nip - 4); \
+ rt = gen_fprp_ptr(rD(ctx->opcode)); \
+ rb = gen_fprp_ptr(rB(ctx->opcode)); \
+ gen_helper_##name(cpu_env, rt, rb); \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ tcg_temp_free_ptr(rt); \
+ tcg_temp_free_ptr(rb); \
+}
+
+#define GEN_DFP_T_FPR_I32_Rc(name, fprfld, i32fld) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr rt, rs; \
+ TCGv_i32 i32; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_update_nip(ctx, ctx->nip - 4); \
+ rt = gen_fprp_ptr(rD(ctx->opcode)); \
+ rs = gen_fprp_ptr(fprfld(ctx->opcode)); \
+ i32 = tcg_const_i32(i32fld(ctx->opcode)); \
+ gen_helper_##name(cpu_env, rt, rs, i32); \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ tcg_temp_free_ptr(rt); \
+ tcg_temp_free_ptr(rs); \
+ tcg_temp_free_i32(i32); \
+}
+
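+/*
+ * Each invocation below defines a translator gen_<name>() that checks
+ * ctx->fpu_enabled, builds TCGv_ptr operands with gen_fprp_ptr() and calls
+ * the matching gen_helper_<name>().  As a rough sketch of the expansion,
+ * GEN_DFP_T_A_B_Rc(dadd) yields:
+ *
+ *   static void gen_dadd(DisasContext *ctx)
+ *   {
+ *       TCGv_ptr rd, ra, rb;
+ *       ...FPU-enable check and gen_update_nip()...
+ *       rd = gen_fprp_ptr(rD(ctx->opcode));
+ *       ra = gen_fprp_ptr(rA(ctx->opcode));
+ *       rb = gen_fprp_ptr(rB(ctx->opcode));
+ *       gen_helper_dadd(cpu_env, rd, ra, rb);
+ *       ...optional CR1 update and temporary frees...
+ *   }
+ */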
+GEN_DFP_T_A_B_Rc(dadd)
+GEN_DFP_T_A_B_Rc(daddq)
+GEN_DFP_T_A_B_Rc(dsub)
+GEN_DFP_T_A_B_Rc(dsubq)
+GEN_DFP_T_A_B_Rc(dmul)
+GEN_DFP_T_A_B_Rc(dmulq)
+GEN_DFP_T_A_B_Rc(ddiv)
+GEN_DFP_T_A_B_Rc(ddivq)
+GEN_DFP_BF_A_B(dcmpu)
+GEN_DFP_BF_A_B(dcmpuq)
+GEN_DFP_BF_A_B(dcmpo)
+GEN_DFP_BF_A_B(dcmpoq)
+GEN_DFP_BF_A_DCM(dtstdc)
+GEN_DFP_BF_A_DCM(dtstdcq)
+GEN_DFP_BF_A_DCM(dtstdg)
+GEN_DFP_BF_A_DCM(dtstdgq)
+GEN_DFP_BF_A_B(dtstex)
+GEN_DFP_BF_A_B(dtstexq)
+GEN_DFP_BF_A_B(dtstsf)
+GEN_DFP_BF_A_B(dtstsfq)
+GEN_DFP_BF_I_B(dtstsfi)
+GEN_DFP_BF_I_B(dtstsfiq)
+GEN_DFP_T_B_U32_U32_Rc(dquai, SIMM5, RMC)
+GEN_DFP_T_B_U32_U32_Rc(dquaiq, SIMM5, RMC)
+GEN_DFP_T_A_B_I32_Rc(dqua, RMC)
+GEN_DFP_T_A_B_I32_Rc(dquaq, RMC)
+GEN_DFP_T_A_B_I32_Rc(drrnd, RMC)
+GEN_DFP_T_A_B_I32_Rc(drrndq, RMC)
+GEN_DFP_T_B_U32_U32_Rc(drintx, FPW, RMC)
+GEN_DFP_T_B_U32_U32_Rc(drintxq, FPW, RMC)
+GEN_DFP_T_B_U32_U32_Rc(drintn, FPW, RMC)
+GEN_DFP_T_B_U32_U32_Rc(drintnq, FPW, RMC)
+GEN_DFP_T_B_Rc(dctdp)
+GEN_DFP_T_B_Rc(dctqpq)
+GEN_DFP_T_B_Rc(drsp)
+GEN_DFP_T_B_Rc(drdpq)
+GEN_DFP_T_B_Rc(dcffix)
+GEN_DFP_T_B_Rc(dcffixq)
+GEN_DFP_T_B_Rc(dctfix)
+GEN_DFP_T_B_Rc(dctfixq)
+GEN_DFP_T_FPR_I32_Rc(ddedpd, rB, SP)
+GEN_DFP_T_FPR_I32_Rc(ddedpdq, rB, SP)
+GEN_DFP_T_FPR_I32_Rc(denbcd, rB, SP)
+GEN_DFP_T_FPR_I32_Rc(denbcdq, rB, SP)
+GEN_DFP_T_B_Rc(dxex)
+GEN_DFP_T_B_Rc(dxexq)
+GEN_DFP_T_A_B_Rc(diex)
+GEN_DFP_T_A_B_Rc(diexq)
+GEN_DFP_T_FPR_I32_Rc(dscli, rA, DCM)
+GEN_DFP_T_FPR_I32_Rc(dscliq, rA, DCM)
+GEN_DFP_T_FPR_I32_Rc(dscri, rA, DCM)
+GEN_DFP_T_FPR_I32_Rc(dscriq, rA, DCM)
+
+#undef GEN_DFP_T_A_B_Rc
+#undef GEN_DFP_BF_A_B
+#undef GEN_DFP_BF_A_DCM
+#undef GEN_DFP_T_B_U32_U32_Rc
+#undef GEN_DFP_T_A_B_I32_Rc
+#undef GEN_DFP_T_B_Rc
+#undef GEN_DFP_T_FPR_I32_Rc
diff --git a/target-ppc/translate/dfp-ops.inc.c b/target-ppc/translate/dfp-ops.inc.c
new file mode 100644
index 0000000000..6ef38e5712
--- /dev/null
+++ b/target-ppc/translate/dfp-ops.inc.c
@@ -0,0 +1,165 @@
+#define _GEN_DFP_LONG(name, op1, op2, mask) \
+GEN_HANDLER_E(name, 0x3B, op1, op2, mask, PPC_NONE, PPC2_DFP)
+
+#define _GEN_DFP_LONG_300(name, op1, op2, mask) \
+GEN_HANDLER_E(name, 0x3B, op1, op2, mask, PPC_NONE, PPC2_ISA300)
+
+#define _GEN_DFP_LONGx2(name, op1, op2, mask) \
+GEN_HANDLER_E(name, 0x3B, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \
+GEN_HANDLER_E(name, 0x3B, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP)
+
+#define _GEN_DFP_LONGx4(name, op1, op2, mask) \
+GEN_HANDLER_E(name, 0x3B, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \
+GEN_HANDLER_E(name, 0x3B, op1, 0x08 | op2, mask, PPC_NONE, PPC2_DFP), \
+GEN_HANDLER_E(name, 0x3B, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP), \
+GEN_HANDLER_E(name, 0x3B, op1, 0x18 | op2, mask, PPC_NONE, PPC2_DFP)
+
+#define _GEN_DFP_QUAD(name, op1, op2, mask) \
+GEN_HANDLER_E(name, 0x3F, op1, op2, mask, PPC_NONE, PPC2_DFP)
+
+#define _GEN_DFP_QUAD_300(name, op1, op2, mask) \
+GEN_HANDLER_E(name, 0x3F, op1, op2, mask, PPC_NONE, PPC2_ISA300)
+
+#define _GEN_DFP_QUADx2(name, op1, op2, mask) \
+GEN_HANDLER_E(name, 0x3F, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \
+GEN_HANDLER_E(name, 0x3F, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP)
+
+#define _GEN_DFP_QUADx4(name, op1, op2, mask) \
+GEN_HANDLER_E(name, 0x3F, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \
+GEN_HANDLER_E(name, 0x3F, op1, 0x08 | op2, mask, PPC_NONE, PPC2_DFP), \
+GEN_HANDLER_E(name, 0x3F, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP), \
+GEN_HANDLER_E(name, 0x3F, op1, 0x18 | op2, mask, PPC_NONE, PPC2_DFP)
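+
+/*
+ * The x2/x4 variants register the same handler under two or four adjacent
+ * opc3 values.  Presumably this is because the DCM/RMC immediate field of
+ * these instructions overlaps the bits the decoder treats as part of the
+ * extended opcode, so every value of those bits must resolve to the same
+ * translator.
+ */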
+
+#define GEN_DFP_T_A_B_Rc(name, op1, op2) \
+_GEN_DFP_LONG(name, op1, op2, 0x00000000)
+
+#define GEN_DFP_Tp_Ap_Bp_Rc(name, op1, op2) \
+_GEN_DFP_QUAD(name, op1, op2, 0x00210800)
+
+#define GEN_DFP_Tp_A_Bp_Rc(name, op1, op2) \
+_GEN_DFP_QUAD(name, op1, op2, 0x00200800)
+
+#define GEN_DFP_T_B_Rc(name, op1, op2) \
+_GEN_DFP_LONG(name, op1, op2, 0x001F0000)
+
+#define GEN_DFP_Tp_Bp_Rc(name, op1, op2) \
+_GEN_DFP_QUAD(name, op1, op2, 0x003F0800)
+
+#define GEN_DFP_Tp_B_Rc(name, op1, op2) \
+_GEN_DFP_QUAD(name, op1, op2, 0x003F0000)
+
+#define GEN_DFP_T_Bp_Rc(name, op1, op2) \
+_GEN_DFP_QUAD(name, op1, op2, 0x001F0800)
+
+#define GEN_DFP_BF_A_B(name, op1, op2) \
+_GEN_DFP_LONG(name, op1, op2, 0x00000001)
+
+#define GEN_DFP_BF_A_B_300(name, op1, op2) \
+_GEN_DFP_LONG_300(name, op1, op2, 0x00400001)
+
+#define GEN_DFP_BF_Ap_Bp(name, op1, op2) \
+_GEN_DFP_QUAD(name, op1, op2, 0x00610801)
+
+#define GEN_DFP_BF_A_Bp(name, op1, op2) \
+_GEN_DFP_QUAD(name, op1, op2, 0x00600801)
+
+#define GEN_DFP_BF_A_Bp_300(name, op1, op2) \
+_GEN_DFP_QUAD_300(name, op1, op2, 0x00400001)
+
+#define GEN_DFP_BF_A_DCM(name, op1, op2) \
+_GEN_DFP_LONGx2(name, op1, op2, 0x00600001)
+
+#define GEN_DFP_BF_Ap_DCM(name, op1, op2) \
+_GEN_DFP_QUADx2(name, op1, op2, 0x00610001)
+
+#define GEN_DFP_T_A_B_RMC_Rc(name, op1, op2) \
+_GEN_DFP_LONGx4(name, op1, op2, 0x00000000)
+
+#define GEN_DFP_Tp_Ap_Bp_RMC_Rc(name, op1, op2) \
+_GEN_DFP_QUADx4(name, op1, op2, 0x02010800)
+
+#define GEN_DFP_Tp_A_Bp_RMC_Rc(name, op1, op2) \
+_GEN_DFP_QUADx4(name, op1, op2, 0x02000800)
+
+#define GEN_DFP_TE_T_B_RMC_Rc(name, op1, op2) \
+_GEN_DFP_LONGx4(name, op1, op2, 0x00000000)
+
+#define GEN_DFP_TE_Tp_Bp_RMC_Rc(name, op1, op2) \
+_GEN_DFP_QUADx4(name, op1, op2, 0x00200800)
+
+#define GEN_DFP_R_T_B_RMC_Rc(name, op1, op2) \
+_GEN_DFP_LONGx4(name, op1, op2, 0x001E0000)
+
+#define GEN_DFP_R_Tp_Bp_RMC_Rc(name, op1, op2) \
+_GEN_DFP_QUADx4(name, op1, op2, 0x003E0800)
+
+#define GEN_DFP_SP_T_B_Rc(name, op1, op2) \
+_GEN_DFP_LONG(name, op1, op2, 0x00070000)
+
+#define GEN_DFP_SP_Tp_Bp_Rc(name, op1, op2) \
+_GEN_DFP_QUAD(name, op1, op2, 0x00270800)
+
+#define GEN_DFP_S_T_B_Rc(name, op1, op2) \
+_GEN_DFP_LONG(name, op1, op2, 0x000F0000)
+
+#define GEN_DFP_S_Tp_Bp_Rc(name, op1, op2) \
+_GEN_DFP_QUAD(name, op1, op2, 0x002F0800)
+
+#define GEN_DFP_T_A_SH_Rc(name, op1, op2) \
+_GEN_DFP_LONGx2(name, op1, op2, 0x00000000)
+
+#define GEN_DFP_Tp_Ap_SH_Rc(name, op1, op2) \
+_GEN_DFP_QUADx2(name, op1, op2, 0x00210000)
+
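+/*
+ * Table of handler registrations.  As an example of the encoding,
+ * GEN_DFP_T_A_B_Rc(dadd, 0x02, 0x00) registers gen_dadd under primary
+ * opcode 0x3B with opc2/opc3 = 0x02/0x00 and no invalid-bit mask, while the
+ * quad-precision (0x3F) forms use masks that set the low bit of each paired
+ * register field, forcing even (even/odd pair) register numbers.
+ */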
+GEN_DFP_T_A_B_Rc(dadd, 0x02, 0x00),
+GEN_DFP_Tp_Ap_Bp_Rc(daddq, 0x02, 0x00),
+GEN_DFP_T_A_B_Rc(dsub, 0x02, 0x10),
+GEN_DFP_Tp_Ap_Bp_Rc(dsubq, 0x02, 0x10),
+GEN_DFP_T_A_B_Rc(dmul, 0x02, 0x01),
+GEN_DFP_Tp_Ap_Bp_Rc(dmulq, 0x02, 0x01),
+GEN_DFP_T_A_B_Rc(ddiv, 0x02, 0x11),
+GEN_DFP_Tp_Ap_Bp_Rc(ddivq, 0x02, 0x11),
+GEN_DFP_BF_A_B(dcmpu, 0x02, 0x14),
+GEN_DFP_BF_Ap_Bp(dcmpuq, 0x02, 0x14),
+GEN_DFP_BF_A_B(dcmpo, 0x02, 0x04),
+GEN_DFP_BF_Ap_Bp(dcmpoq, 0x02, 0x04),
+GEN_DFP_BF_A_DCM(dtstdc, 0x02, 0x06),
+GEN_DFP_BF_Ap_DCM(dtstdcq, 0x02, 0x06),
+GEN_DFP_BF_A_DCM(dtstdg, 0x02, 0x07),
+GEN_DFP_BF_Ap_DCM(dtstdgq, 0x02, 0x07),
+GEN_DFP_BF_A_B(dtstex, 0x02, 0x05),
+GEN_DFP_BF_Ap_Bp(dtstexq, 0x02, 0x05),
+GEN_DFP_BF_A_B(dtstsf, 0x02, 0x15),
+GEN_DFP_BF_A_Bp(dtstsfq, 0x02, 0x15),
+GEN_DFP_BF_A_B_300(dtstsfi, 0x03, 0x15),
+GEN_DFP_BF_A_Bp_300(dtstsfiq, 0x03, 0x15),
+GEN_DFP_TE_T_B_RMC_Rc(dquai, 0x03, 0x02),
+GEN_DFP_TE_Tp_Bp_RMC_Rc(dquaiq, 0x03, 0x02),
+GEN_DFP_T_A_B_RMC_Rc(dqua, 0x03, 0x00),
+GEN_DFP_Tp_Ap_Bp_RMC_Rc(dquaq, 0x03, 0x00),
+GEN_DFP_T_A_B_RMC_Rc(drrnd, 0x03, 0x01),
+GEN_DFP_Tp_A_Bp_RMC_Rc(drrndq, 0x03, 0x01),
+GEN_DFP_R_T_B_RMC_Rc(drintx, 0x03, 0x03),
+GEN_DFP_R_Tp_Bp_RMC_Rc(drintxq, 0x03, 0x03),
+GEN_DFP_R_T_B_RMC_Rc(drintn, 0x03, 0x07),
+GEN_DFP_R_Tp_Bp_RMC_Rc(drintnq, 0x03, 0x07),
+GEN_DFP_T_B_Rc(dctdp, 0x02, 0x08),
+GEN_DFP_Tp_B_Rc(dctqpq, 0x02, 0x08),
+GEN_DFP_T_B_Rc(drsp, 0x02, 0x18),
+GEN_DFP_Tp_Bp_Rc(drdpq, 0x02, 0x18),
+GEN_DFP_T_B_Rc(dcffix, 0x02, 0x19),
+GEN_DFP_Tp_B_Rc(dcffixq, 0x02, 0x19),
+GEN_DFP_T_B_Rc(dctfix, 0x02, 0x09),
+GEN_DFP_T_Bp_Rc(dctfixq, 0x02, 0x09),
+GEN_DFP_SP_T_B_Rc(ddedpd, 0x02, 0x0a),
+GEN_DFP_SP_Tp_Bp_Rc(ddedpdq, 0x02, 0x0a),
+GEN_DFP_S_T_B_Rc(denbcd, 0x02, 0x1a),
+GEN_DFP_S_Tp_Bp_Rc(denbcdq, 0x02, 0x1a),
+GEN_DFP_T_B_Rc(dxex, 0x02, 0x0b),
+GEN_DFP_T_Bp_Rc(dxexq, 0x02, 0x0b),
+GEN_DFP_T_A_B_Rc(diex, 0x02, 0x1b),
+GEN_DFP_Tp_A_Bp_Rc(diexq, 0x02, 0x1b),
+GEN_DFP_T_A_SH_Rc(dscli, 0x02, 0x02),
+GEN_DFP_Tp_Ap_SH_Rc(dscliq, 0x02, 0x02),
+GEN_DFP_T_A_SH_Rc(dscri, 0x02, 0x03),
+GEN_DFP_Tp_Ap_SH_Rc(dscriq, 0x02, 0x03),
diff --git a/target-ppc/translate/fp-impl.inc.c b/target-ppc/translate/fp-impl.inc.c
new file mode 100644
index 0000000000..872af7b56f
--- /dev/null
+++ b/target-ppc/translate/fp-impl.inc.c
@@ -0,0 +1,1070 @@
+/*
+ * translate-fp.c
+ *
+ * Standard FPU translation
+ */
+
+static inline void gen_reset_fpstatus(void)
+{
+ gen_helper_reset_fpstatus(cpu_env);
+}
+
+static inline void gen_compute_fprf(TCGv_i64 arg)
+{
+ gen_helper_compute_fprf(cpu_env, arg);
+ gen_helper_float_check_status(cpu_env);
+}
+
+#if defined(TARGET_PPC64)
+static void gen_set_cr1_from_fpscr(DisasContext *ctx)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(tmp, cpu_fpscr);
+ tcg_gen_shri_i32(cpu_crf[1], tmp, 28);
+ tcg_temp_free_i32(tmp);
+}
+#else
+static void gen_set_cr1_from_fpscr(DisasContext *ctx)
+{
+ tcg_gen_shri_tl(cpu_crf[1], cpu_fpscr, 28);
+}
+#endif
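+
+/*
+ * CR1 mirrors the FPSCR summary bits FX, FEX, VX and OX, i.e. the most
+ * significant nibble of the 32-bit FPSCR, hence the shift right by 28 in
+ * both variants:
+ *
+ *   CR1 = (FPSCR >> 28) & 0xF
+ */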
+
+/*** Floating-Point arithmetic ***/
+#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \
+static void gen_f##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_reset_fpstatus(); \
+ gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rA(ctx->opcode)], \
+ cpu_fpr[rC(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \
+ if (isfloat) { \
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rD(ctx->opcode)]); \
+ } \
+ if (set_fprf) { \
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
+ } \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+}
+
+#define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
+_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type); \
+_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type);
+
+#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \
+static void gen_f##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_reset_fpstatus(); \
+ gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rA(ctx->opcode)], \
+ cpu_fpr[rB(ctx->opcode)]); \
+ if (isfloat) { \
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rD(ctx->opcode)]); \
+ } \
+ if (set_fprf) { \
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
+ } \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+}
+#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
+_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
+_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
+
+#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \
+static void gen_f##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_reset_fpstatus(); \
+ gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rA(ctx->opcode)], \
+ cpu_fpr[rC(ctx->opcode)]); \
+ if (isfloat) { \
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rD(ctx->opcode)]); \
+ } \
+ if (set_fprf) { \
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
+ } \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+}
+#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
+_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
+_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
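+
+/*
+ * In each of the ACB/AB/AC macro pairs above, the second (name##s)
+ * expansion is the single-precision form: it reuses the double-precision
+ * helper and then rounds the result with gen_helper_frsp().  For example,
+ * GEN_FLOAT_AB(add, ...) defines both gen_fadd and gen_fadds.
+ */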
+
+#define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \
+static void gen_f##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_reset_fpstatus(); \
+ gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rB(ctx->opcode)]); \
+ if (set_fprf) { \
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
+ } \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+}
+
+#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
+static void gen_f##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_reset_fpstatus(); \
+ gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rB(ctx->opcode)]); \
+ if (set_fprf) { \
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
+ } \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+}
+
+/* fadd - fadds */
+GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT);
+/* fdiv - fdivs */
+GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT);
+/* fmul - fmuls */
+GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT);
+
+/* fre */
+GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT);
+
+/* fres */
+GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);
+
+/* frsqrte */
+GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);
+
+/* frsqrtes */
+static void gen_frsqrtes(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_reset_fpstatus();
+ gen_helper_frsqrte(cpu_fpr[rD(ctx->opcode)], cpu_env,
+ cpu_fpr[rB(ctx->opcode)]);
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,
+ cpu_fpr[rD(ctx->opcode)]);
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+}
+
+/* fsel */
+_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL);
+/* fsub - fsubs */
+GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
+/* Optional: */
+
+/* fsqrt */
+static void gen_fsqrt(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_reset_fpstatus();
+ gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env,
+ cpu_fpr[rB(ctx->opcode)]);
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+}
+
+static void gen_fsqrts(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_reset_fpstatus();
+ gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env,
+ cpu_fpr[rB(ctx->opcode)]);
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,
+ cpu_fpr[rD(ctx->opcode)]);
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+}
+
+/*** Floating-Point multiply-and-add ***/
+/* fmadd - fmadds */
+GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
+/* fmsub - fmsubs */
+GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT);
+/* fnmadd - fnmadds */
+GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT);
+/* fnmsub - fnmsubs */
+GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT);
+
+/*** Floating-Point round & convert ***/
+/* fctiw */
+GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
+/* fctiwu */
+GEN_FLOAT_B(ctiwu, 0x0E, 0x04, 0, PPC2_FP_CVT_ISA206);
+/* fctiwz */
+GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
+/* fctiwuz */
+GEN_FLOAT_B(ctiwuz, 0x0F, 0x04, 0, PPC2_FP_CVT_ISA206);
+/* frsp */
+GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
+/* fcfid */
+GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC2_FP_CVT_S64);
+/* fcfids */
+GEN_FLOAT_B(cfids, 0x0E, 0x1A, 0, PPC2_FP_CVT_ISA206);
+/* fcfidu */
+GEN_FLOAT_B(cfidu, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
+/* fcfidus */
+GEN_FLOAT_B(cfidus, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
+/* fctid */
+GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC2_FP_CVT_S64);
+/* fctidu */
+GEN_FLOAT_B(ctidu, 0x0E, 0x1D, 0, PPC2_FP_CVT_ISA206);
+/* fctidz */
+GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC2_FP_CVT_S64);
+/* fctiduz */
+GEN_FLOAT_B(ctiduz, 0x0F, 0x1D, 0, PPC2_FP_CVT_ISA206);
+
+/* frin */
+GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
+/* friz */
+GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
+/* frip */
+GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
+/* frim */
+GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);
+
+static void gen_ftdiv(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_helper_ftdiv(cpu_crf[crfD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
+ cpu_fpr[rB(ctx->opcode)]);
+}
+
+static void gen_ftsqrt(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_helper_ftsqrt(cpu_crf[crfD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
+}
+
+/*** Floating-Point compare ***/
+
+/* fcmpo */
+static void gen_fcmpo(DisasContext *ctx)
+{
+ TCGv_i32 crf;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_reset_fpstatus();
+ crf = tcg_const_i32(crfD(ctx->opcode));
+ gen_helper_fcmpo(cpu_env, cpu_fpr[rA(ctx->opcode)],
+ cpu_fpr[rB(ctx->opcode)], crf);
+ tcg_temp_free_i32(crf);
+ gen_helper_float_check_status(cpu_env);
+}
+
+/* fcmpu */
+static void gen_fcmpu(DisasContext *ctx)
+{
+ TCGv_i32 crf;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_reset_fpstatus();
+ crf = tcg_const_i32(crfD(ctx->opcode));
+ gen_helper_fcmpu(cpu_env, cpu_fpr[rA(ctx->opcode)],
+ cpu_fpr[rB(ctx->opcode)], crf);
+ tcg_temp_free_i32(crf);
+ gen_helper_float_check_status(cpu_env);
+}
+
+/*** Floating-point move ***/
+/* fabs */
+/* XXX: beware that fabs never checks for NaNs nor updates FPSCR */
+static void gen_fabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ tcg_gen_andi_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
+ ~(1ULL << 63));
+ if (unlikely(Rc(ctx->opcode))) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+}
+
+/* fmr - fmr. */
+/* XXX: beware that fmr never checks for NaNs nor updates FPSCR */
+static void gen_fmr(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode))) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+}
+
+/* fnabs */
+/* XXX: beware that fnabs never checks for NaNs nor updates FPSCR */
+static void gen_fnabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ tcg_gen_ori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
+ 1ULL << 63);
+ if (unlikely(Rc(ctx->opcode))) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+}
+
+/* fneg */
+/* XXX: beware that fneg never checks for NaNs nor updates FPSCR */
+static void gen_fneg(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ tcg_gen_xori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
+ 1ULL << 63);
+ if (unlikely(Rc(ctx->opcode))) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+}
+
+/* fcpsgn: PowerPC 2.05 specification */
+/* XXX: beware that fcpsgn never checks for NaNs nor updates FPSCR */
+static void gen_fcpsgn(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
+ cpu_fpr[rB(ctx->opcode)], 0, 63);
+ if (unlikely(Rc(ctx->opcode))) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+}
+
+static void gen_fmrgew(DisasContext *ctx)
+{
+ TCGv_i64 b0;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ b0 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(b0, cpu_fpr[rB(ctx->opcode)], 32);
+ tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
+ b0, 0, 32);
+ tcg_temp_free_i64(b0);
+}
+
+static void gen_fmrgow(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)],
+ cpu_fpr[rB(ctx->opcode)],
+ cpu_fpr[rA(ctx->opcode)],
+ 32, 32);
+}
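+
+/*
+ * In architected (big-endian) word terms, a sketch of the two merges:
+ * fmrgew packs the high words, FRT = FRA[0:31] || FRB[0:31], while fmrgow
+ * packs the low words, FRT = FRA[32:63] || FRB[32:63]; the deposit above
+ * places rA's low 32 bits into the upper half of rB's value to achieve the
+ * latter.
+ */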
+
+/*** Floating-Point status & ctrl register ***/
+
+/* mcrfs */
+static void gen_mcrfs(DisasContext *ctx)
+{
+ TCGv tmp = tcg_temp_new();
+ TCGv_i32 tmask;
+ TCGv_i64 tnew_fpscr = tcg_temp_new_i64();
+ int bfa;
+ int nibble;
+ int shift;
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ bfa = crfS(ctx->opcode);
+ nibble = 7 - bfa;
+ shift = 4 * nibble;
+ tcg_gen_shri_tl(tmp, cpu_fpscr, shift);
+ tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
+ tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
+ tcg_temp_free(tmp);
+ tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr);
+ /* Only the exception bits (including FX) should be cleared if read */
+ tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr, ~((0xF << shift) & FP_EX_CLEAR_BITS));
+ /* FEX and VX need to be updated, so don't set fpscr directly */
+ tmask = tcg_const_i32(1 << nibble);
+ gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
+ tcg_temp_free_i32(tmask);
+ tcg_temp_free_i64(tnew_fpscr);
+}
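+
+/*
+ * Worked example: mcrfs with BFA = 0 selects FPSCR field 0 (FX, FEX, VX,
+ * OX), so nibble = 7 and shift = 28; that nibble is copied into the target
+ * CR field, and the exception bits within it are then cleared through the
+ * masked store_fpscr call.
+ */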
+
+/* mffs */
+static void gen_mffs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_reset_fpstatus();
+ tcg_gen_extu_tl_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpscr);
+ if (unlikely(Rc(ctx->opcode))) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+}
+
+/* mtfsb0 */
+static void gen_mtfsb0(DisasContext *ctx)
+{
+ uint8_t crb;
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ crb = 31 - crbD(ctx->opcode);
+ gen_reset_fpstatus();
+ if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
+ TCGv_i32 t0;
+ t0 = tcg_const_i32(crb);
+ gen_helper_fpscr_clrbit(cpu_env, t0);
+ tcg_temp_free_i32(t0);
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
+ tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
+ }
+}
+
+/* mtfsb1 */
+static void gen_mtfsb1(DisasContext *ctx)
+{
+ uint8_t crb;
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ crb = 31 - crbD(ctx->opcode);
+ gen_reset_fpstatus();
+ /* XXX: we pretend we can only do IEEE floating-point computations */
+ if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
+ TCGv_i32 t0;
+ t0 = tcg_const_i32(crb);
+ gen_helper_fpscr_setbit(cpu_env, t0);
+ tcg_temp_free_i32(t0);
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
+ tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
+ }
+    /* We can raise a deferred exception */
+ gen_helper_float_check_status(cpu_env);
+}
+
+/* mtfsf */
+static void gen_mtfsf(DisasContext *ctx)
+{
+ TCGv_i32 t0;
+ int flm, l, w;
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ flm = FPFLM(ctx->opcode);
+ l = FPL(ctx->opcode);
+ w = FPW(ctx->opcode);
+ if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+ gen_reset_fpstatus();
+ if (l) {
+ t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff);
+ } else {
+ t0 = tcg_const_i32(flm << (w * 8));
+ }
+ gen_helper_store_fpscr(cpu_env, cpu_fpr[rB(ctx->opcode)], t0);
+ tcg_temp_free_i32(t0);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
+ tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
+ }
+    /* We can raise a deferred exception */
+ gen_helper_float_check_status(cpu_env);
+}
+
+/* mtfsfi */
+static void gen_mtfsfi(DisasContext *ctx)
+{
+ int bf, sh, w;
+ TCGv_i64 t0;
+ TCGv_i32 t1;
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ w = FPW(ctx->opcode);
+ bf = FPBF(ctx->opcode);
+ if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+ sh = (8 * w) + 7 - bf;
+ gen_reset_fpstatus();
+ t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
+ t1 = tcg_const_i32(1 << sh);
+ gen_helper_store_fpscr(cpu_env, t0, t1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i32(t1);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
+ tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
+ }
+    /* We can raise a deferred exception */
+ gen_helper_float_check_status(cpu_env);
+}
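+
+/*
+ * Worked example: with W = 0 and BF = 0 the target is FPSCR field 0, so
+ * sh = 7 and the 4-bit immediate is shifted left by 4 * 7 = 28 into the top
+ * nibble; the mask 1 << sh tells store_fpscr which field to update.
+ */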
+
+/*** Floating-point load ***/
+#define GEN_LDF(name, ldop, opc, type) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_LDUF(name, ldop, opc, type) \
+static void glue(gen_, name##u)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ if (unlikely(rA(ctx->opcode) == 0)) { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_LDUXF(name, ldop, opc, type) \
+static void glue(gen_, name##ux)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ if (unlikely(rA(ctx->opcode) == 0)) { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_LDXF(name, ldop, opc2, opc3, type) \
+static void glue(gen_, name##x)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_LDFS(name, ldop, op, type) \
+GEN_LDF(name, ldop, op | 0x20, type); \
+GEN_LDUF(name, ldop, op | 0x21, type); \
+GEN_LDUXF(name, ldop, op | 0x01, type); \
+GEN_LDXF(name, ldop, 0x17, op | 0x00, type)
+
+static inline void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ gen_qemu_ld32u(ctx, t0, arg2);
+ tcg_gen_trunc_tl_i32(t1, t0);
+ tcg_temp_free(t0);
+ gen_helper_float32_to_float64(arg1, cpu_env, t1);
+ tcg_temp_free_i32(t1);
+}
+
+ /* lfd lfdu lfdux lfdx */
+GEN_LDFS(lfd, ld64_i64, 0x12, PPC_FLOAT);
+ /* lfs lfsu lfsux lfsx */
+GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT);
+
+/* lfdp */
+static void gen_lfdp(DisasContext *ctx)
+{
+ TCGv EA;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ EA = tcg_temp_new();
+ gen_addr_imm_index(ctx, EA, 0);
+    /* We only need to swap high and low halves. gen_qemu_ld64_i64 does
+       the necessary 64-bit byteswap already. */
+ if (unlikely(ctx->le_mode)) {
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ } else {
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ }
+ tcg_temp_free(EA);
+}
+
+/* lfdpx */
+static void gen_lfdpx(DisasContext *ctx)
+{
+ TCGv EA;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+    /* We only need to swap high and low halves. gen_qemu_ld64_i64 does
+       the necessary 64-bit byteswap already. */
+ if (unlikely(ctx->le_mode)) {
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ } else {
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ }
+ tcg_temp_free(EA);
+}
+
+/* lfiwax */
+static void gen_lfiwax(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv t0;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ EA = tcg_temp_new();
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ gen_qemu_ld32s(ctx, t0, EA);
+ tcg_gen_ext_tl_i64(cpu_fpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(EA);
+ tcg_temp_free(t0);
+}
+
+/* lfiwzx */
+static void gen_lfiwzx(DisasContext *ctx)
+{
+ TCGv EA;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ gen_qemu_ld32u_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ tcg_temp_free(EA);
+}
+/*** Floating-point store ***/
+#define GEN_STF(name, stop, opc, type) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_STUF(name, stop, opc, type) \
+static void glue(gen_, name##u)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ if (unlikely(rA(ctx->opcode) == 0)) { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_STUXF(name, stop, opc, type) \
+static void glue(gen_, name##ux)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ if (unlikely(rA(ctx->opcode) == 0)) { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_STXF(name, stop, opc2, opc3, type) \
+static void glue(gen_, name##x)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_STFS(name, stop, op, type) \
+GEN_STF(name, stop, op | 0x20, type); \
+GEN_STUF(name, stop, op | 0x21, type); \
+GEN_STUXF(name, stop, op | 0x01, type); \
+GEN_STXF(name, stop, 0x17, op | 0x00, type)
+
+static inline void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv t1 = tcg_temp_new();
+ gen_helper_float64_to_float32(t0, cpu_env, arg1);
+ tcg_gen_extu_i32_tl(t1, t0);
+ tcg_temp_free_i32(t0);
+ gen_qemu_st32(ctx, t1, arg2);
+ tcg_temp_free(t1);
+}
+
+/* stfd stfdu stfdux stfdx */
+GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT);
+/* stfs stfsu stfsux stfsx */
+GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT);
+
+/* stfdp */
+static void gen_stfdp(DisasContext *ctx)
+{
+ TCGv EA;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ EA = tcg_temp_new();
+ gen_addr_imm_index(ctx, EA, 0);
+    /* We only need to swap high and low halves. gen_qemu_st64_i64 does
+       the necessary 64-bit byteswap already. */
+ if (unlikely(ctx->le_mode)) {
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ } else {
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ }
+ tcg_temp_free(EA);
+}
+
+/* stfdpx */
+static void gen_stfdpx(DisasContext *ctx)
+{
+ TCGv EA;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+    /* We only need to swap high and low halves. gen_qemu_st64_i64 does
+       the necessary 64-bit byteswap already. */
+ if (unlikely(ctx->le_mode)) {
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ } else {
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ }
+ tcg_temp_free(EA);
+}
+
+/* Optional: */
+static inline void gen_qemu_st32fiw(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
+{
+ TCGv t0 = tcg_temp_new();
+    tcg_gen_trunc_i64_tl(t0, arg1);
+ gen_qemu_st32(ctx, t0, arg2);
+ tcg_temp_free(t0);
+}
+/* stfiwx */
+GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX);
+
+/* POWER2 specific instructions */
+/* Quad manipulation (load/store two floats at a time) */
+
+/* lfq */
+static void gen_lfq(DisasContext *ctx)
+{
+ int rd = rD(ctx->opcode);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t0, t0, 8);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[(rd + 1) % 32], t0);
+ tcg_temp_free(t0);
+}
+
+/* lfqu */
+static void gen_lfqu(DisasContext *ctx)
+{
+ int ra = rA(ctx->opcode);
+ int rd = rD(ctx->opcode);
+ TCGv t0, t1;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t1, t0, 8);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+    if (ra != 0) {
+        tcg_gen_mov_tl(cpu_gpr[ra], t0);
+    }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+/* lfqux */
+static void gen_lfqux(DisasContext *ctx)
+{
+ int ra = rA(ctx->opcode);
+ int rd = rD(ctx->opcode);
+    TCGv t0, t1;
+    gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rd], t0);
+ t1 = tcg_temp_new();
+ gen_addr_add(ctx, t1, t0, 8);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ tcg_temp_free(t1);
+    if (ra != 0) {
+        tcg_gen_mov_tl(cpu_gpr[ra], t0);
+    }
+ tcg_temp_free(t0);
+}
+
+/* lfqx */
+static void gen_lfqx(DisasContext *ctx)
+{
+ int rd = rD(ctx->opcode);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t0, t0, 8);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[(rd + 1) % 32], t0);
+ tcg_temp_free(t0);
+}
+
+/* stfq */
+static void gen_stfq(DisasContext *ctx)
+{
+ int rd = rD(ctx->opcode);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t0, t0, 8);
+ gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t0);
+ tcg_temp_free(t0);
+}
+
+/* stfqu */
+static void gen_stfqu(DisasContext *ctx)
+{
+ int ra = rA(ctx->opcode);
+ int rd = rD(ctx->opcode);
+ TCGv t0, t1;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
+ t1 = tcg_temp_new();
+ gen_addr_add(ctx, t1, t0, 8);
+ gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ tcg_temp_free(t1);
+    if (ra != 0) {
+        tcg_gen_mov_tl(cpu_gpr[ra], t0);
+    }
+ tcg_temp_free(t0);
+}
+
+/* stfqux */
+static void gen_stfqux(DisasContext *ctx)
+{
+ int ra = rA(ctx->opcode);
+ int rd = rD(ctx->opcode);
+ TCGv t0, t1;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
+ t1 = tcg_temp_new();
+ gen_addr_add(ctx, t1, t0, 8);
+ gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ tcg_temp_free(t1);
+    if (ra != 0) {
+        tcg_gen_mov_tl(cpu_gpr[ra], t0);
+    }
+ tcg_temp_free(t0);
+}
+
+/* stfqx */
+static void gen_stfqx(DisasContext *ctx)
+{
+ int rd = rD(ctx->opcode);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t0, t0, 8);
+ gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t0);
+ tcg_temp_free(t0);
+}
+
+#undef _GEN_FLOAT_ACB
+#undef GEN_FLOAT_ACB
+#undef _GEN_FLOAT_AB
+#undef GEN_FLOAT_AB
+#undef _GEN_FLOAT_AC
+#undef GEN_FLOAT_AC
+#undef GEN_FLOAT_B
+#undef GEN_FLOAT_BS
+
+#undef GEN_LDF
+#undef GEN_LDUF
+#undef GEN_LDUXF
+#undef GEN_LDXF
+#undef GEN_LDFS
+
+#undef GEN_STF
+#undef GEN_STUF
+#undef GEN_STUXF
+#undef GEN_STXF
+#undef GEN_STFS
diff --git a/target-ppc/translate/fp-ops.inc.c b/target-ppc/translate/fp-ops.inc.c
new file mode 100644
index 0000000000..d36ab4ecc3
--- /dev/null
+++ b/target-ppc/translate/fp-ops.inc.c
@@ -0,0 +1,111 @@
+#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \
+GEN_HANDLER(f##name, op1, op2, 0xFF, 0x00000000, type)
+#define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
+_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type), \
+_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type)
+#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \
+GEN_HANDLER(f##name, op1, op2, 0xFF, inval, type)
+#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
+_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type), \
+_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type)
+#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \
+GEN_HANDLER(f##name, op1, op2, 0xFF, inval, type)
+#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
+_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type), \
+_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type)
+#define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \
+GEN_HANDLER(f##name, 0x3F, op2, op3, 0x001F0000, type)
+#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
+GEN_HANDLER(f##name, op1, op2, 0xFF, 0x001F07C0, type)
+
+GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT),
+GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT),
+GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT),
+GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT),
+GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES),
+GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE),
+_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL),
+GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT),
+GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT),
+GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT),
+GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT),
+GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT),
+GEN_HANDLER_E(ftdiv, 0x3F, 0x00, 0x04, 1, PPC_NONE, PPC2_FP_TST_ISA206),
+GEN_HANDLER_E(ftsqrt, 0x3F, 0x00, 0x05, 1, PPC_NONE, PPC2_FP_TST_ISA206),
+GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT),
+GEN_HANDLER_E(fctiwu, 0x3F, 0x0E, 0x04, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT),
+GEN_HANDLER_E(fctiwuz, 0x3F, 0x0F, 0x04, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT),
+GEN_HANDLER_E(fcfid, 0x3F, 0x0E, 0x1A, 0x001F0000, PPC_NONE, PPC2_FP_CVT_S64),
+GEN_HANDLER_E(fcfids, 0x3B, 0x0E, 0x1A, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_HANDLER_E(fcfidu, 0x3F, 0x0E, 0x1E, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_HANDLER_E(fcfidus, 0x3B, 0x0E, 0x1E, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_HANDLER_E(fctid, 0x3F, 0x0E, 0x19, 0x001F0000, PPC_NONE, PPC2_FP_CVT_S64),
+GEN_HANDLER_E(fctidu, 0x3F, 0x0E, 0x1D, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_HANDLER_E(fctidz, 0x3F, 0x0F, 0x19, 0x001F0000, PPC_NONE, PPC2_FP_CVT_S64),
+GEN_HANDLER_E(fctiduz, 0x3F, 0x0F, 0x1D, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT),
+GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT),
+GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT),
+GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT),
+
+#define GEN_LDF(name, ldop, opc, type) \
+GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type),
+#define GEN_LDUF(name, ldop, opc, type) \
+GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type),
+#define GEN_LDUXF(name, ldop, opc, type) \
+GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type),
+#define GEN_LDXF(name, ldop, opc2, opc3, type) \
+GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type),
+#define GEN_LDFS(name, ldop, op, type) \
+GEN_LDF(name, ldop, op | 0x20, type) \
+GEN_LDUF(name, ldop, op | 0x21, type) \
+GEN_LDUXF(name, ldop, op | 0x01, type) \
+GEN_LDXF(name, ldop, 0x17, op | 0x00, type)
+
+GEN_LDFS(lfd, ld64, 0x12, PPC_FLOAT)
+GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT)
+GEN_HANDLER_E(lfiwax, 0x1f, 0x17, 0x1a, 0x00000001, PPC_NONE, PPC2_ISA205),
+GEN_HANDLER_E(lfiwzx, 0x1f, 0x17, 0x1b, 0x1, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_HANDLER_E(lfdp, 0x39, 0xFF, 0xFF, 0x00200003, PPC_NONE, PPC2_ISA205),
+GEN_HANDLER_E(lfdpx, 0x1F, 0x17, 0x18, 0x00200001, PPC_NONE, PPC2_ISA205),
+
+#define GEN_STF(name, stop, opc, type) \
+GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type),
+#define GEN_STUF(name, stop, opc, type) \
+GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type),
+#define GEN_STUXF(name, stop, opc, type) \
+GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type),
+#define GEN_STXF(name, stop, opc2, opc3, type) \
+GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type),
+#define GEN_STFS(name, stop, op, type) \
+GEN_STF(name, stop, op | 0x20, type) \
+GEN_STUF(name, stop, op | 0x21, type) \
+GEN_STUXF(name, stop, op | 0x01, type) \
+GEN_STXF(name, stop, 0x17, op | 0x00, type)
+
+GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT)
+GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT)
+GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX)
+GEN_HANDLER_E(stfdp, 0x3D, 0xFF, 0xFF, 0x00200003, PPC_NONE, PPC2_ISA205),
+GEN_HANDLER_E(stfdpx, 0x1F, 0x17, 0x1C, 0x00200001, PPC_NONE, PPC2_ISA205),
+
+GEN_HANDLER(frsqrtes, 0x3B, 0x1A, 0xFF, 0x001F07C0, PPC_FLOAT_FRSQRTES),
+GEN_HANDLER(fsqrt, 0x3F, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT),
+GEN_HANDLER(fsqrts, 0x3B, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT),
+GEN_HANDLER(fcmpo, 0x3F, 0x00, 0x01, 0x00600001, PPC_FLOAT),
+GEN_HANDLER(fcmpu, 0x3F, 0x00, 0x00, 0x00600001, PPC_FLOAT),
+GEN_HANDLER(fabs, 0x3F, 0x08, 0x08, 0x001F0000, PPC_FLOAT),
+GEN_HANDLER(fmr, 0x3F, 0x08, 0x02, 0x001F0000, PPC_FLOAT),
+GEN_HANDLER(fnabs, 0x3F, 0x08, 0x04, 0x001F0000, PPC_FLOAT),
+GEN_HANDLER(fneg, 0x3F, 0x08, 0x01, 0x001F0000, PPC_FLOAT),
+GEN_HANDLER_E(fcpsgn, 0x3F, 0x08, 0x00, 0x00000000, PPC_NONE, PPC2_ISA205),
+GEN_HANDLER_E(fmrgew, 0x3F, 0x06, 0x1E, 0x00000001, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(fmrgow, 0x3F, 0x06, 0x1A, 0x00000001, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER(mcrfs, 0x3F, 0x00, 0x02, 0x0063F801, PPC_FLOAT),
+GEN_HANDLER(mffs, 0x3F, 0x07, 0x12, 0x001FF800, PPC_FLOAT),
+GEN_HANDLER(mtfsb0, 0x3F, 0x06, 0x02, 0x001FF800, PPC_FLOAT),
+GEN_HANDLER(mtfsb1, 0x3F, 0x06, 0x01, 0x001FF800, PPC_FLOAT),
+GEN_HANDLER(mtfsf, 0x3F, 0x07, 0x16, 0x00000000, PPC_FLOAT),
+GEN_HANDLER(mtfsfi, 0x3F, 0x06, 0x04, 0x006e0800, PPC_FLOAT),
diff --git a/target-ppc/translate/spe-impl.inc.c b/target-ppc/translate/spe-impl.inc.c
new file mode 100644
index 0000000000..8c1c16c63e
--- /dev/null
+++ b/target-ppc/translate/spe-impl.inc.c
@@ -0,0 +1,1229 @@
+/*
+ * translate-spe.c
+ *
+ * Freescale SPE extension translation
+ */
+
+/*** SPE extension ***/
+/* Register moves */
+
+static inline void gen_evmra(DisasContext *ctx)
+{
+    TCGv_i64 tmp;
+
+    if (unlikely(!ctx->spe_enabled)) {
+        gen_exception(ctx, POWERPC_EXCP_SPEU);
+        return;
+    }
+
+    tmp = tcg_temp_new_i64();
+
+ /* tmp := rA_lo + rA_hi << 32 */
+ tcg_gen_concat_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
+
+ /* spe_acc := tmp */
+ tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc));
+ tcg_temp_free_i64(tmp);
+
+ /* rD := rA */
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
+}
+
+static inline void gen_load_gpr64(TCGv_i64 t, int reg)
+{
+ tcg_gen_concat_tl_i64(t, cpu_gpr[reg], cpu_gprh[reg]);
+}
+
+static inline void gen_store_gpr64(int reg, TCGv_i64 t)
+{
+ tcg_gen_extr_i64_tl(cpu_gpr[reg], cpu_gprh[reg], t);
+}
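+
+/*
+ * SPE keeps 64-bit vector values split across the GPR file: cpu_gpr[] holds
+ * the low 32 bits and cpu_gprh[] the high 32 bits.  For example,
+ *
+ *   gen_load_gpr64(t, rA(ctx->opcode));
+ *
+ * reassembles rA as (rA_hi << 32) | rA_lo into a TCGv_i64, and
+ * gen_store_gpr64() splits such a value back out.
+ */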
+
+#define GEN_SPE(name0, name1, opc2, opc3, inval0, inval1, type) \
+static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
+{ \
+    if (Rc(ctx->opcode)) { \
+        gen_##name1(ctx); \
+    } else { \
+        gen_##name0(ctx); \
+    } \
+}
+
+/* Handler for undefined SPE opcodes */
+static inline void gen_speundef(DisasContext *ctx)
+{
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+}
+
+/* SPE logic */
+#define GEN_SPEOP_LOGIC2(name, tcg_op) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ tcg_op(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], \
+ cpu_gpr[rB(ctx->opcode)]); \
+ tcg_op(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], \
+ cpu_gprh[rB(ctx->opcode)]); \
+}
+
+GEN_SPEOP_LOGIC2(evand, tcg_gen_and_tl);
+GEN_SPEOP_LOGIC2(evandc, tcg_gen_andc_tl);
+GEN_SPEOP_LOGIC2(evxor, tcg_gen_xor_tl);
+GEN_SPEOP_LOGIC2(evor, tcg_gen_or_tl);
+GEN_SPEOP_LOGIC2(evnor, tcg_gen_nor_tl);
+GEN_SPEOP_LOGIC2(eveqv, tcg_gen_eqv_tl);
+GEN_SPEOP_LOGIC2(evorc, tcg_gen_orc_tl);
+GEN_SPEOP_LOGIC2(evnand, tcg_gen_nand_tl);
+
+/* SPE logic immediate */
+#define GEN_SPEOP_TCG_LOGIC_IMM2(name, tcg_opi) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i32(); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
+ tcg_opi(t0, t0, rB(ctx->opcode)); \
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gprh[rA(ctx->opcode)]); \
+ tcg_opi(t0, t0, rB(ctx->opcode)); \
+ tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \
+ \
+ tcg_temp_free_i32(t0); \
+}
+GEN_SPEOP_TCG_LOGIC_IMM2(evslwi, tcg_gen_shli_i32);
+GEN_SPEOP_TCG_LOGIC_IMM2(evsrwiu, tcg_gen_shri_i32);
+GEN_SPEOP_TCG_LOGIC_IMM2(evsrwis, tcg_gen_sari_i32);
+GEN_SPEOP_TCG_LOGIC_IMM2(evrlwi, tcg_gen_rotli_i32);
+
+/* SPE arithmetic */
+#define GEN_SPEOP_ARITH1(name, tcg_op) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i32(); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
+ tcg_op(t0, t0); \
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gprh[rA(ctx->opcode)]); \
+ tcg_op(t0, t0); \
+ tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \
+ \
+ tcg_temp_free_i32(t0); \
+}
+
+static inline void gen_op_evabs(TCGv_i32 ret, TCGv_i32 arg1)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+
+ tcg_gen_brcondi_i32(TCG_COND_GE, arg1, 0, l1);
+ tcg_gen_neg_i32(ret, arg1);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_mov_i32(ret, arg1);
+ gen_set_label(l2);
+}
+GEN_SPEOP_ARITH1(evabs, gen_op_evabs);
+GEN_SPEOP_ARITH1(evneg, tcg_gen_neg_i32);
+GEN_SPEOP_ARITH1(evextsb, tcg_gen_ext8s_i32);
+GEN_SPEOP_ARITH1(evextsh, tcg_gen_ext16s_i32);
+static inline void gen_op_evrndw(TCGv_i32 ret, TCGv_i32 arg1)
+{
+ tcg_gen_addi_i32(ret, arg1, 0x8000);
+ tcg_gen_ext16u_i32(ret, ret);
+}
+GEN_SPEOP_ARITH1(evrndw, gen_op_evrndw);
+GEN_SPEOP_ARITH1(evcntlsw, gen_helper_cntlsw32);
+GEN_SPEOP_ARITH1(evcntlzw, gen_helper_cntlzw32);
+
+#define GEN_SPEOP_ARITH2(name, tcg_op) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0, t1; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i32(); \
+ t1 = tcg_temp_new_i32(); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
+ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
+ tcg_op(t0, t0, t1); \
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gprh[rA(ctx->opcode)]); \
+ tcg_gen_trunc_tl_i32(t1, cpu_gprh[rB(ctx->opcode)]); \
+ tcg_op(t0, t0, t1); \
+ tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \
+ \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_i32(t1); \
+}
+
+static inline void gen_op_evsrwu(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGv_i32 t0 = tcg_temp_local_new_i32();
+
+ /* No error here: 6 bits are used */
+ tcg_gen_andi_i32(t0, arg2, 0x3F);
+ tcg_gen_brcondi_i32(TCG_COND_GE, t0, 32, l1);
+ tcg_gen_shr_i32(ret, arg1, t0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_i32(ret, 0);
+ gen_set_label(l2);
+ tcg_temp_free_i32(t0);
+}
+GEN_SPEOP_ARITH2(evsrwu, gen_op_evsrwu);
+static inline void gen_op_evsrws(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGv_i32 t0 = tcg_temp_local_new_i32();
+
+ /* No error here: 6 bits are used */
+ tcg_gen_andi_i32(t0, arg2, 0x3F);
+ tcg_gen_brcondi_i32(TCG_COND_GE, t0, 32, l1);
+ tcg_gen_sar_i32(ret, arg1, t0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_i32(ret, 0);
+ gen_set_label(l2);
+ tcg_temp_free_i32(t0);
+}
+GEN_SPEOP_ARITH2(evsrws, gen_op_evsrws);
+static inline void gen_op_evslw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGv_i32 t0 = tcg_temp_local_new_i32();
+
+ /* No error here: 6 bits are used */
+ tcg_gen_andi_i32(t0, arg2, 0x3F);
+ tcg_gen_brcondi_i32(TCG_COND_GE, t0, 32, l1);
+ tcg_gen_shl_i32(ret, arg1, t0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_i32(ret, 0);
+ gen_set_label(l2);
+ tcg_temp_free_i32(t0);
+}
+GEN_SPEOP_ARITH2(evslw, gen_op_evslw);
+static inline void gen_op_evrlw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_andi_i32(t0, arg2, 0x1F);
+ tcg_gen_rotl_i32(ret, arg1, t0);
+ tcg_temp_free_i32(t0);
+}
+GEN_SPEOP_ARITH2(evrlw, gen_op_evrlw);
+static inline void gen_evmergehi(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
+}
+GEN_SPEOP_ARITH2(evaddw, tcg_gen_add_i32);
+static inline void gen_op_evsubf(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ tcg_gen_sub_i32(ret, arg2, arg1);
+}
+GEN_SPEOP_ARITH2(evsubfw, gen_op_evsubf);
+
+/* SPE arithmetic immediate */
+#define GEN_SPEOP_ARITH_IMM2(name, tcg_op) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i32(); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]); \
+ tcg_op(t0, t0, rA(ctx->opcode)); \
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gprh[rB(ctx->opcode)]); \
+ tcg_op(t0, t0, rA(ctx->opcode)); \
+ tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \
+ \
+ tcg_temp_free_i32(t0); \
+}
+GEN_SPEOP_ARITH_IMM2(evaddiw, tcg_gen_addi_i32);
+GEN_SPEOP_ARITH_IMM2(evsubifw, tcg_gen_subi_i32);
+
+/* SPE comparison */
+#define GEN_SPEOP_COMP(name, tcg_cond) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ TCGLabel *l1 = gen_new_label(); \
+ TCGLabel *l2 = gen_new_label(); \
+ TCGLabel *l3 = gen_new_label(); \
+ TCGLabel *l4 = gen_new_label(); \
+ \
+ tcg_gen_ext32s_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); \
+ tcg_gen_ext32s_tl(cpu_gpr[rB(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \
+ tcg_gen_ext32s_tl(cpu_gprh[rA(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]); \
+ tcg_gen_ext32s_tl(cpu_gprh[rB(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]); \
+ \
+ tcg_gen_brcond_tl(tcg_cond, cpu_gpr[rA(ctx->opcode)], \
+ cpu_gpr[rB(ctx->opcode)], l1); \
+ tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], 0); \
+ tcg_gen_br(l2); \
+ gen_set_label(l1); \
+ tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], \
+ CRF_CL | CRF_CH_OR_CL | CRF_CH_AND_CL); \
+ gen_set_label(l2); \
+ tcg_gen_brcond_tl(tcg_cond, cpu_gprh[rA(ctx->opcode)], \
+ cpu_gprh[rB(ctx->opcode)], l3); \
+ tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], \
+ ~(CRF_CH | CRF_CH_AND_CL)); \
+ tcg_gen_br(l4); \
+ gen_set_label(l3); \
+ tcg_gen_ori_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], \
+ CRF_CH | CRF_CH_OR_CL); \
+ gen_set_label(l4); \
+}
+GEN_SPEOP_COMP(evcmpgtu, TCG_COND_GTU);
+GEN_SPEOP_COMP(evcmpgts, TCG_COND_GT);
+GEN_SPEOP_COMP(evcmpltu, TCG_COND_LTU);
+GEN_SPEOP_COMP(evcmplts, TCG_COND_LT);
+GEN_SPEOP_COMP(evcmpeq, TCG_COND_EQ);
+
+/* SPE misc */
+static inline void gen_brinc(DisasContext *ctx)
+{
+ /* Note: brinc is usable even if SPE is disabled */
+ gen_helper_brinc(cpu_gpr[rD(ctx->opcode)],
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+}
+static inline void gen_evmergelo(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+}
+static inline void gen_evmergehilo(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
+}
+static inline void gen_evmergelohi(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ if (rD(ctx->opcode) == rA(ctx->opcode)) {
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_mov_tl(tmp, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], tmp);
+ tcg_temp_free(tmp);
+ } else {
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+static inline void gen_evsplati(DisasContext *ctx)
+{
+ uint64_t imm = ((int32_t)(rA(ctx->opcode) << 27)) >> 27;
+
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], imm);
+ tcg_gen_movi_tl(cpu_gprh[rD(ctx->opcode)], imm);
+}
+static inline void gen_evsplatfi(DisasContext *ctx)
+{
+ uint64_t imm = rA(ctx->opcode) << 27;
+
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], imm);
+ tcg_gen_movi_tl(cpu_gprh[rD(ctx->opcode)], imm);
+}
+
+static inline void gen_evsel(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGLabel *l3 = gen_new_label();
+ TCGLabel *l4 = gen_new_label();
+ TCGv_i32 t0 = tcg_temp_local_new_i32();
+
+ tcg_gen_andi_i32(t0, cpu_crf[ctx->opcode & 0x07], 1 << 3);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
+ gen_set_label(l2);
+ tcg_gen_andi_i32(t0, cpu_crf[ctx->opcode & 0x07], 1 << 2);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l3);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_br(l4);
+ gen_set_label(l3);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ gen_set_label(l4);
+ tcg_temp_free_i32(t0);
+}
+
+static void gen_evsel0(DisasContext *ctx)
+{
+ gen_evsel(ctx);
+}
+
+static void gen_evsel1(DisasContext *ctx)
+{
+ gen_evsel(ctx);
+}
+
+static void gen_evsel2(DisasContext *ctx)
+{
+ gen_evsel(ctx);
+}
+
+static void gen_evsel3(DisasContext *ctx)
+{
+ gen_evsel(ctx);
+}
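+/*
+ * evsel0..evsel3 are thin aliases: the opcode table needs one handler
+ * per value of the low opcode bits, while gen_evsel() itself extracts
+ * the CR field directly from ctx->opcode.
+ */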
+
+/* Multiply */
+
+static inline void gen_evmwumi(DisasContext *ctx)
+{
+ TCGv_i64 t0, t1;
+
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+
+ /* t0 := rA; t1 := rB */
+ tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_ext32u_i64(t0, t0);
+ tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_ext32u_i64(t1, t1);
+
+ tcg_gen_mul_i64(t0, t0, t1); /* t0 := rA * rB */
+
+ gen_store_gpr64(rD(ctx->opcode), t0); /* rD := t0 */
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+static inline void gen_evmwumia(DisasContext *ctx)
+{
+ TCGv_i64 tmp;
+
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+
+ gen_evmwumi(ctx); /* rD := rA * rB */
+
+ tmp = tcg_temp_new_i64();
+
+ /* acc := rD */
+ gen_load_gpr64(tmp, rD(ctx->opcode));
+ tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc));
+ tcg_temp_free_i64(tmp);
+}
+
+static inline void gen_evmwumiaa(DisasContext *ctx)
+{
+ TCGv_i64 acc;
+ TCGv_i64 tmp;
+
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+
+ gen_evmwumi(ctx); /* rD := rA * rB */
+
+ acc = tcg_temp_new_i64();
+ tmp = tcg_temp_new_i64();
+
+ /* tmp := rD */
+ gen_load_gpr64(tmp, rD(ctx->opcode));
+
+ /* Load acc */
+ tcg_gen_ld_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
+
+ /* acc := tmp + acc */
+ tcg_gen_add_i64(acc, acc, tmp);
+
+ /* Store acc */
+ tcg_gen_st_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
+
+ /* rD := acc */
+ gen_store_gpr64(rD(ctx->opcode), acc);
+
+ tcg_temp_free_i64(acc);
+ tcg_temp_free_i64(tmp);
+}
+
+static inline void gen_evmwsmi(DisasContext *ctx)
+{
+ TCGv_i64 t0, t1;
+
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+
+ /* t0 := rA; t1 := rB */
+ tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_ext32s_i64(t0, t0);
+ tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_ext32s_i64(t1, t1);
+
+ tcg_gen_mul_i64(t0, t0, t1); /* t0 := rA * rB */
+
+ gen_store_gpr64(rD(ctx->opcode), t0); /* rD := t0 */
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+static inline void gen_evmwsmia(DisasContext *ctx)
+{
+ TCGv_i64 tmp;
+
+ gen_evmwsmi(ctx); /* rD := rA * rB */
+
+ tmp = tcg_temp_new_i64();
+
+ /* acc := rD */
+ gen_load_gpr64(tmp, rD(ctx->opcode));
+ tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc));
+
+ tcg_temp_free_i64(tmp);
+}
+
+static inline void gen_evmwsmiaa(DisasContext *ctx)
+{
+    TCGv_i64 acc;
+    TCGv_i64 tmp;
+
+ gen_evmwsmi(ctx); /* rD := rA * rB */
+
+ acc = tcg_temp_new_i64();
+ tmp = tcg_temp_new_i64();
+
+ /* tmp := rD */
+ gen_load_gpr64(tmp, rD(ctx->opcode));
+
+ /* Load acc */
+ tcg_gen_ld_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
+
+ /* acc := tmp + acc */
+ tcg_gen_add_i64(acc, acc, tmp);
+
+ /* Store acc */
+ tcg_gen_st_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
+
+ /* rD := acc */
+ gen_store_gpr64(rD(ctx->opcode), acc);
+
+ tcg_temp_free_i64(acc);
+ tcg_temp_free_i64(tmp);
+}
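+/*
+ * Naming sketch for the multiply forms above: the "a" variants also copy
+ * the 64-bit product into the SPE accumulator (spe_acc); the "aa"
+ * variants add the product to the accumulator and write the sum back to
+ * both the accumulator and rD.
+ */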
+
+GEN_SPE(evaddw, speundef, 0x00, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); ////
+GEN_SPE(evaddiw, speundef, 0x01, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE);
+GEN_SPE(evsubfw, speundef, 0x02, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); ////
+GEN_SPE(evsubifw, speundef, 0x03, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE);
+GEN_SPE(evabs, evneg, 0x04, 0x08, 0x0000F800, 0x0000F800, PPC_SPE); ////
+GEN_SPE(evextsb, evextsh, 0x05, 0x08, 0x0000F800, 0x0000F800, PPC_SPE); ////
+GEN_SPE(evrndw, evcntlzw, 0x06, 0x08, 0x0000F800, 0x0000F800, PPC_SPE); ////
+GEN_SPE(evcntlsw, brinc, 0x07, 0x08, 0x0000F800, 0x00000000, PPC_SPE); //
+GEN_SPE(evmra, speundef, 0x02, 0x13, 0x0000F800, 0xFFFFFFFF, PPC_SPE);
+GEN_SPE(speundef, evand, 0x08, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE); ////
+GEN_SPE(evandc, speundef, 0x09, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); ////
+GEN_SPE(evxor, evor, 0x0B, 0x08, 0x00000000, 0x00000000, PPC_SPE); ////
+GEN_SPE(evnor, eveqv, 0x0C, 0x08, 0x00000000, 0x00000000, PPC_SPE); ////
+GEN_SPE(evmwumi, evmwsmi, 0x0C, 0x11, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(evmwumia, evmwsmia, 0x1C, 0x11, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(evmwumiaa, evmwsmiaa, 0x0C, 0x15, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evorc, 0x0D, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE); ////
+GEN_SPE(evnand, speundef, 0x0F, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); ////
+GEN_SPE(evsrwu, evsrws, 0x10, 0x08, 0x00000000, 0x00000000, PPC_SPE); ////
+GEN_SPE(evsrwiu, evsrwis, 0x11, 0x08, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(evslw, speundef, 0x12, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); ////
+GEN_SPE(evslwi, speundef, 0x13, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE);
+GEN_SPE(evrlw, evsplati, 0x14, 0x08, 0x00000000, 0x0000F800, PPC_SPE); //
+GEN_SPE(evrlwi, evsplatfi, 0x15, 0x08, 0x00000000, 0x0000F800, PPC_SPE);
+GEN_SPE(evmergehi, evmergelo, 0x16, 0x08, 0x00000000, 0x00000000, PPC_SPE); ////
+GEN_SPE(evmergehilo, evmergelohi, 0x17, 0x08, 0x00000000, 0x00000000, PPC_SPE); ////
+GEN_SPE(evcmpgtu, evcmpgts, 0x18, 0x08, 0x00600000, 0x00600000, PPC_SPE); ////
+GEN_SPE(evcmpltu, evcmplts, 0x19, 0x08, 0x00600000, 0x00600000, PPC_SPE); ////
+GEN_SPE(evcmpeq, speundef, 0x1A, 0x08, 0x00600000, 0xFFFFFFFF, PPC_SPE); ////
+
+/* SPE loads and stores */
+static inline void gen_addr_spe_imm_index(DisasContext *ctx, TCGv EA, int sh)
+{
+ target_ulong uimm = rB(ctx->opcode);
+
+ if (rA(ctx->opcode) == 0) {
+ tcg_gen_movi_tl(EA, uimm << sh);
+ } else {
+ tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], uimm << sh);
+ if (NARROW_MODE(ctx)) {
+ tcg_gen_ext32u_tl(EA, EA);
+ }
+ }
+}
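+/*
+ * Note: the rB field is reused here as a 5-bit unsigned offset scaled by
+ * the access size (sh = log2(size) in bytes), so e.g. evldd can reach
+ * 0..31 * 8 bytes from rA (or from address 0 when rA is r0).
+ */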
+
+static inline void gen_op_evldd(DisasContext *ctx, TCGv addr)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ gen_qemu_ld64_i64(ctx, t0, addr);
+ gen_store_gpr64(rD(ctx->opcode), t0);
+ tcg_temp_free_i64(t0);
+}
+
+static inline void gen_op_evldw(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_ld32u(ctx, cpu_gprh[rD(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 4);
+ gen_qemu_ld32u(ctx, cpu_gpr[rD(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evldh(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_or_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+    tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlhhesplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(t0, t0, 16);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlhhousplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlhhossplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16s(ctx, t0, addr);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlwhe(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlwhou(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_ld16u(ctx, cpu_gprh[rD(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, cpu_gpr[rD(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evlwhos(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_ld16s(ctx, cpu_gprh[rD(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16s(ctx, cpu_gpr[rD(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evlwwsplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld32u(ctx, t0, addr);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlwhsplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
+ tcg_gen_or_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evstdd(DisasContext *ctx, TCGv addr)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ gen_load_gpr64(t0, rS(ctx->opcode));
+ gen_qemu_st64_i64(ctx, t0, addr);
+ tcg_temp_free_i64(t0);
+}
+
+static inline void gen_op_evstdw(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_st32(ctx, cpu_gprh[rS(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 4);
+ gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evstdh(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gprh[rS(ctx->opcode)], 16);
+ gen_qemu_st16(ctx, t0, addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_st16(ctx, cpu_gprh[rS(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16);
+ gen_qemu_st16(ctx, t0, addr);
+ tcg_temp_free(t0);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_st16(ctx, cpu_gpr[rS(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evstwhe(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gprh[rS(ctx->opcode)], 16);
+ gen_qemu_st16(ctx, t0, addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16);
+ gen_qemu_st16(ctx, t0, addr);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evstwho(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_st16(ctx, cpu_gprh[rS(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_st16(ctx, cpu_gpr[rS(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evstwwe(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_st32(ctx, cpu_gprh[rS(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evstwwo(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], addr);
+}
+
+#define GEN_SPEOP_LDST(name, opc2, sh) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv t0; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ t0 = tcg_temp_new(); \
+ if (Rc(ctx->opcode)) { \
+ gen_addr_spe_imm_index(ctx, t0, sh); \
+ } else { \
+ gen_addr_reg_index(ctx, t0); \
+ } \
+ gen_op_##name(ctx, t0); \
+ tcg_temp_free(t0); \
+}
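+/*
+ * Rough dispatch rule for the SPE load/store forms below: when
+ * Rc(opcode) is set the immediate-index form (gen_addr_spe_imm_index)
+ * is used, otherwise the register+register form (gen_addr_reg_index).
+ */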
+
+GEN_SPEOP_LDST(evldd, 0x00, 3);
+GEN_SPEOP_LDST(evldw, 0x01, 3);
+GEN_SPEOP_LDST(evldh, 0x02, 3);
+GEN_SPEOP_LDST(evlhhesplat, 0x04, 1);
+GEN_SPEOP_LDST(evlhhousplat, 0x06, 1);
+GEN_SPEOP_LDST(evlhhossplat, 0x07, 1);
+GEN_SPEOP_LDST(evlwhe, 0x08, 2);
+GEN_SPEOP_LDST(evlwhou, 0x0A, 2);
+GEN_SPEOP_LDST(evlwhos, 0x0B, 2);
+GEN_SPEOP_LDST(evlwwsplat, 0x0C, 2);
+GEN_SPEOP_LDST(evlwhsplat, 0x0E, 2);
+
+GEN_SPEOP_LDST(evstdd, 0x10, 3);
+GEN_SPEOP_LDST(evstdw, 0x11, 3);
+GEN_SPEOP_LDST(evstdh, 0x12, 3);
+GEN_SPEOP_LDST(evstwhe, 0x18, 2);
+GEN_SPEOP_LDST(evstwho, 0x1A, 2);
+GEN_SPEOP_LDST(evstwwe, 0x1C, 2);
+GEN_SPEOP_LDST(evstwwo, 0x1E, 2);
+
+/* Multiply and add - TODO */
+#if 0
+GEN_SPE(speundef, evmhessf, 0x01, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);//
+GEN_SPE(speundef, evmhossf, 0x03, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmheumi, evmhesmi, 0x04, 0x10, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhesmf, 0x05, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhoumi, evmhosmi, 0x06, 0x10, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhosmf, 0x07, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhessfa, 0x11, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhossfa, 0x13, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmheumia, evmhesmia, 0x14, 0x10, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhesmfa, 0x15, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhoumia, evmhosmia, 0x16, 0x10, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhosmfa, 0x17, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+
+GEN_SPE(speundef, evmwhssf, 0x03, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmwlumi, speundef, 0x04, 0x11, 0x00000000, 0xFFFFFFFF, PPC_SPE);
+GEN_SPE(evmwhumi, evmwhsmi, 0x06, 0x11, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwhsmf, 0x07, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwssf, 0x09, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwsmf, 0x0D, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwhssfa, 0x13, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmwlumia, speundef, 0x14, 0x11, 0x00000000, 0xFFFFFFFF, PPC_SPE);
+GEN_SPE(evmwhumia, evmwhsmia, 0x16, 0x11, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwhsmfa, 0x17, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwssfa, 0x19, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwsmfa, 0x1D, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+
+GEN_SPE(evadduiaaw, evaddsiaaw, 0x00, 0x13, 0x0000F800, 0x0000F800, PPC_SPE);
+GEN_SPE(evsubfusiaaw, evsubfssiaaw, 0x01, 0x13, 0x0000F800, 0x0000F800, PPC_SPE);
+GEN_SPE(evaddumiaaw, evaddsmiaaw, 0x04, 0x13, 0x0000F800, 0x0000F800, PPC_SPE);
+GEN_SPE(evsubfumiaaw, evsubfsmiaaw, 0x05, 0x13, 0x0000F800, 0x0000F800, PPC_SPE);
+GEN_SPE(evdivws, evdivwu, 0x06, 0x13, 0x00000000, 0x00000000, PPC_SPE);
+
+GEN_SPE(evmheusiaaw, evmhessiaaw, 0x00, 0x14, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhessfaaw, 0x01, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhousiaaw, evmhossiaaw, 0x02, 0x14, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhossfaaw, 0x03, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmheumiaaw, evmhesmiaaw, 0x04, 0x14, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhesmfaaw, 0x05, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhoumiaaw, evmhosmiaaw, 0x06, 0x14, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhosmfaaw, 0x07, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhegumiaa, evmhegsmiaa, 0x14, 0x14, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhegsmfaa, 0x15, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhogumiaa, evmhogsmiaa, 0x16, 0x14, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhogsmfaa, 0x17, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+
+GEN_SPE(evmwlusiaaw, evmwlssiaaw, 0x00, 0x15, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(evmwlumiaaw, evmwlsmiaaw, 0x04, 0x15, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwssfaa, 0x09, 0x15, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwsmfaa, 0x0D, 0x15, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+
+GEN_SPE(evmheusianw, evmhessianw, 0x00, 0x16, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhessfanw, 0x01, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhousianw, evmhossianw, 0x02, 0x16, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhossfanw, 0x03, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmheumianw, evmhesmianw, 0x04, 0x16, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhesmfanw, 0x05, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhoumianw, evmhosmianw, 0x06, 0x16, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhosmfanw, 0x07, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhegumian, evmhegsmian, 0x14, 0x16, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhegsmfan, 0x15, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhigumian, evmhigsmian, 0x16, 0x16, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhogsmfan, 0x17, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+
+GEN_SPE(evmwlusianw, evmwlssianw, 0x00, 0x17, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(evmwlumianw, evmwlsmianw, 0x04, 0x17, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwssfan, 0x09, 0x17, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmwumian, evmwsmian, 0x0C, 0x17, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwsmfan, 0x0D, 0x17, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+#endif
+
+/*** SPE floating-point extension ***/
+#define GEN_SPEFPUOP_CONV_32_32(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0 = tcg_temp_new_i32(); \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]); \
+ gen_helper_##name(t0, cpu_env, t0); \
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
+ tcg_temp_free_i32(t0); \
+}
+#define GEN_SPEFPUOP_CONV_32_64(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0 = tcg_temp_new_i64(); \
+ TCGv_i32 t1 = tcg_temp_new_i32(); \
+ gen_load_gpr64(t0, rB(ctx->opcode)); \
+ gen_helper_##name(t1, cpu_env, t0); \
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i32(t1); \
+}
+#define GEN_SPEFPUOP_CONV_64_32(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0 = tcg_temp_new_i64(); \
+ TCGv_i32 t1 = tcg_temp_new_i32(); \
+ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
+ gen_helper_##name(t0, cpu_env, t1); \
+ gen_store_gpr64(rD(ctx->opcode), t0); \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i32(t1); \
+}
+#define GEN_SPEFPUOP_CONV_64_64(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0 = tcg_temp_new_i64(); \
+ gen_load_gpr64(t0, rB(ctx->opcode)); \
+ gen_helper_##name(t0, cpu_env, t0); \
+ gen_store_gpr64(rD(ctx->opcode), t0); \
+ tcg_temp_free_i64(t0); \
+}
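+/*
+ * Naming convention for the conversion macros above (a sketch):
+ * GEN_SPEFPUOP_CONV_<result width>_<source width>. 64-bit operands go
+ * through gen_load_gpr64/gen_store_gpr64, 32-bit ones through the low
+ * half of the GPR.
+ */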
+#define GEN_SPEFPUOP_ARITH2_32_32(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0, t1; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i32(); \
+ t1 = tcg_temp_new_i32(); \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
+ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
+ gen_helper_##name(t0, cpu_env, t0, t1); \
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
+ \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_i32(t1); \
+}
+#define GEN_SPEFPUOP_ARITH2_64_64(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0, t1; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i64(); \
+ gen_load_gpr64(t0, rA(ctx->opcode)); \
+ gen_load_gpr64(t1, rB(ctx->opcode)); \
+ gen_helper_##name(t0, cpu_env, t0, t1); \
+ gen_store_gpr64(rD(ctx->opcode), t0); \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i64(t1); \
+}
+#define GEN_SPEFPUOP_COMP_32(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0, t1; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i32(); \
+ t1 = tcg_temp_new_i32(); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
+ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, t0, t1); \
+ \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_i32(t1); \
+}
+#define GEN_SPEFPUOP_COMP_64(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0, t1; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i64(); \
+ gen_load_gpr64(t0, rA(ctx->opcode)); \
+ gen_load_gpr64(t1, rB(ctx->opcode)); \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, t0, t1); \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i64(t1); \
+}
+
+/* Single precision floating-point vector operations */
+/* Arithmetic */
+GEN_SPEFPUOP_ARITH2_64_64(evfsadd);
+GEN_SPEFPUOP_ARITH2_64_64(evfssub);
+GEN_SPEFPUOP_ARITH2_64_64(evfsmul);
+GEN_SPEFPUOP_ARITH2_64_64(evfsdiv);
+static inline void gen_evfsabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ ~0x80000000);
+ tcg_gen_andi_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
+ ~0x80000000);
+}
+static inline void gen_evfsnabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ 0x80000000);
+ tcg_gen_ori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
+ 0x80000000);
+}
+static inline void gen_evfsneg(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ 0x80000000);
+ tcg_gen_xori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
+ 0x80000000);
+}
+
+/* Conversion */
+GEN_SPEFPUOP_CONV_64_64(evfscfui);
+GEN_SPEFPUOP_CONV_64_64(evfscfsi);
+GEN_SPEFPUOP_CONV_64_64(evfscfuf);
+GEN_SPEFPUOP_CONV_64_64(evfscfsf);
+GEN_SPEFPUOP_CONV_64_64(evfsctui);
+GEN_SPEFPUOP_CONV_64_64(evfsctsi);
+GEN_SPEFPUOP_CONV_64_64(evfsctuf);
+GEN_SPEFPUOP_CONV_64_64(evfsctsf);
+GEN_SPEFPUOP_CONV_64_64(evfsctuiz);
+GEN_SPEFPUOP_CONV_64_64(evfsctsiz);
+
+/* Comparison */
+GEN_SPEFPUOP_COMP_64(evfscmpgt);
+GEN_SPEFPUOP_COMP_64(evfscmplt);
+GEN_SPEFPUOP_COMP_64(evfscmpeq);
+GEN_SPEFPUOP_COMP_64(evfststgt);
+GEN_SPEFPUOP_COMP_64(evfststlt);
+GEN_SPEFPUOP_COMP_64(evfststeq);
+
+/* Opcode definitions */
+GEN_SPE(evfsadd, evfssub, 0x00, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE); //
+GEN_SPE(evfsabs, evfsnabs, 0x02, 0x0A, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE); //
+GEN_SPE(evfsneg, speundef, 0x03, 0x0A, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+GEN_SPE(evfsmul, evfsdiv, 0x04, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE); //
+GEN_SPE(evfscmpgt, evfscmplt, 0x06, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE); //
+GEN_SPE(evfscmpeq, speundef, 0x07, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+GEN_SPE(evfscfui, evfscfsi, 0x08, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(evfscfuf, evfscfsf, 0x09, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(evfsctui, evfsctsi, 0x0A, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(evfsctuf, evfsctsf, 0x0B, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(evfsctuiz, speundef, 0x0C, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+GEN_SPE(evfsctsiz, speundef, 0x0D, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+GEN_SPE(evfststgt, evfststlt, 0x0E, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE); //
+GEN_SPE(evfststeq, speundef, 0x0F, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+
+/* Single precision floating-point operations */
+/* Arithmetic */
+GEN_SPEFPUOP_ARITH2_32_32(efsadd);
+GEN_SPEFPUOP_ARITH2_32_32(efssub);
+GEN_SPEFPUOP_ARITH2_32_32(efsmul);
+GEN_SPEFPUOP_ARITH2_32_32(efsdiv);
+static inline void gen_efsabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], (target_long)~0x80000000LL);
+}
+static inline void gen_efsnabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000);
+}
+static inline void gen_efsneg(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000);
+}
+
+/* Conversion */
+GEN_SPEFPUOP_CONV_32_32(efscfui);
+GEN_SPEFPUOP_CONV_32_32(efscfsi);
+GEN_SPEFPUOP_CONV_32_32(efscfuf);
+GEN_SPEFPUOP_CONV_32_32(efscfsf);
+GEN_SPEFPUOP_CONV_32_32(efsctui);
+GEN_SPEFPUOP_CONV_32_32(efsctsi);
+GEN_SPEFPUOP_CONV_32_32(efsctuf);
+GEN_SPEFPUOP_CONV_32_32(efsctsf);
+GEN_SPEFPUOP_CONV_32_32(efsctuiz);
+GEN_SPEFPUOP_CONV_32_32(efsctsiz);
+GEN_SPEFPUOP_CONV_32_64(efscfd);
+
+/* Comparison */
+GEN_SPEFPUOP_COMP_32(efscmpgt);
+GEN_SPEFPUOP_COMP_32(efscmplt);
+GEN_SPEFPUOP_COMP_32(efscmpeq);
+GEN_SPEFPUOP_COMP_32(efststgt);
+GEN_SPEFPUOP_COMP_32(efststlt);
+GEN_SPEFPUOP_COMP_32(efststeq);
+
+/* Opcode definitions */
+GEN_SPE(efsadd, efssub, 0x00, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE); //
+GEN_SPE(efsabs, efsnabs, 0x02, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE); //
+GEN_SPE(efsneg, speundef, 0x03, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+GEN_SPE(efsmul, efsdiv, 0x04, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE); //
+GEN_SPE(efscmpgt, efscmplt, 0x06, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE); //
+GEN_SPE(efscmpeq, efscfd, 0x07, 0x0B, 0x00600000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(efscfui, efscfsi, 0x08, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(efscfuf, efscfsf, 0x09, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(efsctui, efsctsi, 0x0A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(efsctuf, efsctsf, 0x0B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(efsctuiz, speundef, 0x0C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+GEN_SPE(efsctsiz, speundef, 0x0D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+GEN_SPE(efststgt, efststlt, 0x0E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE); //
+GEN_SPE(efststeq, speundef, 0x0F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+
+/* Double precision floating-point operations */
+/* Arithmetic */
+GEN_SPEFPUOP_ARITH2_64_64(efdadd);
+GEN_SPEFPUOP_ARITH2_64_64(efdsub);
+GEN_SPEFPUOP_ARITH2_64_64(efdmul);
+GEN_SPEFPUOP_ARITH2_64_64(efddiv);
+static inline void gen_efdabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_andi_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
+ ~0x80000000);
+}
+static inline void gen_efdnabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_ori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
+ 0x80000000);
+}
+static inline void gen_efdneg(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_xori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
+ 0x80000000);
+}
+
+/* Conversion */
+GEN_SPEFPUOP_CONV_64_32(efdcfui);
+GEN_SPEFPUOP_CONV_64_32(efdcfsi);
+GEN_SPEFPUOP_CONV_64_32(efdcfuf);
+GEN_SPEFPUOP_CONV_64_32(efdcfsf);
+GEN_SPEFPUOP_CONV_32_64(efdctui);
+GEN_SPEFPUOP_CONV_32_64(efdctsi);
+GEN_SPEFPUOP_CONV_32_64(efdctuf);
+GEN_SPEFPUOP_CONV_32_64(efdctsf);
+GEN_SPEFPUOP_CONV_32_64(efdctuiz);
+GEN_SPEFPUOP_CONV_32_64(efdctsiz);
+GEN_SPEFPUOP_CONV_64_32(efdcfs);
+GEN_SPEFPUOP_CONV_64_64(efdcfuid);
+GEN_SPEFPUOP_CONV_64_64(efdcfsid);
+GEN_SPEFPUOP_CONV_64_64(efdctuidz);
+GEN_SPEFPUOP_CONV_64_64(efdctsidz);
+
+/* Comparison */
+GEN_SPEFPUOP_COMP_64(efdcmpgt);
+GEN_SPEFPUOP_COMP_64(efdcmplt);
+GEN_SPEFPUOP_COMP_64(efdcmpeq);
+GEN_SPEFPUOP_COMP_64(efdtstgt);
+GEN_SPEFPUOP_COMP_64(efdtstlt);
+GEN_SPEFPUOP_COMP_64(efdtsteq);
+
+/* Opcode definitions */
+GEN_SPE(efdadd, efdsub, 0x10, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdcfuid, efdcfsid, 0x11, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdabs, efdnabs, 0x12, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_DOUBLE); //
+GEN_SPE(efdneg, speundef, 0x13, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_DOUBLE); //
+GEN_SPE(efdmul, efddiv, 0x14, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdctuidz, efdctsidz, 0x15, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdcmpgt, efdcmplt, 0x16, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdcmpeq, efdcfs, 0x17, 0x0B, 0x00600000, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdcfui, efdcfsi, 0x18, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdcfuf, efdcfsf, 0x19, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdctui, efdctsi, 0x1A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdctuf, efdctsf, 0x1B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdctuiz, speundef, 0x1C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE); //
+GEN_SPE(efdctsiz, speundef, 0x1D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE); //
+GEN_SPE(efdtstgt, efdtstlt, 0x1E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdtsteq, speundef, 0x1F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_DOUBLE); //
+
+#undef GEN_SPE
+#undef GEN_SPEOP_LDST
diff --git a/target-ppc/translate/spe-ops.inc.c b/target-ppc/translate/spe-ops.inc.c
new file mode 100644
index 0000000000..7efe8b8746
--- /dev/null
+++ b/target-ppc/translate/spe-ops.inc.c
@@ -0,0 +1,105 @@
+GEN_HANDLER2(evsel0, "evsel", 0x04, 0x1c, 0x09, 0x00000000, PPC_SPE),
+GEN_HANDLER2(evsel1, "evsel", 0x04, 0x1d, 0x09, 0x00000000, PPC_SPE),
+GEN_HANDLER2(evsel2, "evsel", 0x04, 0x1e, 0x09, 0x00000000, PPC_SPE),
+GEN_HANDLER2(evsel3, "evsel", 0x04, 0x1f, 0x09, 0x00000000, PPC_SPE),
+
+#define GEN_SPE(name0, name1, opc2, opc3, inval0, inval1, type) \
+ GEN_OPCODE_DUAL(name0##_##name1, 0x04, opc2, opc3, inval0, inval1, type, PPC_NONE)
+GEN_SPE(evaddw, speundef, 0x00, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evaddiw, speundef, 0x01, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evsubfw, speundef, 0x02, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evsubifw, speundef, 0x03, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evabs, evneg, 0x04, 0x08, 0x0000F800, 0x0000F800, PPC_SPE),
+GEN_SPE(evextsb, evextsh, 0x05, 0x08, 0x0000F800, 0x0000F800, PPC_SPE),
+GEN_SPE(evrndw, evcntlzw, 0x06, 0x08, 0x0000F800, 0x0000F800, PPC_SPE),
+GEN_SPE(evcntlsw, brinc, 0x07, 0x08, 0x0000F800, 0x00000000, PPC_SPE),
+GEN_SPE(evmra, speundef, 0x02, 0x13, 0x0000F800, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(speundef, evand, 0x08, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE),
+GEN_SPE(evandc, speundef, 0x09, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evxor, evor, 0x0B, 0x08, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evnor, eveqv, 0x0C, 0x08, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evmwumi, evmwsmi, 0x0C, 0x11, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evmwumia, evmwsmia, 0x1C, 0x11, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evmwumiaa, evmwsmiaa, 0x0C, 0x15, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(speundef, evorc, 0x0D, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE),
+GEN_SPE(evnand, speundef, 0x0F, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evsrwu, evsrws, 0x10, 0x08, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evsrwiu, evsrwis, 0x11, 0x08, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evslw, speundef, 0x12, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evslwi, speundef, 0x13, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evrlw, evsplati, 0x14, 0x08, 0x00000000, 0x0000F800, PPC_SPE),
+GEN_SPE(evrlwi, evsplatfi, 0x15, 0x08, 0x00000000, 0x0000F800, PPC_SPE),
+GEN_SPE(evmergehi, evmergelo, 0x16, 0x08, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evmergehilo, evmergelohi, 0x17, 0x08, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evcmpgtu, evcmpgts, 0x18, 0x08, 0x00600000, 0x00600000, PPC_SPE),
+GEN_SPE(evcmpltu, evcmplts, 0x19, 0x08, 0x00600000, 0x00600000, PPC_SPE),
+GEN_SPE(evcmpeq, speundef, 0x1A, 0x08, 0x00600000, 0xFFFFFFFF, PPC_SPE),
+
+GEN_SPE(evfsadd, evfssub, 0x00, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE),
+GEN_SPE(evfsabs, evfsnabs, 0x02, 0x0A, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE),
+GEN_SPE(evfsneg, speundef, 0x03, 0x0A, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE),
+GEN_SPE(evfsmul, evfsdiv, 0x04, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE),
+GEN_SPE(evfscmpgt, evfscmplt, 0x06, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE),
+GEN_SPE(evfscmpeq, speundef, 0x07, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE),
+GEN_SPE(evfscfui, evfscfsi, 0x08, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(evfscfuf, evfscfsf, 0x09, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(evfsctui, evfsctsi, 0x0A, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(evfsctuf, evfsctsf, 0x0B, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(evfsctuiz, speundef, 0x0C, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE),
+GEN_SPE(evfsctsiz, speundef, 0x0D, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE),
+GEN_SPE(evfststgt, evfststlt, 0x0E, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE),
+GEN_SPE(evfststeq, speundef, 0x0F, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE),
+
+GEN_SPE(efsadd, efssub, 0x00, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE),
+GEN_SPE(efsabs, efsnabs, 0x02, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE),
+GEN_SPE(efsneg, speundef, 0x03, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE),
+GEN_SPE(efsmul, efsdiv, 0x04, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE),
+GEN_SPE(efscmpgt, efscmplt, 0x06, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE),
+GEN_SPE(efscmpeq, efscfd, 0x07, 0x0B, 0x00600000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(efscfui, efscfsi, 0x08, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(efscfuf, efscfsf, 0x09, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(efsctui, efsctsi, 0x0A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(efsctuf, efsctsf, 0x0B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(efsctuiz, speundef, 0x0C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE),
+GEN_SPE(efsctsiz, speundef, 0x0D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE),
+GEN_SPE(efststgt, efststlt, 0x0E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE),
+GEN_SPE(efststeq, speundef, 0x0F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE),
+
+GEN_SPE(efdadd, efdsub, 0x10, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE),
+GEN_SPE(efdcfuid, efdcfsid, 0x11, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdabs, efdnabs, 0x12, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_DOUBLE),
+GEN_SPE(efdneg, speundef, 0x13, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_DOUBLE),
+GEN_SPE(efdmul, efddiv, 0x14, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE),
+GEN_SPE(efdctuidz, efdctsidz, 0x15, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdcmpgt, efdcmplt, 0x16, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE),
+GEN_SPE(efdcmpeq, efdcfs, 0x17, 0x0B, 0x00600000, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdcfui, efdcfsi, 0x18, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdcfuf, efdcfsf, 0x19, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdctui, efdctsi, 0x1A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdctuf, efdctsf, 0x1B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdctuiz, speundef, 0x1C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE),
+GEN_SPE(efdctsiz, speundef, 0x1D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE),
+GEN_SPE(efdtstgt, efdtstlt, 0x1E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE),
+GEN_SPE(efdtsteq, speundef, 0x1F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_DOUBLE),
+
+#define GEN_SPEOP_LDST(name, opc2, sh) \
+GEN_HANDLER(name, 0x04, opc2, 0x0C, 0x00000000, PPC_SPE)
+GEN_SPEOP_LDST(evldd, 0x00, 3),
+GEN_SPEOP_LDST(evldw, 0x01, 3),
+GEN_SPEOP_LDST(evldh, 0x02, 3),
+GEN_SPEOP_LDST(evlhhesplat, 0x04, 1),
+GEN_SPEOP_LDST(evlhhousplat, 0x06, 1),
+GEN_SPEOP_LDST(evlhhossplat, 0x07, 1),
+GEN_SPEOP_LDST(evlwhe, 0x08, 2),
+GEN_SPEOP_LDST(evlwhou, 0x0A, 2),
+GEN_SPEOP_LDST(evlwhos, 0x0B, 2),
+GEN_SPEOP_LDST(evlwwsplat, 0x0C, 2),
+GEN_SPEOP_LDST(evlwhsplat, 0x0E, 2),
+
+GEN_SPEOP_LDST(evstdd, 0x10, 3),
+GEN_SPEOP_LDST(evstdw, 0x11, 3),
+GEN_SPEOP_LDST(evstdh, 0x12, 3),
+GEN_SPEOP_LDST(evstwhe, 0x18, 2),
+GEN_SPEOP_LDST(evstwho, 0x1A, 2),
+GEN_SPEOP_LDST(evstwwe, 0x1C, 2),
+GEN_SPEOP_LDST(evstwwo, 0x1E, 2),
diff --git a/target-ppc/translate/vmx-impl.inc.c b/target-ppc/translate/vmx-impl.inc.c
new file mode 100644
index 0000000000..7143eb3a39
--- /dev/null
+++ b/target-ppc/translate/vmx-impl.inc.c
@@ -0,0 +1,1113 @@
+/*
+ * translate/vmx-impl.c
+ *
+ * Altivec/VMX translation
+ */
+
+/*** Altivec vector extension ***/
+/* Altivec registers moves */
+
+static inline TCGv_ptr gen_avr_ptr(int reg)
+{
+ TCGv_ptr r = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(r, cpu_env, offsetof(CPUPPCState, avr[reg]));
+ return r;
+}
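+/*
+ * Note: most Altivec helpers take pointers into CPUPPCState rather than
+ * values, since TCG has no native 128-bit type; gen_avr_ptr() builds the
+ * pointer to the 128-bit avr[reg] slot.
+ */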
+
+#define GEN_VR_LDX(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ tcg_gen_andi_tl(EA, EA, ~0xf); \
+ /* We only need to swap high and low halves. gen_qemu_ld64_i64 does \
+       the necessary 64-bit byteswap already. */ \
+ if (ctx->le_mode) { \
+ gen_qemu_ld64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+ tcg_gen_addi_tl(EA, EA, 8); \
+ gen_qemu_ld64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+ } else { \
+ gen_qemu_ld64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+ tcg_gen_addi_tl(EA, EA, 8); \
+ gen_qemu_ld64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+ } \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_VR_STX(name, opc2, opc3) \
+static void gen_st##name(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ tcg_gen_andi_tl(EA, EA, ~0xf); \
+ /* We only need to swap high and low halves. gen_qemu_st64_i64 does \
+       the necessary 64-bit byteswap already. */ \
+ if (ctx->le_mode) { \
+ gen_qemu_st64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+ tcg_gen_addi_tl(EA, EA, 8); \
+ gen_qemu_st64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+ } else { \
+ gen_qemu_st64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+ tcg_gen_addi_tl(EA, EA, 8); \
+ gen_qemu_st64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+ } \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_VR_LVE(name, opc2, opc3, size) \
+static void gen_lve##name(DisasContext *ctx) \
+ { \
+ TCGv EA; \
+ TCGv_ptr rs; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ if (size > 1) { \
+ tcg_gen_andi_tl(EA, EA, ~(size - 1)); \
+ } \
+ rs = gen_avr_ptr(rS(ctx->opcode)); \
+ gen_helper_lve##name(cpu_env, rs, EA); \
+ tcg_temp_free(EA); \
+ tcg_temp_free_ptr(rs); \
+ }
+
+#define GEN_VR_STVE(name, opc2, opc3, size) \
+static void gen_stve##name(DisasContext *ctx) \
+ { \
+ TCGv EA; \
+ TCGv_ptr rs; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ if (size > 1) { \
+ tcg_gen_andi_tl(EA, EA, ~(size - 1)); \
+ } \
+ rs = gen_avr_ptr(rS(ctx->opcode)); \
+ gen_helper_stve##name(cpu_env, rs, EA); \
+ tcg_temp_free(EA); \
+ tcg_temp_free_ptr(rs); \
+ }
+
+GEN_VR_LDX(lvx, 0x07, 0x03);
+/* As we don't emulate the cache, lvxl is strictly equivalent to lvx */
+GEN_VR_LDX(lvxl, 0x07, 0x0B);
+
+GEN_VR_LVE(bx, 0x07, 0x00, 1);
+GEN_VR_LVE(hx, 0x07, 0x01, 2);
+GEN_VR_LVE(wx, 0x07, 0x02, 4);
+
+GEN_VR_STX(svx, 0x07, 0x07);
+/* As we don't emulate the cache, stvxl is strictly equivalent to stvx */
+GEN_VR_STX(svxl, 0x07, 0x0F);
+
+GEN_VR_STVE(bx, 0x07, 0x04, 1);
+GEN_VR_STVE(hx, 0x07, 0x05, 2);
+GEN_VR_STVE(wx, 0x07, 0x06, 4);
+
+static void gen_lvsl(DisasContext *ctx)
+{
+ TCGv_ptr rd;
+ TCGv EA;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ rd = gen_avr_ptr(rD(ctx->opcode));
+ gen_helper_lvsl(rd, EA);
+ tcg_temp_free(EA);
+ tcg_temp_free_ptr(rd);
+}
+
+static void gen_lvsr(DisasContext *ctx)
+{
+ TCGv_ptr rd;
+ TCGv EA;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ rd = gen_avr_ptr(rD(ctx->opcode));
+ gen_helper_lvsr(rd, EA);
+ tcg_temp_free(EA);
+ tcg_temp_free_ptr(rd);
+}
+
+static void gen_mfvscr(DisasContext *ctx)
+{
+ TCGv_i32 t;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ tcg_gen_movi_i64(cpu_avrh[rD(ctx->opcode)], 0);
+ t = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, vscr));
+ tcg_gen_extu_i32_i64(cpu_avrl[rD(ctx->opcode)], t);
+ tcg_temp_free_i32(t);
+}
+
+static void gen_mtvscr(DisasContext *ctx)
+{
+ TCGv_ptr p;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ p = gen_avr_ptr(rB(ctx->opcode));
+ gen_helper_mtvscr(cpu_env, p);
+ tcg_temp_free_ptr(p);
+}
+
+#define GEN_VX_VMUL10(name, add_cin, ret_carry) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0 = tcg_temp_new_i64(); \
+ TCGv_i64 t1 = tcg_temp_new_i64(); \
+ TCGv_i64 t2 = tcg_temp_new_i64(); \
+ TCGv_i64 ten, z; \
+ \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ \
+ ten = tcg_const_i64(10); \
+ z = tcg_const_i64(0); \
+ \
+ if (add_cin) { \
+ tcg_gen_mulu2_i64(t0, t1, cpu_avrl[rA(ctx->opcode)], ten); \
+ tcg_gen_andi_i64(t2, cpu_avrl[rB(ctx->opcode)], 0xF); \
+ tcg_gen_add2_i64(cpu_avrl[rD(ctx->opcode)], t2, t0, t1, t2, z); \
+ } else { \
+ tcg_gen_mulu2_i64(cpu_avrl[rD(ctx->opcode)], t2, \
+ cpu_avrl[rA(ctx->opcode)], ten); \
+ } \
+ \
+ if (ret_carry) { \
+ tcg_gen_mulu2_i64(t0, t1, cpu_avrh[rA(ctx->opcode)], ten); \
+ tcg_gen_add2_i64(t0, cpu_avrl[rD(ctx->opcode)], t0, t1, t2, z); \
+ tcg_gen_movi_i64(cpu_avrh[rD(ctx->opcode)], 0); \
+ } else { \
+ tcg_gen_mul_i64(t0, cpu_avrh[rA(ctx->opcode)], ten); \
+ tcg_gen_add_i64(cpu_avrh[rD(ctx->opcode)], t0, t2); \
+ } \
+ \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i64(t1); \
+ tcg_temp_free_i64(t2); \
+ tcg_temp_free_i64(ten); \
+ tcg_temp_free_i64(z); \
+} \
+
+GEN_VX_VMUL10(vmul10uq, 0, 0);
+GEN_VX_VMUL10(vmul10euq, 1, 0);
+GEN_VX_VMUL10(vmul10cuq, 0, 1);
+GEN_VX_VMUL10(vmul10ecuq, 1, 1);
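+/*
+ * Sketch of the ISA 3.0 vmul10 variants above: add_cin adds the low
+ * nibble of vB as a decimal carry-in, and ret_carry returns the
+ * carry-out of the multiply-by-ten in the low doubleword instead of
+ * the product.
+ */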
+
+/* Logical operations */
+#define GEN_VX_LOGICAL(name, tcg_op, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ tcg_op(cpu_avrh[rD(ctx->opcode)], cpu_avrh[rA(ctx->opcode)], cpu_avrh[rB(ctx->opcode)]); \
+ tcg_op(cpu_avrl[rD(ctx->opcode)], cpu_avrl[rA(ctx->opcode)], cpu_avrl[rB(ctx->opcode)]); \
+}
+
+GEN_VX_LOGICAL(vand, tcg_gen_and_i64, 2, 16);
+GEN_VX_LOGICAL(vandc, tcg_gen_andc_i64, 2, 17);
+GEN_VX_LOGICAL(vor, tcg_gen_or_i64, 2, 18);
+GEN_VX_LOGICAL(vxor, tcg_gen_xor_i64, 2, 19);
+GEN_VX_LOGICAL(vnor, tcg_gen_nor_i64, 2, 20);
+GEN_VX_LOGICAL(veqv, tcg_gen_eqv_i64, 2, 26);
+GEN_VX_LOGICAL(vnand, tcg_gen_nand_i64, 2, 22);
+GEN_VX_LOGICAL(vorc, tcg_gen_orc_i64, 2, 21);
+
+#define GEN_VXFORM(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv_ptr ra, rb, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name (rd, ra, rb); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+}
+
+#define GEN_VXFORM_ENV(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv_ptr ra, rb, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(cpu_env, rd, ra, rb); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+}
+
+#define GEN_VXFORM3(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv_ptr ra, rb, rc, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rc = gen_avr_ptr(rC(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(rd, ra, rb, rc); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rc); \
+ tcg_temp_free_ptr(rd); \
+}
+
+/*
+ * Support for Altivec instruction pairs that use bit 31 (Rc) as
+ * an opcode bit. In general, these pairs come from different
+ * versions of the ISA, so we must also support a pair of flags for
+ * each instruction.
+ */
+#define GEN_VXFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1) \
+static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
+{ \
+ if ((Rc(ctx->opcode) == 0) && \
+ ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
+ gen_##name0(ctx); \
+ } else if ((Rc(ctx->opcode) == 1) && \
+ ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
+ gen_##name1(ctx); \
+ } else { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ } \
+}
+
+/* Like GEN_VXFORM_DUAL, but with an invalid-form mask for each instruction */
+#define GEN_VXFORM_DUAL_EXT(name0, flg0, flg2_0, inval0, \
+ name1, flg1, flg2_1, inval1) \
+static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
+{ \
+ if ((Rc(ctx->opcode) == 0) && \
+ ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0)) && \
+ !(ctx->opcode & inval0)) { \
+ gen_##name0(ctx); \
+ } else if ((Rc(ctx->opcode) == 1) && \
+ ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1)) && \
+ !(ctx->opcode & inval1)) { \
+ gen_##name1(ctx); \
+ } else { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ } \
+}
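+/*
+ * Example of the dual pattern (sketch): GEN_VXFORM_DUAL(vadduhm, ...,
+ * vmul10ecuq, ...) emits gen_vadduhm_vmul10ecuq(), which decodes as
+ * vadduhm when Rc == 0 and as vmul10ecuq when Rc == 1, provided the
+ * matching insns_flags/insns_flags2 bit is present.
+ */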
+
+GEN_VXFORM(vaddubm, 0, 0);
+GEN_VXFORM_DUAL_EXT(vaddubm, PPC_ALTIVEC, PPC_NONE, 0, \
+ vmul10cuq, PPC_NONE, PPC2_ISA300, 0x0000F800)
+GEN_VXFORM(vadduhm, 0, 1);
+GEN_VXFORM_DUAL(vadduhm, PPC_ALTIVEC, PPC_NONE, \
+ vmul10ecuq, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM(vadduwm, 0, 2);
+GEN_VXFORM(vaddudm, 0, 3);
+GEN_VXFORM(vsububm, 0, 16);
+GEN_VXFORM(vsubuhm, 0, 17);
+GEN_VXFORM(vsubuwm, 0, 18);
+GEN_VXFORM(vsubudm, 0, 19);
+GEN_VXFORM(vmaxub, 1, 0);
+GEN_VXFORM(vmaxuh, 1, 1);
+GEN_VXFORM(vmaxuw, 1, 2);
+GEN_VXFORM(vmaxud, 1, 3);
+GEN_VXFORM(vmaxsb, 1, 4);
+GEN_VXFORM(vmaxsh, 1, 5);
+GEN_VXFORM(vmaxsw, 1, 6);
+GEN_VXFORM(vmaxsd, 1, 7);
+GEN_VXFORM(vminub, 1, 8);
+GEN_VXFORM(vminuh, 1, 9);
+GEN_VXFORM(vminuw, 1, 10);
+GEN_VXFORM(vminud, 1, 11);
+GEN_VXFORM(vminsb, 1, 12);
+GEN_VXFORM(vminsh, 1, 13);
+GEN_VXFORM(vminsw, 1, 14);
+GEN_VXFORM(vminsd, 1, 15);
+GEN_VXFORM(vavgub, 1, 16);
+GEN_VXFORM(vabsdub, 1, 16);
+GEN_VXFORM_DUAL(vavgub, PPC_ALTIVEC, PPC_NONE, \
+ vabsdub, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM(vavguh, 1, 17);
+GEN_VXFORM(vabsduh, 1, 17);
+GEN_VXFORM_DUAL(vavguh, PPC_ALTIVEC, PPC_NONE, \
+ vabsduh, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM(vavguw, 1, 18);
+GEN_VXFORM(vabsduw, 1, 18);
+GEN_VXFORM_DUAL(vavguw, PPC_ALTIVEC, PPC_NONE, \
+ vabsduw, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM(vavgsb, 1, 20);
+GEN_VXFORM(vavgsh, 1, 21);
+GEN_VXFORM(vavgsw, 1, 22);
+GEN_VXFORM(vmrghb, 6, 0);
+GEN_VXFORM(vmrghh, 6, 1);
+GEN_VXFORM(vmrghw, 6, 2);
+GEN_VXFORM(vmrglb, 6, 4);
+GEN_VXFORM(vmrglh, 6, 5);
+GEN_VXFORM(vmrglw, 6, 6);
+
+static void gen_vmrgew(DisasContext *ctx)
+{
+ TCGv_i64 tmp;
+ int VT, VA, VB;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ VT = rD(ctx->opcode);
+ VA = rA(ctx->opcode);
+ VB = rB(ctx->opcode);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_shri_i64(tmp, cpu_avrh[VB], 32);
+ tcg_gen_deposit_i64(cpu_avrh[VT], cpu_avrh[VA], tmp, 0, 32);
+ tcg_gen_shri_i64(tmp, cpu_avrl[VB], 32);
+ tcg_gen_deposit_i64(cpu_avrl[VT], cpu_avrl[VA], tmp, 0, 32);
+ tcg_temp_free_i64(tmp);
+}
+
+static void gen_vmrgow(DisasContext *ctx)
+{
+ int VT, VA, VB;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ VT = rD(ctx->opcode);
+ VA = rA(ctx->opcode);
+ VB = rB(ctx->opcode);
+
+ tcg_gen_deposit_i64(cpu_avrh[VT], cpu_avrh[VB], cpu_avrh[VA], 32, 32);
+ tcg_gen_deposit_i64(cpu_avrl[VT], cpu_avrl[VB], cpu_avrl[VA], 32, 32);
+}
+
+GEN_VXFORM(vmuloub, 4, 0);
+GEN_VXFORM(vmulouh, 4, 1);
+GEN_VXFORM(vmulouw, 4, 2);
+GEN_VXFORM(vmuluwm, 4, 2);
+GEN_VXFORM_DUAL(vmulouw, PPC_ALTIVEC, PPC_NONE,
+ vmuluwm, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM(vmulosb, 4, 4);
+GEN_VXFORM(vmulosh, 4, 5);
+GEN_VXFORM(vmulosw, 4, 6);
+GEN_VXFORM(vmuleub, 4, 8);
+GEN_VXFORM(vmuleuh, 4, 9);
+GEN_VXFORM(vmuleuw, 4, 10);
+GEN_VXFORM(vmulesb, 4, 12);
+GEN_VXFORM(vmulesh, 4, 13);
+GEN_VXFORM(vmulesw, 4, 14);
+GEN_VXFORM(vslb, 2, 4);
+GEN_VXFORM(vslh, 2, 5);
+GEN_VXFORM(vslw, 2, 6);
+GEN_VXFORM(vrlwnm, 2, 6);
+GEN_VXFORM_DUAL(vslw, PPC_ALTIVEC, PPC_NONE, \
+ vrlwnm, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM(vsld, 2, 23);
+GEN_VXFORM(vsrb, 2, 8);
+GEN_VXFORM(vsrh, 2, 9);
+GEN_VXFORM(vsrw, 2, 10);
+GEN_VXFORM(vsrd, 2, 27);
+GEN_VXFORM(vsrab, 2, 12);
+GEN_VXFORM(vsrah, 2, 13);
+GEN_VXFORM(vsraw, 2, 14);
+GEN_VXFORM(vsrad, 2, 15);
+GEN_VXFORM(vsrv, 2, 28);
+GEN_VXFORM(vslv, 2, 29);
+GEN_VXFORM(vslo, 6, 16);
+GEN_VXFORM(vsro, 6, 17);
+GEN_VXFORM(vaddcuw, 0, 6);
+GEN_VXFORM(vsubcuw, 0, 22);
+GEN_VXFORM_ENV(vaddubs, 0, 8);
+GEN_VXFORM_DUAL_EXT(vaddubs, PPC_ALTIVEC, PPC_NONE, 0, \
+ vmul10uq, PPC_NONE, PPC2_ISA300, 0x0000F800)
+GEN_VXFORM_ENV(vadduhs, 0, 9);
+GEN_VXFORM_DUAL(vadduhs, PPC_ALTIVEC, PPC_NONE, \
+ vmul10euq, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM_ENV(vadduws, 0, 10);
+GEN_VXFORM_ENV(vaddsbs, 0, 12);
+GEN_VXFORM_ENV(vaddshs, 0, 13);
+GEN_VXFORM_ENV(vaddsws, 0, 14);
+GEN_VXFORM_ENV(vsububs, 0, 24);
+GEN_VXFORM_ENV(vsubuhs, 0, 25);
+GEN_VXFORM_ENV(vsubuws, 0, 26);
+GEN_VXFORM_ENV(vsubsbs, 0, 28);
+GEN_VXFORM_ENV(vsubshs, 0, 29);
+GEN_VXFORM_ENV(vsubsws, 0, 30);
+GEN_VXFORM(vadduqm, 0, 4);
+GEN_VXFORM(vaddcuq, 0, 5);
+GEN_VXFORM3(vaddeuqm, 30, 0);
+GEN_VXFORM3(vaddecuq, 30, 0);
+GEN_VXFORM_DUAL(vaddeuqm, PPC_NONE, PPC2_ALTIVEC_207, \
+ vaddecuq, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM(vsubuqm, 0, 20);
+GEN_VXFORM(vsubcuq, 0, 21);
+GEN_VXFORM3(vsubeuqm, 31, 0);
+GEN_VXFORM3(vsubecuq, 31, 0);
+GEN_VXFORM_DUAL(vsubeuqm, PPC_NONE, PPC2_ALTIVEC_207, \
+ vsubecuq, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM(vrlb, 2, 0);
+GEN_VXFORM(vrlh, 2, 1);
+GEN_VXFORM(vrlw, 2, 2);
+GEN_VXFORM(vrlwmi, 2, 2);
+GEN_VXFORM_DUAL(vrlw, PPC_ALTIVEC, PPC_NONE, \
+ vrlwmi, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM(vrld, 2, 3);
+GEN_VXFORM(vrldmi, 2, 3);
+GEN_VXFORM_DUAL(vrld, PPC_NONE, PPC2_ALTIVEC_207, \
+ vrldmi, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM(vsl, 2, 7);
+GEN_VXFORM(vrldnm, 2, 7);
+GEN_VXFORM_DUAL(vsl, PPC_ALTIVEC, PPC_NONE, \
+ vrldnm, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM(vsr, 2, 11);
+GEN_VXFORM_ENV(vpkuhum, 7, 0);
+GEN_VXFORM_ENV(vpkuwum, 7, 1);
+GEN_VXFORM_ENV(vpkudum, 7, 17);
+GEN_VXFORM_ENV(vpkuhus, 7, 2);
+GEN_VXFORM_ENV(vpkuwus, 7, 3);
+GEN_VXFORM_ENV(vpkudus, 7, 19);
+GEN_VXFORM_ENV(vpkshus, 7, 4);
+GEN_VXFORM_ENV(vpkswus, 7, 5);
+GEN_VXFORM_ENV(vpksdus, 7, 21);
+GEN_VXFORM_ENV(vpkshss, 7, 6);
+GEN_VXFORM_ENV(vpkswss, 7, 7);
+GEN_VXFORM_ENV(vpksdss, 7, 23);
+GEN_VXFORM(vpkpx, 7, 12);
+GEN_VXFORM_ENV(vsum4ubs, 4, 24);
+GEN_VXFORM_ENV(vsum4sbs, 4, 28);
+GEN_VXFORM_ENV(vsum4shs, 4, 25);
+GEN_VXFORM_ENV(vsum2sws, 4, 26);
+GEN_VXFORM_ENV(vsumsws, 4, 30);
+GEN_VXFORM_ENV(vaddfp, 5, 0);
+GEN_VXFORM_ENV(vsubfp, 5, 1);
+GEN_VXFORM_ENV(vmaxfp, 5, 16);
+GEN_VXFORM_ENV(vminfp, 5, 17);
+
+#define GEN_VXRFORM1(opname, name, str, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr ra, rb, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##opname(cpu_env, rd, ra, rb); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+#define GEN_VXRFORM(name, opc2, opc3) \
+ GEN_VXRFORM1(name, name, #name, opc2, opc3) \
+ GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))
+
+/*
+ * Support for Altivec instructions that use bit 31 (Rc) as an opcode
+ * bit but also use bit 21 as an actual Rc bit. In general, these pairs
+ * come from different versions of the ISA, so we must also support a
+ * pair of flags for each instruction.
+ */
+#define GEN_VXRFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1) \
+static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
+{ \
+ if ((Rc(ctx->opcode) == 0) && \
+ ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
+ if (Rc21(ctx->opcode) == 0) { \
+ gen_##name0(ctx); \
+ } else { \
+ gen_##name0##_(ctx); \
+ } \
+ } else if ((Rc(ctx->opcode) == 1) && \
+ ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
+ if (Rc21(ctx->opcode) == 0) { \
+ gen_##name1(ctx); \
+ } else { \
+ gen_##name1##_(ctx); \
+ } \
+ } else { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ } \
+}
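+/*
+ * Note: for these VXR pairs bit 21 remains a true record bit, so each
+ * side further dispatches to gen_<name>() or gen_<name>_() (the dot
+ * form generated by GEN_VXRFORM) based on Rc21.
+ */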
+
+GEN_VXRFORM(vcmpequb, 3, 0)
+GEN_VXRFORM(vcmpequh, 3, 1)
+GEN_VXRFORM(vcmpequw, 3, 2)
+GEN_VXRFORM(vcmpequd, 3, 3)
+GEN_VXRFORM(vcmpnezb, 3, 4)
+GEN_VXRFORM(vcmpnezh, 3, 5)
+GEN_VXRFORM(vcmpnezw, 3, 6)
+GEN_VXRFORM(vcmpgtsb, 3, 12)
+GEN_VXRFORM(vcmpgtsh, 3, 13)
+GEN_VXRFORM(vcmpgtsw, 3, 14)
+GEN_VXRFORM(vcmpgtsd, 3, 15)
+GEN_VXRFORM(vcmpgtub, 3, 8)
+GEN_VXRFORM(vcmpgtuh, 3, 9)
+GEN_VXRFORM(vcmpgtuw, 3, 10)
+GEN_VXRFORM(vcmpgtud, 3, 11)
+GEN_VXRFORM(vcmpeqfp, 3, 3)
+GEN_VXRFORM(vcmpgefp, 3, 7)
+GEN_VXRFORM(vcmpgtfp, 3, 11)
+GEN_VXRFORM(vcmpbfp, 3, 15)
+GEN_VXRFORM(vcmpneb, 3, 0)
+GEN_VXRFORM(vcmpneh, 3, 1)
+GEN_VXRFORM(vcmpnew, 3, 2)
+
+GEN_VXRFORM_DUAL(vcmpequb, PPC_ALTIVEC, PPC_NONE, \
+ vcmpneb, PPC_NONE, PPC2_ISA300)
+GEN_VXRFORM_DUAL(vcmpequh, PPC_ALTIVEC, PPC_NONE, \
+ vcmpneh, PPC_NONE, PPC2_ISA300)
+GEN_VXRFORM_DUAL(vcmpequw, PPC_ALTIVEC, PPC_NONE, \
+ vcmpnew, PPC_NONE, PPC2_ISA300)
+GEN_VXRFORM_DUAL(vcmpeqfp, PPC_ALTIVEC, PPC_NONE, \
+ vcmpequd, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXRFORM_DUAL(vcmpbfp, PPC_ALTIVEC, PPC_NONE, \
+ vcmpgtsd, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXRFORM_DUAL(vcmpgtfp, PPC_ALTIVEC, PPC_NONE, \
+ vcmpgtud, PPC_NONE, PPC2_ALTIVEC_207)
+
+#define GEN_VXFORM_SIMM(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rd; \
+ TCGv_i32 simm; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ simm = tcg_const_i32(SIMM5(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name (rd, simm); \
+ tcg_temp_free_i32(simm); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+GEN_VXFORM_SIMM(vspltisb, 6, 12);
+GEN_VXFORM_SIMM(vspltish, 6, 13);
+GEN_VXFORM_SIMM(vspltisw, 6, 14);
+
+#define GEN_VXFORM_NOA(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name (rd, rb); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+#define GEN_VXFORM_NOA_ENV(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb, rd; \
+ \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(cpu_env, rd, rb); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+#define GEN_VXFORM_NOA_2(name, opc2, opc3, opc4) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(rd, rb); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+#define GEN_VXFORM_NOA_3(name, opc2, opc3, opc4) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ gen_helper_##name(cpu_gpr[rD(ctx->opcode)], rb); \
+ tcg_temp_free_ptr(rb); \
+ }
+GEN_VXFORM_NOA(vupkhsb, 7, 8);
+GEN_VXFORM_NOA(vupkhsh, 7, 9);
+GEN_VXFORM_NOA(vupkhsw, 7, 25);
+GEN_VXFORM_NOA(vupklsb, 7, 10);
+GEN_VXFORM_NOA(vupklsh, 7, 11);
+GEN_VXFORM_NOA(vupklsw, 7, 27);
+GEN_VXFORM_NOA(vupkhpx, 7, 13);
+GEN_VXFORM_NOA(vupklpx, 7, 15);
+GEN_VXFORM_NOA_ENV(vrefp, 5, 4);
+GEN_VXFORM_NOA_ENV(vrsqrtefp, 5, 5);
+GEN_VXFORM_NOA_ENV(vexptefp, 5, 6);
+GEN_VXFORM_NOA_ENV(vlogefp, 5, 7);
+GEN_VXFORM_NOA_ENV(vrfim, 5, 11);
+GEN_VXFORM_NOA_ENV(vrfin, 5, 8);
+GEN_VXFORM_NOA_ENV(vrfip, 5, 10);
+GEN_VXFORM_NOA_ENV(vrfiz, 5, 9);
+GEN_VXFORM_NOA(vprtybw, 1, 24);
+GEN_VXFORM_NOA(vprtybd, 1, 24);
+GEN_VXFORM_NOA(vprtybq, 1, 24);
+
+#define GEN_VXFORM_SIMM(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rd; \
+ TCGv_i32 simm; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ simm = tcg_const_i32(SIMM5(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name (rd, simm); \
+ tcg_temp_free_i32(simm); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+#define GEN_VXFORM_UIMM(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb, rd; \
+ TCGv_i32 uimm; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ uimm = tcg_const_i32(UIMM5(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name (rd, rb, uimm); \
+ tcg_temp_free_i32(uimm); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+#define GEN_VXFORM_UIMM_ENV(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb, rd; \
+ TCGv_i32 uimm; \
+ \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ uimm = tcg_const_i32(UIMM5(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(cpu_env, rd, rb, uimm); \
+ tcg_temp_free_i32(uimm); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+#define GEN_VXFORM_UIMM_SPLAT(name, opc2, opc3, splat_max) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb, rd; \
+ uint8_t uimm = UIMM4(ctx->opcode); \
+ TCGv_i32 t0 = tcg_temp_new_i32(); \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ if (uimm > splat_max) { \
+ uimm = 0; \
+ } \
+ tcg_gen_movi_i32(t0, uimm); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(rd, rb, t0); \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+GEN_VXFORM_UIMM(vspltb, 6, 8);
+GEN_VXFORM_UIMM(vsplth, 6, 9);
+GEN_VXFORM_UIMM(vspltw, 6, 10);
+GEN_VXFORM_UIMM_SPLAT(vextractub, 6, 8, 15);
+GEN_VXFORM_UIMM_SPLAT(vextractuh, 6, 9, 14);
+GEN_VXFORM_UIMM_SPLAT(vextractuw, 6, 10, 12);
+GEN_VXFORM_UIMM_SPLAT(vextractd, 6, 11, 8);
+GEN_VXFORM_UIMM_SPLAT(vinsertb, 6, 12, 15);
+GEN_VXFORM_UIMM_SPLAT(vinserth, 6, 13, 14);
+GEN_VXFORM_UIMM_SPLAT(vinsertw, 6, 14, 12);
+GEN_VXFORM_UIMM_SPLAT(vinsertd, 6, 15, 8);
+GEN_VXFORM_UIMM_ENV(vcfux, 5, 12);
+GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13);
+GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14);
+GEN_VXFORM_UIMM_ENV(vctsxs, 5, 15);
+GEN_VXFORM_DUAL(vspltb, PPC_ALTIVEC, PPC_NONE,
+ vextractub, PPC_NONE, PPC2_ISA300);
+GEN_VXFORM_DUAL(vsplth, PPC_ALTIVEC, PPC_NONE,
+ vextractuh, PPC_NONE, PPC2_ISA300);
+GEN_VXFORM_DUAL(vspltw, PPC_ALTIVEC, PPC_NONE,
+ vextractuw, PPC_NONE, PPC2_ISA300);
+GEN_VXFORM_DUAL(vspltisb, PPC_ALTIVEC, PPC_NONE,
+ vinsertb, PPC_NONE, PPC2_ISA300);
+GEN_VXFORM_DUAL(vspltish, PPC_ALTIVEC, PPC_NONE,
+ vinserth, PPC_NONE, PPC2_ISA300);
+GEN_VXFORM_DUAL(vspltisw, PPC_ALTIVEC, PPC_NONE,
+ vinsertw, PPC_NONE, PPC2_ISA300);
+
+static void gen_vsldoi(DisasContext *ctx)
+{
+ TCGv_ptr ra, rb, rd;
+ TCGv_i32 sh;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ ra = gen_avr_ptr(rA(ctx->opcode));
+ rb = gen_avr_ptr(rB(ctx->opcode));
+ rd = gen_avr_ptr(rD(ctx->opcode));
+ sh = tcg_const_i32(VSH(ctx->opcode));
+ gen_helper_vsldoi (rd, ra, rb, sh);
+ tcg_temp_free_ptr(ra);
+ tcg_temp_free_ptr(rb);
+ tcg_temp_free_ptr(rd);
+ tcg_temp_free_i32(sh);
+}
+
+#define GEN_VAFORM_PAIRED(name0, name1, opc2) \
+static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
+ { \
+ TCGv_ptr ra, rb, rc, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rc = gen_avr_ptr(rC(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ if (Rc(ctx->opcode)) { \
+ gen_helper_##name1(cpu_env, rd, ra, rb, rc); \
+ } else { \
+ gen_helper_##name0(cpu_env, rd, ra, rb, rc); \
+ } \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rc); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16)
+
+static void gen_vmladduhm(DisasContext *ctx)
+{
+ TCGv_ptr ra, rb, rc, rd;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ ra = gen_avr_ptr(rA(ctx->opcode));
+ rb = gen_avr_ptr(rB(ctx->opcode));
+ rc = gen_avr_ptr(rC(ctx->opcode));
+ rd = gen_avr_ptr(rD(ctx->opcode));
+ gen_helper_vmladduhm(rd, ra, rb, rc);
+ tcg_temp_free_ptr(ra);
+ tcg_temp_free_ptr(rb);
+ tcg_temp_free_ptr(rc);
+ tcg_temp_free_ptr(rd);
+}
+
+static void gen_vpermr(DisasContext *ctx)
+{
+ TCGv_ptr ra, rb, rc, rd;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ ra = gen_avr_ptr(rA(ctx->opcode));
+ rb = gen_avr_ptr(rB(ctx->opcode));
+ rc = gen_avr_ptr(rC(ctx->opcode));
+ rd = gen_avr_ptr(rD(ctx->opcode));
+ gen_helper_vpermr(cpu_env, rd, ra, rb, rc);
+ tcg_temp_free_ptr(ra);
+ tcg_temp_free_ptr(rb);
+ tcg_temp_free_ptr(rc);
+ tcg_temp_free_ptr(rd);
+}
+
+GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18)
+GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19)
+GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20)
+GEN_VAFORM_PAIRED(vsel, vperm, 21)
+GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23)
+
+GEN_VXFORM_NOA(vclzb, 1, 28)
+GEN_VXFORM_NOA(vclzh, 1, 29)
+GEN_VXFORM_NOA(vclzw, 1, 30)
+GEN_VXFORM_NOA(vclzd, 1, 31)
+GEN_VXFORM_NOA_2(vnegw, 1, 24, 6)
+GEN_VXFORM_NOA_2(vnegd, 1, 24, 7)
+GEN_VXFORM_NOA_2(vextsb2w, 1, 24, 16)
+GEN_VXFORM_NOA_2(vextsh2w, 1, 24, 17)
+GEN_VXFORM_NOA_2(vextsb2d, 1, 24, 24)
+GEN_VXFORM_NOA_2(vextsh2d, 1, 24, 25)
+GEN_VXFORM_NOA_2(vextsw2d, 1, 24, 26)
+GEN_VXFORM_NOA_2(vctzb, 1, 24, 28)
+GEN_VXFORM_NOA_2(vctzh, 1, 24, 29)
+GEN_VXFORM_NOA_2(vctzw, 1, 24, 30)
+GEN_VXFORM_NOA_2(vctzd, 1, 24, 31)
+GEN_VXFORM_NOA_3(vclzlsbb, 1, 24, 0)
+GEN_VXFORM_NOA_3(vctzlsbb, 1, 24, 1)
+GEN_VXFORM_NOA(vpopcntb, 1, 28)
+GEN_VXFORM_NOA(vpopcnth, 1, 29)
+GEN_VXFORM_NOA(vpopcntw, 1, 30)
+GEN_VXFORM_NOA(vpopcntd, 1, 31)
+GEN_VXFORM_DUAL(vclzb, PPC_NONE, PPC2_ALTIVEC_207, \
+ vpopcntb, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_DUAL(vclzh, PPC_NONE, PPC2_ALTIVEC_207, \
+ vpopcnth, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_DUAL(vclzw, PPC_NONE, PPC2_ALTIVEC_207, \
+ vpopcntw, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_DUAL(vclzd, PPC_NONE, PPC2_ALTIVEC_207, \
+ vpopcntd, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM(vbpermd, 6, 23);
+GEN_VXFORM(vbpermq, 6, 21);
+GEN_VXFORM_NOA(vgbbd, 6, 20);
+GEN_VXFORM(vpmsumb, 4, 16)
+GEN_VXFORM(vpmsumh, 4, 17)
+GEN_VXFORM(vpmsumw, 4, 18)
+GEN_VXFORM(vpmsumd, 4, 19)
+
+#define GEN_BCD(op) \
+static void gen_##op(DisasContext *ctx) \
+{ \
+ TCGv_ptr ra, rb, rd; \
+ TCGv_i32 ps; \
+ \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ \
+ ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
+ \
+ gen_helper_##op(cpu_crf[6], rd, ra, rb, ps); \
+ \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ tcg_temp_free_i32(ps); \
+}
+
+#define GEN_BCD2(op) \
+static void gen_##op(DisasContext *ctx) \
+{ \
+ TCGv_ptr rd, rb; \
+ TCGv_i32 ps; \
+ \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ \
+ ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
+ \
+ gen_helper_##op(cpu_crf[6], rd, rb, ps); \
+ \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ tcg_temp_free_i32(ps); \
+}
+
+GEN_BCD(bcdadd)
+GEN_BCD(bcdsub)
+GEN_BCD2(bcdcfn)
+GEN_BCD2(bcdctn)
+GEN_BCD2(bcdcfz)
+GEN_BCD2(bcdctz)
+
+static void gen_xpnd04_1(DisasContext *ctx)
+{
+ switch (opc4(ctx->opcode)) {
+ case 4:
+ gen_bcdctz(ctx);
+ break;
+ case 5:
+ gen_bcdctn(ctx);
+ break;
+ case 6:
+ gen_bcdcfz(ctx);
+ break;
+ case 7:
+ gen_bcdcfn(ctx);
+ break;
+ default:
+ gen_invalid(ctx);
+ break;
+ }
+}
+
+static void gen_xpnd04_2(DisasContext *ctx)
+{
+ switch (opc4(ctx->opcode)) {
+ case 4:
+ gen_bcdctz(ctx);
+ break;
+ case 6:
+ gen_bcdcfz(ctx);
+ break;
+ case 7:
+ gen_bcdcfn(ctx);
+ break;
+ default:
+ gen_invalid(ctx);
+ break;
+ }
+}
+
+GEN_VXFORM_DUAL(vsubcuw, PPC_ALTIVEC, PPC_NONE, \
+ xpnd04_1, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM_DUAL(vsubsws, PPC_ALTIVEC, PPC_NONE, \
+ xpnd04_2, PPC_NONE, PPC2_ISA300)
+
+GEN_VXFORM_DUAL(vsububm, PPC_ALTIVEC, PPC_NONE, \
+ bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_DUAL(vsububs, PPC_ALTIVEC, PPC_NONE, \
+ bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_DUAL(vsubuhm, PPC_ALTIVEC, PPC_NONE, \
+ bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_DUAL(vsubuhs, PPC_ALTIVEC, PPC_NONE, \
+ bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
+
+static void gen_vsbox(DisasContext *ctx)
+{
+ TCGv_ptr ra, rd;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ ra = gen_avr_ptr(rA(ctx->opcode));
+ rd = gen_avr_ptr(rD(ctx->opcode));
+ gen_helper_vsbox(rd, ra);
+ tcg_temp_free_ptr(ra);
+ tcg_temp_free_ptr(rd);
+}
+
+GEN_VXFORM(vcipher, 4, 20)
+GEN_VXFORM(vcipherlast, 4, 20)
+GEN_VXFORM(vncipher, 4, 21)
+GEN_VXFORM(vncipherlast, 4, 21)
+
+GEN_VXFORM_DUAL(vcipher, PPC_NONE, PPC2_ALTIVEC_207,
+ vcipherlast, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_DUAL(vncipher, PPC_NONE, PPC2_ALTIVEC_207,
+ vncipherlast, PPC_NONE, PPC2_ALTIVEC_207)
+
+#define VSHASIGMA(op) \
+static void gen_##op(DisasContext *ctx) \
+{ \
+ TCGv_ptr ra, rd; \
+ TCGv_i32 st_six; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ st_six = tcg_const_i32(rB(ctx->opcode)); \
+ gen_helper_##op(rd, ra, st_six); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rd); \
+ tcg_temp_free_i32(st_six); \
+}
+
+VSHASIGMA(vshasigmaw)
+VSHASIGMA(vshasigmad)
+
+GEN_VXFORM3(vpermxor, 22, 0xFF)
+GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE,
+ vpermxor, PPC_NONE, PPC2_ALTIVEC_207)
+
+#undef GEN_VR_LDX
+#undef GEN_VR_STX
+#undef GEN_VR_LVE
+#undef GEN_VR_STVE
+
+#undef GEN_VX_LOGICAL
+#undef GEN_VX_LOGICAL_207
+#undef GEN_VXFORM
+#undef GEN_VXFORM_207
+#undef GEN_VXFORM_DUAL
+#undef GEN_VXRFORM_DUAL
+#undef GEN_VXRFORM1
+#undef GEN_VXRFORM
+#undef GEN_VXFORM_SIMM
+#undef GEN_VXFORM_NOA
+#undef GEN_VXFORM_UIMM
+#undef GEN_VAFORM_PAIRED
+
+#undef GEN_BCD2
diff --git a/target-ppc/translate/vmx-ops.inc.c b/target-ppc/translate/vmx-ops.inc.c
new file mode 100644
index 0000000000..f02b3bed50
--- /dev/null
+++ b/target-ppc/translate/vmx-ops.inc.c
@@ -0,0 +1,294 @@
+#define GEN_VR_LDX(name, opc2, opc3) \
+GEN_HANDLER(name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)
+#define GEN_VR_STX(name, opc2, opc3) \
+GEN_HANDLER(st##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)
+#define GEN_VR_LVE(name, opc2, opc3) \
+ GEN_HANDLER(lve##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)
+#define GEN_VR_STVE(name, opc2, opc3) \
+ GEN_HANDLER(stve##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)
+GEN_VR_LDX(lvx, 0x07, 0x03),
+GEN_VR_LDX(lvxl, 0x07, 0x0B),
+GEN_VR_LVE(bx, 0x07, 0x00),
+GEN_VR_LVE(hx, 0x07, 0x01),
+GEN_VR_LVE(wx, 0x07, 0x02),
+GEN_VR_STX(svx, 0x07, 0x07),
+GEN_VR_STX(svxl, 0x07, 0x0F),
+GEN_VR_STVE(bx, 0x07, 0x04),
+GEN_VR_STVE(hx, 0x07, 0x05),
+GEN_VR_STVE(wx, 0x07, 0x06),
+
+#define GEN_VX_LOGICAL(name, tcg_op, opc2, opc3) \
+GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC)
+
+#define GEN_VX_LOGICAL_207(name, tcg_op, opc2, opc3) \
+GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ALTIVEC_207)
+
+GEN_VX_LOGICAL(vand, tcg_gen_and_i64, 2, 16),
+GEN_VX_LOGICAL(vandc, tcg_gen_andc_i64, 2, 17),
+GEN_VX_LOGICAL(vor, tcg_gen_or_i64, 2, 18),
+GEN_VX_LOGICAL(vxor, tcg_gen_xor_i64, 2, 19),
+GEN_VX_LOGICAL(vnor, tcg_gen_nor_i64, 2, 20),
+GEN_VX_LOGICAL_207(veqv, tcg_gen_eqv_i64, 2, 26),
+GEN_VX_LOGICAL_207(vnand, tcg_gen_nand_i64, 2, 22),
+GEN_VX_LOGICAL_207(vorc, tcg_gen_orc_i64, 2, 21),
+
+#define GEN_VXFORM(name, opc2, opc3) \
+GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC)
+
+#define GEN_VXFORM_207(name, opc2, opc3) \
+GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ALTIVEC_207)
+
+#define GEN_VXFORM_300(name, opc2, opc3) \
+GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ISA300)
+
+#define GEN_VXFORM_300_EXT(name, opc2, opc3, inval) \
+GEN_HANDLER_E(name, 0x04, opc2, opc3, inval, PPC_NONE, PPC2_ISA300)
+
+#define GEN_VXFORM_300_EO(name, opc2, opc3, opc4) \
+GEN_HANDLER_E_2(name, 0x04, opc2, opc3, opc4, 0x00000000, PPC_NONE, \
+ PPC2_ISA300)
+
+#define GEN_VXFORM_DUAL(name0, name1, opc2, opc3, type0, type1) \
+GEN_HANDLER_E(name0##_##name1, 0x4, opc2, opc3, 0x00000000, type0, type1)
+
+#define GEN_VXRFORM_DUAL(name0, name1, opc2, opc3, tp0, tp1) \
+GEN_HANDLER_E(name0##_##name1, 0x4, opc2, opc3, 0x00000000, tp0, tp1), \
+GEN_HANDLER_E(name0##_##name1, 0x4, opc2, (opc3 | 0x10), 0x00000000, tp0, tp1),
+
+GEN_VXFORM_DUAL(vaddubm, vmul10cuq, 0, 0, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vadduhm, vmul10ecuq, 0, 1, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM(vadduwm, 0, 2),
+GEN_VXFORM_207(vaddudm, 0, 3),
+GEN_VXFORM_DUAL(vsububm, bcdadd, 0, 16, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vsubuhm, bcdsub, 0, 17, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM(vsubuwm, 0, 18),
+GEN_VXFORM_207(vsubudm, 0, 19),
+GEN_VXFORM(vmaxub, 1, 0),
+GEN_VXFORM(vmaxuh, 1, 1),
+GEN_VXFORM(vmaxuw, 1, 2),
+GEN_VXFORM_207(vmaxud, 1, 3),
+GEN_VXFORM(vmaxsb, 1, 4),
+GEN_VXFORM(vmaxsh, 1, 5),
+GEN_VXFORM(vmaxsw, 1, 6),
+GEN_VXFORM_207(vmaxsd, 1, 7),
+GEN_VXFORM(vminub, 1, 8),
+GEN_VXFORM(vminuh, 1, 9),
+GEN_VXFORM(vminuw, 1, 10),
+GEN_VXFORM_207(vminud, 1, 11),
+GEN_VXFORM(vminsb, 1, 12),
+GEN_VXFORM(vminsh, 1, 13),
+GEN_VXFORM(vminsw, 1, 14),
+GEN_VXFORM_207(vminsd, 1, 15),
+GEN_VXFORM_DUAL(vavgub, vabsdub, 1, 16, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vavguh, vabsduh, 1, 17, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vavguw, vabsduw, 1, 18, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM(vavgsb, 1, 20),
+GEN_VXFORM(vavgsh, 1, 21),
+GEN_VXFORM(vavgsw, 1, 22),
+GEN_VXFORM(vmrghb, 6, 0),
+GEN_VXFORM(vmrghh, 6, 1),
+GEN_VXFORM(vmrghw, 6, 2),
+GEN_VXFORM(vmrglb, 6, 4),
+GEN_VXFORM(vmrglh, 6, 5),
+GEN_VXFORM(vmrglw, 6, 6),
+GEN_VXFORM_207(vmrgew, 6, 30),
+GEN_VXFORM_207(vmrgow, 6, 26),
+GEN_VXFORM(vmuloub, 4, 0),
+GEN_VXFORM(vmulouh, 4, 1),
+GEN_VXFORM_DUAL(vmulouw, vmuluwm, 4, 2, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM(vmulosb, 4, 4),
+GEN_VXFORM(vmulosh, 4, 5),
+GEN_VXFORM_207(vmulosw, 4, 6),
+GEN_VXFORM(vmuleub, 4, 8),
+GEN_VXFORM(vmuleuh, 4, 9),
+GEN_VXFORM_207(vmuleuw, 4, 10),
+GEN_VXFORM(vmulesb, 4, 12),
+GEN_VXFORM(vmulesh, 4, 13),
+GEN_VXFORM_207(vmulesw, 4, 14),
+GEN_VXFORM(vslb, 2, 4),
+GEN_VXFORM(vslh, 2, 5),
+GEN_VXFORM_DUAL(vslw, vrlwnm, 2, 6, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_207(vsld, 2, 23),
+GEN_VXFORM(vsrb, 2, 8),
+GEN_VXFORM(vsrh, 2, 9),
+GEN_VXFORM(vsrw, 2, 10),
+GEN_VXFORM_207(vsrd, 2, 27),
+GEN_VXFORM(vsrab, 2, 12),
+GEN_VXFORM(vsrah, 2, 13),
+GEN_VXFORM(vsraw, 2, 14),
+GEN_VXFORM_207(vsrad, 2, 15),
+GEN_VXFORM_300(vsrv, 2, 28),
+GEN_VXFORM_300(vslv, 2, 29),
+GEN_VXFORM(vslo, 6, 16),
+GEN_VXFORM(vsro, 6, 17),
+GEN_VXFORM(vaddcuw, 0, 6),
+GEN_HANDLER_E_2(vprtybw, 0x4, 0x1, 0x18, 8, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E_2(vprtybd, 0x4, 0x1, 0x18, 9, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E_2(vprtybq, 0x4, 0x1, 0x18, 10, 0, PPC_NONE, PPC2_ISA300),
+
+GEN_VXFORM_DUAL(vsubcuw, xpnd04_1, 0, 22, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vaddubs, vmul10uq, 0, 8, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vadduhs, vmul10euq, 0, 9, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM(vadduws, 0, 10),
+GEN_VXFORM(vaddsbs, 0, 12),
+GEN_VXFORM(vaddshs, 0, 13),
+GEN_VXFORM(vaddsws, 0, 14),
+GEN_VXFORM_DUAL(vsububs, bcdadd, 0, 24, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vsubuhs, bcdsub, 0, 25, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM(vsubuws, 0, 26),
+GEN_VXFORM(vsubsbs, 0, 28),
+GEN_VXFORM(vsubshs, 0, 29),
+GEN_VXFORM_DUAL(vsubsws, xpnd04_2, 0, 30, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_207(vadduqm, 0, 4),
+GEN_VXFORM_207(vaddcuq, 0, 5),
+GEN_VXFORM_DUAL(vaddeuqm, vaddecuq, 30, 0xFF, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM_207(vsubuqm, 0, 20),
+GEN_VXFORM_207(vsubcuq, 0, 21),
+GEN_VXFORM_DUAL(vsubeuqm, vsubecuq, 31, 0xFF, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM(vrlb, 2, 0),
+GEN_VXFORM(vrlh, 2, 1),
+GEN_VXFORM_DUAL(vrlw, vrlwmi, 2, 2, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vrld, vrldmi, 2, 3, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM_DUAL(vsl, vrldnm, 2, 7, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM(vsr, 2, 11),
+GEN_VXFORM(vpkuhum, 7, 0),
+GEN_VXFORM(vpkuwum, 7, 1),
+GEN_VXFORM_207(vpkudum, 7, 17),
+GEN_VXFORM(vpkuhus, 7, 2),
+GEN_VXFORM(vpkuwus, 7, 3),
+GEN_VXFORM_207(vpkudus, 7, 19),
+GEN_VXFORM(vpkshus, 7, 4),
+GEN_VXFORM(vpkswus, 7, 5),
+GEN_VXFORM_207(vpksdus, 7, 21),
+GEN_VXFORM(vpkshss, 7, 6),
+GEN_VXFORM(vpkswss, 7, 7),
+GEN_VXFORM_207(vpksdss, 7, 23),
+GEN_VXFORM(vpkpx, 7, 12),
+GEN_VXFORM(vsum4ubs, 4, 24),
+GEN_VXFORM(vsum4sbs, 4, 28),
+GEN_VXFORM(vsum4shs, 4, 25),
+GEN_VXFORM(vsum2sws, 4, 26),
+GEN_VXFORM(vsumsws, 4, 30),
+GEN_VXFORM(vaddfp, 5, 0),
+GEN_VXFORM(vsubfp, 5, 1),
+GEN_VXFORM(vmaxfp, 5, 16),
+GEN_VXFORM(vminfp, 5, 17),
+
+#define GEN_VXRFORM1(opname, name, str, opc2, opc3) \
+ GEN_HANDLER2(name, str, 0x4, opc2, opc3, 0x00000000, PPC_ALTIVEC),
+#define GEN_VXRFORM1_300(opname, name, str, opc2, opc3) \
+GEN_HANDLER2_E(name, str, 0x4, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ISA300),
+#define GEN_VXRFORM(name, opc2, opc3) \
+ GEN_VXRFORM1(name, name, #name, opc2, opc3) \
+ GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))
+#define GEN_VXRFORM_300(name, opc2, opc3) \
+ GEN_VXRFORM1_300(name, name, #name, opc2, opc3) \
+ GEN_VXRFORM1_300(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))
+
+GEN_VXRFORM_300(vcmpnezb, 3, 4)
+GEN_VXRFORM_300(vcmpnezh, 3, 5)
+GEN_VXRFORM_300(vcmpnezw, 3, 6)
+GEN_VXRFORM(vcmpgtsb, 3, 12)
+GEN_VXRFORM(vcmpgtsh, 3, 13)
+GEN_VXRFORM(vcmpgtsw, 3, 14)
+GEN_VXRFORM(vcmpgtub, 3, 8)
+GEN_VXRFORM(vcmpgtuh, 3, 9)
+GEN_VXRFORM(vcmpgtuw, 3, 10)
+GEN_VXRFORM_DUAL(vcmpeqfp, vcmpequd, 3, 3, PPC_ALTIVEC, PPC_NONE)
+GEN_VXRFORM(vcmpgefp, 3, 7)
+GEN_VXRFORM_DUAL(vcmpgtfp, vcmpgtud, 3, 11, PPC_ALTIVEC, PPC_NONE)
+GEN_VXRFORM_DUAL(vcmpbfp, vcmpgtsd, 3, 15, PPC_ALTIVEC, PPC_NONE)
+GEN_VXRFORM_DUAL(vcmpequb, vcmpneb, 3, 0, PPC_ALTIVEC, PPC_NONE)
+GEN_VXRFORM_DUAL(vcmpequh, vcmpneh, 3, 1, PPC_ALTIVEC, PPC_NONE)
+GEN_VXRFORM_DUAL(vcmpequw, vcmpnew, 3, 2, PPC_ALTIVEC, PPC_NONE)
+
+#define GEN_VXFORM_DUAL_INV(name0, name1, opc2, opc3, inval0, inval1, type) \
+GEN_OPCODE_DUAL(name0##_##name1, 0x04, opc2, opc3, inval0, inval1, type, \
+ PPC_NONE)
+GEN_VXFORM_DUAL_INV(vspltb, vextractub, 6, 8, 0x00000000, 0x100000,
+ PPC_ALTIVEC),
+GEN_VXFORM_DUAL_INV(vsplth, vextractuh, 6, 9, 0x00000000, 0x100000,
+ PPC_ALTIVEC),
+GEN_VXFORM_DUAL_INV(vspltw, vextractuw, 6, 10, 0x00000000, 0x100000,
+ PPC_ALTIVEC),
+GEN_VXFORM_300_EXT(vextractd, 6, 11, 0x100000),
+GEN_VXFORM_DUAL_INV(vspltisb, vinsertb, 6, 12, 0x00000000, 0x100000,
+ PPC_ALTIVEC),
+GEN_VXFORM_DUAL_INV(vspltish, vinserth, 6, 13, 0x00000000, 0x100000,
+ PPC_ALTIVEC),
+GEN_VXFORM_DUAL_INV(vspltisw, vinsertw, 6, 14, 0x00000000, 0x100000,
+ PPC_ALTIVEC),
+GEN_VXFORM_300_EXT(vinsertd, 6, 15, 0x100000),
+GEN_VXFORM_300_EO(vnegw, 0x01, 0x18, 0x06),
+GEN_VXFORM_300_EO(vnegd, 0x01, 0x18, 0x07),
+GEN_VXFORM_300_EO(vextsb2w, 0x01, 0x18, 0x10),
+GEN_VXFORM_300_EO(vextsh2w, 0x01, 0x18, 0x11),
+GEN_VXFORM_300_EO(vextsb2d, 0x01, 0x18, 0x18),
+GEN_VXFORM_300_EO(vextsh2d, 0x01, 0x18, 0x19),
+GEN_VXFORM_300_EO(vextsw2d, 0x01, 0x18, 0x1A),
+GEN_VXFORM_300_EO(vctzb, 0x01, 0x18, 0x1C),
+GEN_VXFORM_300_EO(vctzh, 0x01, 0x18, 0x1D),
+GEN_VXFORM_300_EO(vctzw, 0x01, 0x18, 0x1E),
+GEN_VXFORM_300_EO(vctzd, 0x01, 0x18, 0x1F),
+GEN_VXFORM_300_EO(vclzlsbb, 0x01, 0x18, 0x0),
+GEN_VXFORM_300_EO(vctzlsbb, 0x01, 0x18, 0x1),
+GEN_VXFORM_300(vpermr, 0x1D, 0xFF),
+
+#define GEN_VXFORM_NOA(name, opc2, opc3) \
+ GEN_HANDLER(name, 0x04, opc2, opc3, 0x001f0000, PPC_ALTIVEC)
+GEN_VXFORM_NOA(vupkhsb, 7, 8),
+GEN_VXFORM_NOA(vupkhsh, 7, 9),
+GEN_VXFORM_207(vupkhsw, 7, 25),
+GEN_VXFORM_NOA(vupklsb, 7, 10),
+GEN_VXFORM_NOA(vupklsh, 7, 11),
+GEN_VXFORM_207(vupklsw, 7, 27),
+GEN_VXFORM_NOA(vupkhpx, 7, 13),
+GEN_VXFORM_NOA(vupklpx, 7, 15),
+GEN_VXFORM_NOA(vrefp, 5, 4),
+GEN_VXFORM_NOA(vrsqrtefp, 5, 5),
+GEN_VXFORM_NOA(vexptefp, 5, 6),
+GEN_VXFORM_NOA(vlogefp, 5, 7),
+GEN_VXFORM_NOA(vrfim, 5, 11),
+GEN_VXFORM_NOA(vrfin, 5, 8),
+GEN_VXFORM_NOA(vrfip, 5, 10),
+GEN_VXFORM_NOA(vrfiz, 5, 9),
+
+#define GEN_VXFORM_UIMM(name, opc2, opc3) \
+ GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC)
+GEN_VXFORM_UIMM(vcfux, 5, 12),
+GEN_VXFORM_UIMM(vcfsx, 5, 13),
+GEN_VXFORM_UIMM(vctuxs, 5, 14),
+GEN_VXFORM_UIMM(vctsxs, 5, 15),
+
+
+#define GEN_VAFORM_PAIRED(name0, name1, opc2) \
+ GEN_HANDLER(name0##_##name1, 0x04, opc2, 0xFF, 0x00000000, PPC_ALTIVEC)
+GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16),
+GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18),
+GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19),
+GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20),
+GEN_VAFORM_PAIRED(vsel, vperm, 21),
+GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23),
+
+GEN_VXFORM_DUAL(vclzb, vpopcntb, 1, 28, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM_DUAL(vclzh, vpopcnth, 1, 29, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM_DUAL(vclzw, vpopcntw, 1, 30, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM_DUAL(vclzd, vpopcntd, 1, 31, PPC_NONE, PPC2_ALTIVEC_207),
+
+GEN_VXFORM_300(vbpermd, 6, 23),
+GEN_VXFORM_207(vbpermq, 6, 21),
+GEN_VXFORM_207(vgbbd, 6, 20),
+GEN_VXFORM_207(vpmsumb, 4, 16),
+GEN_VXFORM_207(vpmsumh, 4, 17),
+GEN_VXFORM_207(vpmsumw, 4, 18),
+GEN_VXFORM_207(vpmsumd, 4, 19),
+
+GEN_VXFORM_207(vsbox, 4, 23),
+
+GEN_VXFORM_DUAL(vcipher, vcipherlast, 4, 20, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM_DUAL(vncipher, vncipherlast, 4, 21, PPC_NONE, PPC2_ALTIVEC_207),
+
+GEN_VXFORM_207(vshasigmaw, 1, 26),
+GEN_VXFORM_207(vshasigmad, 1, 27),
+
+GEN_VXFORM_DUAL(vsldoi, vpermxor, 22, 0xFF, PPC_ALTIVEC, PPC_NONE),
diff --git a/target-ppc/translate/vsx-impl.inc.c b/target-ppc/translate/vsx-impl.inc.c
new file mode 100644
index 0000000000..5a27be4bd4
--- /dev/null
+++ b/target-ppc/translate/vsx-impl.inc.c
@@ -0,0 +1,1009 @@
+/*** VSX extension ***/
+
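+/*
+ * VSRs 0-31 share storage with the FPRs (doubleword 0) and the cpu_vsr
+ * array (doubleword 1); VSRs 32-63 share storage with the AltiVec
+ * registers, so these two helpers pick the backing TCG globals.
+ */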
+static inline TCGv_i64 cpu_vsrh(int n)
+{
+ if (n < 32) {
+ return cpu_fpr[n];
+ } else {
+ return cpu_avrh[n-32];
+ }
+}
+
+static inline TCGv_i64 cpu_vsrl(int n)
+{
+ if (n < 32) {
+ return cpu_vsr[n];
+ } else {
+ return cpu_avrl[n-32];
+ }
+}
+
+#define VSX_LOAD_SCALAR(name, operation) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##operation(ctx, cpu_vsrh(xT(ctx->opcode)), EA); \
+ /* NOTE: cpu_vsrl is undefined */ \
+ tcg_temp_free(EA); \
+}
+
+VSX_LOAD_SCALAR(lxsdx, ld64_i64)
+VSX_LOAD_SCALAR(lxsiwax, ld32s_i64)
+VSX_LOAD_SCALAR(lxsibzx, ld8u_i64)
+VSX_LOAD_SCALAR(lxsihzx, ld16u_i64)
+VSX_LOAD_SCALAR(lxsiwzx, ld32u_i64)
+VSX_LOAD_SCALAR(lxsspx, ld32fs)
+
+static void gen_lxvd2x(DisasContext *ctx)
+{
+ TCGv EA;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ gen_qemu_ld64_i64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_ld64_i64(ctx, cpu_vsrl(xT(ctx->opcode)), EA);
+ tcg_temp_free(EA);
+}
+
+static void gen_lxvdsx(DisasContext *ctx)
+{
+ TCGv EA;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ gen_qemu_ld64_i64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
+ tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode)));
+ tcg_temp_free(EA);
+}
+
+static void gen_lxvw4x(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
+ TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+
+ gen_addr_reg_index(ctx, EA);
+ if (ctx->le_mode) {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEQ);
+ tcg_gen_shri_i64(t1, t0, 32);
+ tcg_gen_deposit_i64(xth, t1, t0, 32, 32);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEQ);
+ tcg_gen_shri_i64(t1, t0, 32);
+ tcg_gen_deposit_i64(xtl, t1, t0, 32, 32);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ } else {
+ tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
+ }
+ tcg_temp_free(EA);
+}
+
+static void gen_bswap16x8(TCGv_i64 outh, TCGv_i64 outl,
+ TCGv_i64 inh, TCGv_i64 inl)
+{
+ TCGv_i64 mask = tcg_const_i64(0x00FF00FF00FF00FF);
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ /* outh = ((inh & mask) << 8) | ((inh >> 8) & mask) */
+ tcg_gen_and_i64(t0, inh, mask);
+ tcg_gen_shli_i64(t0, t0, 8);
+ tcg_gen_shri_i64(t1, inh, 8);
+ tcg_gen_and_i64(t1, t1, mask);
+ tcg_gen_or_i64(outh, t0, t1);
+
+ /* outl = ((inl & mask) << 8) | ((inl >> 8) & mask) */
+ tcg_gen_and_i64(t0, inl, mask);
+ tcg_gen_shli_i64(t0, t0, 8);
+ tcg_gen_shri_i64(t1, inl, 8);
+ tcg_gen_and_i64(t1, t1, mask);
+ tcg_gen_or_i64(outl, t0, t1);
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(mask);
+}
+
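+/*
+ * Byte-swap each 32-bit word of the 128-bit value: bswap each doubleword,
+ * then swap its two word halves back into place.
+ */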
+static void gen_bswap32x4(TCGv_i64 outh, TCGv_i64 outl,
+ TCGv_i64 inh, TCGv_i64 inl)
+{
+ TCGv_i64 hi = tcg_temp_new_i64();
+ TCGv_i64 lo = tcg_temp_new_i64();
+
+ tcg_gen_bswap64_i64(hi, inh);
+ tcg_gen_bswap64_i64(lo, inl);
+ tcg_gen_shri_i64(outh, hi, 32);
+ tcg_gen_deposit_i64(outh, outh, hi, 32, 32);
+ tcg_gen_shri_i64(outl, lo, 32);
+ tcg_gen_deposit_i64(outl, outl, lo, 32, 32);
+
+ tcg_temp_free_i64(hi);
+ tcg_temp_free_i64(lo);
+}
+static void gen_lxvh8x(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
+ TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
+ if (ctx->le_mode) {
+ gen_bswap16x8(xth, xtl, xth, xtl);
+ }
+ tcg_temp_free(EA);
+}
+
+static void gen_lxvb16x(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
+ TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_temp_free(EA);
+}
+
+#define VSX_STORE_SCALAR(name, operation) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##operation(ctx, cpu_vsrh(xS(ctx->opcode)), EA); \
+ tcg_temp_free(EA); \
+}
+
+VSX_STORE_SCALAR(stxsdx, st64_i64)
+
+VSX_STORE_SCALAR(stxsibx, st8_i64)
+VSX_STORE_SCALAR(stxsihx, st16_i64)
+VSX_STORE_SCALAR(stxsiwx, st32_i64)
+VSX_STORE_SCALAR(stxsspx, st32fs)
+
+static void gen_stxvd2x(DisasContext *ctx)
+{
+ TCGv EA;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ gen_qemu_st64_i64(ctx, cpu_vsrh(xS(ctx->opcode)), EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_st64_i64(ctx, cpu_vsrl(xS(ctx->opcode)), EA);
+ tcg_temp_free(EA);
+}
+
+static void gen_stxvw4x(DisasContext *ctx)
+{
+ TCGv_i64 xsh = cpu_vsrh(xS(ctx->opcode));
+ TCGv_i64 xsl = cpu_vsrl(xS(ctx->opcode));
+ TCGv EA;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ if (ctx->le_mode) {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ tcg_gen_shri_i64(t0, xsh, 32);
+ tcg_gen_deposit_i64(t1, t0, xsh, 32, 32);
+ tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_shri_i64(t0, xsl, 32);
+ tcg_gen_deposit_i64(t1, t0, xsl, 32, 32);
+ tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEQ);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ } else {
+ tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
+ }
+ tcg_temp_free(EA);
+}
+
+static void gen_stxvh8x(DisasContext *ctx)
+{
+ TCGv_i64 xsh = cpu_vsrh(xS(ctx->opcode));
+ TCGv_i64 xsl = cpu_vsrl(xS(ctx->opcode));
+ TCGv EA;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ if (ctx->le_mode) {
+ TCGv_i64 outh = tcg_temp_new_i64();
+ TCGv_i64 outl = tcg_temp_new_i64();
+
+ gen_bswap16x8(outh, outl, xsh, xsl);
+ tcg_gen_qemu_st_i64(outh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_st_i64(outl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_temp_free_i64(outh);
+ tcg_temp_free_i64(outl);
+ } else {
+ tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
+ }
+ tcg_temp_free(EA);
+}
+
+static void gen_stxvb16x(DisasContext *ctx)
+{
+ TCGv_i64 xsh = cpu_vsrh(xS(ctx->opcode));
+ TCGv_i64 xsl = cpu_vsrl(xS(ctx->opcode));
+ TCGv EA;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_temp_free(EA);
+}
+
+#define MV_VSRW(name, tcgop1, tcgop2, target, source) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ if (xS(ctx->opcode) < 32) { \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ } else { \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ } \
+ TCGv_i64 tmp = tcg_temp_new_i64(); \
+ tcg_gen_##tcgop1(tmp, source); \
+ tcg_gen_##tcgop2(target, tmp); \
+ tcg_temp_free_i64(tmp); \
+}
+
+
+MV_VSRW(mfvsrwz, ext32u_i64, trunc_i64_tl, cpu_gpr[rA(ctx->opcode)], \
+ cpu_vsrh(xS(ctx->opcode)))
+MV_VSRW(mtvsrwa, extu_tl_i64, ext32s_i64, cpu_vsrh(xT(ctx->opcode)), \
+ cpu_gpr[rA(ctx->opcode)])
+MV_VSRW(mtvsrwz, extu_tl_i64, ext32u_i64, cpu_vsrh(xT(ctx->opcode)), \
+ cpu_gpr[rA(ctx->opcode)])
+
+#if defined(TARGET_PPC64)
+#define MV_VSRD(name, target, source) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ if (xS(ctx->opcode) < 32) { \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ } else { \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ } \
+ tcg_gen_mov_i64(target, source); \
+}
+
+MV_VSRD(mfvsrd, cpu_gpr[rA(ctx->opcode)], cpu_vsrh(xS(ctx->opcode)))
+MV_VSRD(mtvsrd, cpu_vsrh(xT(ctx->opcode)), cpu_gpr[rA(ctx->opcode)])
+
+static void gen_mfvsrld(DisasContext *ctx)
+{
+ if (xS(ctx->opcode) < 32) {
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ }
+
+ tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], cpu_vsrl(xS(ctx->opcode)));
+}
+
+static void gen_mtvsrdd(DisasContext *ctx)
+{
+ if (xT(ctx->opcode) < 32) {
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ }
+
+ if (!rA(ctx->opcode)) {
+ tcg_gen_movi_i64(cpu_vsrh(xT(ctx->opcode)), 0);
+ } else {
+ tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_gpr[rA(ctx->opcode)]);
+ }
+
+ tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_gpr[rB(ctx->opcode)]);
+}
+
+static void gen_mtvsrws(DisasContext *ctx)
+{
+ if (xT(ctx->opcode) < 32) {
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ }
+
+ tcg_gen_deposit_i64(cpu_vsrl(xT(ctx->opcode)), cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rA(ctx->opcode)], 32, 32);
+ tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrl(xT(ctx->opcode)));
+}
+
+#endif
+
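+/*
+ * xxpermdi: bit 1 of DM picks which doubleword of xA becomes the target's
+ * high doubleword, bit 0 picks which doubleword of xB becomes the low one;
+ * the temporaries guard against xT aliasing xA or xB.
+ */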
+static void gen_xxpermdi(DisasContext *ctx)
+{
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+
+ if (unlikely((xT(ctx->opcode) == xA(ctx->opcode)) ||
+ (xT(ctx->opcode) == xB(ctx->opcode)))) {
+ TCGv_i64 xh, xl;
+
+ xh = tcg_temp_new_i64();
+ xl = tcg_temp_new_i64();
+
+ if ((DM(ctx->opcode) & 2) == 0) {
+ tcg_gen_mov_i64(xh, cpu_vsrh(xA(ctx->opcode)));
+ } else {
+ tcg_gen_mov_i64(xh, cpu_vsrl(xA(ctx->opcode)));
+ }
+ if ((DM(ctx->opcode) & 1) == 0) {
+ tcg_gen_mov_i64(xl, cpu_vsrh(xB(ctx->opcode)));
+ } else {
+ tcg_gen_mov_i64(xl, cpu_vsrl(xB(ctx->opcode)));
+ }
+
+ tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xh);
+ tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xl);
+
+ tcg_temp_free_i64(xh);
+ tcg_temp_free_i64(xl);
+ } else {
+ if ((DM(ctx->opcode) & 2) == 0) {
+ tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrh(xA(ctx->opcode)));
+ } else {
+ tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrl(xA(ctx->opcode)));
+ }
+ if ((DM(ctx->opcode) & 1) == 0) {
+ tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xB(ctx->opcode)));
+ } else {
+ tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrl(xB(ctx->opcode)));
+ }
+ }
+}
+
+#define OP_ABS 1
+#define OP_NABS 2
+#define OP_NEG 3
+#define OP_CPSGN 4
+#define SGN_MASK_DP 0x8000000000000000ull
+#define SGN_MASK_SP 0x8000000080000000ull
+
+#define VSX_SCALAR_MOVE(name, op, sgn_mask) \
+static void glue(gen_, name)(DisasContext * ctx) \
+ { \
+ TCGv_i64 xb, sgm; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xb = tcg_temp_new_i64(); \
+ sgm = tcg_temp_new_i64(); \
+ tcg_gen_mov_i64(xb, cpu_vsrh(xB(ctx->opcode))); \
+ tcg_gen_movi_i64(sgm, sgn_mask); \
+ switch (op) { \
+ case OP_ABS: { \
+ tcg_gen_andc_i64(xb, xb, sgm); \
+ break; \
+ } \
+ case OP_NABS: { \
+ tcg_gen_or_i64(xb, xb, sgm); \
+ break; \
+ } \
+ case OP_NEG: { \
+ tcg_gen_xor_i64(xb, xb, sgm); \
+ break; \
+ } \
+ case OP_CPSGN: { \
+ TCGv_i64 xa = tcg_temp_new_i64(); \
+ tcg_gen_mov_i64(xa, cpu_vsrh(xA(ctx->opcode))); \
+ tcg_gen_and_i64(xa, xa, sgm); \
+ tcg_gen_andc_i64(xb, xb, sgm); \
+ tcg_gen_or_i64(xb, xb, xa); \
+ tcg_temp_free_i64(xa); \
+ break; \
+ } \
+ } \
+ tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xb); \
+ tcg_temp_free_i64(xb); \
+ tcg_temp_free_i64(sgm); \
+ }
+
+VSX_SCALAR_MOVE(xsabsdp, OP_ABS, SGN_MASK_DP)
+VSX_SCALAR_MOVE(xsnabsdp, OP_NABS, SGN_MASK_DP)
+VSX_SCALAR_MOVE(xsnegdp, OP_NEG, SGN_MASK_DP)
+VSX_SCALAR_MOVE(xscpsgndp, OP_CPSGN, SGN_MASK_DP)
+
+#define VSX_VECTOR_MOVE(name, op, sgn_mask) \
+static void glue(gen_, name)(DisasContext * ctx) \
+ { \
+ TCGv_i64 xbh, xbl, sgm; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xbh = tcg_temp_new_i64(); \
+ xbl = tcg_temp_new_i64(); \
+ sgm = tcg_temp_new_i64(); \
+ tcg_gen_mov_i64(xbh, cpu_vsrh(xB(ctx->opcode))); \
+ tcg_gen_mov_i64(xbl, cpu_vsrl(xB(ctx->opcode))); \
+ tcg_gen_movi_i64(sgm, sgn_mask); \
+ switch (op) { \
+ case OP_ABS: { \
+ tcg_gen_andc_i64(xbh, xbh, sgm); \
+ tcg_gen_andc_i64(xbl, xbl, sgm); \
+ break; \
+ } \
+ case OP_NABS: { \
+ tcg_gen_or_i64(xbh, xbh, sgm); \
+ tcg_gen_or_i64(xbl, xbl, sgm); \
+ break; \
+ } \
+ case OP_NEG: { \
+ tcg_gen_xor_i64(xbh, xbh, sgm); \
+ tcg_gen_xor_i64(xbl, xbl, sgm); \
+ break; \
+ } \
+ case OP_CPSGN: { \
+ TCGv_i64 xah = tcg_temp_new_i64(); \
+ TCGv_i64 xal = tcg_temp_new_i64(); \
+ tcg_gen_mov_i64(xah, cpu_vsrh(xA(ctx->opcode))); \
+ tcg_gen_mov_i64(xal, cpu_vsrl(xA(ctx->opcode))); \
+ tcg_gen_and_i64(xah, xah, sgm); \
+ tcg_gen_and_i64(xal, xal, sgm); \
+ tcg_gen_andc_i64(xbh, xbh, sgm); \
+ tcg_gen_andc_i64(xbl, xbl, sgm); \
+ tcg_gen_or_i64(xbh, xbh, xah); \
+ tcg_gen_or_i64(xbl, xbl, xal); \
+ tcg_temp_free_i64(xah); \
+ tcg_temp_free_i64(xal); \
+ break; \
+ } \
+ } \
+ tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xbh); \
+ tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xbl); \
+ tcg_temp_free_i64(xbh); \
+ tcg_temp_free_i64(xbl); \
+ tcg_temp_free_i64(sgm); \
+ }
+
+VSX_VECTOR_MOVE(xvabsdp, OP_ABS, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvnabsdp, OP_NABS, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvnegdp, OP_NEG, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvcpsgndp, OP_CPSGN, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvabssp, OP_ABS, SGN_MASK_SP)
+VSX_VECTOR_MOVE(xvnabssp, OP_NABS, SGN_MASK_SP)
+VSX_VECTOR_MOVE(xvnegsp, OP_NEG, SGN_MASK_SP)
+VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP)
+
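+/*
+ * GEN_VSX_HELPER_2 passes the raw opcode word to the helper, which decodes
+ * the VSX operands itself; GEN_VSX_HELPER_XT_XB_ENV instead hands the
+ * target and source doublewords to the helper directly.
+ */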
+#define GEN_VSX_HELPER_2(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext * ctx) \
+{ \
+ TCGv_i32 opc; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ opc = tcg_const_i32(ctx->opcode); \
+ gen_helper_##name(cpu_env, opc); \
+ tcg_temp_free_i32(opc); \
+}
+
+#define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext * ctx) \
+{ \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ gen_helper_##name(cpu_vsrh(xT(ctx->opcode)), cpu_env, \
+ cpu_vsrh(xB(ctx->opcode))); \
+}
+
+GEN_VSX_HELPER_2(xsadddp, 0x00, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xssubdp, 0x00, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsmuldp, 0x00, 0x06, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsdivdp, 0x00, 0x07, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsredp, 0x14, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xstdivdp, 0x14, 0x07, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsmaddadp, 0x04, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsmaddmdp, 0x04, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsmsubadp, 0x04, 0x06, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsmsubmdp, 0x04, 0x07, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsnmaddadp, 0x04, 0x14, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsnmaddmdp, 0x04, 0x15, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsnmsubadp, 0x04, 0x16, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsnmsubmdp, 0x04, 0x17, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xscmpeqdp, 0x0C, 0x00, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xscmpgtdp, 0x0C, 0x01, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xscmpgedp, 0x0C, 0x02, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xscmpnedp, 0x0C, 0x03, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xscmpudp, 0x0C, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsmindp, 0x00, 0x15, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
+GEN_VSX_HELPER_XT_XB_ENV(xscvdpspn, 0x16, 0x10, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX)
+GEN_VSX_HELPER_XT_XB_ENV(xscvspdpn, 0x16, 0x14, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xscvdpsxds, 0x10, 0x15, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xscvdpsxws, 0x10, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xscvdpuxds, 0x10, 0x14, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xscvdpuxws, 0x10, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xscvsxddp, 0x10, 0x17, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xscvuxddp, 0x10, 0x16, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsrdpi, 0x12, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsrdpic, 0x16, 0x06, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsrdpim, 0x12, 0x07, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_XT_XB_ENV(xsrsp, 0x12, 0x11, 0, PPC2_VSX207)
+
+GEN_VSX_HELPER_2(xsaddsp, 0x00, 0x00, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xssubsp, 0x00, 0x01, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsmulsp, 0x00, 0x02, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsdivsp, 0x00, 0x03, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsresp, 0x14, 0x01, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsmaddasp, 0x04, 0x00, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsmaddmsp, 0x04, 0x01, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsmsubasp, 0x04, 0x02, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsmsubmsp, 0x04, 0x03, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsnmaddasp, 0x04, 0x10, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsnmaddmsp, 0x04, 0x11, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsnmsubasp, 0x04, 0x12, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsnmsubmsp, 0x04, 0x13, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207)
+
+GEN_VSX_HELPER_2(xvadddp, 0x00, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvsubdp, 0x00, 0x0D, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmuldp, 0x00, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvdivdp, 0x00, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvredp, 0x14, 0x0D, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvsqrtdp, 0x16, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrsqrtedp, 0x14, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvtdivdp, 0x14, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvtsqrtdp, 0x14, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmaddadp, 0x04, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmaddmdp, 0x04, 0x0D, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmsubadp, 0x04, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmsubmdp, 0x04, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvnmaddadp, 0x04, 0x1C, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvnmaddmdp, 0x04, 0x1D, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvnmsubadp, 0x04, 0x1E, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvnmsubmdp, 0x04, 0x1F, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmaxdp, 0x00, 0x1C, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmindp, 0x00, 0x1D, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcmpeqdp, 0x0C, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcmpgtdp, 0x0C, 0x0D, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcmpgedp, 0x0C, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcmpnedp, 0x0C, 0x0F, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xvcvdpsp, 0x12, 0x18, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvdpsxds, 0x10, 0x1D, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvdpsxws, 0x10, 0x0D, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvdpuxds, 0x10, 0x1C, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvdpuxws, 0x10, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvsxddp, 0x10, 0x1F, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvuxddp, 0x10, 0x1E, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvsxwdp, 0x10, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvuxwdp, 0x10, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrdpi, 0x12, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrdpic, 0x16, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrdpim, 0x12, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrdpip, 0x12, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrdpiz, 0x12, 0x0D, 0, PPC2_VSX)
+
+GEN_VSX_HELPER_2(xvaddsp, 0x00, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvsubsp, 0x00, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmulsp, 0x00, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvdivsp, 0x00, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvresp, 0x14, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvsqrtsp, 0x16, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrsqrtesp, 0x14, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvtdivsp, 0x14, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvtsqrtsp, 0x14, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmaddasp, 0x04, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmaddmsp, 0x04, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmsubasp, 0x04, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmsubmsp, 0x04, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvnmaddasp, 0x04, 0x18, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvnmaddmsp, 0x04, 0x19, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvnmsubasp, 0x04, 0x1A, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvnmsubmsp, 0x04, 0x1B, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmaxsp, 0x00, 0x18, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvminsp, 0x00, 0x19, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcmpeqsp, 0x0C, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcmpgtsp, 0x0C, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcmpgesp, 0x0C, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcmpnesp, 0x0C, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvspdp, 0x12, 0x1C, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvspsxds, 0x10, 0x19, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvspsxws, 0x10, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvspuxds, 0x10, 0x18, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvspuxws, 0x10, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvsxdsp, 0x10, 0x1B, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvuxdsp, 0x10, 0x1A, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvsxwsp, 0x10, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvuxwsp, 0x10, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrspi, 0x12, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrspic, 0x16, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrspim, 0x12, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX)
+
+static void gen_xxbrd(DisasContext *ctx)
+{
+ TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
+ TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
+ TCGv_i64 xbh = cpu_vsrh(xB(ctx->opcode));
+ TCGv_i64 xbl = cpu_vsrl(xB(ctx->opcode));
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ tcg_gen_bswap64_i64(xth, xbh);
+ tcg_gen_bswap64_i64(xtl, xbl);
+}
+
+static void gen_xxbrh(DisasContext *ctx)
+{
+ TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
+ TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
+ TCGv_i64 xbh = cpu_vsrh(xB(ctx->opcode));
+ TCGv_i64 xbl = cpu_vsrl(xB(ctx->opcode));
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_bswap16x8(xth, xtl, xbh, xbl);
+}
+
+static void gen_xxbrq(DisasContext *ctx)
+{
+ TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
+ TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
+ TCGv_i64 xbh = cpu_vsrh(xB(ctx->opcode));
+ TCGv_i64 xbl = cpu_vsrl(xB(ctx->opcode));
+ TCGv_i64 t0 = tcg_temp_new_i64();
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ tcg_gen_bswap64_i64(t0, xbl);
+ tcg_gen_bswap64_i64(xtl, xbh);
+ tcg_gen_mov_i64(xth, t0);
+ tcg_temp_free_i64(t0);
+}
+
+static void gen_xxbrw(DisasContext *ctx)
+{
+ TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
+ TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
+ TCGv_i64 xbh = cpu_vsrh(xB(ctx->opcode));
+ TCGv_i64 xbl = cpu_vsrl(xB(ctx->opcode));
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_bswap32x4(xth, xtl, xbh, xbl);
+}
+
+#define VSX_LOGICAL(name, tcg_op) \
+static void glue(gen_, name)(DisasContext * ctx) \
+ { \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ tcg_op(cpu_vsrh(xT(ctx->opcode)), cpu_vsrh(xA(ctx->opcode)), \
+ cpu_vsrh(xB(ctx->opcode))); \
+ tcg_op(cpu_vsrl(xT(ctx->opcode)), cpu_vsrl(xA(ctx->opcode)), \
+ cpu_vsrl(xB(ctx->opcode))); \
+ }
+
+VSX_LOGICAL(xxland, tcg_gen_and_i64)
+VSX_LOGICAL(xxlandc, tcg_gen_andc_i64)
+VSX_LOGICAL(xxlor, tcg_gen_or_i64)
+VSX_LOGICAL(xxlxor, tcg_gen_xor_i64)
+VSX_LOGICAL(xxlnor, tcg_gen_nor_i64)
+VSX_LOGICAL(xxleqv, tcg_gen_eqv_i64)
+VSX_LOGICAL(xxlnand, tcg_gen_nand_i64)
+VSX_LOGICAL(xxlorc, tcg_gen_orc_i64)
+
+#define VSX_XXMRG(name, high) \
+static void glue(gen_, name)(DisasContext * ctx) \
+ { \
+ TCGv_i64 a0, a1, b0, b1; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ a0 = tcg_temp_new_i64(); \
+ a1 = tcg_temp_new_i64(); \
+ b0 = tcg_temp_new_i64(); \
+ b1 = tcg_temp_new_i64(); \
+ if (high) { \
+ tcg_gen_mov_i64(a0, cpu_vsrh(xA(ctx->opcode))); \
+ tcg_gen_mov_i64(a1, cpu_vsrh(xA(ctx->opcode))); \
+ tcg_gen_mov_i64(b0, cpu_vsrh(xB(ctx->opcode))); \
+ tcg_gen_mov_i64(b1, cpu_vsrh(xB(ctx->opcode))); \
+ } else { \
+ tcg_gen_mov_i64(a0, cpu_vsrl(xA(ctx->opcode))); \
+ tcg_gen_mov_i64(a1, cpu_vsrl(xA(ctx->opcode))); \
+ tcg_gen_mov_i64(b0, cpu_vsrl(xB(ctx->opcode))); \
+ tcg_gen_mov_i64(b1, cpu_vsrl(xB(ctx->opcode))); \
+ } \
+ tcg_gen_shri_i64(a0, a0, 32); \
+ tcg_gen_shri_i64(b0, b0, 32); \
+ tcg_gen_deposit_i64(cpu_vsrh(xT(ctx->opcode)), \
+ b0, a0, 32, 32); \
+ tcg_gen_deposit_i64(cpu_vsrl(xT(ctx->opcode)), \
+ b1, a1, 32, 32); \
+ tcg_temp_free_i64(a0); \
+ tcg_temp_free_i64(a1); \
+ tcg_temp_free_i64(b0); \
+ tcg_temp_free_i64(b1); \
+ }
+
+VSX_XXMRG(xxmrghw, 1)
+VSX_XXMRG(xxmrglw, 0)
+
+static void gen_xxsel(DisasContext * ctx)
+{
+ TCGv_i64 a, b, c;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ a = tcg_temp_new_i64();
+ b = tcg_temp_new_i64();
+ c = tcg_temp_new_i64();
+
+ tcg_gen_mov_i64(a, cpu_vsrh(xA(ctx->opcode)));
+ tcg_gen_mov_i64(b, cpu_vsrh(xB(ctx->opcode)));
+ tcg_gen_mov_i64(c, cpu_vsrh(xC(ctx->opcode)));
+
+ tcg_gen_and_i64(b, b, c);
+ tcg_gen_andc_i64(a, a, c);
+ tcg_gen_or_i64(cpu_vsrh(xT(ctx->opcode)), a, b);
+
+ tcg_gen_mov_i64(a, cpu_vsrl(xA(ctx->opcode)));
+ tcg_gen_mov_i64(b, cpu_vsrl(xB(ctx->opcode)));
+ tcg_gen_mov_i64(c, cpu_vsrl(xC(ctx->opcode)));
+
+ tcg_gen_and_i64(b, b, c);
+ tcg_gen_andc_i64(a, a, c);
+ tcg_gen_or_i64(cpu_vsrl(xT(ctx->opcode)), a, b);
+
+ tcg_temp_free_i64(a);
+ tcg_temp_free_i64(b);
+ tcg_temp_free_i64(c);
+}
+
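+/*
+ * xxspltw: UIM selects one of the four words of xB (bit 1 picks the
+ * doubleword, bit 0 the word within it) and splats it across xT.
+ */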
+static void gen_xxspltw(DisasContext *ctx)
+{
+ TCGv_i64 b, b2;
+ TCGv_i64 vsr = (UIM(ctx->opcode) & 2) ?
+ cpu_vsrl(xB(ctx->opcode)) :
+ cpu_vsrh(xB(ctx->opcode));
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+
+ b = tcg_temp_new_i64();
+ b2 = tcg_temp_new_i64();
+
+ if (UIM(ctx->opcode) & 1) {
+ tcg_gen_ext32u_i64(b, vsr);
+ } else {
+ tcg_gen_shri_i64(b, vsr, 32);
+ }
+
+ tcg_gen_shli_i64(b2, b, 32);
+ tcg_gen_or_i64(cpu_vsrh(xT(ctx->opcode)), b, b2);
+ tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode)));
+
+ tcg_temp_free_i64(b);
+ tcg_temp_free_i64(b2);
+}
+
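+/* Replicate the low byte of x across all eight bytes of a 64-bit value. */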
+#define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff))
+
+static void gen_xxspltib(DisasContext *ctx)
+{
+ unsigned char uim8 = IMM8(ctx->opcode);
+ if (xS(ctx->opcode) < 32) {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ }
+ tcg_gen_movi_i64(cpu_vsrh(xT(ctx->opcode)), pattern(uim8));
+ tcg_gen_movi_i64(cpu_vsrl(xT(ctx->opcode)), pattern(uim8));
+}
+
+static void gen_xxsldwi(DisasContext *ctx)
+{
+ TCGv_i64 xth, xtl;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+
+ switch (SHW(ctx->opcode)) {
+ case 0: {
+ tcg_gen_mov_i64(xth, cpu_vsrh(xA(ctx->opcode)));
+ tcg_gen_mov_i64(xtl, cpu_vsrl(xA(ctx->opcode)));
+ break;
+ }
+ case 1: {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_mov_i64(xth, cpu_vsrh(xA(ctx->opcode)));
+ tcg_gen_shli_i64(xth, xth, 32);
+ tcg_gen_mov_i64(t0, cpu_vsrl(xA(ctx->opcode)));
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_or_i64(xth, xth, t0);
+ tcg_gen_mov_i64(xtl, cpu_vsrl(xA(ctx->opcode)));
+ tcg_gen_shli_i64(xtl, xtl, 32);
+ tcg_gen_mov_i64(t0, cpu_vsrh(xB(ctx->opcode)));
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_or_i64(xtl, xtl, t0);
+ tcg_temp_free_i64(t0);
+ break;
+ }
+ case 2: {
+ tcg_gen_mov_i64(xth, cpu_vsrl(xA(ctx->opcode)));
+ tcg_gen_mov_i64(xtl, cpu_vsrh(xB(ctx->opcode)));
+ break;
+ }
+ case 3: {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_mov_i64(xth, cpu_vsrl(xA(ctx->opcode)));
+ tcg_gen_shli_i64(xth, xth, 32);
+ tcg_gen_mov_i64(t0, cpu_vsrh(xB(ctx->opcode)));
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_or_i64(xth, xth, t0);
+ tcg_gen_mov_i64(xtl, cpu_vsrh(xB(ctx->opcode)));
+ tcg_gen_shli_i64(xtl, xtl, 32);
+ tcg_gen_mov_i64(t0, cpu_vsrl(xB(ctx->opcode)));
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_or_i64(xtl, xtl, t0);
+ tcg_temp_free_i64(t0);
+ break;
+ }
+ }
+
+ tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xth);
+ tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xtl);
+
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+}
+
+#undef GEN_XX2FORM
+#undef GEN_XX3FORM
+#undef GEN_XX2IFORM
+#undef GEN_XX3_RC_FORM
+#undef GEN_XX3FORM_DM
+#undef VSX_LOGICAL
diff --git a/target-ppc/translate/vsx-ops.inc.c b/target-ppc/translate/vsx-ops.inc.c
new file mode 100644
index 0000000000..3d9104155a
--- /dev/null
+++ b/target-ppc/translate/vsx-ops.inc.c
@@ -0,0 +1,300 @@
+GEN_HANDLER_E(lxsdx, 0x1F, 0x0C, 0x12, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(lxsiwax, 0x1F, 0x0C, 0x02, 0, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(lxsiwzx, 0x1F, 0x0C, 0x00, 0, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(lxsibzx, 0x1F, 0x0D, 0x18, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(lxsihzx, 0x1F, 0x0D, 0x19, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(lxsspx, 0x1F, 0x0C, 0x10, 0, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(lxvd2x, 0x1F, 0x0C, 0x1A, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(lxvdsx, 0x1F, 0x0C, 0x0A, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(lxvw4x, 0x1F, 0x0C, 0x18, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(lxvh8x, 0x1F, 0x0C, 0x19, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(lxvb16x, 0x1F, 0x0C, 0x1B, 0, PPC_NONE, PPC2_ISA300),
+
+GEN_HANDLER_E(stxsdx, 0x1F, 0xC, 0x16, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(stxsibx, 0x1F, 0xD, 0x1C, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(stxsihx, 0x1F, 0xD, 0x1D, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(stxsiwx, 0x1F, 0xC, 0x04, 0, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(stxsspx, 0x1F, 0xC, 0x14, 0, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(stxvd2x, 0x1F, 0xC, 0x1E, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(stxvw4x, 0x1F, 0xC, 0x1C, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(stxvh8x, 0x1F, 0x0C, 0x1D, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(stxvb16x, 0x1F, 0x0C, 0x1F, 0, PPC_NONE, PPC2_ISA300),
+
+GEN_HANDLER_E(mfvsrwz, 0x1F, 0x13, 0x03, 0x0000F800, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(mtvsrwa, 0x1F, 0x13, 0x06, 0x0000F800, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(mtvsrwz, 0x1F, 0x13, 0x07, 0x0000F800, PPC_NONE, PPC2_VSX207),
+#if defined(TARGET_PPC64)
+GEN_HANDLER_E(mfvsrd, 0x1F, 0x13, 0x01, 0x0000F800, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(mtvsrd, 0x1F, 0x13, 0x05, 0x0000F800, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(mfvsrld, 0x1F, 0x13, 0x09, 0x0000F800, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(mtvsrdd, 0x1F, 0x13, 0x0D, 0x0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(mtvsrws, 0x1F, 0x13, 0x0C, 0x0000F800, PPC_NONE, PPC2_ISA300),
+#endif
+
+#define GEN_XX1FORM(name, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2)
+
+#define GEN_XX2FORM(name, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2)
+
+#define GEN_XX2FORM_EO(name, opc2, opc3, opc4, fl2) \
+GEN_HANDLER2_E_2(name, #name, 0x3C, opc2 | 0, opc3, opc4, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E_2(name, #name, 0x3C, opc2 | 1, opc3, opc4, 0, PPC_NONE, fl2)
+
+#define GEN_XX3FORM(name, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 2, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 3, opc3, 0, PPC_NONE, fl2)
+
+#define GEN_XX2IFORM(name, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 1, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 1, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 2, opc3, 1, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 3, opc3, 1, PPC_NONE, fl2)
+
+#define GEN_XX3_RC_FORM(name, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x00, opc3 | 0x00, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x01, opc3 | 0x00, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x02, opc3 | 0x00, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x03, opc3 | 0x00, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x00, opc3 | 0x10, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x01, opc3 | 0x10, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x02, opc3 | 0x10, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x03, opc3 | 0x10, 0, PPC_NONE, fl2)
+
+#define GEN_XX3FORM_DM(name, opc2, opc3) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x0C, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x0C, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x0C, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x0C, 0, PPC_NONE, PPC2_VSX)
+
+GEN_XX2FORM(xsabsdp, 0x12, 0x15, PPC2_VSX),
+GEN_XX2FORM(xsnabsdp, 0x12, 0x16, PPC2_VSX),
+GEN_XX2FORM(xsnegdp, 0x12, 0x17, PPC2_VSX),
+GEN_XX3FORM(xscpsgndp, 0x00, 0x16, PPC2_VSX),
+
+GEN_XX2FORM(xvabsdp, 0x12, 0x1D, PPC2_VSX),
+GEN_XX2FORM(xvnabsdp, 0x12, 0x1E, PPC2_VSX),
+GEN_XX2FORM(xvnegdp, 0x12, 0x1F, PPC2_VSX),
+GEN_XX3FORM(xvcpsgndp, 0x00, 0x1E, PPC2_VSX),
+GEN_XX2FORM(xvabssp, 0x12, 0x19, PPC2_VSX),
+GEN_XX2FORM(xvnabssp, 0x12, 0x1A, PPC2_VSX),
+GEN_XX2FORM(xvnegsp, 0x12, 0x1B, PPC2_VSX),
+GEN_XX3FORM(xvcpsgnsp, 0x00, 0x1A, PPC2_VSX),
+
+GEN_XX3FORM(xsadddp, 0x00, 0x04, PPC2_VSX),
+GEN_XX3FORM(xssubdp, 0x00, 0x05, PPC2_VSX),
+GEN_XX3FORM(xsmuldp, 0x00, 0x06, PPC2_VSX),
+GEN_XX3FORM(xsdivdp, 0x00, 0x07, PPC2_VSX),
+GEN_XX2FORM(xsredp, 0x14, 0x05, PPC2_VSX),
+GEN_XX2FORM(xssqrtdp, 0x16, 0x04, PPC2_VSX),
+GEN_XX2FORM(xsrsqrtedp, 0x14, 0x04, PPC2_VSX),
+GEN_XX3FORM(xstdivdp, 0x14, 0x07, PPC2_VSX),
+GEN_XX2FORM(xstsqrtdp, 0x14, 0x06, PPC2_VSX),
+GEN_XX3FORM(xsmaddadp, 0x04, 0x04, PPC2_VSX),
+GEN_XX3FORM(xsmaddmdp, 0x04, 0x05, PPC2_VSX),
+GEN_XX3FORM(xsmsubadp, 0x04, 0x06, PPC2_VSX),
+GEN_XX3FORM(xsmsubmdp, 0x04, 0x07, PPC2_VSX),
+GEN_XX3FORM(xsnmaddadp, 0x04, 0x14, PPC2_VSX),
+GEN_XX3FORM(xsnmaddmdp, 0x04, 0x15, PPC2_VSX),
+GEN_XX3FORM(xsnmsubadp, 0x04, 0x16, PPC2_VSX),
+GEN_XX3FORM(xsnmsubmdp, 0x04, 0x17, PPC2_VSX),
+GEN_XX3FORM(xscmpeqdp, 0x0C, 0x00, PPC2_ISA300),
+GEN_XX3FORM(xscmpgtdp, 0x0C, 0x01, PPC2_ISA300),
+GEN_XX3FORM(xscmpgedp, 0x0C, 0x02, PPC2_ISA300),
+GEN_XX3FORM(xscmpnedp, 0x0C, 0x03, PPC2_ISA300),
+GEN_XX2IFORM(xscmpodp, 0x0C, 0x05, PPC2_VSX),
+GEN_XX2IFORM(xscmpudp, 0x0C, 0x04, PPC2_VSX),
+GEN_XX3FORM(xsmaxdp, 0x00, 0x14, PPC2_VSX),
+GEN_XX3FORM(xsmindp, 0x00, 0x15, PPC2_VSX),
+GEN_XX2FORM(xscvdpsp, 0x12, 0x10, PPC2_VSX),
+GEN_XX2FORM(xscvdpspn, 0x16, 0x10, PPC2_VSX207),
+GEN_XX2FORM(xscvspdp, 0x12, 0x14, PPC2_VSX),
+GEN_XX2FORM(xscvspdpn, 0x16, 0x14, PPC2_VSX207),
+GEN_XX2FORM(xscvdpsxds, 0x10, 0x15, PPC2_VSX),
+GEN_XX2FORM(xscvdpsxws, 0x10, 0x05, PPC2_VSX),
+GEN_XX2FORM(xscvdpuxds, 0x10, 0x14, PPC2_VSX),
+GEN_XX2FORM(xscvdpuxws, 0x10, 0x04, PPC2_VSX),
+GEN_XX2FORM(xscvsxddp, 0x10, 0x17, PPC2_VSX),
+GEN_XX2FORM(xscvuxddp, 0x10, 0x16, PPC2_VSX),
+GEN_XX2FORM(xsrdpi, 0x12, 0x04, PPC2_VSX),
+GEN_XX2FORM(xsrdpic, 0x16, 0x06, PPC2_VSX),
+GEN_XX2FORM(xsrdpim, 0x12, 0x07, PPC2_VSX),
+GEN_XX2FORM(xsrdpip, 0x12, 0x06, PPC2_VSX),
+GEN_XX2FORM(xsrdpiz, 0x12, 0x05, PPC2_VSX),
+
+GEN_XX3FORM(xsaddsp, 0x00, 0x00, PPC2_VSX207),
+GEN_XX3FORM(xssubsp, 0x00, 0x01, PPC2_VSX207),
+GEN_XX3FORM(xsmulsp, 0x00, 0x02, PPC2_VSX207),
+GEN_XX3FORM(xsdivsp, 0x00, 0x03, PPC2_VSX207),
+GEN_XX2FORM(xsresp, 0x14, 0x01, PPC2_VSX207),
+GEN_XX2FORM(xsrsp, 0x12, 0x11, PPC2_VSX207),
+GEN_XX2FORM(xssqrtsp, 0x16, 0x00, PPC2_VSX207),
+GEN_XX2FORM(xsrsqrtesp, 0x14, 0x00, PPC2_VSX207),
+GEN_XX3FORM(xsmaddasp, 0x04, 0x00, PPC2_VSX207),
+GEN_XX3FORM(xsmaddmsp, 0x04, 0x01, PPC2_VSX207),
+GEN_XX3FORM(xsmsubasp, 0x04, 0x02, PPC2_VSX207),
+GEN_XX3FORM(xsmsubmsp, 0x04, 0x03, PPC2_VSX207),
+GEN_XX3FORM(xsnmaddasp, 0x04, 0x10, PPC2_VSX207),
+GEN_XX3FORM(xsnmaddmsp, 0x04, 0x11, PPC2_VSX207),
+GEN_XX3FORM(xsnmsubasp, 0x04, 0x12, PPC2_VSX207),
+GEN_XX3FORM(xsnmsubmsp, 0x04, 0x13, PPC2_VSX207),
+GEN_XX2FORM(xscvsxdsp, 0x10, 0x13, PPC2_VSX207),
+GEN_XX2FORM(xscvuxdsp, 0x10, 0x12, PPC2_VSX207),
+
+GEN_XX3FORM(xvadddp, 0x00, 0x0C, PPC2_VSX),
+GEN_XX3FORM(xvsubdp, 0x00, 0x0D, PPC2_VSX),
+GEN_XX3FORM(xvmuldp, 0x00, 0x0E, PPC2_VSX),
+GEN_XX3FORM(xvdivdp, 0x00, 0x0F, PPC2_VSX),
+GEN_XX2FORM(xvredp, 0x14, 0x0D, PPC2_VSX),
+GEN_XX2FORM(xvsqrtdp, 0x16, 0x0C, PPC2_VSX),
+GEN_XX2FORM(xvrsqrtedp, 0x14, 0x0C, PPC2_VSX),
+GEN_XX3FORM(xvtdivdp, 0x14, 0x0F, PPC2_VSX),
+GEN_XX2FORM(xvtsqrtdp, 0x14, 0x0E, PPC2_VSX),
+GEN_XX3FORM(xvmaddadp, 0x04, 0x0C, PPC2_VSX),
+GEN_XX3FORM(xvmaddmdp, 0x04, 0x0D, PPC2_VSX),
+GEN_XX3FORM(xvmsubadp, 0x04, 0x0E, PPC2_VSX),
+GEN_XX3FORM(xvmsubmdp, 0x04, 0x0F, PPC2_VSX),
+GEN_XX3FORM(xvnmaddadp, 0x04, 0x1C, PPC2_VSX),
+GEN_XX3FORM(xvnmaddmdp, 0x04, 0x1D, PPC2_VSX),
+GEN_XX3FORM(xvnmsubadp, 0x04, 0x1E, PPC2_VSX),
+GEN_XX3FORM(xvnmsubmdp, 0x04, 0x1F, PPC2_VSX),
+GEN_XX3FORM(xvmaxdp, 0x00, 0x1C, PPC2_VSX),
+GEN_XX3FORM(xvmindp, 0x00, 0x1D, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpeqdp, 0x0C, 0x0C, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpgtdp, 0x0C, 0x0D, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpgedp, 0x0C, 0x0E, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpnedp, 0x0C, 0x0F, PPC2_ISA300),
+GEN_XX2FORM(xvcvdpsp, 0x12, 0x18, PPC2_VSX),
+GEN_XX2FORM(xvcvdpsxds, 0x10, 0x1D, PPC2_VSX),
+GEN_XX2FORM(xvcvdpsxws, 0x10, 0x0D, PPC2_VSX),
+GEN_XX2FORM(xvcvdpuxds, 0x10, 0x1C, PPC2_VSX),
+GEN_XX2FORM(xvcvdpuxws, 0x10, 0x0C, PPC2_VSX),
+GEN_XX2FORM(xvcvsxddp, 0x10, 0x1F, PPC2_VSX),
+GEN_XX2FORM(xvcvuxddp, 0x10, 0x1E, PPC2_VSX),
+GEN_XX2FORM(xvcvsxwdp, 0x10, 0x0F, PPC2_VSX),
+GEN_XX2FORM(xvcvuxwdp, 0x10, 0x0E, PPC2_VSX),
+GEN_XX2FORM(xvrdpi, 0x12, 0x0C, PPC2_VSX),
+GEN_XX2FORM(xvrdpic, 0x16, 0x0E, PPC2_VSX),
+GEN_XX2FORM(xvrdpim, 0x12, 0x0F, PPC2_VSX),
+GEN_XX2FORM(xvrdpip, 0x12, 0x0E, PPC2_VSX),
+GEN_XX2FORM(xvrdpiz, 0x12, 0x0D, PPC2_VSX),
+
+GEN_XX3FORM(xvaddsp, 0x00, 0x08, PPC2_VSX),
+GEN_XX3FORM(xvsubsp, 0x00, 0x09, PPC2_VSX),
+GEN_XX3FORM(xvmulsp, 0x00, 0x0A, PPC2_VSX),
+GEN_XX3FORM(xvdivsp, 0x00, 0x0B, PPC2_VSX),
+GEN_XX2FORM(xvresp, 0x14, 0x09, PPC2_VSX),
+GEN_XX2FORM(xvsqrtsp, 0x16, 0x08, PPC2_VSX),
+GEN_XX2FORM(xvrsqrtesp, 0x14, 0x08, PPC2_VSX),
+GEN_XX3FORM(xvtdivsp, 0x14, 0x0B, PPC2_VSX),
+GEN_XX2FORM(xvtsqrtsp, 0x14, 0x0A, PPC2_VSX),
+GEN_XX3FORM(xvmaddasp, 0x04, 0x08, PPC2_VSX),
+GEN_XX3FORM(xvmaddmsp, 0x04, 0x09, PPC2_VSX),
+GEN_XX3FORM(xvmsubasp, 0x04, 0x0A, PPC2_VSX),
+GEN_XX3FORM(xvmsubmsp, 0x04, 0x0B, PPC2_VSX),
+GEN_XX3FORM(xvnmaddasp, 0x04, 0x18, PPC2_VSX),
+GEN_XX3FORM(xvnmaddmsp, 0x04, 0x19, PPC2_VSX),
+GEN_XX3FORM(xvnmsubasp, 0x04, 0x1A, PPC2_VSX),
+GEN_XX3FORM(xvnmsubmsp, 0x04, 0x1B, PPC2_VSX),
+GEN_XX3FORM(xvmaxsp, 0x00, 0x18, PPC2_VSX),
+GEN_XX3FORM(xvminsp, 0x00, 0x19, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpeqsp, 0x0C, 0x08, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpgtsp, 0x0C, 0x09, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpgesp, 0x0C, 0x0A, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpnesp, 0x0C, 0x0B, PPC2_ISA300),
+GEN_XX2FORM(xvcvspdp, 0x12, 0x1C, PPC2_VSX),
+GEN_XX2FORM(xvcvspsxds, 0x10, 0x19, PPC2_VSX),
+GEN_XX2FORM(xvcvspsxws, 0x10, 0x09, PPC2_VSX),
+GEN_XX2FORM(xvcvspuxds, 0x10, 0x18, PPC2_VSX),
+GEN_XX2FORM(xvcvspuxws, 0x10, 0x08, PPC2_VSX),
+GEN_XX2FORM(xvcvsxdsp, 0x10, 0x1B, PPC2_VSX),
+GEN_XX2FORM(xvcvuxdsp, 0x10, 0x1A, PPC2_VSX),
+GEN_XX2FORM(xvcvsxwsp, 0x10, 0x0B, PPC2_VSX),
+GEN_XX2FORM(xvcvuxwsp, 0x10, 0x0A, PPC2_VSX),
+GEN_XX2FORM(xvrspi, 0x12, 0x08, PPC2_VSX),
+GEN_XX2FORM(xvrspic, 0x16, 0x0A, PPC2_VSX),
+GEN_XX2FORM(xvrspim, 0x12, 0x0B, PPC2_VSX),
+GEN_XX2FORM(xvrspip, 0x12, 0x0A, PPC2_VSX),
+GEN_XX2FORM(xvrspiz, 0x12, 0x09, PPC2_VSX),
+GEN_XX2FORM_EO(xxbrh, 0x16, 0x1D, 0x07, PPC2_ISA300),
+GEN_XX2FORM_EO(xxbrw, 0x16, 0x1D, 0x0F, PPC2_ISA300),
+GEN_XX2FORM_EO(xxbrd, 0x16, 0x1D, 0x17, PPC2_ISA300),
+GEN_XX2FORM_EO(xxbrq, 0x16, 0x1D, 0x1F, PPC2_ISA300),
+
+#define VSX_LOGICAL(name, opc2, opc3, fl2) \
+GEN_XX3FORM(name, opc2, opc3, fl2)
+
+VSX_LOGICAL(xxland, 0x8, 0x10, PPC2_VSX),
+VSX_LOGICAL(xxlandc, 0x8, 0x11, PPC2_VSX),
+VSX_LOGICAL(xxlor, 0x8, 0x12, PPC2_VSX),
+VSX_LOGICAL(xxlxor, 0x8, 0x13, PPC2_VSX),
+VSX_LOGICAL(xxlnor, 0x8, 0x14, PPC2_VSX),
+VSX_LOGICAL(xxleqv, 0x8, 0x17, PPC2_VSX207),
+VSX_LOGICAL(xxlnand, 0x8, 0x16, PPC2_VSX207),
+VSX_LOGICAL(xxlorc, 0x8, 0x15, PPC2_VSX207),
+GEN_XX3FORM(xxmrghw, 0x08, 0x02, PPC2_VSX),
+GEN_XX3FORM(xxmrglw, 0x08, 0x06, PPC2_VSX),
+GEN_XX2FORM(xxspltw, 0x08, 0x0A, PPC2_VSX),
+GEN_XX1FORM(xxspltib, 0x08, 0x0B, PPC2_ISA300),
+GEN_XX3FORM_DM(xxsldwi, 0x08, 0x00),
+
+#define GEN_XXSEL_ROW(opc3) \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x18, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x19, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1A, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1B, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1C, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1D, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1E, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1F, opc3, 0, PPC_NONE, PPC2_VSX), \
+
+GEN_XXSEL_ROW(0x00)
+GEN_XXSEL_ROW(0x01)
+GEN_XXSEL_ROW(0x02)
+GEN_XXSEL_ROW(0x03)
+GEN_XXSEL_ROW(0x04)
+GEN_XXSEL_ROW(0x05)
+GEN_XXSEL_ROW(0x06)
+GEN_XXSEL_ROW(0x07)
+GEN_XXSEL_ROW(0x08)
+GEN_XXSEL_ROW(0x09)
+GEN_XXSEL_ROW(0x0A)
+GEN_XXSEL_ROW(0x0B)
+GEN_XXSEL_ROW(0x0C)
+GEN_XXSEL_ROW(0x0D)
+GEN_XXSEL_ROW(0x0E)
+GEN_XXSEL_ROW(0x0F)
+GEN_XXSEL_ROW(0x10)
+GEN_XXSEL_ROW(0x11)
+GEN_XXSEL_ROW(0x12)
+GEN_XXSEL_ROW(0x13)
+GEN_XXSEL_ROW(0x14)
+GEN_XXSEL_ROW(0x15)
+GEN_XXSEL_ROW(0x16)
+GEN_XXSEL_ROW(0x17)
+GEN_XXSEL_ROW(0x18)
+GEN_XXSEL_ROW(0x19)
+GEN_XXSEL_ROW(0x1A)
+GEN_XXSEL_ROW(0x1B)
+GEN_XXSEL_ROW(0x1C)
+GEN_XXSEL_ROW(0x1D)
+GEN_XXSEL_ROW(0x1E)
+GEN_XXSEL_ROW(0x1F)
+
+GEN_XX3FORM_DM(xxpermdi, 0x08, 0x01),
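The duplicated rows these macros emit exist because parts of the instruction encoding overlap the bits the opcode tables index on: GEN_XX3FORM enumerates the two register-extension bits that fall in opc2, GEN_XX3FORM_DM additionally enumerates the 2-bit DM/SHW field that falls in opc3, and xxsel needs all 32 GEN_XXSEL_ROW instantiations because its third operand's register field occupies the opc3 positions. A trivial standalone check of the entry count this implies for xxsel (plain arithmetic, not QEMU code):

#include <stdio.h>

int main(void)
{
    int rows = 0x20;      /* GEN_XXSEL_ROW(0x00) .. GEN_XXSEL_ROW(0x1F)    */
    int per_row = 8;      /* opc2 values 0x18 .. 0x1F inside each row      */
    printf("xxsel table entries: %d\n", rows * per_row);   /* 256 */
    return 0;
}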
diff --git a/target-ppc/translate_init.c b/target-ppc/translate_init.c
index 7a9b15e7e1..626e03186c 100644
--- a/target-ppc/translate_init.c
+++ b/target-ppc/translate_init.c
@@ -7459,7 +7459,8 @@ enum BOOK3S_CPU_TYPE {
BOOK3S_CPU_POWER5PLUS,
BOOK3S_CPU_POWER6,
BOOK3S_CPU_POWER7,
- BOOK3S_CPU_POWER8
+ BOOK3S_CPU_POWER8,
+ BOOK3S_CPU_POWER9
};
static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn,
@@ -7469,7 +7470,6 @@ static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn,
TCGv_i32 t2 = tcg_const_i32(sprn);
TCGv_i32 t3 = tcg_const_i32(cause);
- gen_update_current_nip(ctx);
gen_helper_fscr_facility_check(cpu_env, t1, t2, t3);
tcg_temp_free_i32(t3);
@@ -7484,7 +7484,6 @@ static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn,
TCGv_i32 t2 = tcg_const_i32(sprn);
TCGv_i32 t3 = tcg_const_i32(cause);
- gen_update_current_nip(ctx);
gen_helper_msr_facility_check(cpu_env, t1, t2, t3);
tcg_temp_free_i32(t3);
@@ -8241,6 +8240,7 @@ static void init_proc_book3s_64(CPUPPCState *env, int version)
break;
case BOOK3S_CPU_POWER7:
case BOOK3S_CPU_POWER8:
+ case BOOK3S_CPU_POWER9:
gen_spr_book3s_ids(env);
gen_spr_amr(env, version >= BOOK3S_CPU_POWER8);
gen_spr_book3s_purr(env);
@@ -8293,6 +8293,7 @@ static void init_proc_book3s_64(CPUPPCState *env, int version)
break;
case BOOK3S_CPU_POWER7:
case BOOK3S_CPU_POWER8:
+ case BOOK3S_CPU_POWER9:
default:
env->slb_nr = 32;
break;
@@ -8310,6 +8311,7 @@ static void init_proc_book3s_64(CPUPPCState *env, int version)
ppcPOWER7_irq_init(ppc_env_get_cpu(env));
break;
case BOOK3S_CPU_POWER8:
+ case BOOK3S_CPU_POWER9:
init_excp_POWER8(env);
ppcPOWER7_irq_init(ppc_env_get_cpu(env));
break;
@@ -8772,6 +8774,86 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
pcc->l1_icache_size = 0x8000;
pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr;
}
+static void init_proc_POWER9(CPUPPCState *env)
+{
+ init_proc_book3s_64(env, BOOK3S_CPU_POWER9);
+}
+
+static bool ppc_pvr_match_power9(PowerPCCPUClass *pcc, uint32_t pvr)
+{
+ if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER9_BASE) {
+ return true;
+ }
+ return false;
+}
+
+POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->fw_name = "PowerPC,POWER9";
+ dc->desc = "POWER9";
+ dc->props = powerpc_servercpu_properties;
+ pcc->pvr_match = ppc_pvr_match_power9;
+ pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07;
+ pcc->init_proc = init_proc_POWER9;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_FRSQRTES |
+ PPC_FLOAT_STFIWX |
+ PPC_FLOAT_EXT |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_64B | PPC_64BX | PPC_ALTIVEC |
+ PPC_SEGMENT_64B | PPC_SLBI |
+ PPC_POPCNTB | PPC_POPCNTWD |
+ PPC_CILDST;
+ pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX |
+ PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 |
+ PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 |
+ PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 |
+ PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
+ PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 |
+ PPC2_TM | PPC2_PM_ISA206 | PPC2_ISA300;
+ pcc->msr_mask = (1ull << MSR_SF) |
+ (1ull << MSR_TM) |
+ (1ull << MSR_VR) |
+ (1ull << MSR_VSX) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ /* Using 2.07 defines until new radix model is added. */
+ pcc->mmu_model = POWERPC_MMU_2_07;
+#if defined(CONFIG_SOFTMMU)
+ pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
+ /* segment page sizes remain the same */
+ pcc->sps = &POWER7_POWER8_sps;
+#endif
+ pcc->excp_model = POWERPC_EXCP_POWER8;
+ pcc->bus_model = PPC_FLAGS_INPUT_POWER7;
+ pcc->bfd_mach = bfd_mach_ppc64;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR |
+ POWERPC_FLAG_VSX | POWERPC_FLAG_TM;
+ pcc->l1_dcache_size = 0x8000;
+ pcc->l1_icache_size = 0x8000;
+ pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr;
+}
#if !defined(CONFIG_USER_ONLY)
@@ -9169,13 +9251,47 @@ static int register_dblind_insn (opc_handler_t **ppc_opcodes,
return 0;
}
+static int register_trplind_insn(opc_handler_t **ppc_opcodes,
+ unsigned char idx1, unsigned char idx2,
+ unsigned char idx3, unsigned char idx4,
+ opc_handler_t *handler)
+{
+ opc_handler_t **table;
+
+ if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
+ printf("*** ERROR: unable to join indirect table idx "
+ "[%02x-%02x]\n", idx1, idx2);
+ return -1;
+ }
+ table = ind_table(ppc_opcodes[idx1]);
+ if (register_ind_in_table(table, idx2, idx3, NULL) < 0) {
+ printf("*** ERROR: unable to join 2nd-level indirect table idx "
+ "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
+ return -1;
+ }
+ table = ind_table(table[idx2]);
+ if (register_ind_in_table(table, idx3, idx4, handler) < 0) {
+ printf("*** ERROR: unable to insert opcode "
+ "[%02x-%02x-%02x-%02x]\n", idx1, idx2, idx3, idx4);
+ return -1;
+ }
+ return 0;
+}
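register_trplind_insn() above extends the existing one- and two-level registration with a third indirection, so ISA 3.0 instructions that carry an opc4 field (for example the xxbrh/xxbrw/xxbrd/xxbrq entries generated by GEN_XX2FORM_EO) resolve through opc1 -> opc2 -> opc3 -> opc4. A simplified standalone sketch of that nesting (plain C; struct level, reg3 and the single table size are illustrative, and error handling is omitted; QEMU instead tags sub-table pointers with PPC_INDIRECT and sizes the levels separately):

#include <stdio.h>
#include <stdlib.h>

#define LEN 0x40

struct level {
    struct level *sub[LEN];       /* next-level tables          */
    const char   *handler[LEN];   /* leaf entries (names here)  */
};

static struct level *ensure(struct level *t, unsigned idx)
{
    if (!t->sub[idx]) {
        t->sub[idx] = calloc(1, sizeof(*t->sub[idx]));
    }
    return t->sub[idx];
}

static void reg3(struct level *root, unsigned i1, unsigned i2,
                 unsigned i3, unsigned i4, const char *name)
{
    ensure(ensure(ensure(root, i1), i2), i3)->handler[i4] = name;
}

int main(void)
{
    static struct level root;

    /* mirrors GEN_XX2FORM_EO(xxbrh, 0x16, 0x1D, 0x07, ...) under major
     * opcode 0x3C */
    reg3(&root, 0x3C, 0x16, 0x1D, 0x07, "xxbrh");
    puts(root.sub[0x3C]->sub[0x16]->sub[0x1D]->handler[0x07]);
    return 0;
}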
static int register_insn (opc_handler_t **ppc_opcodes, opcode_t *insn)
{
if (insn->opc2 != 0xFF) {
if (insn->opc3 != 0xFF) {
- if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2,
- insn->opc3, &insn->handler) < 0)
- return -1;
+ if (insn->opc4 != 0xFF) {
+ if (register_trplind_insn(ppc_opcodes, insn->opc1, insn->opc2,
+ insn->opc3, insn->opc4,
+ &insn->handler) < 0) {
+ return -1;
+ }
+ } else {
+ if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2,
+ insn->opc3, &insn->handler) < 0)
+ return -1;
+ }
} else {
if (register_ind_insn(ppc_opcodes, insn->opc1,
insn->opc2, &insn->handler) < 0)
@@ -9251,7 +9367,7 @@ static void dump_ppc_insns (CPUPPCState *env)
{
opc_handler_t **table, *handler;
const char *p, *q;
- uint8_t opc1, opc2, opc3;
+ uint8_t opc1, opc2, opc3, opc4;
printf("Instructions set:\n");
/* opc1 is 6 bits long */
@@ -9271,34 +9387,51 @@ static void dump_ppc_insns (CPUPPCState *env)
for (opc3 = 0; opc3 < PPC_CPU_INDIRECT_OPCODES_LEN;
opc3++) {
handler = table[opc3];
- if (handler->handler != &gen_invalid) {
- /* Special hack to properly dump SPE insns */
- p = strchr(handler->oname, '_');
- if (p == NULL) {
- printf("INSN: %02x %02x %02x (%02d %04d) : "
- "%s\n",
- opc1, opc2, opc3, opc1,
- (opc3 << 5) | opc2,
- handler->oname);
- } else {
- q = "speundef";
- if ((p - handler->oname) != strlen(q) ||
- memcmp(handler->oname, q, strlen(q)) != 0) {
- /* First instruction */
- printf("INSN: %02x %02x %02x (%02d %04d) : "
- "%.*s\n",
- opc1, opc2 << 1, opc3, opc1,
- (opc3 << 6) | (opc2 << 1),
- (int)(p - handler->oname),
+ if (is_indirect_opcode(handler)) {
+ table = ind_table(handler);
+ /* opc4 is 5 bits long */
+ for (opc4 = 0; opc4 < PPC_CPU_INDIRECT_OPCODES_LEN;
+ opc4++) {
+ handler = table[opc4];
+ if (handler->handler != &gen_invalid) {
+ printf("INSN: %02x %02x %02x %02x -- "
+ "(%02d %04d %02d) : %s\n",
+ opc1, opc2, opc3, opc4,
+ opc1, (opc3 << 5) | opc2, opc4,
handler->oname);
}
- if (strcmp(p + 1, q) != 0) {
- /* Second instruction */
+ }
+ } else {
+ if (handler->handler != &gen_invalid) {
+ /* Special hack to properly dump SPE insns */
+ p = strchr(handler->oname, '_');
+ if (p == NULL) {
printf("INSN: %02x %02x %02x (%02d %04d) : "
"%s\n",
- opc1, (opc2 << 1) | 1, opc3, opc1,
- (opc3 << 6) | (opc2 << 1) | 1,
- p + 1);
+ opc1, opc2, opc3, opc1,
+ (opc3 << 5) | opc2,
+ handler->oname);
+ } else {
+ q = "speundef";
+ if ((p - handler->oname) != strlen(q)
+ || (memcmp(handler->oname, q, strlen(q))
+ != 0)) {
+ /* First instruction */
+ printf("INSN: %02x %02x %02x"
+ "(%02d %04d) : %.*s\n",
+ opc1, opc2 << 1, opc3, opc1,
+ (opc3 << 6) | (opc2 << 1),
+ (int)(p - handler->oname),
+ handler->oname);
+ }
+ if (strcmp(p + 1, q) != 0) {
+ /* Second instruction */
+ printf("INSN: %02x %02x %02x "
+ "(%02d %04d) : %s\n", opc1,
+ (opc2 << 1) | 1, opc3, opc1,
+ (opc3 << 6) | (opc2 << 1) | 1,
+ p + 1);
+ }
}
}
}
@@ -9545,7 +9678,7 @@ static void ppc_cpu_realizefn(DeviceState *dev, Error **errp)
}
#endif
- cpu_exec_init(cs, &local_err);
+ cpu_exec_realizefn(cs, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
return;
@@ -9773,11 +9906,17 @@ static void ppc_cpu_realizefn(DeviceState *dev, Error **errp)
static void ppc_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
PowerPCCPU *cpu = POWERPC_CPU(dev);
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
CPUPPCState *env = &cpu->env;
- opc_handler_t **table;
- int i, j;
+ Error *local_err = NULL;
+ opc_handler_t **table, **table_2;
+ int i, j, k;
- cpu_exec_exit(CPU(dev));
+ pcc->parent_unrealize(dev, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
for (i = 0; i < PPC_CPU_OPCODES_LEN; i++) {
if (env->opcodes[i] == &invalid_handler) {
@@ -9786,10 +9925,20 @@ static void ppc_cpu_unrealizefn(DeviceState *dev, Error **errp)
if (is_indirect_opcode(env->opcodes[i])) {
table = ind_table(env->opcodes[i]);
for (j = 0; j < PPC_CPU_INDIRECT_OPCODES_LEN; j++) {
- if (table[j] != &invalid_handler &&
- is_indirect_opcode(table[j])) {
+ if (table[j] == &invalid_handler) {
+ continue;
+ }
+ if (is_indirect_opcode(table[j])) {
+ table_2 = ind_table(table[j]);
+ for (k = 0; k < PPC_CPU_INDIRECT_OPCODES_LEN; k++) {
+ if (table_2[k] != &invalid_handler &&
+ is_indirect_opcode(table_2[k])) {
+ g_free((opc_handler_t *)((uintptr_t)table_2[k] &
+ ~PPC_INDIRECT));
+ }
+ }
g_free((opc_handler_t *)((uintptr_t)table[j] &
- ~PPC_INDIRECT));
+ ~PPC_INDIRECT));
}
}
g_free((opc_handler_t *)((uintptr_t)env->opcodes[i] &
@@ -9800,7 +9949,8 @@ static void ppc_cpu_unrealizefn(DeviceState *dev, Error **errp)
int ppc_get_compat_smt_threads(PowerPCCPU *cpu)
{
- int ret = MIN(smp_threads, kvmppc_smt_threads());
+ CPUState *cs = CPU(cpu);
+ int ret = MIN(cs->nr_threads, kvmppc_smt_threads());
switch (cpu->cpu_version) {
case CPU_POWERPC_LOGICAL_2_05:
@@ -10370,6 +10520,11 @@ static gchar *ppc_gdb_arch_name(CPUState *cs)
#endif
}
+static Property ppc_cpu_properties[] = {
+ DEFINE_PROP_BOOL("pre-2.8-migration", PowerPCCPU, pre_2_8_migration, false),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
static void ppc_cpu_class_init(ObjectClass *oc, void *data)
{
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -10377,10 +10532,12 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
DeviceClass *dc = DEVICE_CLASS(oc);
pcc->parent_realize = dc->realize;
+ pcc->parent_unrealize = dc->unrealize;
pcc->pvr_match = ppc_pvr_match_default;
pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_always;
dc->realize = ppc_cpu_realizefn;
dc->unrealize = ppc_cpu_unrealizefn;
+ dc->props = ppc_cpu_properties;
pcc->parent_reset = cc->reset;
cc->reset = ppc_cpu_reset;