author    Nathan Froyd <froydnj@codesourcery.com>    2009-12-08 08:06:29 -0800
committer Aurelien Jarno <aurelien@aurel32.net>      2009-12-13 20:20:20 +0100
commit    364d48314a97dc4e3a98ff2643232faf37a2690d (patch)
tree      d6932b83fbacae8d677e1714e0aa49b60d1b4358 /target-mips
parent    6ea219d01962b6953716e6cd47ba081299811ffe (diff)
target-mips: add mips16 instruction decoding
There's no good way to add this incrementally, so we do it all at once. The only changes to shared code are in handle_delay_slot. We need to flip ISAMode when doing a jump-and-exchange, and we need to set ISAMode to the low bit of the target address for a jump-to-register. Also, since we're now adding bits that can be in MIPS_HFLAG_BMASK_EXT, make sure we use MIPS_HFLAG_BMASK_BASE in the places where we just want basic information about a branch.

Signed-off-by: Nathan Froyd <froydnj@codesourcery.com>
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
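
For context on the ISAMode convention described above, here is a minimal stand-alone C sketch (not part of the patch; the names jump_to_register, jump_and_exchange and ISA_MODE_MIPS16 are illustrative) of the architectural behaviour that the TCG code added to handle_delay_slot reproduces: a jump-to-register takes the new ISA mode from bit 0 of the register and clears that bit in the PC, while a jump-and-exchange (JALX) flips the current mode.

#include <stdint.h>

typedef uint32_t target_ulong;   /* assume a 32-bit target for this sketch */

enum { ISA_MODE_MIPS32 = 0, ISA_MODE_MIPS16 = 1 };

/* Jump-to-register: bit 0 of the register value selects the new ISA mode,
   and the PC becomes the target with that bit cleared. */
void jump_to_register(target_ulong target, target_ulong *pc, int *isa_mode)
{
    *isa_mode = (int)(target & 1);
    *pc = target & ~(target_ulong)1;
}

/* Jump-and-exchange (JALX): the branch target is used as-is, but the ISA
   mode toggles between 32-bit and MIPS16 code. */
void jump_and_exchange(target_ulong target, target_ulong *pc, int *isa_mode)
{
    *isa_mode ^= ISA_MODE_MIPS16;
    *pc = target;
}

In the patch itself the same effect is expressed at translation time with tcg_gen_xori_i32 on the hflags global (for MIPS_HFLAG_BX) and tcg_gen_andi_tl on btarget/cpu_PC, rather than plain C.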
Diffstat (limited to 'target-mips')
-rw-r--r--   target-mips/translate.c   1072
1 file changed, 1063 insertions(+), 9 deletions(-)
diff --git a/target-mips/translate.c b/target-mips/translate.c
index cc1811339b..a3f5f727a1 100644
--- a/target-mips/translate.c
+++ b/target-mips/translate.c
@@ -83,6 +83,7 @@ enum {
OPC_LH = (0x21 << 26),
OPC_LWL = (0x22 << 26),
OPC_LW = (0x23 << 26),
+ OPC_LWPC = OPC_LW | 0x5,
OPC_LBU = (0x24 << 26),
OPC_LHU = (0x25 << 26),
OPC_LWR = (0x26 << 26),
@@ -97,6 +98,7 @@ enum {
OPC_LL = (0x30 << 26),
OPC_LLD = (0x34 << 26),
OPC_LD = (0x37 << 26),
+ OPC_LDPC = OPC_LD | 0x5,
OPC_SC = (0x38 << 26),
OPC_SCD = (0x3C << 26),
OPC_SD = (0x3F << 26),
@@ -173,6 +175,7 @@ enum {
/* Jumps */
OPC_JR = 0x08 | OPC_SPECIAL, /* Also JR.HB */
OPC_JALR = 0x09 | OPC_SPECIAL, /* Also JALR.HB */
+ OPC_JALRC = OPC_JALR | (0x5 << 6),
/* Traps */
OPC_TGE = 0x30 | OPC_SPECIAL,
OPC_TGEU = 0x31 | OPC_SPECIAL,
@@ -763,7 +766,7 @@ static inline void save_cpu_state (DisasContext *ctx, int do_save_pc)
if (ctx->hflags != ctx->saved_hflags) {
tcg_gen_movi_i32(hflags, ctx->hflags);
ctx->saved_hflags = ctx->hflags;
- switch (ctx->hflags & MIPS_HFLAG_BMASK) {
+ switch (ctx->hflags & MIPS_HFLAG_BMASK_BASE) {
case MIPS_HFLAG_BR:
break;
case MIPS_HFLAG_BC:
@@ -778,7 +781,7 @@ static inline void save_cpu_state (DisasContext *ctx, int do_save_pc)
static inline void restore_cpu_state (CPUState *env, DisasContext *ctx)
{
ctx->saved_hflags = ctx->hflags;
- switch (ctx->hflags & MIPS_HFLAG_BMASK) {
+ switch (ctx->hflags & MIPS_HFLAG_BMASK_BASE) {
case MIPS_HFLAG_BR:
break;
case MIPS_HFLAG_BC:
@@ -994,6 +997,20 @@ static void gen_base_offset_addr (DisasContext *ctx, TCGv addr,
}
}
+static target_ulong pc_relative_pc (DisasContext *ctx)
+{
+ target_ulong pc = ctx->pc;
+
+ if (ctx->hflags & MIPS_HFLAG_BMASK) {
+ int branch_bytes = ctx->hflags & MIPS_HFLAG_BDS16 ? 2 : 4;
+
+ pc -= branch_bytes;
+ }
+
+ pc &= ~(target_ulong)3;
+ return pc;
+}
+
/* Load and store */
static void gen_ldst (DisasContext *ctx, uint32_t opc, int rt,
int base, int16_t offset)
@@ -1057,7 +1074,21 @@ static void gen_ldst (DisasContext *ctx, uint32_t opc, int rt,
gen_helper_2i(sdr, t1, t0, ctx->mem_idx);
opn = "sdr";
break;
+ case OPC_LDPC:
+ save_cpu_state(ctx, 1);
+ tcg_gen_movi_tl(t1, pc_relative_pc(ctx));
+ gen_op_addr_add(ctx, t0, t0, t1);
+ op_ldst_ld(t0, t0, ctx);
+ gen_store_gpr(t0, rt);
+ break;
#endif
+ case OPC_LWPC:
+ save_cpu_state(ctx, 1);
+ tcg_gen_movi_tl(t1, pc_relative_pc(ctx));
+ gen_op_addr_add(ctx, t0, t0, t1);
+ op_ldst_lw(t0, t0, ctx);
+ gen_store_gpr(t0, rt);
+ break;
case OPC_LW:
save_cpu_state(ctx, 0);
op_ldst_lw(t0, t0, ctx);
@@ -2434,11 +2465,13 @@ static void gen_compute_branch (DisasContext *ctx, uint32_t opc,
break;
case OPC_J:
case OPC_JAL:
+ case OPC_JALX:
/* Jump to immediate */
btgt = ((ctx->pc + insn_bytes) & (int32_t)0xF0000000) | (uint32_t)offset;
break;
case OPC_JR:
case OPC_JALR:
+ case OPC_JALRC:
/* Jump to register */
if (offset != 0 && offset != 16) {
/* Hint = 0 is JR/JALR, hint 16 is JR.HB/JALR.HB, the
@@ -2501,18 +2534,29 @@ static void gen_compute_branch (DisasContext *ctx, uint32_t opc,
ctx->hflags |= MIPS_HFLAG_B;
MIPS_DEBUG("j " TARGET_FMT_lx, btgt);
break;
+ case OPC_JALX:
+ ctx->hflags |= MIPS_HFLAG_BX;
+ /* Fallthrough */
case OPC_JAL:
blink = 31;
ctx->hflags |= MIPS_HFLAG_B;
+ ctx->hflags |= (ctx->hflags & MIPS_HFLAG_M16
+ ? MIPS_HFLAG_BDS16
+ : MIPS_HFLAG_BDS32);
MIPS_DEBUG("jal " TARGET_FMT_lx, btgt);
break;
case OPC_JR:
ctx->hflags |= MIPS_HFLAG_BR;
+ if (ctx->hflags & MIPS_HFLAG_M16)
+ ctx->hflags |= MIPS_HFLAG_BDS16;
MIPS_DEBUG("jr %s", regnames[rs]);
break;
case OPC_JALR:
+ case OPC_JALRC:
blink = rt;
ctx->hflags |= MIPS_HFLAG_BR;
+ if (ctx->hflags & MIPS_HFLAG_M16)
+ ctx->hflags |= MIPS_HFLAG_BDS16;
MIPS_DEBUG("jalr %s, %s", regnames[rt], regnames[rs]);
break;
default:
@@ -2609,10 +2653,18 @@ static void gen_compute_branch (DisasContext *ctx, uint32_t opc,
ctx->btarget = btgt;
if (blink > 0) {
- tcg_gen_movi_tl(cpu_gpr[blink], ctx->pc + 8);
+ int post_delay = insn_bytes;
+ int lowbit = !!(ctx->hflags & MIPS_HFLAG_M16);
+
+ if (opc != OPC_JALRC)
+ post_delay += ((ctx->hflags & MIPS_HFLAG_BDS16) ? 2 : 4);
+
+ tcg_gen_movi_tl(cpu_gpr[blink], ctx->pc + post_delay + lowbit);
}
out:
+ if (insn_bytes == 2)
+ ctx->hflags |= MIPS_HFLAG_B16;
tcg_temp_free(t0);
tcg_temp_free(t1);
}
@@ -7536,16 +7588,19 @@ static void handle_delay_slot (CPUState *env, DisasContext *ctx,
int insn_bytes)
{
if (ctx->hflags & MIPS_HFLAG_BMASK) {
- int hflags = ctx->hflags & MIPS_HFLAG_BMASK;
+ int proc_hflags = ctx->hflags & MIPS_HFLAG_BMASK;
/* Branches completion */
ctx->hflags &= ~MIPS_HFLAG_BMASK;
ctx->bstate = BS_BRANCH;
save_cpu_state(ctx, 0);
/* FIXME: Need to clear can_do_io. */
- switch (hflags) {
+ switch (proc_hflags & MIPS_HFLAG_BMASK_BASE) {
case MIPS_HFLAG_B:
/* unconditional branch */
MIPS_DEBUG("unconditional branch");
+ if (proc_hflags & MIPS_HFLAG_BX) {
+ tcg_gen_xori_i32(hflags, hflags, MIPS_HFLAG_M16);
+ }
gen_goto_tb(ctx, 0, ctx->btarget);
break;
case MIPS_HFLAG_BL:
@@ -7568,7 +7623,22 @@ static void handle_delay_slot (CPUState *env, DisasContext *ctx,
case MIPS_HFLAG_BR:
/* unconditional branch to register */
MIPS_DEBUG("branch to register");
- tcg_gen_mov_tl(cpu_PC, btarget);
+ if (env->insn_flags & ASE_MIPS16) {
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+
+ tcg_gen_andi_tl(t0, btarget, 0x1);
+ tcg_gen_trunc_tl_i32(t1, t0);
+ tcg_temp_free(t0);
+ tcg_gen_andi_i32(hflags, hflags, ~(uint32_t)MIPS_HFLAG_M16);
+ tcg_gen_shli_i32(t1, t1, MIPS_HFLAG_M16_SHIFT);
+ tcg_gen_or_i32(hflags, hflags, t1);
+ tcg_temp_free_i32(t1);
+
+ tcg_gen_andi_tl(cpu_PC, btarget, ~(target_ulong)0x1);
+ } else {
+ tcg_gen_mov_tl(cpu_PC, btarget);
+ }
if (ctx->singlestep_enabled) {
save_cpu_state(ctx, 0);
gen_helper_0i(raise_exception, EXCP_DEBUG);
@@ -7681,7 +7751,7 @@ enum {
I64_SDRASP = 0x2,
I64_DADJSP = 0x3,
I64_LDPC = 0x4,
- I64_DADDIU = 0x5,
+ I64_DADDIU5 = 0x5,
I64_DADDIUPC = 0x6,
I64_DADDIUSP = 0x7
};
@@ -7696,6 +7766,984 @@ enum {
RR_RY_CNVT_SEW = 0x6,
};
+static int xlat (int r)
+{
+ static int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
+
+ return map[r];
+}
+
+static void gen_mips16_save (DisasContext *ctx,
+ int xsregs, int aregs,
+ int do_ra, int do_s0, int do_s1,
+ int framesize)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ int args, astatic;
+
+ switch (aregs) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 11:
+ args = 0;
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ args = 1;
+ break;
+ case 8:
+ case 9:
+ case 10:
+ args = 2;
+ break;
+ case 12:
+ case 13:
+ args = 3;
+ break;
+ case 14:
+ args = 4;
+ break;
+ default:
+ generate_exception(ctx, EXCP_RI);
+ return;
+ }
+
+ switch (args) {
+ case 4:
+ gen_base_offset_addr(ctx, t0, 29, 12);
+ gen_load_gpr(t1, 7);
+ op_ldst_sw(t1, t0, ctx);
+ /* Fall through */
+ case 3:
+ gen_base_offset_addr(ctx, t0, 29, 8);
+ gen_load_gpr(t1, 6);
+ op_ldst_sw(t1, t0, ctx);
+ /* Fall through */
+ case 2:
+ gen_base_offset_addr(ctx, t0, 29, 4);
+ gen_load_gpr(t1, 5);
+ op_ldst_sw(t1, t0, ctx);
+ /* Fall through */
+ case 1:
+ gen_base_offset_addr(ctx, t0, 29, 0);
+ gen_load_gpr(t1, 4);
+ op_ldst_sw(t1, t0, ctx);
+ }
+
+ gen_load_gpr(t0, 29);
+
+#define DECR_AND_STORE(reg) do { \
+ tcg_gen_subi_tl(t0, t0, 4); \
+ gen_load_gpr(t1, reg); \
+ op_ldst_sw(t1, t0, ctx); \
+ } while (0)
+
+ if (do_ra) {
+ DECR_AND_STORE(31);
+ }
+
+ switch (xsregs) {
+ case 7:
+ DECR_AND_STORE(30);
+ /* Fall through */
+ case 6:
+ DECR_AND_STORE(23);
+ /* Fall through */
+ case 5:
+ DECR_AND_STORE(22);
+ /* Fall through */
+ case 4:
+ DECR_AND_STORE(21);
+ /* Fall through */
+ case 3:
+ DECR_AND_STORE(20);
+ /* Fall through */
+ case 2:
+ DECR_AND_STORE(19);
+ /* Fall through */
+ case 1:
+ DECR_AND_STORE(18);
+ }
+
+ if (do_s1) {
+ DECR_AND_STORE(17);
+ }
+ if (do_s0) {
+ DECR_AND_STORE(16);
+ }
+
+ switch (aregs) {
+ case 0:
+ case 4:
+ case 8:
+ case 12:
+ case 14:
+ astatic = 0;
+ break;
+ case 1:
+ case 5:
+ case 9:
+ case 13:
+ astatic = 1;
+ break;
+ case 2:
+ case 6:
+ case 10:
+ astatic = 2;
+ break;
+ case 3:
+ case 7:
+ astatic = 3;
+ break;
+ case 11:
+ astatic = 4;
+ break;
+ default:
+ generate_exception(ctx, EXCP_RI);
+ return;
+ }
+
+ if (astatic > 0) {
+ DECR_AND_STORE(7);
+ if (astatic > 1) {
+ DECR_AND_STORE(6);
+ if (astatic > 2) {
+ DECR_AND_STORE(5);
+ if (astatic > 3) {
+ DECR_AND_STORE(4);
+ }
+ }
+ }
+ }
+#undef DECR_AND_STORE
+
+ tcg_gen_subi_tl(cpu_gpr[29], cpu_gpr[29], framesize);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+static void gen_mips16_restore (DisasContext *ctx,
+ int xsregs, int aregs,
+ int do_ra, int do_s0, int do_s1,
+ int framesize)
+{
+ int astatic;
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ tcg_gen_addi_tl(t0, cpu_gpr[29], framesize);
+
+#define DECR_AND_LOAD(reg) do { \
+ tcg_gen_subi_tl(t0, t0, 4); \
+ op_ldst_lw(t1, t0, ctx); \
+ gen_store_gpr(t1, reg); \
+ } while (0)
+
+ if (do_ra) {
+ DECR_AND_LOAD(31);
+ }
+
+ switch (xsregs) {
+ case 7:
+ DECR_AND_LOAD(30);
+ /* Fall through */
+ case 6:
+ DECR_AND_LOAD(23);
+ /* Fall through */
+ case 5:
+ DECR_AND_LOAD(22);
+ /* Fall through */
+ case 4:
+ DECR_AND_LOAD(21);
+ /* Fall through */
+ case 3:
+ DECR_AND_LOAD(20);
+ /* Fall through */
+ case 2:
+ DECR_AND_LOAD(19);
+ /* Fall through */
+ case 1:
+ DECR_AND_LOAD(18);
+ }
+
+ if (do_s1) {
+ DECR_AND_LOAD(17);
+ }
+ if (do_s0) {
+ DECR_AND_LOAD(16);
+ }
+
+ switch (aregs) {
+ case 0:
+ case 4:
+ case 8:
+ case 12:
+ case 14:
+ astatic = 0;
+ break;
+ case 1:
+ case 5:
+ case 9:
+ case 13:
+ astatic = 1;
+ break;
+ case 2:
+ case 6:
+ case 10:
+ astatic = 2;
+ break;
+ case 3:
+ case 7:
+ astatic = 3;
+ break;
+ case 11:
+ astatic = 4;
+ break;
+ default:
+ generate_exception(ctx, EXCP_RI);
+ return;
+ }
+
+ if (astatic > 0) {
+ DECR_AND_LOAD(7);
+ if (astatic > 1) {
+ DECR_AND_LOAD(6);
+ if (astatic > 2) {
+ DECR_AND_LOAD(5);
+ if (astatic > 3) {
+ DECR_AND_LOAD(4);
+ }
+ }
+ }
+ }
+#undef DECR_AND_LOAD
+
+ tcg_gen_addi_tl(cpu_gpr[29], cpu_gpr[29], framesize);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+static void gen_addiupc (DisasContext *ctx, int rx, int imm,
+ int is_64_bit, int extended)
+{
+ TCGv t0;
+
+ if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ generate_exception(ctx, EXCP_RI);
+ return;
+ }
+
+ t0 = tcg_temp_new();
+
+ tcg_gen_movi_tl(t0, pc_relative_pc(ctx));
+ tcg_gen_addi_tl(cpu_gpr[rx], t0, imm);
+ if (!is_64_bit) {
+ tcg_gen_ext32s_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ }
+
+ tcg_temp_free(t0);
+}
+
+#if defined(TARGET_MIPS64)
+static void decode_i64_mips16 (CPUState *env, DisasContext *ctx,
+ int ry, int funct, int16_t offset,
+ int extended)
+{
+ switch (funct) {
+ case I64_LDSP:
+ check_mips_64(ctx);
+ offset = extended ? offset : offset << 3;
+ gen_ldst(ctx, OPC_LD, ry, 29, offset);
+ break;
+ case I64_SDSP:
+ check_mips_64(ctx);
+ offset = extended ? offset : offset << 3;
+ gen_ldst(ctx, OPC_SD, ry, 29, offset);
+ break;
+ case I64_SDRASP:
+ check_mips_64(ctx);
+ offset = extended ? offset : (ctx->opcode & 0xff) << 3;
+ gen_ldst(ctx, OPC_SD, 31, 29, offset);
+ break;
+ case I64_DADJSP:
+ check_mips_64(ctx);
+ offset = extended ? offset : ((int8_t)ctx->opcode) << 3;
+ gen_arith_imm(env, ctx, OPC_DADDIU, 29, 29, offset);
+ break;
+ case I64_LDPC:
+ if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ generate_exception(ctx, EXCP_RI);
+ } else {
+ offset = extended ? offset : offset << 3;
+ gen_ldst(ctx, OPC_LDPC, ry, 0, offset);
+ }
+ break;
+ case I64_DADDIU5:
+ check_mips_64(ctx);
+ offset = extended ? offset : ((int8_t)(offset << 3)) >> 3;
+ gen_arith_imm(env, ctx, OPC_DADDIU, ry, ry, offset);
+ break;
+ case I64_DADDIUPC:
+ check_mips_64(ctx);
+ offset = extended ? offset : offset << 2;
+ gen_addiupc(ctx, ry, offset, 1, extended);
+ break;
+ case I64_DADDIUSP:
+ check_mips_64(ctx);
+ offset = extended ? offset : offset << 2;
+ gen_arith_imm(env, ctx, OPC_DADDIU, ry, 29, offset);
+ break;
+ }
+}
+#endif
+
+static int decode_extended_mips16_opc (CPUState *env, DisasContext *ctx,
+ int *is_branch)
+{
+ int extend = lduw_code(ctx->pc + 2);
+ int op, rx, ry, funct, sa;
+ int16_t imm, offset;
+
+ ctx->opcode = (ctx->opcode << 16) | extend;
+ op = (ctx->opcode >> 11) & 0x1f;
+ sa = (ctx->opcode >> 22) & 0x1f;
+ funct = (ctx->opcode >> 8) & 0x7;
+ rx = xlat((ctx->opcode >> 8) & 0x7);
+ ry = xlat((ctx->opcode >> 5) & 0x7);
+ offset = imm = (int16_t) (((ctx->opcode >> 16) & 0x1f) << 11
+ | ((ctx->opcode >> 21) & 0x3f) << 5
+ | (ctx->opcode & 0x1f));
+
+ /* The extended opcodes cleverly reuse the opcodes from their 16-bit
+ counterparts. */
+ switch (op) {
+ case M16_OPC_ADDIUSP:
+ gen_arith_imm(env, ctx, OPC_ADDIU, rx, 29, imm);
+ break;
+ case M16_OPC_ADDIUPC:
+ gen_addiupc(ctx, rx, imm, 0, 1);
+ break;
+ case M16_OPC_B:
+ gen_compute_branch(ctx, OPC_BEQ, 4, 0, 0, offset << 1);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_BEQZ:
+ gen_compute_branch(ctx, OPC_BEQ, 4, rx, 0, offset << 1);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_BNEQZ:
+ gen_compute_branch(ctx, OPC_BNE, 4, rx, 0, offset << 1);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_SHIFT:
+ switch (ctx->opcode & 0x3) {
+ case 0x0:
+ gen_shift_imm(env, ctx, OPC_SLL, rx, ry, sa);
+ break;
+ case 0x1:
+#if defined(TARGET_MIPS64)
+ check_mips_64(ctx);
+ gen_shift_imm(env, ctx, OPC_DSLL, rx, ry, sa);
+#else
+ generate_exception(ctx, EXCP_RI);
+#endif
+ break;
+ case 0x2:
+ gen_shift_imm(env, ctx, OPC_SRL, rx, ry, sa);
+ break;
+ case 0x3:
+ gen_shift_imm(env, ctx, OPC_SRA, rx, ry, sa);
+ break;
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_LD:
+ check_mips_64(ctx);
+ gen_ldst(ctx, OPC_LD, ry, rx, offset);
+ break;
+#endif
+ case M16_OPC_RRIA:
+ imm = ctx->opcode & 0xf;
+ imm = imm | ((ctx->opcode >> 20) & 0x7f) << 4;
+ imm = imm | ((ctx->opcode >> 16) & 0xf) << 11;
+ imm = (int16_t) (imm << 1) >> 1;
+ if ((ctx->opcode >> 4) & 0x1) {
+#if defined(TARGET_MIPS64)
+ check_mips_64(ctx);
+ gen_arith_imm(env, ctx, OPC_DADDIU, ry, rx, imm);
+#else
+ generate_exception(ctx, EXCP_RI);
+#endif
+ } else {
+ gen_arith_imm(env, ctx, OPC_ADDIU, ry, rx, imm);
+ }
+ break;
+ case M16_OPC_ADDIU8:
+ gen_arith_imm(env, ctx, OPC_ADDIU, rx, rx, imm);
+ break;
+ case M16_OPC_SLTI:
+ gen_slt_imm(env, OPC_SLTI, 24, rx, imm);
+ break;
+ case M16_OPC_SLTIU:
+ gen_slt_imm(env, OPC_SLTIU, 24, rx, imm);
+ break;
+ case M16_OPC_I8:
+ switch (funct) {
+ case I8_BTEQZ:
+ gen_compute_branch(ctx, OPC_BEQ, 4, 24, 0, offset << 1);
+ break;
+ case I8_BTNEZ:
+ gen_compute_branch(ctx, OPC_BNE, 4, 24, 0, offset << 1);
+ break;
+ case I8_SWRASP:
+ gen_ldst(ctx, OPC_SW, 31, 29, imm);
+ break;
+ case I8_ADJSP:
+ gen_arith_imm(env, ctx, OPC_ADDIU, 29, 29, imm);
+ break;
+ case I8_SVRS:
+ {
+ int xsregs = (ctx->opcode >> 24) & 0x7;
+ int aregs = (ctx->opcode >> 16) & 0xf;
+ int do_ra = (ctx->opcode >> 6) & 0x1;
+ int do_s0 = (ctx->opcode >> 5) & 0x1;
+ int do_s1 = (ctx->opcode >> 4) & 0x1;
+ int framesize = (((ctx->opcode >> 20) & 0xf) << 4
+ | (ctx->opcode & 0xf)) << 3;
+
+ if (ctx->opcode & (1 << 7)) {
+ gen_mips16_save(ctx, xsregs, aregs,
+ do_ra, do_s0, do_s1,
+ framesize);
+ } else {
+ gen_mips16_restore(ctx, xsregs, aregs,
+ do_ra, do_s0, do_s1,
+ framesize);
+ }
+ }
+ break;
+ default:
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case M16_OPC_LI:
+ tcg_gen_movi_tl(cpu_gpr[rx], (uint16_t) imm);
+ break;
+ case M16_OPC_CMPI:
+ tcg_gen_xori_tl(cpu_gpr[24], cpu_gpr[rx], (uint16_t) imm);
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_SD:
+ gen_ldst(ctx, OPC_SD, ry, rx, offset);
+ break;
+#endif
+ case M16_OPC_LB:
+ gen_ldst(ctx, OPC_LB, ry, rx, offset);
+ break;
+ case M16_OPC_LH:
+ gen_ldst(ctx, OPC_LH, ry, rx, offset);
+ break;
+ case M16_OPC_LWSP:
+ gen_ldst(ctx, OPC_LW, rx, 29, offset);
+ break;
+ case M16_OPC_LW:
+ gen_ldst(ctx, OPC_LW, ry, rx, offset);
+ break;
+ case M16_OPC_LBU:
+ gen_ldst(ctx, OPC_LBU, ry, rx, offset);
+ break;
+ case M16_OPC_LHU:
+ gen_ldst(ctx, OPC_LHU, ry, rx, offset);
+ break;
+ case M16_OPC_LWPC:
+ gen_ldst(ctx, OPC_LWPC, rx, 0, offset);
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_LWU:
+ gen_ldst(ctx, OPC_LWU, ry, rx, offset);
+ break;
+#endif
+ case M16_OPC_SB:
+ gen_ldst(ctx, OPC_SB, ry, rx, offset);
+ break;
+ case M16_OPC_SH:
+ gen_ldst(ctx, OPC_SH, ry, rx, offset);
+ break;
+ case M16_OPC_SWSP:
+ gen_ldst(ctx, OPC_SW, rx, 29, offset);
+ break;
+ case M16_OPC_SW:
+ gen_ldst(ctx, OPC_SW, ry, rx, offset);
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_I64:
+ decode_i64_mips16(env, ctx, ry, funct, offset, 1);
+ break;
+#endif
+ default:
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+
+ return 4;
+}
+
+static int decode_mips16_opc (CPUState *env, DisasContext *ctx,
+ int *is_branch)
+{
+ int rx, ry;
+ int sa;
+ int op, cnvt_op, op1, offset;
+ int funct;
+ int n_bytes;
+
+ op = (ctx->opcode >> 11) & 0x1f;
+ sa = (ctx->opcode >> 2) & 0x7;
+ sa = sa == 0 ? 8 : sa;
+ rx = xlat((ctx->opcode >> 8) & 0x7);
+ cnvt_op = (ctx->opcode >> 5) & 0x7;
+ ry = xlat((ctx->opcode >> 5) & 0x7);
+ op1 = offset = ctx->opcode & 0x1f;
+
+ n_bytes = 2;
+
+ switch (op) {
+ case M16_OPC_ADDIUSP:
+ {
+ int16_t imm = ((uint8_t) ctx->opcode) << 2;
+
+ gen_arith_imm(env, ctx, OPC_ADDIU, rx, 29, imm);
+ }
+ break;
+ case M16_OPC_ADDIUPC:
+ gen_addiupc(ctx, rx, ((uint8_t) ctx->opcode) << 2, 0, 0);
+ break;
+ case M16_OPC_B:
+ offset = (ctx->opcode & 0x7ff) << 1;
+ offset = (int16_t)(offset << 4) >> 4;
+ gen_compute_branch(ctx, OPC_BEQ, 2, 0, 0, offset);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_JAL:
+ offset = lduw_code(ctx->pc + 2);
+ offset = (((ctx->opcode & 0x1f) << 21)
+ | ((ctx->opcode >> 5) & 0x1f) << 16
+ | offset) << 2;
+ op = ((ctx->opcode >> 10) & 0x1) ? OPC_JALX : OPC_JAL;
+ gen_compute_branch(ctx, op, 4, rx, ry, offset);
+ n_bytes = 4;
+ *is_branch = 1;
+ break;
+ case M16_OPC_BEQZ:
+ gen_compute_branch(ctx, OPC_BEQ, 2, rx, 0, ((int8_t)ctx->opcode) << 1);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_BNEQZ:
+ gen_compute_branch(ctx, OPC_BNE, 2, rx, 0, ((int8_t)ctx->opcode) << 1);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_SHIFT:
+ switch (ctx->opcode & 0x3) {
+ case 0x0:
+ gen_shift_imm(env, ctx, OPC_SLL, rx, ry, sa);
+ break;
+ case 0x1:
+#if defined(TARGET_MIPS64)
+ check_mips_64(ctx);
+ gen_shift_imm(env, ctx, OPC_DSLL, rx, ry, sa);
+#else
+ generate_exception(ctx, EXCP_RI);
+#endif
+ break;
+ case 0x2:
+ gen_shift_imm(env, ctx, OPC_SRL, rx, ry, sa);
+ break;
+ case 0x3:
+ gen_shift_imm(env, ctx, OPC_SRA, rx, ry, sa);
+ break;
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_LD:
+ check_mips_64(ctx);
+ gen_ldst(ctx, OPC_LD, ry, rx, offset << 3);
+ break;
+#endif
+ case M16_OPC_RRIA:
+ {
+ int16_t imm = (int8_t)((ctx->opcode & 0xf) << 4) >> 4;
+
+ if ((ctx->opcode >> 4) & 1) {
+#if defined(TARGET_MIPS64)
+ check_mips_64(ctx);
+ gen_arith_imm(env, ctx, OPC_DADDIU, ry, rx, imm);
+#else
+ generate_exception(ctx, EXCP_RI);
+#endif
+ } else {
+ gen_arith_imm(env, ctx, OPC_ADDIU, ry, rx, imm);
+ }
+ }
+ break;
+ case M16_OPC_ADDIU8:
+ {
+ int16_t imm = (int8_t) ctx->opcode;
+
+ gen_arith_imm(env, ctx, OPC_ADDIU, rx, rx, imm);
+ }
+ break;
+ case M16_OPC_SLTI:
+ {
+ int16_t imm = (uint8_t) ctx->opcode;
+
+ gen_slt_imm(env, OPC_SLTI, 24, rx, imm);
+ }
+ break;
+ case M16_OPC_SLTIU:
+ {
+ int16_t imm = (uint8_t) ctx->opcode;
+
+ gen_slt_imm(env, OPC_SLTIU, 24, rx, imm);
+ }
+ break;
+ case M16_OPC_I8:
+ {
+ int reg32;
+
+ funct = (ctx->opcode >> 8) & 0x7;
+ switch (funct) {
+ case I8_BTEQZ:
+ gen_compute_branch(ctx, OPC_BEQ, 2, 24, 0,
+ ((int8_t)ctx->opcode) << 1);
+ break;
+ case I8_BTNEZ:
+ gen_compute_branch(ctx, OPC_BNE, 2, 24, 0,
+ ((int8_t)ctx->opcode) << 1);
+ break;
+ case I8_SWRASP:
+ gen_ldst(ctx, OPC_SW, 31, 29, (ctx->opcode & 0xff) << 2);
+ break;
+ case I8_ADJSP:
+ gen_arith_imm(env, ctx, OPC_ADDIU, 29, 29,
+ ((int8_t)ctx->opcode) << 3);
+ break;
+ case I8_SVRS:
+ {
+ int do_ra = ctx->opcode & (1 << 6);
+ int do_s0 = ctx->opcode & (1 << 5);
+ int do_s1 = ctx->opcode & (1 << 4);
+ int framesize = ctx->opcode & 0xf;
+
+ if (framesize == 0) {
+ framesize = 128;
+ } else {
+ framesize = framesize << 3;
+ }
+
+ if (ctx->opcode & (1 << 7)) {
+ gen_mips16_save(ctx, 0, 0,
+ do_ra, do_s0, do_s1, framesize);
+ } else {
+ gen_mips16_restore(ctx, 0, 0,
+ do_ra, do_s0, do_s1, framesize);
+ }
+ }
+ break;
+ case I8_MOV32R:
+ {
+ int rz = xlat(ctx->opcode & 0x7);
+
+ reg32 = (((ctx->opcode >> 3) & 0x3) << 3) |
+ ((ctx->opcode >> 5) & 0x7);
+ gen_arith(env, ctx, OPC_ADDU, reg32, rz, 0);
+ }
+ break;
+ case I8_MOVR32:
+ reg32 = ctx->opcode & 0x1f;
+ gen_arith(env, ctx, OPC_ADDU, ry, reg32, 0);
+ break;
+ default:
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+ }
+ break;
+ case M16_OPC_LI:
+ {
+ int16_t imm = (uint8_t) ctx->opcode;
+
+ gen_arith_imm(env, ctx, OPC_ADDIU, rx, 0, imm);
+ }
+ break;
+ case M16_OPC_CMPI:
+ {
+ int16_t imm = (uint8_t) ctx->opcode;
+
+ gen_logic_imm(env, OPC_XORI, 24, rx, imm);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_SD:
+ check_mips_64(ctx);
+ gen_ldst(ctx, OPC_SD, ry, rx, offset << 3);
+ break;
+#endif
+ case M16_OPC_LB:
+ gen_ldst(ctx, OPC_LB, ry, rx, offset);
+ break;
+ case M16_OPC_LH:
+ gen_ldst(ctx, OPC_LH, ry, rx, offset << 1);
+ break;
+ case M16_OPC_LWSP:
+ gen_ldst(ctx, OPC_LW, rx, 29, ((uint8_t)ctx->opcode) << 2);
+ break;
+ case M16_OPC_LW:
+ gen_ldst(ctx, OPC_LW, ry, rx, offset << 2);
+ break;
+ case M16_OPC_LBU:
+ gen_ldst(ctx, OPC_LBU, ry, rx, offset);
+ break;
+ case M16_OPC_LHU:
+ gen_ldst(ctx, OPC_LHU, ry, rx, offset << 1);
+ break;
+ case M16_OPC_LWPC:
+ gen_ldst(ctx, OPC_LWPC, rx, 0, ((uint8_t)ctx->opcode) << 2);
+ break;
+#if defined (TARGET_MIPS64)
+ case M16_OPC_LWU:
+ check_mips_64(ctx);
+ gen_ldst(ctx, OPC_LWU, ry, rx, offset << 2);
+ break;
+#endif
+ case M16_OPC_SB:
+ gen_ldst(ctx, OPC_SB, ry, rx, offset);
+ break;
+ case M16_OPC_SH:
+ gen_ldst(ctx, OPC_SH, ry, rx, offset << 1);
+ break;
+ case M16_OPC_SWSP:
+ gen_ldst(ctx, OPC_SW, rx, 29, ((uint8_t)ctx->opcode) << 2);
+ break;
+ case M16_OPC_SW:
+ gen_ldst(ctx, OPC_SW, ry, rx, offset << 2);
+ break;
+ case M16_OPC_RRR:
+ {
+ int rz = xlat((ctx->opcode >> 2) & 0x7);
+ int mips32_op;
+
+ switch (ctx->opcode & 0x3) {
+ case RRR_ADDU:
+ mips32_op = OPC_ADDU;
+ break;
+ case RRR_SUBU:
+ mips32_op = OPC_SUBU;
+ break;
+#if defined(TARGET_MIPS64)
+ case RRR_DADDU:
+ mips32_op = OPC_DADDU;
+ check_mips_64(ctx);
+ break;
+ case RRR_DSUBU:
+ mips32_op = OPC_DSUBU;
+ check_mips_64(ctx);
+ break;
+#endif
+ default:
+ generate_exception(ctx, EXCP_RI);
+ goto done;
+ }
+
+ gen_arith(env, ctx, mips32_op, rz, rx, ry);
+ done:
+ ;
+ }
+ break;
+ case M16_OPC_RR:
+ switch (op1) {
+ case RR_JR:
+ {
+ int nd = (ctx->opcode >> 7) & 0x1;
+ int link = (ctx->opcode >> 6) & 0x1;
+ int ra = (ctx->opcode >> 5) & 0x1;
+
+ if (link) {
+ op = nd ? OPC_JALRC : OPC_JALR;
+ } else {
+ op = OPC_JR;
+ }
+
+ gen_compute_branch(ctx, op, 2, ra ? 31 : rx, 31, 0);
+ if (!nd) {
+ *is_branch = 1;
+ }
+ }
+ break;
+ case RR_SDBBP:
+ /* XXX: not clear which exception should be raised
+ * when in debug mode...
+ */
+ check_insn(env, ctx, ISA_MIPS32);
+ if (!(ctx->hflags & MIPS_HFLAG_DM)) {
+ generate_exception(ctx, EXCP_DBp);
+ } else {
+ generate_exception(ctx, EXCP_DBp);
+ }
+ break;
+ case RR_SLT:
+ gen_slt(env, OPC_SLT, 24, rx, ry);
+ break;
+ case RR_SLTU:
+ gen_slt(env, OPC_SLTU, 24, rx, ry);
+ break;
+ case RR_BREAK:
+ generate_exception(ctx, EXCP_BREAK);
+ break;
+ case RR_SLLV:
+ gen_shift(env, ctx, OPC_SLLV, ry, rx, ry);
+ break;
+ case RR_SRLV:
+ gen_shift(env, ctx, OPC_SRLV, ry, rx, ry);
+ break;
+ case RR_SRAV:
+ gen_shift(env, ctx, OPC_SRAV, ry, rx, ry);
+ break;
+#if defined (TARGET_MIPS64)
+ case RR_DSRL:
+ check_mips_64(ctx);
+ gen_shift_imm(env, ctx, OPC_DSRL, ry, ry, sa);
+ break;
+#endif
+ case RR_CMP:
+ gen_logic(env, OPC_XOR, 24, rx, ry);
+ break;
+ case RR_NEG:
+ gen_arith(env, ctx, OPC_SUBU, rx, 0, ry);
+ break;
+ case RR_AND:
+ gen_logic(env, OPC_AND, rx, rx, ry);
+ break;
+ case RR_OR:
+ gen_logic(env, OPC_OR, rx, rx, ry);
+ break;
+ case RR_XOR:
+ gen_logic(env, OPC_XOR, rx, rx, ry);
+ break;
+ case RR_NOT:
+ gen_logic(env, OPC_NOR, rx, ry, 0);
+ break;
+ case RR_MFHI:
+ gen_HILO(ctx, OPC_MFHI, rx);
+ break;
+ case RR_CNVT:
+ switch (cnvt_op) {
+ case RR_RY_CNVT_ZEB:
+ tcg_gen_ext8u_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+ case RR_RY_CNVT_ZEH:
+ tcg_gen_ext16u_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+ case RR_RY_CNVT_SEB:
+ tcg_gen_ext8s_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+ case RR_RY_CNVT_SEH:
+ tcg_gen_ext16s_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+#if defined (TARGET_MIPS64)
+ case RR_RY_CNVT_ZEW:
+ check_mips_64(ctx);
+ tcg_gen_ext32u_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+ case RR_RY_CNVT_SEW:
+ check_mips_64(ctx);
+ tcg_gen_ext32s_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+#endif
+ default:
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case RR_MFLO:
+ gen_HILO(ctx, OPC_MFLO, rx);
+ break;
+#if defined (TARGET_MIPS64)
+ case RR_DSRA:
+ check_mips_64(ctx);
+ gen_shift_imm(env, ctx, OPC_DSRA, ry, ry, sa);
+ break;
+ case RR_DSLLV:
+ check_mips_64(ctx);
+ gen_shift(env, ctx, OPC_DSLLV, ry, rx, ry);
+ break;
+ case RR_DSRLV:
+ check_mips_64(ctx);
+ gen_shift(env, ctx, OPC_DSRLV, ry, rx, ry);
+ break;
+ case RR_DSRAV:
+ check_mips_64(ctx);
+ gen_shift(env, ctx, OPC_DSRAV, ry, rx, ry);
+ break;
+#endif
+ case RR_MULT:
+ gen_muldiv(ctx, OPC_MULT, rx, ry);
+ break;
+ case RR_MULTU:
+ gen_muldiv(ctx, OPC_MULTU, rx, ry);
+ break;
+ case RR_DIV:
+ gen_muldiv(ctx, OPC_DIV, rx, ry);
+ break;
+ case RR_DIVU:
+ gen_muldiv(ctx, OPC_DIVU, rx, ry);
+ break;
+#if defined (TARGET_MIPS64)
+ case RR_DMULT:
+ check_mips_64(ctx);
+ gen_muldiv(ctx, OPC_DMULT, rx, ry);
+ break;
+ case RR_DMULTU:
+ check_mips_64(ctx);
+ gen_muldiv(ctx, OPC_DMULTU, rx, ry);
+ break;
+ case RR_DDIV:
+ check_mips_64(ctx);
+ gen_muldiv(ctx, OPC_DDIV, rx, ry);
+ break;
+ case RR_DDIVU:
+ check_mips_64(ctx);
+ gen_muldiv(ctx, OPC_DDIVU, rx, ry);
+ break;
+#endif
+ default:
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case M16_OPC_EXTEND:
+ decode_extended_mips16_opc(env, ctx, is_branch);
+ n_bytes = 4;
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_I64:
+ funct = (ctx->opcode >> 8) & 0x7;
+ decode_i64_mips16(env, ctx, ry, funct, offset, 0);
+ break;
+#endif
+ default:
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+
+ return n_bytes;
+}
+
/* SmartMIPS extension to MIPS32 */
#if defined(TARGET_MIPS64)
@@ -8384,7 +9432,10 @@ static void decode_opc (CPUState *env, DisasContext *ctx, int *is_branch)
#endif
case OPC_JALX:
check_insn(env, ctx, ASE_MIPS16);
- /* MIPS16: Not implemented. */
+ offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2;
+ gen_compute_branch(ctx, op, 4, rs, rt, offset);
+ *is_branch = 1;
+ break;
case OPC_MDMX:
check_insn(env, ctx, ASE_MDMX);
/* MDMX: Not implemented. */
@@ -8469,10 +9520,13 @@ gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb,
gen_io_start();
is_branch = 0;
- if (ctx.isa_mode == 0) {
+ if (!(ctx.hflags & MIPS_HFLAG_M16)) {
ctx.opcode = ldl_code(ctx.pc);
insn_bytes = 4;
decode_opc(env, &ctx, &is_branch);
+ } else if (env->insn_flags & ASE_MIPS16) {
+ ctx.opcode = lduw_code(ctx.pc);
+ insn_bytes = decode_mips16_opc(env, &ctx, &is_branch);
} else {
generate_exception(&ctx, EXCP_RI);
break;