author     bellard <bellard@c046a42c-6fe2-441c-8c8c-71466251a162>   2008-05-21 16:34:06 +0000
committer  bellard <bellard@c046a42c-6fe2-441c-8c8c-71466251a162>   2008-05-21 16:34:06 +0000
commit     3bd7da9e1851f024919c07eba4c29acf209ecd6e
tree       ff29adc10afaaa64727ae8141b6826fe4df8fad9 /target-i386
parent     cec6843e87fe29d8419fd5a9ed9912729c068656
convert remaining segment handling to TCG
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4514 c046a42c-6fe2-441c-8c8c-71466251a162
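
For readers less familiar with the old dyngen micro-ops, the vm86/real-mode segment load that this commit moves from op.c into inline TCG ops amounts to the following. This is a minimal stand-alone C sketch of the semantics only; the seg_cache type and load_seg_vm helper are hypothetical names, not QEMU code:

    #include <stdint.h>

    /* Hypothetical stand-alone segment cache, for illustration only. */
    struct seg_cache {
        uint16_t selector;
        uint32_t base;
    };

    /* Real-mode/vm86 segment load: keep the 16-bit selector and derive the
       linear base as selector << 4 -- the same effect as the TCG sequence
       emitted by gen_op_movl_seg_T0_vm (andi 0xffff, store selector,
       shli 4, store base). */
    static void load_seg_vm(struct seg_cache *sc, uint32_t t0)
    {
        uint16_t selector = t0 & 0xffff;
        sc->selector = selector;
        sc->base = (uint32_t)selector << 4;
    }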
Diffstat (limited to 'target-i386')
-rw-r--r--  target-i386/TODO          3
-rw-r--r--  target-i386/op.c         39
-rw-r--r--  target-i386/translate.c  45
3 files changed, 38 insertions, 49 deletions
diff --git a/target-i386/TODO b/target-i386/TODO
index 53be196415..15a3b36602 100644
--- a/target-i386/TODO
+++ b/target-i386/TODO
@@ -4,7 +4,6 @@ Correctness issues:
 - SVM: rework the implementation: simplify code, move most intercept
   tests as dynamic, correct segment access, verify exception safety,
   cpu save/restore, SMM save/restore.
-- arpl eflags computation is invalid
 - x86_64: fxsave/fxrestore intel/amd differences
 - x86_64: lcall/ljmp intel/amd differences ?
 - x86_64: cmpxchgl intel/amd differences ?
@@ -32,6 +31,8 @@ Optimizations/Features:
 - add VMX support
 - add AVX support
 - add SSE5 support
+- faster EFLAGS update: consider SZAP, C, O can be updated separately
+  with a bit field in CC_OP and more state variables.
 - evaluate x87 stack pointer statically
 - find a way to avoid translating several time the same TB if CR0.TS
   is set or not.
diff --git a/target-i386/op.c b/target-i386/op.c
index d637423c65..0a0532c6bf 100644
--- a/target-i386/op.c
+++ b/target-i386/op.c
@@ -147,45 +147,6 @@
 
 #endif
 
-/* segment handling */
-
-/* faster VM86 version */
-void OPPROTO op_movl_seg_T0_vm(void)
-{
-    int selector;
-    SegmentCache *sc;
-
-    selector = T0 & 0xffff;
-    /* env->segs[] access */
-    sc = (SegmentCache *)((char *)env + PARAM1);
-    sc->selector = selector;
-    sc->base = (selector << 4);
-}
-
-void OPPROTO op_movl_T0_seg(void)
-{
-    T0 = env->segs[PARAM1].selector;
-}
-
-void OPPROTO op_arpl(void)
-{
-    if ((T0 & 3) < (T1 & 3)) {
-        /* XXX: emulate bug or 0xff3f0000 oring as in bochs ? */
-        T0 = (T0 & ~3) | (T1 & 3);
-        T1 = CC_Z;
-    } else {
-        T1 = 0;
-    }
-    FORCE_RET();
-}
-
-void OPPROTO op_arpl_update(void)
-{
-    int eflags;
-    eflags = cc_table[CC_OP].compute_all();
-    CC_SRC = (eflags & ~CC_Z) | T1;
-}
-
 void OPPROTO op_movl_T0_env(void)
 {
     T0 = *(uint32_t *)((char *)env + PARAM1);
diff --git a/target-i386/translate.c b/target-i386/translate.c
index b2cd4d5f28..6ad32de883 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -2167,6 +2167,22 @@ static void gen_setcc(DisasContext *s, int b)
     }
 }
 
+static inline void gen_op_movl_T0_seg(int seg_reg)
+{
+    tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
+                     offsetof(CPUX86State,segs[seg_reg].selector));
+}
+
+static inline void gen_op_movl_seg_T0_vm(int seg_reg)
+{
+    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
+    tcg_gen_st32_tl(cpu_T[0], cpu_env,
+                    offsetof(CPUX86State,segs[seg_reg].selector));
+    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
+    tcg_gen_st_tl(cpu_T[0], cpu_env,
+                  offsetof(CPUX86State,segs[seg_reg].base));
+}
+
 /* move T0 to seg_reg and compute if the CPU state may change. Never
    call this function with seg_reg == R_CS */
 static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
@@ -2185,7 +2201,7 @@ static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
         if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
             s->is_jmp = 3;
     } else {
-        gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[seg_reg]));
+        gen_op_movl_seg_T0_vm(seg_reg);
         if (seg_reg == R_SS)
             s->is_jmp = 3;
     }
@@ -4085,7 +4101,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
                                cpu_T[1], tcg_const_i32(s->pc - pc_start));
             } else {
-                gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[R_CS]));
+                gen_op_movl_seg_T0_vm(R_CS);
                 gen_op_movl_T0_T1();
                 gen_op_jmp_T0();
             }
@@ -5575,7 +5591,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
             /* pop selector */
             gen_op_addl_A0_im(2 << s->dflag);
             gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
-            gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[R_CS]));
+            gen_op_movl_seg_T0_vm(R_CS);
             /* add stack offset */
             gen_stack_update(s, val + (4 << s->dflag));
         }
@@ -6578,9 +6594,10 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
         } else
 #endif
         {
+            int label1;
             if (!s->pe || s->vm86)
                 goto illegal_op;
-            ot = dflag ? OT_LONG : OT_WORD;
+            ot = OT_WORD;
             modrm = ldub_code(s->pc++);
             reg = (modrm >> 3) & 7;
             mod = (modrm >> 6) & 3;
@@ -6592,16 +6609,26 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
                 gen_op_mov_TN_reg(ot, 0, rm);
             }
             gen_op_mov_TN_reg(ot, 1, reg);
-            if (s->cc_op != CC_OP_DYNAMIC)
-                gen_op_set_cc_op(s->cc_op);
-            gen_op_arpl();
-            s->cc_op = CC_OP_EFLAGS;
+            tcg_gen_andi_tl(cpu_tmp0, cpu_T[0], 3);
+            tcg_gen_andi_tl(cpu_T[1], cpu_T[1], 3);
+            tcg_gen_movi_tl(cpu_T3, 0);
+            label1 = gen_new_label();
+            tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, cpu_T[1], label1);
+            tcg_gen_andi_tl(cpu_T[0], cpu_T[0], ~3);
+            tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+            tcg_gen_movi_tl(cpu_T3, CC_Z);
+            gen_set_label(label1);
             if (mod != 3) {
                 gen_op_st_T0_A0(ot + s->mem_index);
             } else {
                 gen_op_mov_reg_T0(ot, rm);
             }
-            gen_op_arpl_update();
+            if (s->cc_op != CC_OP_DYNAMIC)
+                gen_op_set_cc_op(s->cc_op);
+            gen_compute_eflags(cpu_cc_src);
+            tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
+            tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T3);
+            s->cc_op = CC_OP_EFLAGS;
         }
         break;
     case 0x102: /* lar */
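
As a reference for the new inline sequence in translate.c, the ARPL semantics it encodes (and that the removed op_arpl implemented) can be sketched in plain C as follows; arpl16 and the zf out-parameter are hypothetical names used only for illustration:

    #include <stdint.h>

    /* Adjust Requested Privilege Level: if the destination selector's RPL
       (low 2 bits) is lower than the source's, copy the source RPL into
       the destination and set ZF; otherwise leave it unchanged and clear
       ZF.  This mirrors the brcond/andi/or sequence now generated for
       opcode 0x63. */
    static uint16_t arpl16(uint16_t dst, uint16_t src, int *zf)
    {
        if ((dst & 3) < (src & 3)) {
            *zf = 1;
            return (uint16_t)((dst & ~3) | (src & 3));
        }
        *zf = 0;
        return dst;
    }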