Diffstat (limited to 'target-i386')
-rw-r--r--  target-i386/cpu.h            |   8
-rw-r--r--  target-i386/exec.h           |   2
-rw-r--r--  target-i386/helper.c         | 112
-rw-r--r--  target-i386/helper2.c        |  28
-rw-r--r--  target-i386/op.c             |  22
-rw-r--r--  target-i386/ops_sse.h        |   4
-rw-r--r--  target-i386/ops_template.h   |   2
-rw-r--r--  target-i386/translate-copy.c |  34
-rw-r--r--  target-i386/translate.c      |  94
9 files changed, 153 insertions(+), 153 deletions(-)
diff --git a/target-i386/cpu.h b/target-i386/cpu.h index 6cf2c487b8..9ae50ee6d7 100644 --- a/target-i386/cpu.h +++ b/target-i386/cpu.h @@ -476,7 +476,7 @@ typedef struct CPUX86State { int i32; int64_t i64; } fp_convert; - + float_status sse_status; uint32_t mxcsr; XMMReg xmm_regs[CPU_NB_REGS]; @@ -504,7 +504,7 @@ typedef struct CPUX86State { uint32_t saved_esp; int native_fp_regs; /* if true, the FPU state is in the native CPU regs */ #endif - + /* exception/interrupt handling */ jmp_buf jmp_env; int exception_index; @@ -531,7 +531,7 @@ typedef struct CPUX86State { uint32_t cpuid_model[12]; uint32_t cpuid_ext2_features; uint32_t cpuid_apic_id; - + #ifdef USE_KQEMU int kqemu_enabled; int last_io_time; @@ -558,7 +558,7 @@ static inline void cpu_x86_load_seg_cache(CPUX86State *env, { SegmentCache *sc; unsigned int new_hflags; - + sc = &env->segs[seg_reg]; sc->selector = selector; sc->base = base; diff --git a/target-i386/exec.h b/target-i386/exec.h index a3ab9b439f..59c0649232 100644 --- a/target-i386/exec.h +++ b/target-i386/exec.h @@ -443,7 +443,7 @@ static inline CPU86_LDouble helper_fldt(target_ulong ptr) static inline void helper_fstt(CPU86_LDouble f, target_ulong ptr) { CPU86_LDoubleU temp; - + temp.d = f; stq(ptr, temp.l.lower); stw(ptr + 8, temp.l.upper); diff --git a/target-i386/helper.c b/target-i386/helper.c index 447f4d8910..43a8b9b579 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -91,7 +91,7 @@ const CPU86_LDouble f15rk[7] = 1.44269504088896340739L, /*l2e*/ 3.32192809488736234781L, /*l2t*/ }; - + /* thread support */ spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED; @@ -126,7 +126,7 @@ static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr, *e2_ptr = ldl_kernel(ptr + 4); return 0; } - + static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2) { unsigned int limit; @@ -160,7 +160,7 @@ static inline void get_ss_esp_from_tss(uint32_t *ss_ptr, uint32_t *esp_ptr, int dpl) { int type, index, shift; - + #if 0 { int i; @@ -325,7 +325,7 @@ static void switch_tss(int tss_selector, new_segs[R_GS] = 0; new_trap = 0; } - + /* NOTE: we must avoid memory exceptions during the task switch, so we make dummy accesses before */ /* XXX: it can still fail in some cases, so a bigger hack is @@ -335,7 +335,7 @@ static void switch_tss(int tss_selector, v2 = ldub_kernel(env->tr.base + old_tss_limit_max); stb_kernel(env->tr.base, v1); stb_kernel(env->tr.base + old_tss_limit_max, v2); - + /* clear busy bit (it is restartable) */ if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) { target_ulong ptr; @@ -348,7 +348,7 @@ static void switch_tss(int tss_selector, old_eflags = compute_eflags(); if (source == SWITCH_TSS_IRET) old_eflags &= ~NT_MASK; - + /* save the current state in the old TSS */ if (type & 8) { /* 32 bit */ @@ -379,7 +379,7 @@ static void switch_tss(int tss_selector, for(i = 0; i < 4; i++) stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector); } - + /* now if an exception occurs, it will occurs in the next task context */ @@ -406,11 +406,11 @@ static void switch_tss(int tss_selector, env->tr.base = tss_base; env->tr.limit = tss_limit; env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK; - + if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) { cpu_x86_update_cr3(env, new_cr3); } - + /* load all registers without an exception, then reload them with possible exception */ env->eip = new_eip; @@ -440,7 +440,7 @@ static void switch_tss(int tss_selector, for(i = 0; i < 6; i++) cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0); } - + env->ldt.selector = 
new_ldt & ~4; env->ldt.base = 0; env->ldt.limit = 0; @@ -464,7 +464,7 @@ static void switch_tss(int tss_selector, raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc); load_seg_cache_raw_dt(&env->ldt, e1, e2); } - + /* load the segments */ if (!(new_eflags & VM_MASK)) { tss_load_seg(R_CS, new_segs[R_CS]); @@ -474,7 +474,7 @@ static void switch_tss(int tss_selector, tss_load_seg(R_FS, new_segs[R_FS]); tss_load_seg(R_GS, new_segs[R_GS]); } - + /* check that EIP is in the CS segment limits */ if (new_eip > env->segs[R_CS].limit) { /* XXX: different exception if CALL ? */ @@ -486,7 +486,7 @@ static void switch_tss(int tss_selector, static inline void check_io(int addr, int size) { int io_offset, val, mask; - + /* TSS must be a valid 32 bit one */ if (!(env->tr.flags & DESC_P_MASK) || ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 || @@ -760,7 +760,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code, PUSHW(ssp, esp, sp_mask, error_code); } } - + if (new_stack) { if (env->eflags & VM_MASK) { cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0); @@ -806,7 +806,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code, static inline target_ulong get_rsp_from_tss(int level) { int index; - + #if 0 printf("TR: base=" TARGET_FMT_lx " limit=%x\n", env->tr.base, env->tr.limit); @@ -926,7 +926,7 @@ static void do_interrupt64(int intno, int is_int, int error_code, if (has_error_code) { PUSHQ(esp, error_code); } - + if (new_stack) { ss = 0 | dpl; cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0); @@ -963,7 +963,7 @@ void helper_syscall(int next_eip_addend) ECX = env->eip + next_eip_addend; env->regs[11] = compute_eflags(); - + code64 = env->hflags & HF_CS64_MASK; cpu_x86_set_cpl(env, 0); @@ -986,7 +986,7 @@ void helper_syscall(int next_eip_addend) #endif { ECX = (uint32_t)(env->eip + next_eip_addend); - + cpu_x86_set_cpl(env, 0); cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, 0, 0xffffffff, @@ -1096,7 +1096,7 @@ static void do_interrupt_real(int intno, int is_int, int error_code, PUSHW(ssp, esp, 0xffff, compute_eflags()); PUSHW(ssp, esp, 0xffff, old_cs); PUSHW(ssp, esp, 0xffff, old_eip); - + /* update processor state */ ESP = (ESP & ~0xffff) | (esp & 0xffff); env->eip = offset; @@ -1117,7 +1117,7 @@ void do_interrupt_user(int intno, int is_int, int error_code, dt = &env->idt; ptr = dt->base + (intno * 8); e2 = ldl_kernel(ptr + 4); - + dpl = (e2 >> DESC_DPL_SHIFT) & 3; cpl = env->hflags & HF_CPL_MASK; /* check privledge if software int */ @@ -1134,7 +1134,7 @@ void do_interrupt_user(int intno, int is_int, int error_code, /* * Begin execution of an interruption. is_int is TRUE if coming from * the int instruction. next_eip is the EIP value AFTER the interrupt - * instruction. It is only relevant if is_int is TRUE. + * instruction. It is only relevant if is_int is TRUE. */ void do_interrupt(int intno, int is_int, int error_code, target_ulong next_eip, int is_hw) @@ -1222,7 +1222,7 @@ int check_exception(int intno, int *error_code) * Signal an interruption. It is executed in the main CPU loop. * is_int is TRUE if coming from the int instruction. next_eip is the * EIP value AFTER the interrupt instruction. It is only relevant if - * is_int is TRUE. + * is_int is TRUE. 
*/ void raise_interrupt(int intno, int is_int, int error_code, int next_eip_addend) @@ -1296,7 +1296,7 @@ void do_smm_enter(void) cpu_smm_update(env); sm_state = env->smbase + 0x8000; - + #ifdef TARGET_X86_64 for(i = 0; i < 6; i++) { dt = &env->segs[i]; @@ -1314,7 +1314,7 @@ void do_smm_enter(void) stq_phys(sm_state + 0x7e78, env->ldt.base); stl_phys(sm_state + 0x7e74, env->ldt.limit); stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff); - + stq_phys(sm_state + 0x7e88, env->idt.base); stl_phys(sm_state + 0x7e84, env->idt.limit); @@ -1322,7 +1322,7 @@ void do_smm_enter(void) stq_phys(sm_state + 0x7e98, env->tr.base); stl_phys(sm_state + 0x7e94, env->tr.limit); stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff); - + stq_phys(sm_state + 0x7ed0, env->efer); stq_phys(sm_state + 0x7ff8, EAX); @@ -1361,17 +1361,17 @@ void do_smm_enter(void) stl_phys(sm_state + 0x7fd0, EAX); stl_phys(sm_state + 0x7fcc, env->dr[6]); stl_phys(sm_state + 0x7fc8, env->dr[7]); - + stl_phys(sm_state + 0x7fc4, env->tr.selector); stl_phys(sm_state + 0x7f64, env->tr.base); stl_phys(sm_state + 0x7f60, env->tr.limit); stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff); - + stl_phys(sm_state + 0x7fc0, env->ldt.selector); stl_phys(sm_state + 0x7f80, env->ldt.base); stl_phys(sm_state + 0x7f7c, env->ldt.limit); stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff); - + stl_phys(sm_state + 0x7f74, env->gdt.base); stl_phys(sm_state + 0x7f70, env->gdt.limit); @@ -1409,7 +1409,7 @@ void do_smm_enter(void) cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0); cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0); cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0); - + cpu_x86_update_cr0(env, env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK)); cpu_x86_update_cr4(env, 0); @@ -1447,7 +1447,7 @@ void helper_rsm(void) env->ldt.base = ldq_phys(sm_state + 0x7e78); env->ldt.limit = ldl_phys(sm_state + 0x7e74); env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8; - + env->idt.base = ldq_phys(sm_state + 0x7e88); env->idt.limit = ldl_phys(sm_state + 0x7e84); @@ -1455,7 +1455,7 @@ void helper_rsm(void) env->tr.base = ldq_phys(sm_state + 0x7e98); env->tr.limit = ldl_phys(sm_state + 0x7e94); env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8; - + EAX = ldq_phys(sm_state + 0x7ff8); ECX = ldq_phys(sm_state + 0x7ff0); EDX = ldq_phys(sm_state + 0x7fe8); @@ -1496,17 +1496,17 @@ void helper_rsm(void) EAX = ldl_phys(sm_state + 0x7fd0); env->dr[6] = ldl_phys(sm_state + 0x7fcc); env->dr[7] = ldl_phys(sm_state + 0x7fc8); - + env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff; env->tr.base = ldl_phys(sm_state + 0x7f64); env->tr.limit = ldl_phys(sm_state + 0x7f60); env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8; - + env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff; env->ldt.base = ldl_phys(sm_state + 0x7f80); env->ldt.limit = ldl_phys(sm_state + 0x7f7c); env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8; - + env->gdt.base = ldl_phys(sm_state + 0x7f74); env->gdt.limit = ldl_phys(sm_state + 0x7f70); @@ -1564,7 +1564,7 @@ void helper_divl_EAX_T0(void) { unsigned int den, r; uint64_t num, q; - + num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32); den = T0; if (den == 0) { @@ -1586,7 +1586,7 @@ void helper_idivl_EAX_T0(void) { int den, r; int64_t num, q; - + num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32); den = T0; if (den == 0) { @@ -1632,7 +1632,7 @@ void helper_cpuid(void) { uint32_t index; index = (uint32_t)EAX; - + /* test if 
maximum index reached */ if (index & 0x80000000) { if (index > env->cpuid_xlevel) @@ -1641,7 +1641,7 @@ void helper_cpuid(void) if (index > env->cpuid_level) index = env->cpuid_level; } - + switch(index) { case 0: EAX = env->cpuid_level; @@ -1783,7 +1783,7 @@ void helper_lldt_T0(void) uint32_t e1, e2; int index, entry_limit; target_ulong ptr; - + selector = T0 & 0xffff; if ((selector & 0xfffc) == 0) { /* XXX: NULL selector case: invalid LDT */ @@ -1798,7 +1798,7 @@ void helper_lldt_T0(void) if (env->hflags & HF_LMA_MASK) entry_limit = 15; else -#endif +#endif entry_limit = 7; if ((index + entry_limit) > dt->limit) raise_exception_err(EXCP0D_GPF, selector & 0xfffc); @@ -1831,7 +1831,7 @@ void helper_ltr_T0(void) uint32_t e1, e2; int index, type, entry_limit; target_ulong ptr; - + selector = T0 & 0xffff; if ((selector & 0xfffc) == 0) { /* NULL selector case: invalid TR */ @@ -1847,7 +1847,7 @@ void helper_ltr_T0(void) if (env->hflags & HF_LMA_MASK) entry_limit = 15; else -#endif +#endif entry_limit = 7; if ((index + entry_limit) > dt->limit) raise_exception_err(EXCP0D_GPF, selector & 0xfffc); @@ -1901,7 +1901,7 @@ void load_seg(int seg_reg, int selector) raise_exception_err(EXCP0D_GPF, 0); cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0); } else { - + if (selector & 0x4) dt = &env->ldt; else @@ -1912,7 +1912,7 @@ void load_seg(int seg_reg, int selector) ptr = dt->base + index; e1 = ldl_kernel(ptr); e2 = ldl_kernel(ptr + 4); - + if (!(e2 & DESC_S_MASK)) raise_exception_err(EXCP0D_GPF, selector & 0xfffc); rpl = selector & 3; @@ -1927,7 +1927,7 @@ void load_seg(int seg_reg, int selector) /* must be readable segment */ if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) raise_exception_err(EXCP0D_GPF, selector & 0xfffc); - + if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { /* if not conforming code, test rights */ if (dpl < cpl || dpl < rpl) @@ -1965,7 +1965,7 @@ void helper_ljmp_protected_T0_T1(int next_eip_addend) int new_cs, gate_cs, type; uint32_t e1, e2, cpl, dpl, rpl, limit; target_ulong new_eip, next_eip; - + new_cs = T0; new_eip = T1; if ((new_cs & 0xfffc) == 0) @@ -2084,7 +2084,7 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip_addend) uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask; uint32_t val, limit, old_sp_mask; target_ulong ssp, old_ssp, next_eip, new_eip; - + new_cs = T0; new_eip = T1; next_eip = env->eip + next_eip_addend; @@ -2151,7 +2151,7 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip_addend) PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector); PUSHW(ssp, sp, sp_mask, next_eip); } - + limit = get_seg_limit(e1, e2); if (new_eip > limit) raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); @@ -2228,12 +2228,12 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip_addend) raise_exception_err(EXCP0A_TSS, ss & 0xfffc); if (!(ss_e2 & DESC_P_MASK)) raise_exception_err(EXCP0A_TSS, ss & 0xfffc); - + // push_size = ((param_count * 2) + 8) << shift; old_sp_mask = get_sp_mask(env->segs[R_SS].flags); old_ssp = env->segs[R_SS].base; - + sp_mask = get_sp_mask(ss_e2); ssp = get_seg_base(ss_e1, ss_e2); if (shift) { @@ -2360,7 +2360,7 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend) uint32_t e1, e2, ss_e1, ss_e2; int cpl, dpl, rpl, eflags_mask, iopl; target_ulong ssp, sp, new_eip, new_esp, sp_mask; - + #ifdef TARGET_X86_64 if (shift == 2) sp_mask = -1; @@ -2425,7 +2425,7 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend) } if (!(e2 & DESC_P_MASK)) raise_exception_err(EXCP0B_NOSEG, new_cs & 
0xfffc); - + sp += addend; if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) || ((env->hflags & HF_CS64_MASK) && !is_iret))) { @@ -2539,7 +2539,7 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend) POPL(ssp, sp, sp_mask, new_ds); POPL(ssp, sp, sp_mask, new_fs); POPL(ssp, sp, sp_mask, new_gs); - + /* modify processor state */ load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK); @@ -2559,7 +2559,7 @@ void helper_iret_protected(int shift, int next_eip) { int tss_selector, type; uint32_t e1, e2; - + /* specific case for TSS */ if (env->eflags & NT_MASK) { #ifdef TARGET_X86_64 @@ -3080,7 +3080,7 @@ void helper_f2xm1(void) void helper_fyl2x(void) { CPU86_LDouble fptemp; - + fptemp = ST0; if (fptemp>0.0){ fptemp = log(fptemp)/log(2.0); /* log2(ST) */ @@ -3490,7 +3490,7 @@ void helper_fxsave(target_ulong ptr, int data64) helper_fstt(tmp, addr); addr += 16; } - + if (env->cr[4] & CR4_OSFXSR_MASK) { /* XXX: finish it */ stl(ptr + 0x18, env->mxcsr); /* mxcsr */ diff --git a/target-i386/helper2.c b/target-i386/helper2.c index 8b61cf8740..715b289b30 100644 --- a/target-i386/helper2.c +++ b/target-i386/helper2.c @@ -75,7 +75,7 @@ CPUX86State *cpu_x86_init(void) ldt.seg_not_present = 0; ldt.useable = 1; modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */ - + asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7)); } #endif @@ -173,19 +173,19 @@ void cpu_reset(CPUX86State *env) env->ldt.flags = DESC_P_MASK; env->tr.limit = 0xffff; env->tr.flags = DESC_P_MASK; - + cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0); cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0); cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0); cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0); cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0); cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0); - + env->eip = 0xfff0; env->regs[R_EDX] = 0x600; /* indicate P6 processor */ - + env->eflags = 0x2; - + /* FPU init */ for(i = 0;i < 8; i++) env->fptags[i] = 1; @@ -516,7 +516,7 @@ void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0) } #endif env->cr[0] = new_cr0 | CR0_ET_MASK; - + /* update PE flag in hidden flags */ pe_state = (env->cr[0] & CR0_PE_MASK); env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT); @@ -603,13 +603,13 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, int error_code, is_dirty, prot, page_size, ret, is_write; unsigned long paddr, page_offset; target_ulong vaddr, virt_addr; - + #if defined(DEBUG_MMU) printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n", addr, is_write1, is_user, env->eip); #endif is_write = is_write1 & 1; - + if (!(env->cr[0] & CR0_PG_MASK)) { pte = addr; virt_addr = addr & TARGET_PAGE_MASK; @@ -635,7 +635,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, env->exception_index = EXCP0D_GPF; return 1; } - + pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & env->a20_mask; pml4e = ldq_phys(pml4e_addr); @@ -794,7 +794,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, pde |= PG_DIRTY_MASK; stl_phys_notdirty(pde_addr, pde); } - + pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */ ptep = pte; virt_addr = addr & ~(page_size - 1); @@ -859,7 +859,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1); paddr = (pte & TARGET_PAGE_MASK) + page_offset; vaddr = virt_addr + page_offset; - + ret = 
tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu); return ret; do_fault_protect: @@ -897,13 +897,13 @@ target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr) sext = (int64_t)addr >> 47; if (sext != 0 && sext != -1) return -1; - + pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & env->a20_mask; pml4e = ldl_phys(pml4e_addr); if (!(pml4e & PG_PRESENT_MASK)) return -1; - + pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask; pdpe = ldl_phys(pdpe_addr); @@ -987,7 +987,7 @@ void restore_native_fp_state(CPUState *env) { int fptag, i, j; struct fpstate fp1, *fp = &fp1; - + fp->fpuc = env->fpuc; fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; fptag = 0; diff --git a/target-i386/op.c b/target-i386/op.c index a790aebb17..9b4263ca7d 100644 --- a/target-i386/op.c +++ b/target-i386/op.c @@ -1149,7 +1149,7 @@ void OPPROTO op_movl_seg_T0_vm(void) { int selector; SegmentCache *sc; - + selector = T0 & 0xffff; /* env->segs[] access */ sc = (SegmentCache *)((char *)env + PARAM1); @@ -1193,14 +1193,14 @@ void OPPROTO op_arpl(void) } FORCE_RET(); } - + void OPPROTO op_arpl_update(void) { int eflags; eflags = cc_table[CC_OP].compute_all(); CC_SRC = (eflags & ~CC_Z) | T1; } - + /* T0: segment, T1:eip */ void OPPROTO op_ljmp_protected_T0_T1(void) { @@ -1591,23 +1591,23 @@ CCTable cc_table[CC_OP_NB] = { [CC_OP_SUBB] = { compute_all_subb, compute_c_subb }, [CC_OP_SUBW] = { compute_all_subw, compute_c_subw }, [CC_OP_SUBL] = { compute_all_subl, compute_c_subl }, - + [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb }, [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw }, [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl }, - + [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb }, [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw }, [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl }, - + [CC_OP_INCB] = { compute_all_incb, compute_c_incl }, [CC_OP_INCW] = { compute_all_incw, compute_c_incl }, [CC_OP_INCL] = { compute_all_incl, compute_c_incl }, - + [CC_OP_DECB] = { compute_all_decb, compute_c_incl }, [CC_OP_DECW] = { compute_all_decw, compute_c_incl }, [CC_OP_DECL] = { compute_all_decl, compute_c_incl }, - + [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb }, [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw }, [CC_OP_SHLL] = { compute_all_shll, compute_c_shll }, @@ -1624,11 +1624,11 @@ CCTable cc_table[CC_OP_NB] = { [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq }, [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq }, - + [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq }, - + [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq }, - + [CC_OP_INCQ] = { compute_all_incq, compute_c_incl }, [CC_OP_DECQ] = { compute_all_decq, compute_c_incl }, diff --git a/target-i386/ops_sse.h b/target-i386/ops_sse.h index b6ca9a3d77..82d1ec0a1e 100644 --- a/target-i386/ops_sse.h +++ b/target-i386/ops_sse.h @@ -1213,7 +1213,7 @@ void OPPROTO glue(op_pinsrw, SUFFIX) (void) { Reg *d = (Reg *)((char *)env + PARAM1); int pos = PARAM2; - + d->W(pos) = T0; } @@ -1221,7 +1221,7 @@ void OPPROTO glue(op_pextrw, SUFFIX) (void) { Reg *s = (Reg *)((char *)env + PARAM1); int pos = PARAM2; - + T0 = s->W(pos); } diff --git a/target-i386/ops_template.h b/target-i386/ops_template.h index e2bc1cfc43..d1df07f9d1 100644 --- a/target-i386/ops_template.h +++ b/target-i386/ops_template.h @@ -502,7 +502,7 @@ void OPPROTO glue(glue(op_bsf, SUFFIX), _T0_cc)(void) { int count; target_long res; - + res = T0 & DATA_MASK; if (res != 0) { count = 0; 
diff --git a/target-i386/translate-copy.c b/target-i386/translate-copy.c index 8b8d267955..d68764360b 100644 --- a/target-i386/translate-copy.c +++ b/target-i386/translate-copy.c @@ -63,7 +63,7 @@ typedef struct DisasContext { /* code output */ uint8_t *gen_code_ptr; uint8_t *gen_code_start; - + /* current block context */ target_ulong cs_base; /* base of CS segment */ int pe; /* protected mode */ @@ -155,7 +155,7 @@ static void gen_jcc(DisasContext *s, int op, gb(s, 0xe9); /* jmp */ tb->tb_jmp_offset[1] = s->gen_code_ptr - s->gen_code_start; gl(s, 0); - + tb->tb_next_offset[0] = s->gen_code_ptr - s->gen_code_start; gen_movl_addr_im(s, CPU_FIELD_OFFSET(eip), target_eip); gen_movl_addr_im(s, CPU_FIELD_OFFSET(tmp0), (uint32_t)tb); @@ -194,7 +194,7 @@ static inline void gen_lea_modrm(DisasContext *s, int modrm) base = rm; index = 0; scale = 0; - + if (base == 4) { havesib = 1; code = ldub_code(s->pc++); @@ -222,7 +222,7 @@ static inline void gen_lea_modrm(DisasContext *s, int modrm) s->pc += 4; break; } - + } else { switch (mod) { case 0: @@ -248,7 +248,7 @@ static inline void gen_lea_modrm(DisasContext *s, int modrm) static inline void parse_modrm(DisasContext *s, int modrm) { if ((modrm & 0xc0) != 0xc0) - gen_lea_modrm(s, modrm); + gen_lea_modrm(s, modrm); } static inline uint32_t insn_get(DisasContext *s, int ot) @@ -351,7 +351,7 @@ static int disas_insn(DisasContext *s) /* extended op code */ b = ldub_code(s->pc++) | 0x100; goto reswitch; - + /**************************/ /* arith & logic */ case 0x00 ... 0x05: @@ -370,7 +370,7 @@ static int disas_insn(DisasContext *s) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; - + switch(f) { case 0: /* OP Ev, Gv */ modrm = ldub_code(s->pc++); @@ -396,7 +396,7 @@ static int disas_insn(DisasContext *s) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; - + modrm = ldub_code(s->pc++); parse_modrm(s, modrm); @@ -506,7 +506,7 @@ static int disas_insn(DisasContext *s) ot = dflag ? OT_LONG : OT_WORD; insn_get(s, ot); break; - + case 0x98: /* CWDE/CBW */ break; case 0x99: /* CDQ/CWD */ @@ -527,7 +527,7 @@ static int disas_insn(DisasContext *s) case 0x84: /* test Ev, Gv */ case 0x85: - + case 0x1c0: case 0x1c1: /* xadd Ev, Gv */ @@ -583,7 +583,7 @@ static int disas_insn(DisasContext *s) goto illegal_op; parse_modrm(s, modrm); break; - + /**************************/ /* push/pop */ case 0x50 ... 0x57: /* push */ @@ -850,7 +850,7 @@ static int disas_insn(DisasContext *s) goto illegal_op; parse_modrm(s, modrm); break; - + case 0xa0: /* mov EAX, Ov */ case 0xa1: case 0xa2: /* mov Ov, EAX */ @@ -888,14 +888,14 @@ static int disas_insn(DisasContext *s) parse_modrm(s, modrm); ldub_code(s->pc++); break; - + /************************/ /* string ops */ case 0xa4: /* movsS */ case 0xa5: break; - + case 0xaa: /* stosS */ case 0xab: break; @@ -955,7 +955,7 @@ static int disas_insn(DisasContext *s) case 0xc3: /* ret */ gb(s, CPU_SEG); - if (!s->dflag) + if (!s->dflag) gb(s, 0x66); /* d16 */ gb(s, 0x8f); /* pop addr */ gb(s, 0x05); @@ -1244,7 +1244,7 @@ static inline int gen_intermediate_code_internal(CPUState *env, break; } } - + #ifdef DEBUG_DISAS if (loglevel & CPU_LOG_TB_IN_ASM) { fprintf(logfile, "----------------\n"); @@ -1304,7 +1304,7 @@ int cpu_restore_state_copy(TranslationBlock *tb, return ret; /* restore all the CPU state from the CPU context from the signal. The FPU context stays in the host CPU. 
*/ - + env->regs[R_EAX] = uc->uc_mcontext.gregs[REG_EAX]; env->regs[R_ECX] = uc->uc_mcontext.gregs[REG_ECX]; env->regs[R_EDX] = uc->uc_mcontext.gregs[REG_EDX]; diff --git a/target-i386/translate.c b/target-i386/translate.c index 946a43097e..35ba6319cc 100644 --- a/target-i386/translate.c +++ b/target-i386/translate.c @@ -878,7 +878,7 @@ static GenOpFunc1 *gen_op_jnz_ecx[3] = { gen_op_jnz_ecxl, X86_64_ONLY(gen_op_jnz_ecxq), }; - + static GenOpFunc1 *gen_op_jz_ecx[3] = { gen_op_jz_ecxw, gen_op_jz_ecxl, @@ -1318,7 +1318,7 @@ static GenOpFunc1 *gen_op_fp_arith_STN_ST0[8] = { static void gen_op(DisasContext *s1, int op, int ot, int d) { GenOpFunc *gen_update_cc; - + if (d != OR_TMP0) { gen_op_mov_TN_reg[ot][0][d](); } else { @@ -1408,7 +1408,7 @@ static void gen_shift(DisasContext *s1, int op, int ot, int d, int s) /* for zero counts, flags are not updated, so must do it dynamically */ if (s1->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s1->cc_op); - + if (d != OR_TMP0) gen_op_shift_T0_T1_cc[ot][op](); else @@ -1448,7 +1448,7 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ base = rm; index = 0; scale = 0; - + if (base == 4) { havesib = 1; code = ldub_code(s->pc++); @@ -1480,7 +1480,7 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ s->pc += 4; break; } - + if (base >= 0) { /* for correct popl handling with esp */ if (base == 4 && s->popl_esp_hack) @@ -1627,12 +1627,12 @@ static void gen_nop_modrm(DisasContext *s, int modrm) if (s->aflag) { base = rm; - + if (base == 4) { code = ldub_code(s->pc++); base = (code & 7); } - + switch (mod) { case 0: if (base == 5) { @@ -1787,7 +1787,7 @@ static inline void gen_jcc(DisasContext *s, int b, inv = b & 1; jcc_op = (b >> 1) & 7; - + if (s->jmp_opt) { switch(s->cc_op) { /* we optimize the cmp/jcc case */ @@ -1797,7 +1797,7 @@ static inline void gen_jcc(DisasContext *s, int b, case CC_OP_SUBQ: func = gen_jcc_sub[s->cc_op - CC_OP_SUBB][jcc_op]; break; - + /* some jumps are easy to compute */ case CC_OP_ADDB: case CC_OP_ADDW: @@ -1864,7 +1864,7 @@ static inline void gen_jcc(DisasContext *s, int b, gen_setcc_slow[jcc_op](); func = gen_op_jnz_T0_label; } - + if (inv) { tmp = val; val = next_eip; @@ -1922,7 +1922,7 @@ static void gen_setcc(DisasContext *s, int b) if (!func) goto slow_jcc; break; - + /* some jumps are easy to compute */ case CC_OP_ADDB: case CC_OP_ADDW: @@ -2094,7 +2094,7 @@ static void gen_push_T1(DisasContext *s) gen_op_addl_A0_SS(); } gen_op_st_T1_A0[s->dflag + 1 + s->mem_index](); - + if (s->ss32 && !s->addseg) gen_op_movl_ESP_A0(); else @@ -2196,7 +2196,7 @@ static void gen_enter(DisasContext *s, int esp_addend, int level) if (CODE64(s)) { ot = s->dflag ? 
OT_QUAD : OT_WORD; opsize = 1 << ot; - + gen_op_movl_A0_ESP(); gen_op_addq_A0_im(-opsize); gen_op_movl_T1_A0(); @@ -2215,7 +2215,7 @@ static void gen_enter(DisasContext *s, int esp_addend, int level) { ot = s->dflag + OT_WORD; opsize = 2 << s->dflag; - + gen_op_movl_A0_ESP(); gen_op_addl_A0_im(-opsize); if (!s->ss32) @@ -2309,7 +2309,7 @@ static void gen_jmp(DisasContext *s, target_ulong eip) static void gen_movtl_T0_im(target_ulong val) { -#ifdef TARGET_X86_64 +#ifdef TARGET_X86_64 if ((int32_t)val == val) { gen_op_movl_T0_im(val); } else { @@ -2322,7 +2322,7 @@ static void gen_movtl_T0_im(target_ulong val) static void gen_movtl_T1_im(target_ulong val) { -#ifdef TARGET_X86_64 +#ifdef TARGET_X86_64 if ((int32_t)val == val) { gen_op_movl_T1_im(val); } else { @@ -2522,7 +2522,7 @@ static GenOpFunc1 *sse_op_table3[4 * 3] = { gen_op_cvtsi2sd, X86_64_ONLY(gen_op_cvtsq2ss), X86_64_ONLY(gen_op_cvtsq2sd), - + gen_op_cvttss2si, gen_op_cvttsd2si, X86_64_ONLY(gen_op_cvttss2sq), @@ -2533,7 +2533,7 @@ static GenOpFunc1 *sse_op_table3[4 * 3] = { X86_64_ONLY(gen_op_cvtss2sq), X86_64_ONLY(gen_op_cvtsd2sq), }; - + static GenOpFunc2 *sse_op_table4[8][4] = { SSE_FOP(cmpeq), SSE_FOP(cmplt), @@ -2544,7 +2544,7 @@ static GenOpFunc2 *sse_op_table4[8][4] = { SSE_FOP(cmpnle), SSE_FOP(cmpord), }; - + static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) { int b1, op1_offset, op2_offset, is_xmm, val, ot; @@ -3285,7 +3285,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) /* extended op code */ b = ldub_code(s->pc++) | 0x100; goto reswitch; - + /**************************/ /* arith & logic */ case 0x00 ... 0x05: @@ -3305,7 +3305,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) ot = OT_BYTE; else ot = dflag + OT_WORD; - + switch(f) { case 0: /* OP Ev, Gv */ modrm = ldub_code(s->pc++); @@ -3364,12 +3364,12 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) ot = OT_BYTE; else ot = dflag + OT_WORD; - + modrm = ldub_code(s->pc++); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); op = (modrm >> 3) & 7; - + if (mod != 3) { if (b == 0x83) s->rip_offset = 1; @@ -3666,13 +3666,13 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); reg = ((modrm >> 3) & 7) | rex_r; - + gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); gen_op_mov_TN_reg[ot][1][reg](); gen_op_testl_T0_T1_cc(); s->cc_op = CC_OP_LOGICB + ot; break; - + case 0xa8: /* test eAX, Iv */ case 0xa9: if ((b & 1) == 0) @@ -3686,7 +3686,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) gen_op_testl_T0_T1_cc(); s->cc_op = CC_OP_LOGICB + ot; break; - + case 0x98: /* CWDE/CBW */ #ifdef TARGET_X86_64 if (dflag == 2) { @@ -3804,7 +3804,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) gen_op_cmpxchg8b(); s->cc_op = CC_OP_EFLAGS; break; - + /**************************/ /* push/pop */ case 0x50 ... 
0x57: /* push */ @@ -3955,7 +3955,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) ot = dflag + OT_WORD; modrm = ldub_code(s->pc++); reg = ((modrm >> 3) & 7) | rex_r; - + /* generate a generic store */ gen_ldst_modrm(s, modrm, ot, reg, 1); break; @@ -3986,7 +3986,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) ot = OT_WORD + dflag; modrm = ldub_code(s->pc++); reg = ((modrm >> 3) & 7) | rex_r; - + gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); gen_op_mov_reg_T0[ot][reg](); break; @@ -4038,7 +4038,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); - + if (mod == 3) { gen_op_mov_TN_reg[ot][0][rm](); switch(ot | (b & 8)) { @@ -4084,7 +4084,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) s->addseg = val; gen_op_mov_reg_A0[ot - OT_WORD][reg](); break; - + case 0xa0: /* mov EAX, Ov */ case 0xa1: case 0xa2: /* mov Ov, EAX */ @@ -4239,7 +4239,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) gen_eob(s); } break; - + /************************/ /* shifts */ case 0xc0: @@ -4252,11 +4252,11 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) ot = OT_BYTE; else ot = dflag + OT_WORD; - + modrm = ldub_code(s->pc++); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; - + if (mod != 3) { if (shift == 2) { s->rip_offset = 1; @@ -4310,7 +4310,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); reg = ((modrm >> 3) & 7) | rex_r; - + if (mod != 3) { gen_lea_modrm(s, modrm, ®_addr, &offset_addr); gen_op_ld_T0_A0[ot + s->mem_index](); @@ -4318,7 +4318,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) gen_op_mov_TN_reg[ot][0][rm](); } gen_op_mov_TN_reg[ot][1][reg](); - + if (shift) { val = ldub_code(s->pc++); if (ot == OT_QUAD) @@ -4389,7 +4389,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) gen_op_fild_FT0_A0(); break; } - + gen_op_fp_arith_ST0_FT0[op1](); if (op1 == 3) { /* fcomp needs pop */ @@ -4646,7 +4646,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */ { int op1; - + op1 = op & 7; if (op >= 0x20) { gen_op_fp_arith_STN_ST0[op1](opreg); @@ -4816,7 +4816,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) gen_movs(s, ot); } break; - + case 0xaa: /* stosS */ case 0xab: if ((b & 1) == 0) @@ -5042,13 +5042,13 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) case 0x9a: /* lcall im */ { unsigned int selector, offset; - + if (CODE64(s)) goto illegal_op; ot = dflag ? OT_LONG : OT_WORD; offset = insn_get(s, ot); selector = insn_get(s, OT_WORD); - + gen_op_movl_T0_im(selector); gen_op_movl_T1_imu(offset); } @@ -5072,7 +5072,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) ot = dflag ? 
OT_LONG : OT_WORD; offset = insn_get(s, ot); selector = insn_get(s, OT_WORD); - + gen_op_movl_T0_im(selector); gen_op_movl_T1_imu(offset); } @@ -5121,7 +5121,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) } gen_op_cmov_reg_T1_T0[ot - OT_WORD][reg](); break; - + /************************/ /* flags */ case 0x9c: /* pushf */ @@ -5481,7 +5481,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) tval += next_eip; if (s->dflag == 0) tval &= 0xffff; - + l1 = gen_new_label(); l2 = gen_new_label(); b &= 3; @@ -5802,7 +5802,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); - + if (mod == 3) { gen_op_mov_TN_reg[OT_LONG][0][rm](); /* sign extend */ @@ -6465,7 +6465,7 @@ static inline int gen_intermediate_code_internal(CPUState *env, int flags, j, lj, cflags; target_ulong pc_start; target_ulong cs_base; - + /* generate intermediate code */ pc_start = tb->pc; cs_base = tb->cs_base; @@ -6574,7 +6574,7 @@ static inline int gen_intermediate_code_internal(CPUState *env, while (lj <= j) gen_opc_instr_start[lj++] = 0; } - + #ifdef DEBUG_DISAS if (loglevel & CPU_LOG_TB_CPU) { cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP); |