| author | Chanho Park <chanho61.park@samsung.com> | 2014-12-10 15:42:55 +0900 |
|---|---|---|
| committer | Chanho Park <chanho61.park@samsung.com> | 2014-12-10 15:42:55 +0900 |
| commit | 0d6a2f7e595218b5632ba7005128470e65138951 (patch) | |
| tree | 596b09930ef1538e6606450e2d8b88ec2e296a9b /softmmu_template.h | |
| parent | 16b1353a36171ae06d63fd309f4772dbfb1da113 (diff) | |
| download | qemu-0d6a2f7e595218b5632ba7005128470e65138951.tar.gz qemu-0d6a2f7e595218b5632ba7005128470e65138951.tar.bz2 qemu-0d6a2f7e595218b5632ba7005128470e65138951.zip | |
Imported Upstream version 2.2.0 (tags: upstream/2.2.0, upstream/2.2.1)
Diffstat (limited to 'softmmu_template.h')
| -rw-r--r-- | softmmu_template.h | 65 |

1 file changed, 53 insertions, 12 deletions
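In brief: this upstream import changes softmmu_template.h in two ways. The numeric MMU access types (0, 1, 2) passed to tlb_fill() and cpu_unaligned_access() are replaced by the symbolic MMU_DATA_LOAD, MMU_DATA_STORE, and MMU_INST_FETCH values, and a new VICTIM_TLB_HIT() macro lets the load/store helpers refill from a small victim TLB (CPU_VTLB_SIZE entries per MMU mode) before paying for a full page-table walk via tlb_fill(). A standalone sketch of the victim-TLB technique follows the diff.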
```diff
diff --git a/softmmu_template.h b/softmmu_template.h
index 5a07f991a..6b4e615db 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -67,10 +67,10 @@
 #endif
 
 #ifdef SOFTMMU_CODE_ACCESS
-#define READ_ACCESS_TYPE 2
+#define READ_ACCESS_TYPE MMU_INST_FETCH
 #define ADDR_READ addr_code
 #else
-#define READ_ACCESS_TYPE 0
+#define READ_ACCESS_TYPE MMU_DATA_LOAD
 #define ADDR_READ addr_read
 #endif
 
@@ -116,6 +116,31 @@
 # define helper_te_st_name helper_le_st_name
 #endif
 
+/* macro to check the victim tlb */
+#define VICTIM_TLB_HIT(ty)                                                    \
+({                                                                            \
+    /* we are about to do a page table walk. our last hope is the             \
+     * victim tlb. try to refill from the victim tlb before walking the      \
+     * page table. */                                                         \
+    int vidx;                                                                 \
+    hwaddr tmpiotlb;                                                          \
+    CPUTLBEntry tmptlb;                                                       \
+    for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) {                         \
+        if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\
+            /* found entry in victim tlb, swap tlb and iotlb */               \
+            tmptlb = env->tlb_table[mmu_idx][index];                          \
+            env->tlb_table[mmu_idx][index] = env->tlb_v_table[mmu_idx][vidx]; \
+            env->tlb_v_table[mmu_idx][vidx] = tmptlb;                         \
+            tmpiotlb = env->iotlb[mmu_idx][index];                            \
+            env->iotlb[mmu_idx][index] = env->iotlb_v[mmu_idx][vidx];         \
+            env->iotlb_v[mmu_idx][vidx] = tmpiotlb;                           \
+            break;                                                            \
+        }                                                                     \
+    }                                                                         \
+    /* return true when there is a vtlb hit, i.e. vidx >=0 */                 \
+    vidx >= 0;                                                                \
+})
+
 #ifndef SOFTMMU_CODE_ACCESS
 static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                               hwaddr physaddr,
@@ -161,7 +186,10 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
                                  mmu_idx, retaddr);
         }
 #endif
-        tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+        if (!VICTIM_TLB_HIT(ADDR_READ)) {
+            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+                     mmu_idx, retaddr);
+        }
         tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
     }
 
@@ -246,7 +274,10 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
                                  mmu_idx, retaddr);
         }
 #endif
-        tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+        if (!VICTIM_TLB_HIT(ADDR_READ)) {
+            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+                     mmu_idx, retaddr);
+        }
         tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
     }
 
@@ -365,10 +396,13 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                                 mmu_idx, retaddr);
         }
 #endif
-        tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+        if (!VICTIM_TLB_HIT(addr_write)) {
+            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
+        }
         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
     }
 
@@ -394,7 +428,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         int i;
     do_unaligned_access:
 #ifdef ALIGNED_ONLY
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
 #endif
         /* XXX: not efficient, but simple */
         /* Note: relies on the fact that tlb_fill() does not remove the
@@ -413,7 +448,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     /* Handle aligned access or unaligned access in the same page.  */
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
     }
 #endif
 
@@ -441,10 +477,13 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                                 mmu_idx, retaddr);
         }
 #endif
-        tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+        if (!VICTIM_TLB_HIT(addr_write)) {
+            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
+        }
         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
     }
 
@@ -470,7 +509,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         int i;
     do_unaligned_access:
 #ifdef ALIGNED_ONLY
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
 #endif
         /* XXX: not efficient, but simple */
         /* Note: relies on the fact that tlb_fill() does not remove the
@@ -489,7 +529,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     /* Handle aligned access or unaligned access in the same page.  */
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
     }
 #endif
```
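For readers new to the technique, here is a minimal, runnable sketch of the victim-TLB idea in plain C. It is not QEMU code: every name in it (ToyTLBEntry, toy_fill, translate, the identity-mapped fill, the table sizes) is invented for illustration; only the probe-then-swap loop and the "victim TLB before tlb_fill()" ordering mirror what VICTIM_TLB_HIT and the patched helpers do above.

```c
/* Toy model of a direct-mapped TLB backed by a small victim TLB.
 * All names and sizes are hypothetical; only the technique matches
 * the VICTIM_TLB_HIT macro in the diff above. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_MASK  (~(uint64_t)0xfff)
#define TLB_SIZE       256   /* direct-mapped main TLB */
#define VTLB_SIZE      8     /* small fully associative victim TLB */

typedef struct {
    uint64_t tag;   /* page-aligned virtual address; UINT64_MAX = empty */
    uint64_t phys;  /* payload that travels with the entry (cf. iotlb) */
} ToyTLBEntry;

static ToyTLBEntry tlb[TLB_SIZE];
static ToyTLBEntry vtlb[VTLB_SIZE];
static size_t vtlb_next;  /* round-robin victim slot */
static int fills;         /* slow-path fills, counted for the demo */

static size_t tlb_index(uint64_t addr)
{
    return (addr >> 12) & (TLB_SIZE - 1);
}

/* Probe the victim TLB after a main-TLB miss.  On a hit, swap the victim
 * entry with the conflicting main-TLB entry, as VICTIM_TLB_HIT swaps
 * tlb_table/iotlb with tlb_v_table/iotlb_v above. */
static bool victim_tlb_hit(uint64_t addr)
{
    size_t index = tlb_index(addr);
    for (int vidx = VTLB_SIZE - 1; vidx >= 0; --vidx) {
        if (vtlb[vidx].tag == (addr & TOY_PAGE_MASK)) {
            ToyTLBEntry tmp = tlb[index];
            tlb[index] = vtlb[vidx];
            vtlb[vidx] = tmp;   /* evicted entry becomes the new victim */
            return true;
        }
    }
    return false;
}

/* Slow path standing in for tlb_fill(): save the evicted entry in the
 * victim TLB before installing a new one, then pretend the page table
 * walk produced an identity mapping. */
static void toy_fill(uint64_t addr)
{
    size_t index = tlb_index(addr);
    vtlb[vtlb_next] = tlb[index];
    vtlb_next = (vtlb_next + 1) % VTLB_SIZE;
    tlb[index].tag = addr & TOY_PAGE_MASK;
    tlb[index].phys = addr & TOY_PAGE_MASK;
    fills++;
}

/* Lookup mirroring the patched helpers: main TLB, then victim TLB,
 * and only then the expensive fill. */
static uint64_t translate(uint64_t addr)
{
    size_t index = tlb_index(addr);
    if (tlb[index].tag != (addr & TOY_PAGE_MASK)) {
        if (!victim_tlb_hit(addr)) {
            toy_fill(addr);
        }
    }
    return tlb[index].phys | (addr & ~TOY_PAGE_MASK);
}

int main(void)
{
    for (size_t i = 0; i < TLB_SIZE; i++)  tlb[i].tag  = UINT64_MAX;
    for (size_t i = 0; i < VTLB_SIZE; i++) vtlb[i].tag = UINT64_MAX;

    /* Pages 0 and 256 collide on main-TLB index 0.  Without a victim
     * TLB this loop would need 8 slow fills; with it, only the first 2. */
    for (int i = 0; i < 4; i++) {
        translate(0x000000);
        translate(0x100000);
    }
    printf("slow fills: %d (victim TLB absorbed the rest)\n", fills);
    return 0;
}
```

Swapping the conflicting main-TLB entry into the victim slot, rather than copying it, is what makes the scheme cheap: two pages that alias the same direct-mapped index simply trade places in a handful of loads and stores, so the slow fill path runs only on first touch, which is exactly what the demo's fill counter shows.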