author     Paolo Bonzini <pbonzini@redhat.com>    2014-03-28 18:14:58 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>    2014-06-05 16:10:31 +0200
commit     93e22326d62d903b301e90bea71f0dbd0de858d3
tree       5c15dc72fdb3db11f3af7476860c8be995008fcf
parent     ca0aa408167888d862df3e7f734f6b7b35bd556d
softmmu: make do_unaligned_access a method of CPU
We will reference it from more files in the next patch. To avoid
ruining the small steps we're making towards multi-target, make
it a method of CPU rather than just a global.
Reviewed-by: Andreas Färber <afaerber@suse.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--  include/exec/softmmu_template.h  | 30
-rw-r--r--  include/qom/cpu.h                | 15
-rw-r--r--  target-alpha/cpu-qom.h           |  2
-rw-r--r--  target-alpha/cpu.c               |  1
-rw-r--r--  target-alpha/mem_helper.c        |  8
-rw-r--r--  target-mips/cpu-qom.h            |  2
-rw-r--r--  target-mips/cpu.c                |  1
-rw-r--r--  target-mips/op_helper.c          | 11
-rw-r--r--  target-sparc/cpu-qom.h           |  3
-rw-r--r--  target-sparc/cpu.c               |  1
-rw-r--r--  target-sparc/ldst_helper.c       | 13
-rw-r--r--  target-xtensa/cpu-qom.h          |  2
-rw-r--r--  target-xtensa/cpu.c              |  1
-rw-r--r--  target-xtensa/op_helper.c        | 10
14 files changed, 63 insertions(+), 37 deletions(-)
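
At the core of the change, condensed from the include/qom/cpu.h hunks in the diff below, is a new CPUClass callback plus a generic inline dispatcher that softmmu_template.h now calls in place of each target's file-local do_unaligned_access(). This is a sketch rather than a drop-in excerpt: it relies on QEMU's existing CPUState, vaddr and CPU_GET_CLASS definitions and elides the other CPUClass members.

/* Condensed sketch of the interface added by this patch (see the
 * include/qom/cpu.h hunks below); other CPUClass members are elided. */
typedef struct CPUClass {
    /* ... existing callbacks such as do_interrupt, do_unassigned_access ... */
    void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                int is_write, int is_user, uintptr_t retaddr);
} CPUClass;

/* Generic dispatcher: softmmu_template.h calls this instead of a
 * per-target static function, so the generic code no longer needs to
 * know which target it was built for. */
static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                        int is_write, int is_user,
                                        uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->do_unaligned_access(cpu, addr, is_write, is_user, retaddr);
}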
diff --git a/include/exec/softmmu_template.h b/include/exec/softmmu_template.h
index 73ed7cf921..12ead5a2b1 100644
--- a/include/exec/softmmu_template.h
+++ b/include/exec/softmmu_template.h
@@ -155,7 +155,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
          != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0) {
-            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+                                 mmu_idx, retaddr);
         }
 #endif
         tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
@@ -186,7 +187,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
         unsigned shift;
     do_unaligned_access:
 #ifdef ALIGNED_ONLY
-        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+                             mmu_idx, retaddr);
 #endif
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
@@ -204,7 +206,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
     /* Handle aligned access or unaligned access in the same page.  */
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
-        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+                             mmu_idx, retaddr);
     }
 #endif
 
@@ -237,7 +240,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
          != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0) {
-            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+                                 mmu_idx, retaddr);
         }
 #endif
         tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
@@ -268,7 +272,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
         unsigned shift;
     do_unaligned_access:
 #ifdef ALIGNED_ONLY
-        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+                             mmu_idx, retaddr);
 #endif
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
@@ -286,7 +291,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
     /* Handle aligned access or unaligned access in the same page.  */
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
-        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+                             mmu_idx, retaddr);
     }
 #endif
 
@@ -357,7 +363,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
          != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0) {
-            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+            cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
         }
 #endif
         tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
@@ -386,7 +392,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         int i;
     do_unaligned_access:
 #ifdef ALIGNED_ONLY
-        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
 #endif
         /* XXX: not efficient, but simple */
         /* Note: relies on the fact that tlb_fill() does not remove the
@@ -405,7 +411,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     /* Handle aligned access or unaligned access in the same page.  */
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
-        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
     }
 #endif
 
@@ -433,7 +439,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
          != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0) {
-            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+            cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
         }
 #endif
         tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
@@ -462,7 +468,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         int i;
     do_unaligned_access:
 #ifdef ALIGNED_ONLY
-        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
 #endif
         /* XXX: not efficient, but simple */
         /* Note: relies on the fact that tlb_fill() does not remove the
@@ -481,7 +487,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     /* Handle aligned access or unaligned access in the same page.  */
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
-        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
     }
 #endif
 
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index df977c88f0..4b352a28fa 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -80,6 +80,8 @@ struct TranslationBlock;
  * @has_work: Callback for checking if there is work to do.
  * @do_interrupt: Callback for interrupt handling.
  * @do_unassigned_access: Callback for unassigned access handling.
+ * @do_unaligned_access: Callback for unaligned access handling, if
+ * the target defines #ALIGNED_ONLY.
  * @memory_rw_debug: Callback for GDB memory access.
  * @dump_state: Callback for dumping state.
  * @dump_statistics: Callback for dumping statistics.
@@ -112,6 +114,8 @@ typedef struct CPUClass {
     bool (*has_work)(CPUState *cpu);
     void (*do_interrupt)(CPUState *cpu);
     CPUUnassignedAccess do_unassigned_access;
+    void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
+                                int is_write, int is_user, uintptr_t retaddr);
     int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                            uint8_t *buf, int len, bool is_write);
     void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
@@ -544,8 +548,7 @@ void cpu_interrupt(CPUState *cpu, int mask);
 
 #endif /* USER_ONLY */
 
-#ifndef CONFIG_USER_ONLY
-
+#ifdef CONFIG_SOFTMMU
 static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
                                          bool is_write, bool is_exec,
                                          int opaque, unsigned size)
@@ -557,6 +560,14 @@ static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
     }
 }
 
+static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
+                                        int is_write, int is_user,
+                                        uintptr_t retaddr)
+{
+    CPUClass *cc = CPU_GET_CLASS(cpu);
+
+    return cc->do_unaligned_access(cpu, addr, is_write, is_user, retaddr);
+}
 #endif
 
 /**
diff --git a/target-alpha/cpu-qom.h b/target-alpha/cpu-qom.h
index 198f1b13a3..0caa362f5b 100644
--- a/target-alpha/cpu-qom.h
+++ b/target-alpha/cpu-qom.h
@@ -84,5 +84,7 @@ void alpha_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
 hwaddr alpha_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
 int alpha_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
 int alpha_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void alpha_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
+                                   int is_write, int is_user, uintptr_t retaddr);
 
 #endif
diff --git a/target-alpha/cpu.c b/target-alpha/cpu.c
index 7ec46b90fc..2491f0a301 100644
--- a/target-alpha/cpu.c
+++ b/target-alpha/cpu.c
@@ -292,6 +292,7 @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)
     cc->handle_mmu_fault = alpha_cpu_handle_mmu_fault;
 #else
     cc->do_unassigned_access = alpha_cpu_unassigned_access;
+    cc->do_unaligned_access = alpha_cpu_do_unaligned_access;
     cc->get_phys_page_debug = alpha_cpu_get_phys_page_debug;
     dc->vmsd = &vmstate_alpha_cpu;
 #endif
diff --git a/target-alpha/mem_helper.c b/target-alpha/mem_helper.c
index ef6b7058cb..c560bd9717 100644
--- a/target-alpha/mem_helper.c
+++ b/target-alpha/mem_helper.c
@@ -96,11 +96,11 @@ uint64_t helper_stq_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
     return ret;
 }
 
-static void do_unaligned_access(CPUAlphaState *env, target_ulong addr,
-                                int is_write, int is_user, uintptr_t retaddr)
+void alpha_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+                                   int is_write, int is_user, uintptr_t retaddr)
 {
-    AlphaCPU *cpu = alpha_env_get_cpu(env);
-    CPUState *cs = CPU(cpu);
+    AlphaCPU *cpu = ALPHA_CPU(cs);
+    CPUAlphaState *env = &cpu->env;
     uint64_t pc;
     uint32_t insn;
 
diff --git a/target-mips/cpu-qom.h b/target-mips/cpu-qom.h
index 8877f813f7..2cff15a273 100644
--- a/target-mips/cpu-qom.h
+++ b/target-mips/cpu-qom.h
@@ -80,5 +80,7 @@ void mips_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
 hwaddr mips_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
 int mips_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
 int mips_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
+                                  int is_write, int is_user, uintptr_t retaddr);
 
 #endif
diff --git a/target-mips/cpu.c b/target-mips/cpu.c
index ae37ae26c0..dd954fc55a 100644
--- a/target-mips/cpu.c
+++ b/target-mips/cpu.c
@@ -137,6 +137,7 @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
     cc->handle_mmu_fault = mips_cpu_handle_mmu_fault;
 #else
     cc->do_unassigned_access = mips_cpu_unassigned_access;
+    cc->do_unaligned_access = mips_cpu_do_unaligned_access;
     cc->get_phys_page_debug = mips_cpu_get_phys_page_debug;
 #endif
 
diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c
index 8af931abd9..2b665a19f1 100644
--- a/target-mips/op_helper.c
+++ b/target-mips/op_helper.c
@@ -2128,10 +2128,6 @@ void helper_wait(CPUMIPSState *env)
 
 #if !defined(CONFIG_USER_ONLY)
 
-static void QEMU_NORETURN do_unaligned_access(CPUMIPSState *env,
-                                              target_ulong addr, int is_write,
-                                              int is_user, uintptr_t retaddr);
-
 #define MMUSUFFIX _mmu
 #define ALIGNED_ONLY
 
@@ -2147,9 +2143,12 @@ static void QEMU_NORETURN do_unaligned_access(CPUMIPSState *env,
 #define SHIFT 3
 #include "exec/softmmu_template.h"
 
-static void do_unaligned_access(CPUMIPSState *env, target_ulong addr,
-                                int is_write, int is_user, uintptr_t retaddr)
+void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+                                  int is_write, int is_user, uintptr_t retaddr)
 {
+    MIPSCPU *cpu = MIPS_CPU(cs);
+    CPUMIPSState *env = &cpu->env;
+
     env->CP0_BadVAddr = addr;
     do_raise_exception(env, (is_write == 1) ? EXCP_AdES : EXCP_AdEL, retaddr);
 }
diff --git a/target-sparc/cpu-qom.h b/target-sparc/cpu-qom.h
index 8e3e0de277..477c4d5136 100644
--- a/target-sparc/cpu-qom.h
+++ b/target-sparc/cpu-qom.h
@@ -81,5 +81,8 @@ void sparc_cpu_dump_state(CPUState *cpu, FILE *f,
 hwaddr sparc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
 int sparc_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
 int sparc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cpu,
+                                                 vaddr addr, int is_write,
+                                                 int is_user, uintptr_t retaddr);
 
 #endif
diff --git a/target-sparc/cpu.c b/target-sparc/cpu.c
index d9f37e9b6a..3a0ee504e5 100644
--- a/target-sparc/cpu.c
+++ b/target-sparc/cpu.c
@@ -825,6 +825,7 @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
     cc->handle_mmu_fault = sparc_cpu_handle_mmu_fault;
 #else
     cc->do_unassigned_access = sparc_cpu_unassigned_access;
+    cc->do_unaligned_access = sparc_cpu_do_unaligned_access;
     cc->get_phys_page_debug = sparc_cpu_get_phys_page_debug;
 #endif
 
diff --git a/target-sparc/ldst_helper.c b/target-sparc/ldst_helper.c
index b6b9866b93..6e04c0938e 100644
--- a/target-sparc/ldst_helper.c
+++ b/target-sparc/ldst_helper.c
@@ -65,9 +65,6 @@
 #define QT1 (env->qt1)
 
 #if !defined(CONFIG_USER_ONLY)
-static void QEMU_NORETURN do_unaligned_access(CPUSPARCState *env,
-                                              target_ulong addr, int is_write,
-                                              int is_user, uintptr_t retaddr);
 #include "exec/softmmu_exec.h"
 #define MMUSUFFIX _mmu
 #define ALIGNED_ONLY
@@ -2425,11 +2422,13 @@ void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr,
 #endif
 
 #if !defined(CONFIG_USER_ONLY)
-static void QEMU_NORETURN do_unaligned_access(CPUSPARCState *env,
-                                              target_ulong addr, int is_write,
-                                              int is_user, uintptr_t retaddr)
+void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs,
+                                                 vaddr addr, int is_write,
+                                                 int is_user, uintptr_t retaddr)
 {
-    SPARCCPU *cpu = sparc_env_get_cpu(env);
+    SPARCCPU *cpu = SPARC_CPU(cs);
+    CPUSPARCState *env = &cpu->env;
+
 #ifdef DEBUG_UNALIGNED
     printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
            "\n", addr, env->pc);
diff --git a/target-xtensa/cpu-qom.h b/target-xtensa/cpu-qom.h
index c6cc2d91f4..f320486a68 100644
--- a/target-xtensa/cpu-qom.h
+++ b/target-xtensa/cpu-qom.h
@@ -89,5 +89,7 @@ void xtensa_cpu_dump_state(CPUState *cpu, FILE *f,
 hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
 int xtensa_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
 int xtensa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
+                                    int is_write, int is_user, uintptr_t retaddr);
 
 #endif
diff --git a/target-xtensa/cpu.c b/target-xtensa/cpu.c
index 6251f1c47e..9d8801b70e 100644
--- a/target-xtensa/cpu.c
+++ b/target-xtensa/cpu.c
@@ -148,6 +148,7 @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
     cc->gdb_read_register = xtensa_cpu_gdb_read_register;
     cc->gdb_write_register = xtensa_cpu_gdb_write_register;
 #ifndef CONFIG_USER_ONLY
+    cc->do_unaligned_access = xtensa_cpu_do_unaligned_access;
     cc->get_phys_page_debug = xtensa_cpu_get_phys_page_debug;
 #endif
     dc->vmsd = &vmstate_xtensa_cpu;
diff --git a/target-xtensa/op_helper.c b/target-xtensa/op_helper.c
index 01edab4082..fd514fc813 100644
--- a/target-xtensa/op_helper.c
+++ b/target-xtensa/op_helper.c
@@ -31,9 +31,6 @@
 #include "exec/softmmu_exec.h"
 #include "exec/address-spaces.h"
 
-static void do_unaligned_access(CPUXtensaState *env,
-        target_ulong addr, int is_write, int is_user, uintptr_t retaddr);
-
 #define ALIGNED_ONLY
 #define MMUSUFFIX _mmu
 
@@ -49,10 +46,11 @@ static void do_unaligned_access(CPUXtensaState *env,
 #define SHIFT 3
 #include "exec/softmmu_template.h"
 
-static void do_unaligned_access(CPUXtensaState *env,
-        target_ulong addr, int is_write, int is_user, uintptr_t retaddr)
+void xtensa_cpu_do_unaligned_access(CPUState *cs,
+        vaddr addr, int is_write, int is_user, uintptr_t retaddr)
 {
-    XtensaCPU *cpu = xtensa_env_get_cpu(env);
+    XtensaCPU *cpu = XTENSA_CPU(cs);
+    CPUXtensaState *env = &cpu->env;
 
     if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
         !xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {
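
Each of the four converted targets follows the same mechanical pattern: the former static do_unaligned_access() becomes a globally visible <target>_cpu_do_unaligned_access() that takes a CPUState, recovers its own env with a QOM cast, and is installed in class_init. Here is a minimal sketch of that pattern for a hypothetical target "foo"; foo_cpu_do_unaligned_access, FooCPU, FOO_CPU and CPUFooState are illustrative names, not part of the patch.

/* Hypothetical target "foo", illustrating the per-target pattern only. */
void foo_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                 int is_write, int is_user, uintptr_t retaddr)
{
    FooCPU *cpu = FOO_CPU(cs);        /* QOM cast replaces foo_env_get_cpu(env) */
    CPUFooState *env = &cpu->env;

    /* ...record the faulting address and raise the target's alignment
     * exception, exactly as the old static helper did... */
}

static void foo_cpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

    /* Installed next to do_unassigned_access in each target's class_init. */
    cc->do_unaligned_access = foo_cpu_do_unaligned_access;
}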