author      Edgar E. Iglesias <edgar.iglesias@gmail.com>    2011-06-10 22:21:14 +0200
committer   Edgar E. Iglesias <edgar.iglesias@gmail.com>    2011-06-10 22:21:14 +0200
commit      448293961f889d635295ad5b1ecc57ce267801ba (patch)
tree        d91a77a3a5f8fe466ae1a98d1705f8b757f319da
parent      1c532d92ab9bc2a8180296d7abe14f29d762f612 (diff)
parent      3b4fefd6e65a7bdb994d10a9fa36c19b5749b502 (diff)
Merge remote branch 'rth/axp-next' into alpha-merge
* rth/axp-next: (26 commits)
  target-alpha: Implement TLB flush primitives.
  target-alpha: Use a fixed frequency for the RPCC in system mode.
  target-alpha: Trap for unassigned and unaligned addresses.
  target-alpha: Remap PIO space for 43-bit KSEG for EV6.
  target-alpha: Implement cpu_alpha_handle_mmu_fault for system mode.
  target-alpha: Implement more CALL_PAL values inline.
  target-alpha: Disable interrupts properly.
  target-alpha: All ISA checks to use TB->FLAGS.
  target-alpha: Swap shadow registers moving to/from PALmode.
  target-alpha: Implement do_interrupt for system mode.
  target-alpha: Add IPRs to be used by the emulation PALcode.
  target-alpha: Use kernel mmu_idx for pal_mode.
  target-alpha: Add various symbolic constants.
  target-alpha: Use do_restore_state for arithmetic exceptions.
  target-alpha: Tidy up arithmetic exceptions.
  target-alpha: Tidy exception constants.
  target-alpha: Enable the alpha-softmmu target.
  target-alpha: Rationalize internal processor registers.
  target-alpha: Merge HW_REI and HW_RET implementations.
  target-alpha: Cleanup MMU modes.
  ...
-rw-r--r--  MAINTAINERS                         |    4
-rw-r--r--  Makefile.target                     |    3
-rw-r--r--  alpha-dis.c                         |    4
-rwxr-xr-x  configure                           |    1
-rw-r--r--  cpu-exec.c                          |   33
-rw-r--r--  default-configs/alpha-softmmu.mak   |    9
-rw-r--r--  dis-asm.h                           |    3
-rw-r--r--  disas.c                             |    2
-rw-r--r--  exec-all.h                          |    2
-rw-r--r--  exec.c                              |   12
-rw-r--r--  hw/alpha_palcode.c                  | 1048
-rw-r--r--  linux-user/main.c                   |   50
-rw-r--r--  target-alpha/cpu.h                  |  375
-rw-r--r--  target-alpha/exec.h                 |   12
-rw-r--r--  target-alpha/helper.c               |  589
-rw-r--r--  target-alpha/helper.h               |   32
-rw-r--r--  target-alpha/machine.c              |   87
-rw-r--r--  target-alpha/op_helper.c            |  278
-rw-r--r--  target-alpha/translate.c            |  804
19 files changed, 1179 insertions, 2169 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index e6f853dfff..35d4496186 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -56,8 +56,8 @@ M: Paul Brook <paul@codesourcery.com>
Guest CPU cores (TCG):
----------------------
Alpha
-M: qemu-devel@nongnu.org
-S: Orphan
+M: Richard Henderson <rth@twiddle.net>
+S: Maintained
F: target-alpha/
ARM
diff --git a/Makefile.target b/Makefile.target
index f7cb17de80..b1a0f6d28b 100644
--- a/Makefile.target
+++ b/Makefile.target
@@ -374,7 +374,8 @@ obj-m68k-y += m68k-semi.o dummy_m68k.o
obj-s390x-y = s390-virtio-bus.o s390-virtio.o
-obj-alpha-y = alpha_palcode.o
+obj-alpha-y = i8259.o mc146818rtc.o
+obj-alpha-y += vga.o cirrus_vga.o
main.o: QEMU_CFLAGS+=$(GPROF_CFLAGS)
diff --git a/alpha-dis.c b/alpha-dis.c
index 8a2411e4d5..ae331b35b8 100644
--- a/alpha-dis.c
+++ b/alpha-dis.c
@@ -238,10 +238,6 @@ extern const unsigned alpha_num_operands;
#define AXP_REG_SP 30
#define AXP_REG_ZERO 31
-#define bfd_mach_alpha_ev4 0x10
-#define bfd_mach_alpha_ev5 0x20
-#define bfd_mach_alpha_ev6 0x30
-
enum bfd_reloc_code_real {
BFD_RELOC_23_PCREL_S2,
BFD_RELOC_ALPHA_HINT
diff --git a/configure b/configure
index bade3e3f2a..663b557ad0 100755
--- a/configure
+++ b/configure
@@ -831,6 +831,7 @@ if [ "$softmmu" = "yes" ] ; then
default_target_list="\
i386-softmmu \
x86_64-softmmu \
+alpha-softmmu \
arm-softmmu \
cris-softmmu \
lm32-softmmu \
diff --git a/cpu-exec.c b/cpu-exec.c
index 6ddd8dd1ae..e1de56b397 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -488,9 +488,36 @@ int cpu_exec(CPUState *env1)
next_tb = 0;
}
#elif defined(TARGET_ALPHA)
- if (interrupt_request & CPU_INTERRUPT_HARD) {
- do_interrupt(env);
- next_tb = 0;
+ {
+ int idx = -1;
+ /* ??? This hard-codes the OSF/1 interrupt levels. */
+ switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
+ case 0 ... 3:
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ idx = EXCP_DEV_INTERRUPT;
+ }
+ /* FALLTHRU */
+ case 4:
+ if (interrupt_request & CPU_INTERRUPT_TIMER) {
+ idx = EXCP_CLK_INTERRUPT;
+ }
+ /* FALLTHRU */
+ case 5:
+ if (interrupt_request & CPU_INTERRUPT_SMP) {
+ idx = EXCP_SMP_INTERRUPT;
+ }
+ /* FALLTHRU */
+ case 6:
+ if (interrupt_request & CPU_INTERRUPT_MCHK) {
+ idx = EXCP_MCHK;
+ }
+ }
+ if (idx >= 0) {
+ env->exception_index = idx;
+ env->error_code = 0;
+ do_interrupt(env);
+ next_tb = 0;
+ }
}
#elif defined(TARGET_CRIS)
if (interrupt_request & CPU_INTERRUPT_HARD
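
The cpu-exec.c hunk above gates interrupt delivery on the current interrupt priority level: in PALmode the level is forced to 7, otherwise it is PS<2:0>, and each case falls through so that a lower level accepts everything a higher level accepts plus one more source, with the later checks (machine check last) overriding the earlier ones. A minimal standalone sketch of that priority scheme, not QEMU code; the helper name and the PEND_* constants are made up for illustration:

#include <stdio.h>

/* Stand-ins for the CPU_INTERRUPT_* pending bits tested above. */
enum { PEND_HARD = 1, PEND_TIMER = 2, PEND_SMP = 4, PEND_MCHK = 8 };

/* Return a label for the highest-priority pending source that the current
   interrupt level does not mask, or NULL if everything is masked. */
static const char *pick_alpha_interrupt(int pal_mode, int ps, int pending)
{
    int level = pal_mode ? 7 : (ps & 7);    /* PS_INT_MASK == 7 */
    const char *which = NULL;

    /* Same fall-through structure as the new switch in cpu_exec():
       each later check overwrites the earlier one, so the machine
       check ends up with the highest priority. */
    switch (level) {
    case 0: case 1: case 2: case 3:
        if (pending & PEND_HARD)  { which = "device"; }
        /* fall through */
    case 4:
        if (pending & PEND_TIMER) { which = "clock"; }
        /* fall through */
    case 5:
        if (pending & PEND_SMP)   { which = "smp"; }
        /* fall through */
    case 6:
        if (pending & PEND_MCHK)  { which = "mchk"; }
    }
    return which;
}

int main(void)
{
    /* At level 4 a pending device interrupt stays masked, the clock does not. */
    const char *s = pick_alpha_interrupt(0, 4, PEND_HARD | PEND_TIMER);
    printf("%s\n", s ? s : "masked");
    return 0;
}
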
diff --git a/default-configs/alpha-softmmu.mak b/default-configs/alpha-softmmu.mak
new file mode 100644
index 0000000000..abadcffec9
--- /dev/null
+++ b/default-configs/alpha-softmmu.mak
@@ -0,0 +1,9 @@
+# Default configuration for alpha-softmmu
+
+include pci.mak
+CONFIG_SERIAL=y
+CONFIG_I8254=y
+CONFIG_VGA_PCI=y
+CONFIG_IDE_CORE=y
+CONFIG_IDE_QDEV=y
+CONFIG_VMWARE_VGA=y
diff --git a/dis-asm.h b/dis-asm.h
index 296537ad3a..5b07d7f3a0 100644
--- a/dis-asm.h
+++ b/dis-asm.h
@@ -184,6 +184,9 @@ enum bfd_architecture
#define bfd_mach_sh5 0x50
bfd_arch_alpha, /* Dec Alpha */
#define bfd_mach_alpha 1
+#define bfd_mach_alpha_ev4 0x10
+#define bfd_mach_alpha_ev5 0x20
+#define bfd_mach_alpha_ev6 0x30
bfd_arch_arm, /* Advanced Risc Machines ARM */
#define bfd_mach_arm_unknown 0
#define bfd_mach_arm_2 1
diff --git a/disas.c b/disas.c
index 223606cc50..d208c52402 100644
--- a/disas.c
+++ b/disas.c
@@ -205,7 +205,7 @@ void target_disas(FILE *out, target_ulong code, target_ulong size, int flags)
disasm_info.mach = bfd_mach_sh4;
print_insn = print_insn_sh;
#elif defined(TARGET_ALPHA)
- disasm_info.mach = bfd_mach_alpha;
+ disasm_info.mach = bfd_mach_alpha_ev6;
print_insn = print_insn_alpha;
#elif defined(TARGET_CRIS)
if (flags != 32) {
diff --git a/exec-all.h b/exec-all.h
index 026864e908..2a13a9535e 100644
--- a/exec-all.h
+++ b/exec-all.h
@@ -325,7 +325,7 @@ static inline tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong add
}
pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
-#if defined(TARGET_SPARC) || defined(TARGET_MIPS)
+#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
do_unassigned_access(addr, 0, 1, 0, 4);
#else
cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
diff --git a/exec.c b/exec.c
index bffc201f97..09928a3b37 100644
--- a/exec.c
+++ b/exec.c
@@ -3193,7 +3193,7 @@ static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
-#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
+#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
do_unassigned_access(addr, 0, 0, 0, 1);
#endif
return 0;
@@ -3204,7 +3204,7 @@ static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
-#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
+#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
do_unassigned_access(addr, 0, 0, 0, 2);
#endif
return 0;
@@ -3215,7 +3215,7 @@ static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
-#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
+#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
do_unassigned_access(addr, 0, 0, 0, 4);
#endif
return 0;
@@ -3226,7 +3226,7 @@ static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
-#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
+#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}
@@ -3236,7 +3236,7 @@ static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
-#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
+#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
@@ -3246,7 +3246,7 @@ static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
-#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
+#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
diff --git a/hw/alpha_palcode.c b/hw/alpha_palcode.c
deleted file mode 100644
index 033b54201c..0000000000
--- a/hw/alpha_palcode.c
+++ /dev/null
@@ -1,1048 +0,0 @@
-/*
- * Alpha emulation - PALcode emulation for qemu.
- *
- * Copyright (c) 2007 Jocelyn Mayer
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <stdint.h>
-#include <stdlib.h>
-#include <stdio.h>
-
-#include "cpu.h"
-#include "exec-all.h"
-
-/* Shared handlers */
-static void pal_reset (CPUState *env);
-/* Console handlers */
-static void pal_console_call (CPUState *env, uint32_t palcode);
-/* OpenVMS handlers */
-static void pal_openvms_call (CPUState *env, uint32_t palcode);
-/* UNIX / Linux handlers */
-static void pal_unix_call (CPUState *env, uint32_t palcode);
-
-pal_handler_t pal_handlers[] = {
- /* Console handler */
- {
- .reset = &pal_reset,
- .call_pal = &pal_console_call,
- },
- /* OpenVMS handler */
- {
- .reset = &pal_reset,
- .call_pal = &pal_openvms_call,
- },
- /* UNIX / Linux handler */
- {
- .reset = &pal_reset,
- .call_pal = &pal_unix_call,
- },
-};
-
-#if 0
-/* One must explicitly check that the TB is valid and the FOE bit is reset */
-static void update_itb (void)
-{
- /* This writes into a temp register, not the actual one */
- mtpr(TB_TAG);
- mtpr(TB_CTL);
- /* This commits the TB update */
- mtpr(ITB_PTE);
-}
-
-static void update_dtb (void);
-{
- mtpr(TB_CTL);
- /* This write into a temp register, not the actual one */
- mtpr(TB_TAG);
- /* This commits the TB update */
- mtpr(DTB_PTE);
-}
-#endif
-
-static void pal_reset (CPUState *env)
-{
-}
-
-static void do_swappal (CPUState *env, uint64_t palid)
-{
- pal_handler_t *pal_handler;
-
- switch (palid) {
- case 0 ... 2:
- pal_handler = &pal_handlers[palid];
- env->pal_handler = pal_handler;
- env->ipr[IPR_PAL_BASE] = -1ULL;
- (*pal_handler->reset)(env);
- break;
- case 3 ... 255:
- /* Unknown identifier */
- env->ir[0] = 1;
- return;
- default:
- /* We were given the entry point address */
- env->pal_handler = NULL;
- env->ipr[IPR_PAL_BASE] = palid;
- env->pc = env->ipr[IPR_PAL_BASE];
- cpu_loop_exit();
- }
-}
-
-static void pal_console_call (CPUState *env, uint32_t palcode)
-{
- uint64_t palid;
-
- if (palcode < 0x00000080) {
- /* Privileged palcodes */
- if (!(env->ps >> 3)) {
- /* TODO: generate privilege exception */
- }
- }
- switch (palcode) {
- case 0x00000000:
- /* HALT */
- /* REQUIRED */
- break;
- case 0x00000001:
- /* CFLUSH */
- break;
- case 0x00000002:
- /* DRAINA */
- /* REQUIRED */
- /* Implemented as no-op */
- break;
- case 0x00000009:
- /* CSERVE */
- /* REQUIRED */
- break;
- case 0x0000000A:
- /* SWPPAL */
- /* REQUIRED */
- palid = env->ir[16];
- do_swappal(env, palid);
- break;
- case 0x00000080:
- /* BPT */
- /* REQUIRED */
- break;
- case 0x00000081:
- /* BUGCHK */
- /* REQUIRED */
- break;
- case 0x00000086:
- /* IMB */
- /* REQUIRED */
- /* Implemented as no-op */
- break;
- case 0x0000009E:
- /* RDUNIQUE */
- /* REQUIRED */
- break;
- case 0x0000009F:
- /* WRUNIQUE */
- /* REQUIRED */
- break;
- case 0x000000AA:
- /* GENTRAP */
- /* REQUIRED */
- break;
- default:
- break;
- }
-}
-
-static void pal_openvms_call (CPUState *env, uint32_t palcode)
-{
- uint64_t palid, val, oldval;
-
- if (palcode < 0x00000080) {
- /* Privileged palcodes */
- if (!(env->ps >> 3)) {
- /* TODO: generate privilege exception */
- }
- }
- switch (palcode) {
- case 0x00000000:
- /* HALT */
- /* REQUIRED */
- break;
- case 0x00000001:
- /* CFLUSH */
- break;
- case 0x00000002:
- /* DRAINA */
- /* REQUIRED */
- /* Implemented as no-op */
- break;
- case 0x00000003:
- /* LDQP */
- break;
- case 0x00000004:
- /* STQP */
- break;
- case 0x00000005:
- /* SWPCTX */
- break;
- case 0x00000006:
- /* MFPR_ASN */
- if (cpu_alpha_mfpr(env, IPR_ASN, &val) == 0)
- env->ir[0] = val;
- break;
- case 0x00000007:
- /* MTPR_ASTEN */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_ASTEN, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x00000008:
- /* MTPR_ASTSR */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_ASTSR, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x00000009:
- /* CSERVE */
- /* REQUIRED */
- break;
- case 0x0000000A:
- /* SWPPAL */
- /* REQUIRED */
- palid = env->ir[16];
- do_swappal(env, palid);
- break;
- case 0x0000000B:
- /* MFPR_FEN */
- if (cpu_alpha_mfpr(env, IPR_FEN, &val) == 0)
- env->ir[0] = val;
- break;
- case 0x0000000C:
- /* MTPR_FEN */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_FEN, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x0000000D:
- /* MTPR_IPIR */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_IPIR, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x0000000E:
- /* MFPR_IPL */
- if (cpu_alpha_mfpr(env, IPR_IPL, &val) == 0)
- env->ir[0] = val;
- break;
- case 0x0000000F:
- /* MTPR_IPL */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_IPL, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x00000010:
- /* MFPR_MCES */
- if (cpu_alpha_mfpr(env, IPR_MCES, &val) == 0)
- env->ir[0] = val;
- break;
- case 0x00000011:
- /* MTPR_MCES */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_MCES, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x00000012:
- /* MFPR_PCBB */
- if (cpu_alpha_mfpr(env, IPR_PCBB, &val) == 0)
- env->ir[0] = val;
- break;
- case 0x00000013:
- /* MFPR_PRBR */
- if (cpu_alpha_mfpr(env, IPR_PRBR, &val) == 0)
- env->ir[0] = val;
- break;
- case 0x00000014:
- /* MTPR_PRBR */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_PRBR, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x00000015:
- /* MFPR_PTBR */
- if (cpu_alpha_mfpr(env, IPR_PTBR, &val) == 0)
- env->ir[0] = val;
- break;
- case 0x00000016:
- /* MFPR_SCBB */
- if (cpu_alpha_mfpr(env, IPR_SCBB, &val) == 0)
- env->ir[0] = val;
- break;
- case 0x00000017:
- /* MTPR_SCBB */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_SCBB, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x00000018:
- /* MTPR_SIRR */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_SIRR, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x00000019:
- /* MFPR_SISR */
- if (cpu_alpha_mfpr(env, IPR_SISR, &val) == 0)
- env->ir[0] = val;
- break;
- case 0x0000001A:
- /* MFPR_TBCHK */
- if (cpu_alpha_mfpr(env, IPR_TBCHK, &val) == 0)
- env->ir[0] = val;
- break;
- case 0x0000001B:
- /* MTPR_TBIA */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_TBIA, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x0000001C:
- /* MTPR_TBIAP */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_TBIAP, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x0000001D:
- /* MTPR_TBIS */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_TBIS, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x0000001E:
- /* MFPR_ESP */
- if (cpu_alpha_mfpr(env, IPR_ESP, &val) == 0)
- env->ir[0] = val;
- break;
- case 0x0000001F:
- /* MTPR_ESP */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_ESP, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x00000020:
- /* MFPR_SSP */
- if (cpu_alpha_mfpr(env, IPR_SSP, &val) == 0)
- env->ir[0] = val;
- break;
- case 0x00000021:
- /* MTPR_SSP */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_SSP, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x00000022:
- /* MFPR_USP */
- if (cpu_alpha_mfpr(env, IPR_USP, &val) == 0)
- env->ir[0] = val;
- break;
- case 0x00000023:
- /* MTPR_USP */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_USP, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x00000024:
- /* MTPR_TBISD */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_TBISD, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x00000025:
- /* MTPR_TBISI */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_TBISI, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x00000026:
- /* MFPR_ASTEN */
- if (cpu_alpha_mfpr(env, IPR_ASTEN, &val) == 0)
- env->ir[0] = val;
- break;
- case 0x00000027:
- /* MFPR_ASTSR */
- if (cpu_alpha_mfpr(env, IPR_ASTSR, &val) == 0)
- env->ir[0] = val;
- break;
- case 0x00000029:
- /* MFPR_VPTB */
- if (cpu_alpha_mfpr(env, IPR_VPTB, &val) == 0)
- env->ir[0] = val;
- break;
- case 0x0000002A:
- /* MTPR_VPTB */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_VPTB, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x0000002B:
- /* MTPR_PERFMON */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_PERFMON, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x0000002E:
- /* MTPR_DATFX */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_DATFX, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x0000003E:
- /* WTINT */
- break;
- case 0x0000003F:
- /* MFPR_WHAMI */
- if (cpu_alpha_mfpr(env, IPR_WHAMI, &val) == 0)
- env->ir[0] = val;
- break;
- case 0x00000080:
- /* BPT */
- /* REQUIRED */
- break;
- case 0x00000081:
- /* BUGCHK */
- /* REQUIRED */
- break;
- case 0x00000082:
- /* CHME */
- break;
- case 0x00000083:
- /* CHMK */
- break;
- case 0x00000084:
- /* CHMS */
- break;
- case 0x00000085:
- /* CHMU */
- break;
- case 0x00000086:
- /* IMB */
- /* REQUIRED */
- /* Implemented as no-op */
- break;
- case 0x00000087:
- /* INSQHIL */
- break;
- case 0x00000088:
- /* INSQTIL */
- break;
- case 0x00000089:
- /* INSQHIQ */
- break;
- case 0x0000008A:
- /* INSQTIQ */
- break;
- case 0x0000008B:
- /* INSQUEL */
- break;
- case 0x0000008C:
- /* INSQUEQ */
- break;
- case 0x0000008D:
- /* INSQUEL/D */
- break;
- case 0x0000008E:
- /* INSQUEQ/D */
- break;
- case 0x0000008F:
- /* PROBER */
- break;
- case 0x00000090:
- /* PROBEW */
- break;
- case 0x00000091:
- /* RD_PS */
- break;
- case 0x00000092:
- /* REI */
- break;
- case 0x00000093:
- /* REMQHIL */
- break;
- case 0x00000094:
- /* REMQTIL */
- break;
- case 0x00000095:
- /* REMQHIQ */
- break;
- case 0x00000096:
- /* REMQTIQ */
- break;
- case 0x00000097:
- /* REMQUEL */
- break;
- case 0x00000098:
- /* REMQUEQ */
- break;
- case 0x00000099:
- /* REMQUEL/D */
- break;
- case 0x0000009A:
- /* REMQUEQ/D */
- break;
- case 0x0000009B:
- /* SWASTEN */
- break;
- case 0x0000009C:
- /* WR_PS_SW */
- break;
- case 0x0000009D:
- /* RSCC */
- break;
- case 0x0000009E:
- /* READ_UNQ */
- /* REQUIRED */
- break;
- case 0x0000009F:
- /* WRITE_UNQ */
- /* REQUIRED */
- break;
- case 0x000000A0:
- /* AMOVRR */
- break;
- case 0x000000A1:
- /* AMOVRM */
- break;
- case 0x000000A2:
- /* INSQHILR */
- break;
- case 0x000000A3:
- /* INSQTILR */
- break;
- case 0x000000A4:
- /* INSQHIQR */
- break;
- case 0x000000A5:
- /* INSQTIQR */
- break;
- case 0x000000A6:
- /* REMQHILR */
- break;
- case 0x000000A7:
- /* REMQTILR */
- break;
- case 0x000000A8:
- /* REMQHIQR */
- break;
- case 0x000000A9:
- /* REMQTIQR */
- break;
- case 0x000000AA:
- /* GENTRAP */
- /* REQUIRED */
- break;
- case 0x000000AE:
- /* CLRFEN */
- break;
- default:
- break;
- }
-}
-
-static void pal_unix_call (CPUState *env, uint32_t palcode)
-{
- uint64_t palid, val, oldval;
-
- if (palcode < 0x00000080) {
- /* Privileged palcodes */
- if (!(env->ps >> 3)) {
- /* TODO: generate privilege exception */
- }
- }
- switch (palcode) {
- case 0x00000000:
- /* HALT */
- /* REQUIRED */
- break;
- case 0x00000001:
- /* CFLUSH */
- break;
- case 0x00000002:
- /* DRAINA */
- /* REQUIRED */
- /* Implemented as no-op */
- break;
- case 0x00000009:
- /* CSERVE */
- /* REQUIRED */
- break;
- case 0x0000000A:
- /* SWPPAL */
- /* REQUIRED */
- palid = env->ir[16];
- do_swappal(env, palid);
- break;
- case 0x0000000D:
- /* WRIPIR */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_IPIR, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x00000010:
- /* RDMCES */
- if (cpu_alpha_mfpr(env, IPR_MCES, &val) == 0)
- env->ir[0] = val;
- break;
- case 0x00000011:
- /* WRMCES */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_MCES, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x0000002B:
- /* WRFEN */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_PERFMON, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x0000002D:
- /* WRVPTPTR */
- break;
- case 0x00000030:
- /* SWPCTX */
- break;
- case 0x00000031:
- /* WRVAL */
- break;
- case 0x00000032:
- /* RDVAL */
- break;
- case 0x00000033:
- /* TBI */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_TBIS, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x00000034:
- /* WRENT */
- break;
- case 0x00000035:
- /* SWPIPL */
- break;
- case 0x00000036:
- /* RDPS */
- break;
- case 0x00000037:
- /* WRKGP */
- break;
- case 0x00000038:
- /* WRUSP */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_USP, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x00000039:
- /* WRPERFMON */
- val = env->ir[16];
- if (cpu_alpha_mtpr(env, IPR_PERFMON, val, &oldval) == 1)
- env->ir[0] = val;
- break;
- case 0x0000003A:
- /* RDUSP */
- if (cpu_alpha_mfpr(env, IPR_USP, &val) == 0)
- env->ir[0] = val;
- break;
- case 0x0000003C:
- /* WHAMI */
- if (cpu_alpha_mfpr(env, IPR_WHAMI, &val) == 0)
- env->ir[0] = val;
- break;
- case 0x0000003D:
- /* RETSYS */
- break;
- case 0x0000003E:
- /* WTINT */
- break;
- case 0x0000003F:
- /* RTI */
- if (cpu_alpha_mfpr(env, IPR_WHAMI, &val) == 0)
- env->ir[0] = val;
- break;
- case 0x00000080:
- /* BPT */
- /* REQUIRED */
- break;
- case 0x00000081:
- /* BUGCHK */
- /* REQUIRED */
- break;
- case 0x00000083:
- /* CALLSYS */
- break;
- case 0x00000086:
- /* IMB */
- /* REQUIRED */
- /* Implemented as no-op */
- break;
- case 0x00000092:
- /* URTI */
- break;
- case 0x0000009E:
- /* RDUNIQUE */
- /* REQUIRED */
- break;
- case 0x0000009F:
- /* WRUNIQUE */
- /* REQUIRED */
- break;
- case 0x000000AA:
- /* GENTRAP */
- /* REQUIRED */
- break;
- case 0x000000AE:
- /* CLRFEN */
- break;
- default:
- break;
- }
-}
-
-void call_pal (CPUState *env)
-{
- pal_handler_t *pal_handler = env->pal_handler;
-
- switch (env->exception_index) {
- case EXCP_RESET:
- (*pal_handler->reset)(env);
- break;
- case EXCP_MCHK:
- (*pal_handler->machine_check)(env);
- break;
- case EXCP_ARITH:
- (*pal_handler->arithmetic)(env);
- break;
- case EXCP_INTERRUPT:
- (*pal_handler->interrupt)(env);
- break;
- case EXCP_DFAULT:
- (*pal_handler->dfault)(env);
- break;
- case EXCP_DTB_MISS_PAL:
- (*pal_handler->dtb_miss_pal)(env);
- break;
- case EXCP_DTB_MISS_NATIVE:
- (*pal_handler->dtb_miss_native)(env);
- break;
- case EXCP_UNALIGN:
- (*pal_handler->unalign)(env);
- break;
- case EXCP_ITB_MISS:
- (*pal_handler->itb_miss)(env);
- break;
- case EXCP_ITB_ACV:
- (*pal_handler->itb_acv)(env);
- break;
- case EXCP_OPCDEC:
- (*pal_handler->opcdec)(env);
- break;
- case EXCP_FEN:
- (*pal_handler->fen)(env);
- break;
- default:
- if (env->exception_index >= EXCP_CALL_PAL &&
- env->exception_index < EXCP_CALL_PALP) {
- /* Unprivileged PAL call */
- (*pal_handler->call_pal)
- (env, (env->exception_index - EXCP_CALL_PAL) >> 6);
- } else if (env->exception_index >= EXCP_CALL_PALP &&
- env->exception_index < EXCP_CALL_PALE) {
- /* Privileged PAL call */
- (*pal_handler->call_pal)
- (env, ((env->exception_index - EXCP_CALL_PALP) >> 6) + 0x80);
- } else {
- /* Should never happen */
- }
- break;
- }
- env->ipr[IPR_EXC_ADDR] &= ~1;
-}
-
-void pal_init (CPUState *env)
-{
- do_swappal(env, 0);
-}
-
-#if 0
-static uint64_t get_ptebase (CPUState *env, uint64_t vaddr)
-{
- uint64_t virbnd, ptbr;
-
- if ((env->features & FEATURE_VIRBND)) {
- cpu_alpha_mfpr(env, IPR_VIRBND, &virbnd);
- if (vaddr >= virbnd)
- cpu_alpha_mfpr(env, IPR_SYSPTBR, &ptbr);
- else
- cpu_alpha_mfpr(env, IPR_PTBR, &ptbr);
- } else {
- cpu_alpha_mfpr(env, IPR_PTBR, &ptbr);
- }
-
- return ptbr;
-}
-
-static int get_page_bits (CPUState *env)
-{
- /* XXX */
- return 13;
-}
-
-static int get_pte (uint64_t *pfnp, int *zbitsp, int *protp,
- uint64_t ptebase, int page_bits, uint64_t level,
- int mmu_idx, int rw)
-{
- uint64_t pteaddr, pte, pfn;
- uint8_t gh;
- int ure, uwe, kre, kwe, foE, foR, foW, v, ret, ar, is_user;
-
- /* XXX: TOFIX */
- is_user = mmu_idx == MMU_USER_IDX;
- pteaddr = (ptebase << page_bits) + (8 * level);
- pte = ldq_raw(pteaddr);
- /* Decode all interresting PTE fields */
- pfn = pte >> 32;
- uwe = (pte >> 13) & 1;
- kwe = (pte >> 12) & 1;
- ure = (pte >> 9) & 1;
- kre = (pte >> 8) & 1;
- gh = (pte >> 5) & 3;
- foE = (pte >> 3) & 1;
- foW = (pte >> 2) & 1;
- foR = (pte >> 1) & 1;
- v = pte & 1;
- ret = 0;
- if (!v)
- ret = 0x1;
- /* Check access rights */
- ar = 0;
- if (is_user) {
- if (ure)
- ar |= PAGE_READ;
- if (uwe)
- ar |= PAGE_WRITE;
- if (rw == 1 && !uwe)
- ret |= 0x2;
- if (rw != 1 && !ure)
- ret |= 0x2;
- } else {
- if (kre)
- ar |= PAGE_READ;
- if (kwe)
- ar |= PAGE_WRITE;
- if (rw == 1 && !kwe)
- ret |= 0x2;
- if (rw != 1 && !kre)
- ret |= 0x2;
- }
- if (rw == 0 && foR)
- ret |= 0x4;
- if (rw == 2 && foE)
- ret |= 0x8;
- if (rw == 1 && foW)
- ret |= 0xC;
- *pfnp = pfn;
- if (zbitsp != NULL)
- *zbitsp = page_bits + (3 * gh);
- if (protp != NULL)
- *protp = ar;
-
- return ret;
-}
-
-static int paddr_from_pte (uint64_t *paddr, int *zbitsp, int *prot,
- uint64_t ptebase, int page_bits,
- uint64_t vaddr, int mmu_idx, int rw)
-{
- uint64_t pfn, page_mask, lvl_mask, level1, level2, level3;
- int lvl_bits, ret;
-
- page_mask = (1ULL << page_bits) - 1ULL;
- lvl_bits = page_bits - 3;
- lvl_mask = (1ULL << lvl_bits) - 1ULL;
- level3 = (vaddr >> page_bits) & lvl_mask;
- level2 = (vaddr >> (page_bits + lvl_bits)) & lvl_mask;
- level1 = (vaddr >> (page_bits + (2 * lvl_bits))) & lvl_mask;
- /* Level 1 PTE */
- ret = get_pte(&pfn, NULL, NULL, ptebase, page_bits, level1, 0, 0);
- switch (ret) {
- case 3:
- /* Access violation */
- return 2;
- case 2:
- /* translation not valid */
- return 1;
- default:
- /* OK */
- break;
- }
- /* Level 2 PTE */
- ret = get_pte(&pfn, NULL, NULL, pfn, page_bits, level2, 0, 0);
- switch (ret) {
- case 3:
- /* Access violation */
- return 2;
- case 2:
- /* translation not valid */
- return 1;
- default:
- /* OK */
- break;
- }
- /* Level 3 PTE */
- ret = get_pte(&pfn, zbitsp, prot, pfn, page_bits, level3, mmu_idx, rw);
- if (ret & 0x1) {
- /* Translation not valid */
- ret = 1;
- } else if (ret & 2) {
- /* Access violation */
- ret = 2;
- } else {
- switch (ret & 0xC) {
- case 0:
- /* OK */
- ret = 0;
- break;
- case 0x4:
- /* Fault on read */
- ret = 3;
- break;
- case 0x8:
- /* Fault on execute */
- ret = 4;
- break;
- case 0xC:
- /* Fault on write */
- ret = 5;
- break;
- }
- }
- *paddr = (pfn << page_bits) | (vaddr & page_mask);
-
- return 0;
-}
-
-static int virtual_to_physical (CPUState *env, uint64_t *physp,
- int *zbitsp, int *protp,
- uint64_t virtual, int mmu_idx, int rw)
-{
- uint64_t sva, ptebase;
- int seg, page_bits, ret;
-
- sva = ((int64_t)(virtual << (64 - VA_BITS))) >> (64 - VA_BITS);
- if (sva != virtual)
- seg = -1;
- else
- seg = sva >> (VA_BITS - 2);
- virtual &= ~(0xFFFFFC0000000000ULL << (VA_BITS - 43));
- ptebase = get_ptebase(env, virtual);
- page_bits = get_page_bits(env);
- ret = 0;
- switch (seg) {
- case 0:
- /* seg1: 3 levels of PTE */
- ret = paddr_from_pte(physp, zbitsp, protp, ptebase, page_bits,
- virtual, mmu_idx, rw);
- break;
- case 1:
- /* seg1: 2 levels of PTE */
- ret = paddr_from_pte(physp, zbitsp, protp, ptebase, page_bits,
- virtual, mmu_idx, rw);
- break;
- case 2:
- /* kernel segment */
- if (mmu_idx != 0) {
- ret = 2;
- } else {
- *physp = virtual;
- }
- break;
- case 3:
- /* seg1: TB mapped */
- ret = paddr_from_pte(physp, zbitsp, protp, ptebase, page_bits,
- virtual, mmu_idx, rw);
- break;
- default:
- ret = 1;
- break;
- }
-
- return ret;
-}
-
-/* XXX: code provision */
-int cpu_ppc_handle_mmu_fault (CPUState *env, uint32_t address, int rw,
- int mmu_idx, int is_softmmu)
-{
- uint64_t physical, page_size, end;
- int prot, zbits, ret;
-
- ret = virtual_to_physical(env, &physical, &zbits, &prot,
- address, mmu_idx, rw);
-
- switch (ret) {
- case 0:
- /* No fault */
- page_size = 1ULL << zbits;
- address &= ~(page_size - 1);
- /* FIXME: page_size should probably be passed to tlb_set_page,
- and this loop removed. */
- for (end = physical + page_size; physical < end; physical += 0x1000) {
- tlb_set_page(env, address, physical, prot, mmu_idx,
- TARGET_PAGE_SIZE);
- address += 0x1000;
- }
- ret = 0;
- break;
-#if 0
- case 1:
- env->exception_index = EXCP_DFAULT;
- env->ipr[IPR_EXC_ADDR] = address;
- ret = 1;
- break;
- case 2:
- env->exception_index = EXCP_ACCESS_VIOLATION;
- env->ipr[IPR_EXC_ADDR] = address;
- ret = 1;
- break;
- case 3:
- env->exception_index = EXCP_FAULT_ON_READ;
- env->ipr[IPR_EXC_ADDR] = address;
- ret = 1;
- break;
- case 4:
- env->exception_index = EXCP_FAULT_ON_EXECUTE;
- env->ipr[IPR_EXC_ADDR] = address;
- ret = 1;
- case 5:
- env->exception_index = EXCP_FAULT_ON_WRITE;
- env->ipr[IPR_EXC_ADDR] = address;
- ret = 1;
-#endif
- default:
- /* Should never happen */
- env->exception_index = EXCP_MCHK;
- env->ipr[IPR_EXC_ADDR] = address;
- ret = 1;
- break;
- }
-
- return ret;
-}
-#endif
diff --git a/linux-user/main.c b/linux-user/main.c
index 088def3cfd..04da0a4ca4 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -2508,49 +2508,27 @@ void cpu_loop (CPUState *env)
fprintf(stderr, "Machine check exception. Exit\n");
exit(1);
break;
- case EXCP_ARITH:
- env->lock_addr = -1;
- info.si_signo = TARGET_SIGFPE;
- info.si_errno = 0;
- info.si_code = TARGET_FPE_FLTINV;
- info._sifields._sigfault._addr = env->pc;
- queue_signal(env, info.si_signo, &info);
- break;
- case EXCP_HW_INTERRUPT:
+ case EXCP_SMP_INTERRUPT:
+ case EXCP_CLK_INTERRUPT:
+ case EXCP_DEV_INTERRUPT:
fprintf(stderr, "External interrupt. Exit\n");
exit(1);
break;
- case EXCP_DFAULT:
+ case EXCP_MMFAULT:
env->lock_addr = -1;
info.si_signo = TARGET_SIGSEGV;
info.si_errno = 0;
- info.si_code = (page_get_flags(env->ipr[IPR_EXC_ADDR]) & PAGE_VALID
+ info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
- info._sifields._sigfault._addr = env->ipr[IPR_EXC_ADDR];
+ info._sifields._sigfault._addr = env->trap_arg0;
queue_signal(env, info.si_signo, &info);
break;
- case EXCP_DTB_MISS_PAL:
- fprintf(stderr, "MMU data TLB miss in PALcode\n");
- exit(1);
- break;
- case EXCP_ITB_MISS:
- fprintf(stderr, "MMU instruction TLB miss\n");
- exit(1);
- break;
- case EXCP_ITB_ACV:
- fprintf(stderr, "MMU instruction access violation\n");
- exit(1);
- break;
- case EXCP_DTB_MISS_NATIVE:
- fprintf(stderr, "MMU data TLB miss\n");
- exit(1);
- break;
case EXCP_UNALIGN:
env->lock_addr = -1;
info.si_signo = TARGET_SIGBUS;
info.si_errno = 0;
info.si_code = TARGET_BUS_ADRALN;
- info._sifields._sigfault._addr = env->ipr[IPR_EXC_ADDR];
+ info._sifields._sigfault._addr = env->trap_arg0;
queue_signal(env, info.si_signo, &info);
break;
case EXCP_OPCDEC:
@@ -2562,12 +2540,20 @@ void cpu_loop (CPUState *env)
info._sifields._sigfault._addr = env->pc;
queue_signal(env, info.si_signo, &info);
break;
+ case EXCP_ARITH:
+ env->lock_addr = -1;
+ info.si_signo = TARGET_SIGFPE;
+ info.si_errno = 0;
+ info.si_code = TARGET_FPE_FLTINV;
+ info._sifields._sigfault._addr = env->pc;
+ queue_signal(env, info.si_signo, &info);
+ break;
case EXCP_FEN:
/* No-op. Linux simply re-enables the FPU. */
break;
- case EXCP_CALL_PAL ... (EXCP_CALL_PALP - 1):
+ case EXCP_CALL_PAL:
env->lock_addr = -1;
- switch ((trapnr >> 6) | 0x80) {
+ switch (env->error_code) {
case 0x80:
/* BPT */
info.si_signo = TARGET_SIGTRAP;
@@ -2658,8 +2644,6 @@ void cpu_loop (CPUState *env)
goto do_sigill;
}
break;
- case EXCP_CALL_PALP ... (EXCP_CALL_PALE - 1):
- goto do_sigill;
case EXCP_DEBUG:
info.si_signo = gdb_handlesig (env, TARGET_SIGTRAP);
if (info.si_signo) {
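
In the linux-user hunks above, user-mode CALL_PAL no longer derives the PAL call number from the trap number ((trapnr >> 6) | 0x80); the translator now stores it in env->error_code and cpu_loop() switches on that directly. A hedged standalone sketch of the kind of number-to-signal mapping that dispatch performs — the helper name is invented, the BPT-to-SIGTRAP case follows the visible diff, and the BUGCHK and GENTRAP cases are assumptions about the elided part of the switch:

#include <signal.h>
#include <stdio.h>

/* Hypothetical helper: map an unprivileged CALL_PAL number to the signal a
   user-mode emulator would queue; 0 means it is handled some other way. */
static int callpal_to_signal(unsigned palcode)
{
    switch (palcode) {
    case 0x80:              /* BPT, per the diff above */
    case 0x81:              /* BUGCHK (assumed) */
        return SIGTRAP;
    case 0xAA:              /* GENTRAP (assumed mapping) */
        return SIGFPE;
    default:
        return 0;           /* e.g. 0x83 CALLSYS would go to the syscall path */
    }
}

int main(void)
{
    printf("CALL_PAL 0x80 -> signal %d\n", callpal_to_signal(0x80));
    return 0;
}
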
diff --git a/target-alpha/cpu.h b/target-alpha/cpu.h
index 686fb4a6a7..e98b32513c 100644
--- a/target-alpha/cpu.h
+++ b/target-alpha/cpu.h
@@ -192,171 +192,39 @@ enum {
#define SWCR_MASK (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK | SWCR_STATUS_MASK)
-/* Internal processor registers */
-/* XXX: TOFIX: most of those registers are implementation dependant */
-enum {
-#if defined(CONFIG_USER_ONLY)
- IPR_EXC_ADDR,
- IPR_EXC_SUM,
- IPR_EXC_MASK,
-#else
- /* Ebox IPRs */
- IPR_CC = 0xC0, /* 21264 */
- IPR_CC_CTL = 0xC1, /* 21264 */
-#define IPR_CC_CTL_ENA_SHIFT 32
-#define IPR_CC_CTL_COUNTER_MASK 0xfffffff0UL
- IPR_VA = 0xC2, /* 21264 */
- IPR_VA_CTL = 0xC4, /* 21264 */
-#define IPR_VA_CTL_VA_48_SHIFT 1
-#define IPR_VA_CTL_VPTB_SHIFT 30
- IPR_VA_FORM = 0xC3, /* 21264 */
- /* Ibox IPRs */
- IPR_ITB_TAG = 0x00, /* 21264 */
- IPR_ITB_PTE = 0x01, /* 21264 */
- IPR_ITB_IAP = 0x02,
- IPR_ITB_IA = 0x03, /* 21264 */
- IPR_ITB_IS = 0x04, /* 21264 */
- IPR_PMPC = 0x05,
- IPR_EXC_ADDR = 0x06, /* 21264 */
- IPR_IVA_FORM = 0x07, /* 21264 */
- IPR_CM = 0x09, /* 21264 */
-#define IPR_CM_SHIFT 3
-#define IPR_CM_MASK (3ULL << IPR_CM_SHIFT) /* 21264 */
- IPR_IER = 0x0A, /* 21264 */
-#define IPR_IER_MASK 0x0000007fffffe000ULL
- IPR_IER_CM = 0x0B, /* 21264: = CM | IER */
- IPR_SIRR = 0x0C, /* 21264 */
-#define IPR_SIRR_SHIFT 14
-#define IPR_SIRR_MASK 0x7fff
- IPR_ISUM = 0x0D, /* 21264 */
- IPR_HW_INT_CLR = 0x0E, /* 21264 */
- IPR_EXC_SUM = 0x0F,
- IPR_PAL_BASE = 0x10,
- IPR_I_CTL = 0x11,
-#define IPR_I_CTL_CHIP_ID_SHIFT 24 /* 21264 */
-#define IPR_I_CTL_BIST_FAIL (1 << 23) /* 21264 */
-#define IPR_I_CTL_IC_EN_SHIFT 2 /* 21264 */
-#define IPR_I_CTL_SDE1_SHIFT 7 /* 21264 */
-#define IPR_I_CTL_HWE_SHIFT 12 /* 21264 */
-#define IPR_I_CTL_VA_48_SHIFT 15 /* 21264 */
-#define IPR_I_CTL_SPE_SHIFT 3 /* 21264 */
-#define IPR_I_CTL_CALL_PAL_R23_SHIFT 20 /* 21264 */
- IPR_I_STAT = 0x16, /* 21264 */
- IPR_IC_FLUSH = 0x13, /* 21264 */
- IPR_IC_FLUSH_ASM = 0x12, /* 21264 */
- IPR_CLR_MAP = 0x15,
- IPR_SLEEP = 0x17,
- IPR_PCTX = 0x40,
- IPR_PCTX_ASN = 0x01, /* field */
-#define IPR_PCTX_ASN_SHIFT 39
- IPR_PCTX_ASTER = 0x02, /* field */
-#define IPR_PCTX_ASTER_SHIFT 5
- IPR_PCTX_ASTRR = 0x04, /* field */
-#define IPR_PCTX_ASTRR_SHIFT 9
- IPR_PCTX_PPCE = 0x08, /* field */
-#define IPR_PCTX_PPCE_SHIFT 1
- IPR_PCTX_FPE = 0x10, /* field */
-#define IPR_PCTX_FPE_SHIFT 2
- IPR_PCTX_ALL = 0x5f, /* all fields */
- IPR_PCTR_CTL = 0x14, /* 21264 */
- /* Mbox IPRs */
- IPR_DTB_TAG0 = 0x20, /* 21264 */
- IPR_DTB_TAG1 = 0xA0, /* 21264 */
- IPR_DTB_PTE0 = 0x21, /* 21264 */
- IPR_DTB_PTE1 = 0xA1, /* 21264 */
- IPR_DTB_ALTMODE = 0xA6,
- IPR_DTB_ALTMODE0 = 0x26, /* 21264 */
-#define IPR_DTB_ALTMODE_MASK 3
- IPR_DTB_IAP = 0xA2,
- IPR_DTB_IA = 0xA3, /* 21264 */
- IPR_DTB_IS0 = 0x24,
- IPR_DTB_IS1 = 0xA4,
- IPR_DTB_ASN0 = 0x25, /* 21264 */
- IPR_DTB_ASN1 = 0xA5, /* 21264 */
-#define IPR_DTB_ASN_SHIFT 56
- IPR_MM_STAT = 0x27, /* 21264 */
- IPR_M_CTL = 0x28, /* 21264 */
-#define IPR_M_CTL_SPE_SHIFT 1
-#define IPR_M_CTL_SPE_MASK 7
- IPR_DC_CTL = 0x29, /* 21264 */
- IPR_DC_STAT = 0x2A, /* 21264 */
- /* Cbox IPRs */
- IPR_C_DATA = 0x2B,
- IPR_C_SHIFT = 0x2C,
-
- IPR_ASN,
- IPR_ASTEN,
- IPR_ASTSR,
- IPR_DATFX,
- IPR_ESP,
- IPR_FEN,
- IPR_IPIR,
- IPR_IPL,
- IPR_KSP,
- IPR_MCES,
- IPR_PERFMON,
- IPR_PCBB,
- IPR_PRBR,
- IPR_PTBR,
- IPR_SCBB,
- IPR_SISR,
- IPR_SSP,
- IPR_SYSPTBR,
- IPR_TBCHK,
- IPR_TBIA,
- IPR_TBIAP,
- IPR_TBIS,
- IPR_TBISD,
- IPR_TBISI,
- IPR_USP,
- IPR_VIRBND,
- IPR_VPTB,
- IPR_WHAMI,
- IPR_ALT_MODE,
-#endif
- IPR_LAST,
-};
+/* MMU modes definitions */
-typedef struct CPUAlphaState CPUAlphaState;
+/* Alpha has 5 MMU modes: PALcode, kernel, executive, supervisor, and user.
+ The Unix PALcode only exposes the kernel and user modes; presumably
+ executive and supervisor are used by VMS.
-typedef struct pal_handler_t pal_handler_t;
-struct pal_handler_t {
- /* Reset */
- void (*reset)(CPUAlphaState *env);
- /* Uncorrectable hardware error */
- void (*machine_check)(CPUAlphaState *env);
- /* Arithmetic exception */
- void (*arithmetic)(CPUAlphaState *env);
- /* Interrupt / correctable hardware error */
- void (*interrupt)(CPUAlphaState *env);
- /* Data fault */
- void (*dfault)(CPUAlphaState *env);
- /* DTB miss pal */
- void (*dtb_miss_pal)(CPUAlphaState *env);
- /* DTB miss native */
- void (*dtb_miss_native)(CPUAlphaState *env);
- /* Unaligned access */
- void (*unalign)(CPUAlphaState *env);
- /* ITB miss */
- void (*itb_miss)(CPUAlphaState *env);
- /* Instruction stream access violation */
- void (*itb_acv)(CPUAlphaState *env);
- /* Reserved or privileged opcode */
- void (*opcdec)(CPUAlphaState *env);
- /* Floating point exception */
- void (*fen)(CPUAlphaState *env);
- /* Call pal instruction */
- void (*call_pal)(CPUAlphaState *env, uint32_t palcode);
-};
+ PALcode itself uses physical mode for code and kernel mode for data;
+ there are PALmode instructions that can access data via physical mode
+ or via an os-installed "alternate mode", which is one of the 4 above.
-#define NB_MMU_MODES 4
+ QEMU does not currently properly distinguish between code/data when
+ looking up addresses. To avoid having to address this issue, our
+ emulated PALcode will cheat and use the KSEG mapping for its code+data
+ rather than physical addresses.
+
+ Moreover, we're only emulating Unix PALcode, and not attempting VMS.
+
+ All of which allows us to drop all but kernel and user modes.
+ Elide the unused MMU modes to save space. */
+
+#define NB_MMU_MODES 2
+
+#define MMU_MODE0_SUFFIX _kernel
+#define MMU_MODE1_SUFFIX _user
+#define MMU_KERNEL_IDX 0
+#define MMU_USER_IDX 1
+
+typedef struct CPUAlphaState CPUAlphaState;
struct CPUAlphaState {
uint64_t ir[31];
float64 fir[31];
uint64_t pc;
- uint64_t ipr[IPR_LAST];
- uint64_t ps;
uint64_t unique;
uint64_t lock_addr;
uint64_t lock_st_addr;
@@ -371,10 +239,33 @@ struct CPUAlphaState {
uint8_t fpcr_dnod;
uint8_t fpcr_undz;
- /* Used for HW_LD / HW_ST */
- uint8_t saved_mode;
- /* For RC and RS */
+ /* The Internal Processor Registers. Some of these we assume always
+ exist for use in user-mode. */
+ uint8_t ps;
uint8_t intr_flag;
+ uint8_t pal_mode;
+ uint8_t fen;
+
+ uint32_t pcc_ofs;
+
+ /* These pass data from the exception logic in the translator and
+ helpers to the OS entry point. This is used for both system
+ emulation and user-mode. */
+ uint64_t trap_arg0;
+ uint64_t trap_arg1;
+ uint64_t trap_arg2;
+
+#if !defined(CONFIG_USER_ONLY)
+ /* The internal data required by our emulation of the Unix PALcode. */
+ uint64_t exc_addr;
+ uint64_t palbr;
+ uint64_t ptbr;
+ uint64_t vptptr;
+ uint64_t sysval;
+ uint64_t usp;
+ uint64_t shadow[8];
+ uint64_t scratch[24];
+#endif
#if TARGET_LONG_BITS > HOST_LONG_BITS
/* temporary fixed-point registers
@@ -386,14 +277,11 @@ struct CPUAlphaState {
/* Those resources are used only in Qemu core */
CPU_COMMON
- uint32_t hflags;
-
int error_code;
uint32_t features;
uint32_t amask;
int implver;
- pal_handler_t *pal_handler;
};
#define cpu_init cpu_alpha_init
@@ -401,17 +289,6 @@ struct CPUAlphaState {
#define cpu_gen_code cpu_alpha_gen_code
#define cpu_signal_handler cpu_alpha_signal_handler
-/* MMU modes definitions */
-#define MMU_MODE0_SUFFIX _kernel
-#define MMU_MODE1_SUFFIX _executive
-#define MMU_MODE2_SUFFIX _supervisor
-#define MMU_MODE3_SUFFIX _user
-#define MMU_USER_IDX 3
-static inline int cpu_mmu_index (CPUState *env)
-{
- return (env->ps >> 3) & 3;
-}
-
#include "cpu-all.h"
enum {
@@ -422,36 +299,89 @@ enum {
};
enum {
- EXCP_RESET = 0x0000,
- EXCP_MCHK = 0x0020,
- EXCP_ARITH = 0x0060,
- EXCP_HW_INTERRUPT = 0x00E0,
- EXCP_DFAULT = 0x01E0,
- EXCP_DTB_MISS_PAL = 0x09E0,
- EXCP_ITB_MISS = 0x03E0,
- EXCP_ITB_ACV = 0x07E0,
- EXCP_DTB_MISS_NATIVE = 0x08E0,
- EXCP_UNALIGN = 0x11E0,
- EXCP_OPCDEC = 0x13E0,
- EXCP_FEN = 0x17E0,
- EXCP_CALL_PAL = 0x2000,
- EXCP_CALL_PALP = 0x3000,
- EXCP_CALL_PALE = 0x4000,
- /* Pseudo exception for console */
- EXCP_CONSOLE_DISPATCH = 0x4001,
- EXCP_CONSOLE_FIXUP = 0x4002,
- EXCP_STL_C = 0x4003,
- EXCP_STQ_C = 0x4004,
+ EXCP_RESET,
+ EXCP_MCHK,
+ EXCP_SMP_INTERRUPT,
+ EXCP_CLK_INTERRUPT,
+ EXCP_DEV_INTERRUPT,
+ EXCP_MMFAULT,
+ EXCP_UNALIGN,
+ EXCP_OPCDEC,
+ EXCP_ARITH,
+ EXCP_FEN,
+ EXCP_CALL_PAL,
+ /* For Usermode emulation. */
+ EXCP_STL_C,
+ EXCP_STQ_C,
};
-/* Arithmetic exception */
-#define EXC_M_IOV (1<<16) /* Integer Overflow */
-#define EXC_M_INE (1<<15) /* Inexact result */
-#define EXC_M_UNF (1<<14) /* Underflow */
-#define EXC_M_FOV (1<<13) /* Overflow */
-#define EXC_M_DZE (1<<12) /* Division by zero */
-#define EXC_M_INV (1<<11) /* Invalid operation */
-#define EXC_M_SWC (1<<10) /* Software completion */
+/* Alpha-specific interrupt pending bits. */
+#define CPU_INTERRUPT_TIMER CPU_INTERRUPT_TGT_EXT_0
+#define CPU_INTERRUPT_SMP CPU_INTERRUPT_TGT_EXT_1
+#define CPU_INTERRUPT_MCHK CPU_INTERRUPT_TGT_EXT_2
+
+/* OSF/1 Page table bits. */
+enum {
+ PTE_VALID = 0x0001,
+ PTE_FOR = 0x0002, /* used for page protection (fault on read) */
+ PTE_FOW = 0x0004, /* used for page protection (fault on write) */
+ PTE_FOE = 0x0008, /* used for page protection (fault on exec) */
+ PTE_ASM = 0x0010,
+ PTE_KRE = 0x0100,
+ PTE_URE = 0x0200,
+ PTE_KWE = 0x1000,
+ PTE_UWE = 0x2000
+};
+
+/* Hardware interrupt (entInt) constants. */
+enum {
+ INT_K_IP,
+ INT_K_CLK,
+ INT_K_MCHK,
+ INT_K_DEV,
+ INT_K_PERF,
+};
+
+/* Memory management (entMM) constants. */
+enum {
+ MM_K_TNV,
+ MM_K_ACV,
+ MM_K_FOR,
+ MM_K_FOE,
+ MM_K_FOW
+};
+
+/* Arithmetic exception (entArith) constants. */
+enum {
+ EXC_M_SWC = 1, /* Software completion */
+ EXC_M_INV = 2, /* Invalid operation */
+ EXC_M_DZE = 4, /* Division by zero */
+ EXC_M_FOV = 8, /* Overflow */
+ EXC_M_UNF = 16, /* Underflow */
+ EXC_M_INE = 32, /* Inexact result */
+ EXC_M_IOV = 64 /* Integer Overflow */
+};
+
+/* Processor status constants. */
+enum {
+ /* Low 3 bits are interrupt mask level. */
+ PS_INT_MASK = 7,
+
+ /* Bits 4 and 5 are the mmu mode. The VMS PALcode uses all 4 modes;
+ The Unix PALcode only uses bit 4. */
+ PS_USER_MODE = 8
+};
+
+static inline int cpu_mmu_index(CPUState *env)
+{
+ if (env->pal_mode) {
+ return MMU_KERNEL_IDX;
+ } else if (env->ps & PS_USER_MODE) {
+ return MMU_USER_IDX;
+ } else {
+ return MMU_KERNEL_IDX;
+ }
+}
enum {
IR_V0 = 0,
@@ -504,19 +434,46 @@ void do_interrupt (CPUState *env);
uint64_t cpu_alpha_load_fpcr (CPUState *env);
void cpu_alpha_store_fpcr (CPUState *env, uint64_t val);
-int cpu_alpha_mfpr (CPUState *env, int iprn, uint64_t *valp);
-int cpu_alpha_mtpr (CPUState *env, int iprn, uint64_t val, uint64_t *oldvalp);
-#if !defined (CONFIG_USER_ONLY)
-void pal_init (CPUState *env);
-void call_pal (CPUState *env);
+#ifndef CONFIG_USER_ONLY
+void swap_shadow_regs(CPUState *env);
+extern QEMU_NORETURN void do_unassigned_access(target_phys_addr_t addr,
+ int, int, int, int);
#endif
+/* Bits in TB->FLAGS that control how translation is processed. */
+enum {
+ TB_FLAGS_PAL_MODE = 1,
+ TB_FLAGS_FEN = 2,
+ TB_FLAGS_USER_MODE = 8,
+
+ TB_FLAGS_AMASK_SHIFT = 4,
+ TB_FLAGS_AMASK_BWX = AMASK_BWX << TB_FLAGS_AMASK_SHIFT,
+ TB_FLAGS_AMASK_FIX = AMASK_FIX << TB_FLAGS_AMASK_SHIFT,
+ TB_FLAGS_AMASK_CIX = AMASK_CIX << TB_FLAGS_AMASK_SHIFT,
+ TB_FLAGS_AMASK_MVI = AMASK_MVI << TB_FLAGS_AMASK_SHIFT,
+ TB_FLAGS_AMASK_TRAP = AMASK_TRAP << TB_FLAGS_AMASK_SHIFT,
+ TB_FLAGS_AMASK_PREFETCH = AMASK_PREFETCH << TB_FLAGS_AMASK_SHIFT,
+};
+
static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
- target_ulong *cs_base, int *flags)
+ target_ulong *cs_base, int *pflags)
{
+ int flags = 0;
+
*pc = env->pc;
*cs_base = 0;
- *flags = env->ps;
+
+ if (env->pal_mode) {
+ flags = TB_FLAGS_PAL_MODE;
+ } else {
+ flags = env->ps & PS_USER_MODE;
+ }
+ if (env->fen) {
+ flags |= TB_FLAGS_FEN;
+ }
+ flags |= env->amask << TB_FLAGS_AMASK_SHIFT;
+
+ *pflags = flags;
}
#if defined(CONFIG_USER_ONLY)
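
As a worked illustration of the new TB->FLAGS encoding that the cpu.h hunks above introduce (a standalone sketch using a stripped-down stand-in for CPUAlphaState, not the real structure): PAL mode occupies bit 0, FEN bit 1, user mode bit 3, and the AMASK feature bits are shifted up by 4, so a user-mode CPU with the FPU enabled and BWX+FIX implemented ends up with flags 0x3a.

#include <stdint.h>
#include <stdio.h>

enum {
    TB_FLAGS_PAL_MODE    = 1,
    TB_FLAGS_FEN         = 2,
    TB_FLAGS_USER_MODE   = 8,
    TB_FLAGS_AMASK_SHIFT = 4,
};
enum { PS_USER_MODE = 8 };
enum { AMASK_BWX = 1, AMASK_FIX = 2 };      /* two of the amask feature bits */

/* Stripped-down stand-in for the fields cpu_get_tb_cpu_state() reads. */
struct mini_env {
    uint8_t  ps, pal_mode, fen;
    uint32_t amask;
};

static int mini_tb_flags(const struct mini_env *env)
{
    /* PAL mode and user mode are mutually exclusive, as in the real code. */
    int flags = env->pal_mode ? TB_FLAGS_PAL_MODE : (env->ps & PS_USER_MODE);
    if (env->fen) {
        flags |= TB_FLAGS_FEN;
    }
    return flags | (env->amask << TB_FLAGS_AMASK_SHIFT);
}

int main(void)
{
    struct mini_env user = { .ps = PS_USER_MODE, .fen = 1,
                             .amask = AMASK_BWX | AMASK_FIX };
    printf("flags = 0x%x\n", mini_tb_flags(&user));   /* prints flags = 0x3a */
    return 0;
}
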
diff --git a/target-alpha/exec.h b/target-alpha/exec.h
index 6ae96d148b..7a325e7a75 100644
--- a/target-alpha/exec.h
+++ b/target-alpha/exec.h
@@ -39,7 +39,17 @@ register struct CPUAlphaState *env asm(AREG0);
static inline int cpu_has_work(CPUState *env)
{
- return (env->interrupt_request & CPU_INTERRUPT_HARD);
+ /* Here we are checking to see if the CPU should wake up from HALT.
+ We will have gotten into this state only for WTINT from PALmode. */
+ /* ??? I'm not sure how the IPL state works with WTINT to keep a CPU
+ asleep even if (some) interrupts have been asserted. For now,
+ assume that if a CPU really wants to stay asleep, it will mask
+ interrupts at the chipset level, which will prevent these bits
+ from being set in the first place. */
+ return env->interrupt_request & (CPU_INTERRUPT_HARD
+ | CPU_INTERRUPT_TIMER
+ | CPU_INTERRUPT_SMP
+ | CPU_INTERRUPT_MCHK);
}
static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb)
diff --git a/target-alpha/helper.c b/target-alpha/helper.c
index 3ba4478c8e..32c2cf9db3 100644
--- a/target-alpha/helper.c
+++ b/target-alpha/helper.c
@@ -160,382 +160,299 @@ void cpu_alpha_store_fpcr (CPUState *env, uint64_t val)
}
#if defined(CONFIG_USER_ONLY)
-
int cpu_alpha_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
int mmu_idx, int is_softmmu)
{
- if (rw == 2)
- env->exception_index = EXCP_ITB_MISS;
- else
- env->exception_index = EXCP_DFAULT;
- env->ipr[IPR_EXC_ADDR] = address;
-
+ env->exception_index = EXCP_MMFAULT;
+ env->trap_arg0 = address;
return 1;
}
-
-void do_interrupt (CPUState *env)
+#else
+void swap_shadow_regs(CPUState *env)
{
- env->exception_index = -1;
+ uint64_t i0, i1, i2, i3, i4, i5, i6, i7;
+
+ i0 = env->ir[8];
+ i1 = env->ir[9];
+ i2 = env->ir[10];
+ i3 = env->ir[11];
+ i4 = env->ir[12];
+ i5 = env->ir[13];
+ i6 = env->ir[14];
+ i7 = env->ir[25];
+
+ env->ir[8] = env->shadow[0];
+ env->ir[9] = env->shadow[1];
+ env->ir[10] = env->shadow[2];
+ env->ir[11] = env->shadow[3];
+ env->ir[12] = env->shadow[4];
+ env->ir[13] = env->shadow[5];
+ env->ir[14] = env->shadow[6];
+ env->ir[25] = env->shadow[7];
+
+ env->shadow[0] = i0;
+ env->shadow[1] = i1;
+ env->shadow[2] = i2;
+ env->shadow[3] = i3;
+ env->shadow[4] = i4;
+ env->shadow[5] = i5;
+ env->shadow[6] = i6;
+ env->shadow[7] = i7;
}
-#else
+/* Returns the OSF/1 entMM failure indication, or -1 on success. */
+static int get_physical_address(CPUState *env, target_ulong addr,
+ int prot_need, int mmu_idx,
+ target_ulong *pphys, int *pprot)
+{
+ target_long saddr = addr;
+ target_ulong phys = 0;
+ target_ulong L1pte, L2pte, L3pte;
+ target_ulong pt, index;
+ int prot = 0;
+ int ret = MM_K_ACV;
+
+ /* Ensure that the virtual address is properly sign-extended from
+ the last implemented virtual address bit. */
+ if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) {
+ goto exit;
+ }
+
+ /* Translate the superpage. */
+ /* ??? When we do more than emulate Unix PALcode, we'll need to
+ determine which KSEG is actually active. */
+ if (saddr < 0 && ((saddr >> 41) & 3) == 2) {
+ /* User-space cannot access KSEG addresses. */
+ if (mmu_idx != MMU_KERNEL_IDX) {
+ goto exit;
+ }
+
+ /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
+ We would not do this if the 48-bit KSEG is enabled. */
+ phys = saddr & ((1ull << 40) - 1);
+ phys |= (saddr & (1ull << 40)) << 3;
+
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ ret = -1;
+ goto exit;
+ }
+
+ /* Interpret the page table exactly like PALcode does. */
-target_phys_addr_t cpu_get_phys_page_debug (CPUState *env, target_ulong addr)
+ pt = env->ptbr;
+
+ /* L1 page table read. */
+ index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
+ L1pte = ldq_phys(pt + index*8);
+
+ if (unlikely((L1pte & PTE_VALID) == 0)) {
+ ret = MM_K_TNV;
+ goto exit;
+ }
+ if (unlikely((L1pte & PTE_KRE) == 0)) {
+ goto exit;
+ }
+ pt = L1pte >> 32 << TARGET_PAGE_BITS;
+
+ /* L2 page table read. */
+ index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
+ L2pte = ldq_phys(pt + index*8);
+
+ if (unlikely((L2pte & PTE_VALID) == 0)) {
+ ret = MM_K_TNV;
+ goto exit;
+ }
+ if (unlikely((L2pte & PTE_KRE) == 0)) {
+ goto exit;
+ }
+ pt = L2pte >> 32 << TARGET_PAGE_BITS;
+
+ /* L3 page table read. */
+ index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
+ L3pte = ldq_phys(pt + index*8);
+
+ phys = L3pte >> 32 << TARGET_PAGE_BITS;
+ if (unlikely((L3pte & PTE_VALID) == 0)) {
+ ret = MM_K_TNV;
+ goto exit;
+ }
+
+#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
+# error page bits out of date
+#endif
+
+ /* Check access violations. */
+ if (L3pte & (PTE_KRE << mmu_idx)) {
+ prot |= PAGE_READ | PAGE_EXEC;
+ }
+ if (L3pte & (PTE_KWE << mmu_idx)) {
+ prot |= PAGE_WRITE;
+ }
+ if (unlikely((prot & prot_need) == 0 && prot_need)) {
+ goto exit;
+ }
+
+ /* Check fault-on-operation violations. */
+ prot &= ~(L3pte >> 1);
+ ret = -1;
+ if (unlikely((prot & prot_need) == 0)) {
+ ret = (prot_need & PAGE_EXEC ? MM_K_FOE :
+ prot_need & PAGE_WRITE ? MM_K_FOW :
+ prot_need & PAGE_READ ? MM_K_FOR : -1);
+ }
+
+ exit:
+ *pphys = phys;
+ *pprot = prot;
+ return ret;
+}
+
+target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
- return -1;
+ target_ulong phys;
+ int prot, fail;
+
+ fail = get_physical_address(env, addr, 0, 0, &phys, &prot);
+ return (fail >= 0 ? -1 : phys);
}
-int cpu_alpha_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
- int mmu_idx, int is_softmmu)
+int cpu_alpha_handle_mmu_fault(CPUState *env, target_ulong addr, int rw,
+ int mmu_idx, int is_softmmu)
{
- uint32_t opc;
-
- if (rw == 2) {
- /* Instruction translation buffer miss */
- env->exception_index = EXCP_ITB_MISS;
- } else {
- if (env->ipr[IPR_EXC_ADDR] & 1)
- env->exception_index = EXCP_DTB_MISS_PAL;
- else
- env->exception_index = EXCP_DTB_MISS_NATIVE;
- opc = (ldl_code(env->pc) >> 21) << 4;
- if (rw) {
- opc |= 0x9;
- } else {
- opc |= 0x4;
- }
- env->ipr[IPR_MM_STAT] = opc;
+ target_ulong phys;
+ int prot, fail;
+
+ fail = get_physical_address(env, addr, 1 << rw, mmu_idx, &phys, &prot);
+ if (unlikely(fail >= 0)) {
+ env->exception_index = EXCP_MMFAULT;
+ env->trap_arg0 = addr;
+ env->trap_arg1 = fail;
+ env->trap_arg2 = (rw == 2 ? -1 : rw);
+ return 1;
}
- return 1;
+ tlb_set_page(env, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
+ prot, mmu_idx, TARGET_PAGE_SIZE);
+ return 0;
}
+#endif /* USER_ONLY */
-int cpu_alpha_mfpr (CPUState *env, int iprn, uint64_t *valp)
+void do_interrupt (CPUState *env)
{
- uint64_t hwpcb;
- int ret = 0;
-
- hwpcb = env->ipr[IPR_PCBB];
- switch (iprn) {
- case IPR_ASN:
- if (env->features & FEATURE_ASN)
- *valp = env->ipr[IPR_ASN];
- else
- *valp = 0;
- break;
- case IPR_ASTEN:
- *valp = ((int64_t)(env->ipr[IPR_ASTEN] << 60)) >> 60;
- break;
- case IPR_ASTSR:
- *valp = ((int64_t)(env->ipr[IPR_ASTSR] << 60)) >> 60;
- break;
- case IPR_DATFX:
- /* Write only */
- ret = -1;
- break;
- case IPR_ESP:
- if (env->features & FEATURE_SPS)
- *valp = env->ipr[IPR_ESP];
- else
- *valp = ldq_raw(hwpcb + 8);
- break;
- case IPR_FEN:
- *valp = ((int64_t)(env->ipr[IPR_FEN] << 63)) >> 63;
- break;
- case IPR_IPIR:
- /* Write-only */
- ret = -1;
- break;
- case IPR_IPL:
- *valp = ((int64_t)(env->ipr[IPR_IPL] << 59)) >> 59;
- break;
- case IPR_KSP:
- if (!(env->ipr[IPR_EXC_ADDR] & 1)) {
- ret = -1;
- } else {
- if (env->features & FEATURE_SPS)
- *valp = env->ipr[IPR_KSP];
- else
- *valp = ldq_raw(hwpcb + 0);
- }
- break;
- case IPR_MCES:
- *valp = ((int64_t)(env->ipr[IPR_MCES] << 59)) >> 59;
- break;
- case IPR_PERFMON:
- /* Implementation specific */
- *valp = 0;
- break;
- case IPR_PCBB:
- *valp = ((int64_t)env->ipr[IPR_PCBB] << 16) >> 16;
- break;
- case IPR_PRBR:
- *valp = env->ipr[IPR_PRBR];
- break;
- case IPR_PTBR:
- *valp = env->ipr[IPR_PTBR];
- break;
- case IPR_SCBB:
- *valp = (int64_t)((int32_t)env->ipr[IPR_SCBB]);
- break;
- case IPR_SIRR:
- /* Write-only */
- ret = -1;
- break;
- case IPR_SISR:
- *valp = (int64_t)((int16_t)env->ipr[IPR_SISR]);
- case IPR_SSP:
- if (env->features & FEATURE_SPS)
- *valp = env->ipr[IPR_SSP];
- else
- *valp = ldq_raw(hwpcb + 16);
- break;
- case IPR_SYSPTBR:
- if (env->features & FEATURE_VIRBND)
- *valp = env->ipr[IPR_SYSPTBR];
- else
- ret = -1;
- break;
- case IPR_TBCHK:
- if ((env->features & FEATURE_TBCHK)) {
- /* XXX: TODO */
- *valp = 0;
- ret = -1;
- } else {
- ret = -1;
+ int i = env->exception_index;
+
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ static int count;
+ const char *name = "<unknown>";
+
+ switch (i) {
+ case EXCP_RESET:
+ name = "reset";
+ break;
+ case EXCP_MCHK:
+ name = "mchk";
+ break;
+ case EXCP_SMP_INTERRUPT:
+ name = "smp_interrupt";
+ break;
+ case EXCP_CLK_INTERRUPT:
+ name = "clk_interrupt";
+ break;
+ case EXCP_DEV_INTERRUPT:
+ name = "dev_interrupt";
+ break;
+ case EXCP_MMFAULT:
+ name = "mmfault";
+ break;
+ case EXCP_UNALIGN:
+ name = "unalign";
+ break;
+ case EXCP_OPCDEC:
+ name = "opcdec";
+ break;
+ case EXCP_ARITH:
+ name = "arith";
+ break;
+ case EXCP_FEN:
+ name = "fen";
+ break;
+ case EXCP_CALL_PAL:
+ name = "call_pal";
+ break;
+ case EXCP_STL_C:
+ name = "stl_c";
+ break;
+ case EXCP_STQ_C:
+ name = "stq_c";
+ break;
}
- break;
- case IPR_TBIA:
- /* Write-only */
- ret = -1;
- break;
- case IPR_TBIAP:
- /* Write-only */
- ret = -1;
- break;
- case IPR_TBIS:
- /* Write-only */
- ret = -1;
- break;
- case IPR_TBISD:
- /* Write-only */
- ret = -1;
- break;
- case IPR_TBISI:
- /* Write-only */
- ret = -1;
- break;
- case IPR_USP:
- if (env->features & FEATURE_SPS)
- *valp = env->ipr[IPR_USP];
- else
- *valp = ldq_raw(hwpcb + 24);
- break;
- case IPR_VIRBND:
- if (env->features & FEATURE_VIRBND)
- *valp = env->ipr[IPR_VIRBND];
- else
- ret = -1;
- break;
- case IPR_VPTB:
- *valp = env->ipr[IPR_VPTB];
- break;
- case IPR_WHAMI:
- *valp = env->ipr[IPR_WHAMI];
- break;
- default:
- /* Invalid */
- ret = -1;
- break;
+ qemu_log("INT %6d: %s(%#x) pc=%016" PRIx64 " sp=%016" PRIx64 "\n",
+ ++count, name, env->error_code, env->pc, env->ir[IR_SP]);
}
- return ret;
-}
+ env->exception_index = -1;
-int cpu_alpha_mtpr (CPUState *env, int iprn, uint64_t val, uint64_t *oldvalp)
-{
- uint64_t hwpcb, tmp64;
- uint8_t tmp8;
- int ret = 0;
-
- hwpcb = env->ipr[IPR_PCBB];
- switch (iprn) {
- case IPR_ASN:
- /* Read-only */
- ret = -1;
+#if !defined(CONFIG_USER_ONLY)
+ switch (i) {
+ case EXCP_RESET:
+ i = 0x0000;
break;
- case IPR_ASTEN:
- tmp8 = ((int8_t)(env->ipr[IPR_ASTEN] << 4)) >> 4;
- *oldvalp = tmp8;
- tmp8 &= val & 0xF;
- tmp8 |= (val >> 4) & 0xF;
- env->ipr[IPR_ASTEN] &= ~0xF;
- env->ipr[IPR_ASTEN] |= tmp8;
- ret = 1;
+ case EXCP_MCHK:
+ i = 0x0080;
break;
- case IPR_ASTSR:
- tmp8 = ((int8_t)(env->ipr[IPR_ASTSR] << 4)) >> 4;
- *oldvalp = tmp8;
- tmp8 &= val & 0xF;
- tmp8 |= (val >> 4) & 0xF;
- env->ipr[IPR_ASTSR] &= ~0xF;
- env->ipr[IPR_ASTSR] |= tmp8;
- ret = 1;
- case IPR_DATFX:
- env->ipr[IPR_DATFX] &= ~0x1;
- env->ipr[IPR_DATFX] |= val & 1;
- tmp64 = ldq_raw(hwpcb + 56);
- tmp64 &= ~0x8000000000000000ULL;
- tmp64 |= (val & 1) << 63;
- stq_raw(hwpcb + 56, tmp64);
+ case EXCP_SMP_INTERRUPT:
+ i = 0x0100;
break;
- case IPR_ESP:
- if (env->features & FEATURE_SPS)
- env->ipr[IPR_ESP] = val;
- else
- stq_raw(hwpcb + 8, val);
+ case EXCP_CLK_INTERRUPT:
+ i = 0x0180;
break;
- case IPR_FEN:
- env->ipr[IPR_FEN] = val & 1;
- tmp64 = ldq_raw(hwpcb + 56);
- tmp64 &= ~1;
- tmp64 |= val & 1;
- stq_raw(hwpcb + 56, tmp64);
+ case EXCP_DEV_INTERRUPT:
+ i = 0x0200;
break;
- case IPR_IPIR:
- /* XXX: TODO: Send IRQ to CPU #ir[16] */
+ case EXCP_MMFAULT:
+ i = 0x0280;
break;
- case IPR_IPL:
- *oldvalp = ((int64_t)(env->ipr[IPR_IPL] << 59)) >> 59;
- env->ipr[IPR_IPL] &= ~0x1F;
- env->ipr[IPR_IPL] |= val & 0x1F;
- /* XXX: may issue an interrupt or ASR _now_ */
- ret = 1;
+ case EXCP_UNALIGN:
+ i = 0x0300;
break;
- case IPR_KSP:
- if (!(env->ipr[IPR_EXC_ADDR] & 1)) {
- ret = -1;
- } else {
- if (env->features & FEATURE_SPS)
- env->ipr[IPR_KSP] = val;
- else
- stq_raw(hwpcb + 0, val);
- }
+ case EXCP_OPCDEC:
+ i = 0x0380;
break;
- case IPR_MCES:
- env->ipr[IPR_MCES] &= ~((val & 0x7) | 0x18);
- env->ipr[IPR_MCES] |= val & 0x18;
+ case EXCP_ARITH:
+ i = 0x0400;
break;
- case IPR_PERFMON:
- /* Implementation specific */
- *oldvalp = 0;
- ret = 1;
+ case EXCP_FEN:
+ i = 0x0480;
break;
- case IPR_PCBB:
- /* Read-only */
- ret = -1;
- break;
- case IPR_PRBR:
- env->ipr[IPR_PRBR] = val;
- break;
- case IPR_PTBR:
- /* Read-only */
- ret = -1;
- break;
- case IPR_SCBB:
- env->ipr[IPR_SCBB] = (uint32_t)val;
- break;
- case IPR_SIRR:
- if (val & 0xF) {
- env->ipr[IPR_SISR] |= 1 << (val & 0xF);
- /* XXX: request a software interrupt _now_ */
+ case EXCP_CALL_PAL:
+ i = env->error_code;
+ /* There are 64 entry points for both privileged and unprivileged,
+ with bit 0x80 indicating unprivileged. Each entry point gets
+ 64 bytes to do its job. */
+ if (i & 0x80) {
+ i = 0x2000 + (i - 0x80) * 64;
+ } else {
+ i = 0x1000 + i * 64;
}
break;
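For illustration, plugging concrete numbers into the dispatch above (PAL call numbers per the OSF/1 convention this emulation follows): callsys is CALL_PAL 0x83, so bit 0x80 is set and the entry offset works out to 0x2000 + (0x83 - 0x80) * 64 = 0x20c0, while a privileged call N (N < 0x40) that is not handled inline lands at 0x1000 + N * 64. Either way the PC then becomes palbr plus that offset, as set a few lines below.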
- case IPR_SISR:
- /* Read-only */
- ret = -1;
- break;
- case IPR_SSP:
- if (env->features & FEATURE_SPS)
- env->ipr[IPR_SSP] = val;
- else
- stq_raw(hwpcb + 16, val);
- break;
- case IPR_SYSPTBR:
- if (env->features & FEATURE_VIRBND)
- env->ipr[IPR_SYSPTBR] = val;
- else
- ret = -1;
- break;
- case IPR_TBCHK:
- /* Read-only */
- ret = -1;
- break;
- case IPR_TBIA:
- tlb_flush(env, 1);
- break;
- case IPR_TBIAP:
- tlb_flush(env, 1);
- break;
- case IPR_TBIS:
- tlb_flush_page(env, val);
- break;
- case IPR_TBISD:
- tlb_flush_page(env, val);
- break;
- case IPR_TBISI:
- tlb_flush_page(env, val);
- break;
- case IPR_USP:
- if (env->features & FEATURE_SPS)
- env->ipr[IPR_USP] = val;
- else
- stq_raw(hwpcb + 24, val);
- break;
- case IPR_VIRBND:
- if (env->features & FEATURE_VIRBND)
- env->ipr[IPR_VIRBND] = val;
- else
- ret = -1;
- break;
- case IPR_VPTB:
- env->ipr[IPR_VPTB] = val;
- break;
- case IPR_WHAMI:
- /* Read-only */
- ret = -1;
- break;
default:
- /* Invalid */
- ret = -1;
- break;
+ cpu_abort(env, "Unhandled CPU exception");
}
- return ret;
-}
+ /* Remember where the exception happened. Emulate real hardware in
+ that the low bit of the PC indicates PALmode. */
+ env->exc_addr = env->pc | env->pal_mode;
-void do_interrupt (CPUState *env)
-{
- int excp;
+ /* Continue execution at the PALcode entry point. */
+ env->pc = env->palbr + i;
- env->ipr[IPR_EXC_ADDR] = env->pc | 1;
- excp = env->exception_index;
- env->exception_index = -1;
- env->error_code = 0;
- /* XXX: disable interrupts and memory mapping */
- if (env->ipr[IPR_PAL_BASE] != -1ULL) {
- /* We use native PALcode */
- env->pc = env->ipr[IPR_PAL_BASE] + excp;
- } else {
- /* We use emulated PALcode */
- call_pal(env);
- /* Emulate REI */
- env->pc = env->ipr[IPR_EXC_ADDR] & ~7;
- env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
- /* XXX: re-enable interrupts and memory mapping */
+ /* Switch to PALmode. */
+ if (!env->pal_mode) {
+ env->pal_mode = 1;
+ swap_shadow_regs(env);
}
+#endif /* !USER_ONLY */
}
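Reading this together with helper_hw_ret further down in the patch: a trap taken in native (non-PAL) code stores exc_addr with bit 0 clear, so when the emulation PALcode later executes hw_ret(exc_addr) the (a & 1) == 0 test fires, PALmode is left, the shadow registers are swapped back, and execution resumes at exc_addr & ~3; a trap taken while already in PALmode stores bit 0 set, and the same hw_ret stays in PALmode.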
-#endif
void cpu_dump_state (CPUState *env, FILE *f, fprintf_function cpu_fprintf,
int flags)
@@ -548,7 +465,7 @@ void cpu_dump_state (CPUState *env, FILE *f, fprintf_function cpu_fprintf,
};
int i;
- cpu_fprintf(f, " PC " TARGET_FMT_lx " PS " TARGET_FMT_lx "\n",
+ cpu_fprintf(f, " PC " TARGET_FMT_lx " PS %02x\n",
env->pc, env->ps);
for (i = 0; i < 31; i++) {
cpu_fprintf(f, "IR%02d %s " TARGET_FMT_lx " ", i,
diff --git a/target-alpha/helper.h b/target-alpha/helper.h
index ccf6a2aae9..2dec57e44b 100644
--- a/target-alpha/helper.h
+++ b/target-alpha/helper.h
@@ -100,27 +100,19 @@ DEF_HELPER_1(ieee_input_cmp, i64, i64)
DEF_HELPER_1(ieee_input_s, i64, i64)
#if !defined (CONFIG_USER_ONLY)
-DEF_HELPER_0(hw_rei, void)
DEF_HELPER_1(hw_ret, void, i64)
-DEF_HELPER_2(mfpr, i64, int, i64)
-DEF_HELPER_2(mtpr, void, int, i64)
-DEF_HELPER_0(set_alt_mode, void)
-DEF_HELPER_0(restore_mode, void)
-
-DEF_HELPER_1(ld_virt_to_phys, i64, i64)
-DEF_HELPER_1(st_virt_to_phys, i64, i64)
-DEF_HELPER_2(ldl_raw, void, i64, i64)
-DEF_HELPER_2(ldq_raw, void, i64, i64)
-DEF_HELPER_2(ldl_l_raw, void, i64, i64)
-DEF_HELPER_2(ldq_l_raw, void, i64, i64)
-DEF_HELPER_2(ldl_kernel, void, i64, i64)
-DEF_HELPER_2(ldq_kernel, void, i64, i64)
-DEF_HELPER_2(ldl_data, void, i64, i64)
-DEF_HELPER_2(ldq_data, void, i64, i64)
-DEF_HELPER_2(stl_raw, void, i64, i64)
-DEF_HELPER_2(stq_raw, void, i64, i64)
-DEF_HELPER_2(stl_c_raw, i64, i64, i64)
-DEF_HELPER_2(stq_c_raw, i64, i64, i64)
+
+DEF_HELPER_1(ldl_phys, i64, i64)
+DEF_HELPER_1(ldq_phys, i64, i64)
+DEF_HELPER_1(ldl_l_phys, i64, i64)
+DEF_HELPER_1(ldq_l_phys, i64, i64)
+DEF_HELPER_2(stl_phys, void, i64, i64)
+DEF_HELPER_2(stq_phys, void, i64, i64)
+DEF_HELPER_2(stl_c_phys, i64, i64, i64)
+DEF_HELPER_2(stq_c_phys, i64, i64, i64)
+
+DEF_HELPER_FLAGS_0(tbia, TCG_CALL_CONST, void)
+DEF_HELPER_FLAGS_1(tbis, TCG_CALL_CONST, void, i64)
#endif
#include "def-helper.h"
diff --git a/target-alpha/machine.c b/target-alpha/machine.c
new file mode 100644
index 0000000000..76d70d9b35
--- /dev/null
+++ b/target-alpha/machine.c
@@ -0,0 +1,87 @@
+#include "hw/hw.h"
+#include "hw/boards.h"
+
+static int get_fpcr(QEMUFile *f, void *opaque, size_t size)
+{
+ CPUAlphaState *env = opaque;
+ cpu_alpha_store_fpcr(env, qemu_get_be64(f));
+ return 0;
+}
+
+static void put_fpcr(QEMUFile *f, void *opaque, size_t size)
+{
+ CPUAlphaState *env = opaque;
+ qemu_put_be64(f, cpu_alpha_load_fpcr(env));
+}
+
+static const VMStateInfo vmstate_fpcr = {
+ .name = "fpcr",
+ .get = get_fpcr,
+ .put = put_fpcr,
+};
+
+static VMStateField vmstate_cpu_fields[] = {
+ VMSTATE_UINTTL_ARRAY(ir, CPUState, 31),
+ VMSTATE_UINTTL_ARRAY(fir, CPUState, 31),
+ /* Save the architecture value of the fpcr, not the internally
+ expanded version. Since this architecture value does not
+ exist in memory to be stored, this requires a bit of hoop
+ exist in memory to be stored, this requires a bit of hoop
+ jumping. We want OFFSET=0 so that we effectively pass ENV
+ to the helper functions, and we need to fill in the name by
+ hand since there's no field of that name. */
+ {
+ .name = "fpcr",
+ .version_id = 0,
+ .size = sizeof(uint64_t),
+ .info = &vmstate_fpcr,
+ .flags = VMS_SINGLE,
+ .offset = 0
+ },
+ VMSTATE_UINTTL(pc, CPUState),
+ VMSTATE_UINTTL(unique, CPUState),
+ VMSTATE_UINTTL(lock_addr, CPUState),
+ VMSTATE_UINTTL(lock_value, CPUState),
+ /* Note that lock_st_addr is not saved; it is a temporary
+ used during the execution of the st[lq]_c insns. */
+
+ VMSTATE_UINT8(ps, CPUState),
+ VMSTATE_UINT8(intr_flag, CPUState),
+ VMSTATE_UINT8(pal_mode, CPUState),
+ VMSTATE_UINT8(fen, CPUState),
+
+ VMSTATE_UINT32(pcc_ofs, CPUState),
+
+ VMSTATE_UINTTL(trap_arg0, CPUState),
+ VMSTATE_UINTTL(trap_arg1, CPUState),
+ VMSTATE_UINTTL(trap_arg2, CPUState),
+
+ VMSTATE_UINTTL(exc_addr, CPUState),
+ VMSTATE_UINTTL(palbr, CPUState),
+ VMSTATE_UINTTL(ptbr, CPUState),
+ VMSTATE_UINTTL(vptptr, CPUState),
+ VMSTATE_UINTTL(sysval, CPUState),
+ VMSTATE_UINTTL(usp, CPUState),
+
+ VMSTATE_UINTTL_ARRAY(shadow, CPUState, 8),
+ VMSTATE_UINTTL_ARRAY(scratch, CPUState, 24),
+
+ VMSTATE_END_OF_LIST()
+};
+
+static const VMStateDescription vmstate_cpu = {
+ .name = "cpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = vmstate_cpu_fields,
+};
+
+void cpu_save(QEMUFile *f, void *opaque)
+{
+ vmstate_save_state(f, &vmstate_cpu, opaque);
+}
+
+int cpu_load(QEMUFile *f, void *opaque, int version_id)
+{
+ return vmstate_load_state(f, &vmstate_cpu, opaque, version_id);
+}
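Because the hand-written fpcr field uses .offset = 0 with VMS_SINGLE, vmstate computes the field address as opaque + 0, i.e. the CPUAlphaState pointer itself, which is exactly what get_fpcr/put_fpcr expect. A rough open-coded sketch (not part of the patch) of one save/load round trip, using only the helpers named above:

    uint64_t arch_fpcr = cpu_alpha_load_fpcr(env);   /* collapse the expanded internal form */
    qemu_put_be64(f, arch_fpcr);                     /* written to the migration stream */
    /* ... and on the destination ... */
    cpu_alpha_store_fpcr(env, qemu_get_be64(f));     /* re-expand into env */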
diff --git a/target-alpha/op_helper.c b/target-alpha/op_helper.c
index 4ccb10b0f4..d33271958f 100644
--- a/target-alpha/op_helper.c
+++ b/target-alpha/op_helper.c
@@ -25,17 +25,57 @@
/*****************************************************************************/
/* Exceptions processing helpers */
-void QEMU_NORETURN helper_excp (int excp, int error)
+
+/* This should only be called from translate, via gen_excp.
+ We expect that ENV->PC has already been updated. */
+void QEMU_NORETURN helper_excp(int excp, int error)
{
env->exception_index = excp;
env->error_code = error;
cpu_loop_exit();
}
+static void do_restore_state(void *retaddr)
+{
+ unsigned long pc = (unsigned long)retaddr;
+
+ if (pc) {
+ TranslationBlock *tb = tb_find_pc(pc);
+ if (tb) {
+ cpu_restore_state(tb, env, pc);
+ }
+ }
+}
+
+/* This may be called from any of the helpers to set up EXCEPTION_INDEX. */
+static void QEMU_NORETURN dynamic_excp(int excp, int error)
+{
+ env->exception_index = excp;
+ env->error_code = error;
+ do_restore_state(GETPC());
+ cpu_loop_exit();
+}
+
+static void QEMU_NORETURN arith_excp(int exc, uint64_t mask)
+{
+ env->trap_arg0 = exc;
+ env->trap_arg1 = mask;
+ dynamic_excp(EXCP_ARITH, 0);
+}
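Tracing one arithmetic trap through the new path, taking helper_addqv below as the example: on signed overflow it calls arith_excp(EXC_M_IOV, 0), which leaves EXC_M_IOV in trap_arg0 and 0 in trap_arg1, restores the translation state and raises EXCP_ARITH; do_interrupt in helper.c above then records exc_addr and enters the PALcode at palbr + 0x0400, with the trap arguments visible to it through HW_MFPR register numbers 3..5.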
+
uint64_t helper_load_pcc (void)
{
- /* ??? This isn't a timer for which we have any rate info. */
+#ifndef CONFIG_USER_ONLY
+ /* In system mode we have access to a decent high-resolution clock.
+ In order to make OS-level time accounting work with the RPCC,
+ present it with a well-timed clock fixed at 250MHz. */
+ return (((uint64_t)env->pcc_ofs << 32)
+ | (uint32_t)(qemu_get_clock_ns(vm_clock) >> 2));
+#else
+ /* In user-mode, vm_clock doesn't exist. Just pass through the host cpu
+ clock ticks. Also, don't bother taking PCC_OFS into account. */
return (uint32_t)cpu_get_real_ticks();
+#endif
}
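The arithmetic behind the fixed 250MHz: qemu_get_clock_ns(vm_clock) counts nanoseconds, so shifting it right by 2 yields one count per 4 ns, i.e. 2.5e8 counts per second = 250 MHz, with pcc_ofs supplying bits 63:32 of the returned RPCC value.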
uint64_t helper_load_fpcr (void)
@@ -53,7 +93,7 @@ uint64_t helper_addqv (uint64_t op1, uint64_t op2)
uint64_t tmp = op1;
op1 += op2;
if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
- helper_excp(EXCP_ARITH, EXC_M_IOV);
+ arith_excp(EXC_M_IOV, 0);
}
return op1;
}
@@ -63,7 +103,7 @@ uint64_t helper_addlv (uint64_t op1, uint64_t op2)
uint64_t tmp = op1;
op1 = (uint32_t)(op1 + op2);
if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
- helper_excp(EXCP_ARITH, EXC_M_IOV);
+ arith_excp(EXC_M_IOV, 0);
}
return op1;
}
@@ -73,7 +113,7 @@ uint64_t helper_subqv (uint64_t op1, uint64_t op2)
uint64_t res;
res = op1 - op2;
if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) {
- helper_excp(EXCP_ARITH, EXC_M_IOV);
+ arith_excp(EXC_M_IOV, 0);
}
return res;
}
@@ -83,7 +123,7 @@ uint64_t helper_sublv (uint64_t op1, uint64_t op2)
uint32_t res;
res = op1 - op2;
if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) {
- helper_excp(EXCP_ARITH, EXC_M_IOV);
+ arith_excp(EXC_M_IOV, 0);
}
return res;
}
@@ -93,7 +133,7 @@ uint64_t helper_mullv (uint64_t op1, uint64_t op2)
int64_t res = (int64_t)op1 * (int64_t)op2;
if (unlikely((int32_t)res != res)) {
- helper_excp(EXCP_ARITH, EXC_M_IOV);
+ arith_excp(EXC_M_IOV, 0);
}
return (int64_t)((int32_t)res);
}
@@ -105,7 +145,7 @@ uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
muls64(&tl, &th, op1, op2);
/* If th != 0 && th != -1, then we had an overflow */
if (unlikely((th + 1) > 1)) {
- helper_excp(EXCP_ARITH, EXC_M_IOV);
+ arith_excp(EXC_M_IOV, 0);
}
return tl;
}
@@ -373,8 +413,6 @@ void helper_fp_exc_raise(uint32_t exc, uint32_t regno)
if (exc) {
uint32_t hw_exc = 0;
- env->ipr[IPR_EXC_MASK] |= 1ull << regno;
-
if (exc & float_flag_invalid) {
hw_exc |= EXC_M_INV;
}
@@ -390,7 +428,8 @@ void helper_fp_exc_raise(uint32_t exc, uint32_t regno)
if (exc & float_flag_inexact) {
hw_exc |= EXC_M_INE;
}
- helper_excp(EXCP_ARITH, hw_exc);
+
+ arith_excp(hw_exc, 1ull << regno);
}
}
@@ -420,7 +459,7 @@ uint64_t helper_ieee_input(uint64_t val)
if (env->fpcr_dnz) {
val &= 1ull << 63;
} else {
- helper_excp(EXCP_ARITH, EXC_M_UNF);
+ arith_excp(EXC_M_UNF, 0);
}
}
} else if (exp == 0x7ff) {
@@ -428,7 +467,7 @@ uint64_t helper_ieee_input(uint64_t val)
/* ??? I'm not sure these exception bit flags are correct. I do
know that the Linux kernel, at least, doesn't rely on them and
just emulates the insn to figure out what exception to use. */
- helper_excp(EXCP_ARITH, frac ? EXC_M_INV : EXC_M_FOV);
+ arith_excp(frac ? EXC_M_INV : EXC_M_FOV, 0);
}
return val;
}
@@ -445,12 +484,12 @@ uint64_t helper_ieee_input_cmp(uint64_t val)
if (env->fpcr_dnz) {
val &= 1ull << 63;
} else {
- helper_excp(EXCP_ARITH, EXC_M_UNF);
+ arith_excp(EXC_M_UNF, 0);
}
}
} else if (exp == 0x7ff && frac) {
/* NaN. */
- helper_excp(EXCP_ARITH, EXC_M_INV);
+ arith_excp(EXC_M_INV, 0);
}
return val;
}
@@ -513,7 +552,7 @@ static inline float32 f_to_float32(uint64_t a)
if (unlikely(!exp && mant_sig)) {
/* Reserved operands / Dirty zero */
- helper_excp(EXCP_OPCDEC, 0);
+ dynamic_excp(EXCP_OPCDEC, 0);
}
if (exp < 3) {
@@ -643,7 +682,7 @@ static inline float64 g_to_float64(uint64_t a)
if (!exp && mant_sig) {
/* Reserved operands / Dirty zero */
- helper_excp(EXCP_OPCDEC, 0);
+ dynamic_excp(EXCP_OPCDEC, 0);
}
if (exp < 3) {
@@ -1156,187 +1195,122 @@ uint64_t helper_cvtqg (uint64_t a)
/* PALcode support special instructions */
#if !defined (CONFIG_USER_ONLY)
-void helper_hw_rei (void)
-{
- env->pc = env->ipr[IPR_EXC_ADDR] & ~3;
- env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
- env->intr_flag = 0;
- env->lock_addr = -1;
- /* XXX: re-enable interrupts and memory mapping */
-}
-
void helper_hw_ret (uint64_t a)
{
env->pc = a & ~3;
- env->ipr[IPR_EXC_ADDR] = a & 1;
env->intr_flag = 0;
env->lock_addr = -1;
- /* XXX: re-enable interrupts and memory mapping */
-}
-
-uint64_t helper_mfpr (int iprn, uint64_t val)
-{
- uint64_t tmp;
-
- if (cpu_alpha_mfpr(env, iprn, &tmp) == 0)
- val = tmp;
-
- return val;
-}
-
-void helper_mtpr (int iprn, uint64_t val)
-{
- cpu_alpha_mtpr(env, iprn, val, NULL);
+ if ((a & 1) == 0) {
+ env->pal_mode = 0;
+ swap_shadow_regs(env);
+ }
}
-void helper_set_alt_mode (void)
+void helper_tbia(void)
{
- env->saved_mode = env->ps & 0xC;
- env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC);
+ tlb_flush(env, 1);
}
-void helper_restore_mode (void)
+void helper_tbis(uint64_t p)
{
- env->ps = (env->ps & ~0xC) | env->saved_mode;
+ tlb_flush_page(env, p);
}
-
#endif
/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)
-
-/* XXX: the two following helpers are pure hacks.
- * Hopefully, we emulate the PALcode, then we should never see
- * HW_LD / HW_ST instructions.
- */
-uint64_t helper_ld_virt_to_phys (uint64_t virtaddr)
-{
- uint64_t tlb_addr, physaddr;
- int index, mmu_idx;
- void *retaddr;
-
- mmu_idx = cpu_mmu_index(env);
- index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- redo:
- tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
- if ((virtaddr & TARGET_PAGE_MASK) ==
- (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
- } else {
- /* the page is not in the TLB : fill it */
- retaddr = GETPC();
- tlb_fill(virtaddr, 0, mmu_idx, retaddr);
- goto redo;
- }
- return physaddr;
-}
-
-uint64_t helper_st_virt_to_phys (uint64_t virtaddr)
+uint64_t helper_ldl_phys(uint64_t p)
{
- uint64_t tlb_addr, physaddr;
- int index, mmu_idx;
- void *retaddr;
-
- mmu_idx = cpu_mmu_index(env);
- index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- redo:
- tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
- if ((virtaddr & TARGET_PAGE_MASK) ==
- (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
- } else {
- /* the page is not in the TLB : fill it */
- retaddr = GETPC();
- tlb_fill(virtaddr, 1, mmu_idx, retaddr);
- goto redo;
- }
- return physaddr;
+ return (int32_t)ldl_phys(p);
}
-void helper_ldl_raw(uint64_t t0, uint64_t t1)
+uint64_t helper_ldq_phys(uint64_t p)
{
- ldl_raw(t1, t0);
+ return ldq_phys(p);
}
-void helper_ldq_raw(uint64_t t0, uint64_t t1)
+uint64_t helper_ldl_l_phys(uint64_t p)
{
- ldq_raw(t1, t0);
+ env->lock_addr = p;
+ return env->lock_value = (int32_t)ldl_phys(p);
}
-void helper_ldl_l_raw(uint64_t t0, uint64_t t1)
+uint64_t helper_ldq_l_phys(uint64_t p)
{
- env->lock = t1;
- ldl_raw(t1, t0);
+ env->lock_addr = p;
+ return env->lock_value = ldq_phys(p);
}
-void helper_ldq_l_raw(uint64_t t0, uint64_t t1)
+void helper_stl_phys(uint64_t p, uint64_t v)
{
- env->lock = t1;
- ldl_raw(t1, t0);
+ stl_phys(p, v);
}
-void helper_ldl_kernel(uint64_t t0, uint64_t t1)
+void helper_stq_phys(uint64_t p, uint64_t v)
{
- ldl_kernel(t1, t0);
+ stq_phys(p, v);
}
-void helper_ldq_kernel(uint64_t t0, uint64_t t1)
+uint64_t helper_stl_c_phys(uint64_t p, uint64_t v)
{
- ldq_kernel(t1, t0);
-}
+ uint64_t ret = 0;
-void helper_ldl_data(uint64_t t0, uint64_t t1)
-{
- ldl_data(t1, t0);
-}
+ if (p == env->lock_addr) {
+ int32_t old = ldl_phys(p);
+ if (old == (int32_t)env->lock_value) {
+ stl_phys(p, v);
+ ret = 1;
+ }
+ }
+ env->lock_addr = -1;
-void helper_ldq_data(uint64_t t0, uint64_t t1)
-{
- ldq_data(t1, t0);
+ return ret;
}
-void helper_stl_raw(uint64_t t0, uint64_t t1)
+uint64_t helper_stq_c_phys(uint64_t p, uint64_t v)
{
- stl_raw(t1, t0);
-}
+ uint64_t ret = 0;
-void helper_stq_raw(uint64_t t0, uint64_t t1)
-{
- stq_raw(t1, t0);
+ if (p == env->lock_addr) {
+ uint64_t old = ldq_phys(p);
+ if (old == env->lock_value) {
+ stq_phys(p, v);
+ ret = 1;
+ }
+ }
+ env->lock_addr = -1;
+
+ return ret;
}
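The *_l_phys/*_c_phys pairs compose the same way the architectural LDx_L/STx_C sequence does: the load records lock_addr and lock_value, and the conditional store succeeds only if the address still matches and the memory word is unchanged, clearing the lock either way. A minimal retry loop over these helpers, purely as an illustration of that contract (not something the patch adds):

    static void atomic_inc_phys(uint64_t p)
    {
        uint64_t v;
        do {
            v = helper_ldq_l_phys(p) + 1;    /* records lock_addr/lock_value */
        } while (!helper_stq_c_phys(p, v));  /* 1 on success, 0 on interference */
    }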
-uint64_t helper_stl_c_raw(uint64_t t0, uint64_t t1)
+static void QEMU_NORETURN do_unaligned_access(target_ulong addr, int is_write,
+ int is_user, void *retaddr)
{
- uint64_t ret;
+ uint64_t pc;
+ uint32_t insn;
- if (t1 == env->lock) {
- stl_raw(t1, t0);
- ret = 0;
- } else
- ret = 1;
+ do_restore_state(retaddr);
- env->lock = 1;
+ pc = env->pc;
+ insn = ldl_code(pc);
- return ret;
+ env->trap_arg0 = addr;
+ env->trap_arg1 = insn >> 26; /* opcode */
+ env->trap_arg2 = (insn >> 21) & 31; /* dest regno */
+ helper_excp(EXCP_UNALIGN, 0);
}
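For example, an unaligned ldq $1, 0($2) faulting at address A arrives here with trap_arg0 = A, trap_arg1 = 0x29 (the LDQ opcode from insn bits 31:26) and trap_arg2 = 1 (the destination register from bits 25:21), which is what the emulation PALcode needs in order to fix up or report the access.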
-uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1)
+void QEMU_NORETURN do_unassigned_access(target_phys_addr_t addr, int is_write,
+ int is_exec, int unused, int size)
{
- uint64_t ret;
-
- if (t1 == env->lock) {
- stq_raw(t1, t0);
- ret = 0;
- } else
- ret = 1;
-
- env->lock = 1;
-
- return ret;
+ env->trap_arg0 = addr;
+ env->trap_arg1 = is_write;
+ dynamic_excp(EXCP_MCHK, 0);
}
#define MMUSUFFIX _mmu
+#define ALIGNED_ONLY
#define SHIFT 0
#include "softmmu_template.h"
@@ -1356,9 +1330,7 @@ uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1)
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
- TranslationBlock *tb;
CPUState *saved_env;
- unsigned long pc;
int ret;
/* XXX: hack to restore env in all cases, even if not called from
@@ -1366,21 +1338,11 @@ void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
saved_env = env;
env = cpu_single_env;
ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
- if (!likely(ret == 0)) {
- if (likely(retaddr)) {
- /* now we have a real cpu fault */
- pc = (unsigned long)retaddr;
- tb = tb_find_pc(pc);
- if (likely(tb)) {
- /* the PC is inside the translated code. It means that we have
- a virtual CPU fault */
- cpu_restore_state(tb, env, pc);
- }
- }
+ if (unlikely(ret != 0)) {
+ do_restore_state(retaddr);
/* Exception index and error code are already set */
cpu_loop_exit();
}
env = saved_env;
}
-
#endif
diff --git a/target-alpha/translate.c b/target-alpha/translate.c
index 456ba51ac6..ad6c2ca448 100644
--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
@@ -47,10 +47,6 @@ struct DisasContext {
CPUAlphaState *env;
uint64_t pc;
int mem_idx;
-#if !defined (CONFIG_USER_ONLY)
- int pal_mode;
-#endif
- uint32_t amask;
/* Current rounding mode for this TB. */
int tb_rm;
@@ -89,8 +85,10 @@ static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
-#ifdef CONFIG_USER_ONLY
-static TCGv cpu_uniq;
+static TCGv cpu_unique;
+#ifndef CONFIG_USER_ONLY
+static TCGv cpu_sysval;
+static TCGv cpu_usp;
#endif
/* register names */
@@ -135,9 +133,13 @@ static void alpha_translate_init(void)
offsetof(CPUState, lock_value),
"lock_value");
-#ifdef CONFIG_USER_ONLY
- cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, unique), "uniq");
+ cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
+ offsetof(CPUState, unique), "unique");
+#ifndef CONFIG_USER_ONLY
+ cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
+ offsetof(CPUState, sysval), "sysval");
+ cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
+ offsetof(CPUState, usp), "usp");
#endif
/* register helpers */
@@ -147,17 +149,21 @@ static void alpha_translate_init(void)
done_init = 1;
}
-static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
+static void gen_excp_1(int exception, int error_code)
{
TCGv_i32 tmp1, tmp2;
- tcg_gen_movi_i64(cpu_pc, ctx->pc);
tmp1 = tcg_const_i32(exception);
tmp2 = tcg_const_i32(error_code);
gen_helper_excp(tmp1, tmp2);
tcg_temp_free_i32(tmp2);
tcg_temp_free_i32(tmp1);
+}
+static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
+{
+ tcg_gen_movi_i64(cpu_pc, ctx->pc);
+ gen_excp_1(exception, error_code);
return EXIT_NORETURN;
}
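The split between gen_excp_1 and gen_excp matters at the end of a TB: gen_excp stores ctx->pc before raising, which is what in-line exception sites such as gen_excp(ctx, EXCP_CALL_PAL, ...) need, while the single-step path further down can call gen_excp_1(EXCP_DEBUG, 0) directly because cpu_pc has already been written on the EXIT_PC_STALE/EXIT_PC_UPDATED paths.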
@@ -322,7 +328,7 @@ static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
#if defined(CONFIG_USER_ONLY)
addr = cpu_lock_st_addr;
#else
- addr = tcg_local_new();
+ addr = tcg_temp_local_new();
#endif
if (rb != 31) {
@@ -345,7 +351,7 @@ static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
lab_fail = gen_new_label();
lab_done = gen_new_label();
- tcg_gen_brcond(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
+ tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
val = tcg_temp_new();
if (quad) {
@@ -353,7 +359,7 @@ static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
} else {
tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
}
- tcg_gen_brcond(TCG_COND_NE, val, cpu_lock_value, lab_fail);
+ tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);
if (quad) {
tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
@@ -1464,6 +1470,194 @@ static void gen_rx(int ra, int set)
tcg_temp_free_i32(tmp);
}
+static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
+{
+ /* We're emulating OSF/1 PALcode. Many of these are trivial access
+ to internal cpu registers. */
+
+ /* Unprivileged PAL call */
+ if (palcode >= 0x80 && palcode < 0xC0) {
+ switch (palcode) {
+ case 0x86:
+ /* IMB */
+ /* No-op inside QEMU. */
+ break;
+ case 0x9E:
+ /* RDUNIQUE */
+ tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
+ break;
+ case 0x9F:
+ /* WRUNIQUE */
+ tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
+ break;
+ default:
+ return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
+ }
+ return NO_EXIT;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ /* Privileged PAL code */
+ if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
+ switch (palcode) {
+ case 0x01:
+ /* CFLUSH */
+ /* No-op inside QEMU. */
+ break;
+ case 0x02:
+ /* DRAINA */
+ /* No-op inside QEMU. */
+ break;
+ case 0x2D:
+ /* WRVPTPTR */
+ tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUState, vptptr));
+ break;
+ case 0x31:
+ /* WRVAL */
+ tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
+ break;
+ case 0x32:
+ /* RDVAL */
+ tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
+ break;
+
+ case 0x35: {
+ /* SWPIPL */
+ TCGv tmp;
+
+ /* Note that we already know we're in kernel mode, so we know
+ that PS only contains the 3 IPL bits. */
+ tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));
+
+ /* But make sure to store only the 3 IPL bits from the user. */
+ tmp = tcg_temp_new();
+ tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
+ tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUState, ps));
+ tcg_temp_free(tmp);
+ break;
+ }
+
+ case 0x36:
+ /* RDPS */
+ tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));
+ break;
+ case 0x38:
+ /* WRUSP */
+ tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
+ break;
+ case 0x3A:
+ /* RDUSP */
+ tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
+ break;
+ case 0x3C:
+ /* WHAMI */
+ tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
+ offsetof(CPUState, cpu_index));
+ break;
+
+ default:
+ return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
+ }
+ return NO_EXIT;
+ }
+#endif
+
+ return gen_invalid(ctx);
+}
+
+#ifndef CONFIG_USER_ONLY
+
+#define PR_BYTE 0x100000
+#define PR_LONG 0x200000
+
+static int cpu_pr_data(int pr)
+{
+ switch (pr) {
+ case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
+ case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
+ case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
+ case 3: return offsetof(CPUAlphaState, trap_arg0);
+ case 4: return offsetof(CPUAlphaState, trap_arg1);
+ case 5: return offsetof(CPUAlphaState, trap_arg2);
+ case 6: return offsetof(CPUAlphaState, exc_addr);
+ case 7: return offsetof(CPUAlphaState, palbr);
+ case 8: return offsetof(CPUAlphaState, ptbr);
+ case 9: return offsetof(CPUAlphaState, vptptr);
+ case 10: return offsetof(CPUAlphaState, unique);
+ case 11: return offsetof(CPUAlphaState, sysval);
+ case 12: return offsetof(CPUAlphaState, usp);
+
+ case 32 ... 39:
+ return offsetof(CPUAlphaState, shadow[pr - 32]);
+ case 40 ... 63:
+ return offsetof(CPUAlphaState, scratch[pr - 40]);
+ }
+ return 0;
+}
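Spelling out a few of the mappings: cpu_pr_data(0) yields offsetof(CPUAlphaState, ps) | PR_BYTE and is accessed as an 8-bit field, cpu_pr_data(2) yields the pcc_ofs offset tagged PR_LONG for a 32-bit access, cpu_pr_data(6) returns the untagged exc_addr offset for a full 64-bit access, and anything unlisted (e.g. regno 13) returns 0 and falls into the read-zero/write-ignore case below. The width tags are simply OR'd into the offset, relying on no field offset in CPUAlphaState reaching 0x100000.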
+
+static void gen_mfpr(int ra, int regno)
+{
+ int data = cpu_pr_data(regno);
+
+ /* In our emulated PALcode, these processor registers have no
+ side effects from reading. */
+ if (ra == 31) {
+ return;
+ }
+
+ /* The basic registers are data only, and unknown registers
+ are read-zero, write-ignore. */
+ if (data == 0) {
+ tcg_gen_movi_i64(cpu_ir[ra], 0);
+ } else if (data & PR_BYTE) {
+ tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
+ } else if (data & PR_LONG) {
+ tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
+ } else {
+ tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
+ }
+}
+
+static void gen_mtpr(int rb, int regno)
+{
+ TCGv tmp;
+
+ if (rb == 31) {
+ tmp = tcg_const_i64(0);
+ } else {
+ tmp = cpu_ir[rb];
+ }
+
+ /* These two register numbers perform a TLB cache flush. Thankfully we
+ can only do this inside PALmode, which means that the current basic
+ block cannot be affected by the change in mappings. */
+ if (regno == 255) {
+ /* TBIA */
+ gen_helper_tbia();
+ } else if (regno == 254) {
+ /* TBIS */
+ gen_helper_tbis(tmp);
+ } else {
+ /* The basic registers are data only, and unknown registers
+ are read-zero, write-ignore. */
+ int data = cpu_pr_data(regno);
+ if (data != 0) {
+ if (data & PR_BYTE) {
+ tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
+ } else if (data & PR_LONG) {
+ tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
+ } else {
+ tcg_gen_st_i64(tmp, cpu_env, data);
+ }
+ }
+ }
+
+ if (rb == 31) {
+ tcg_temp_free(tmp);
+ }
+}
+#endif /* !USER_ONLY*/
+
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
uint32_t palcode;
@@ -1499,32 +1693,8 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
switch (opc) {
case 0x00:
/* CALL_PAL */
-#ifdef CONFIG_USER_ONLY
- if (palcode == 0x9E) {
- /* RDUNIQUE */
- tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
- break;
- } else if (palcode == 0x9F) {
- /* WRUNIQUE */
- tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
- break;
- }
-#endif
- if (palcode >= 0x80 && palcode < 0xC0) {
- /* Unprivileged PAL call */
- ret = gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
- break;
- }
-#ifndef CONFIG_USER_ONLY
- if (palcode < 0x40) {
- /* Privileged PAL code */
- if (ctx->mem_idx & 1)
- goto invalid_opc;
- ret = gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
- }
-#endif
- /* Invalid PAL call */
- goto invalid_opc;
+ ret = gen_call_pal(ctx, palcode);
+ break;
case 0x01:
/* OPC01 */
goto invalid_opc;
@@ -1566,20 +1736,22 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x0A:
/* LDBU */
- if (!(ctx->amask & AMASK_BWX))
- goto invalid_opc;
- gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
- break;
+ if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
+ gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
+ break;
+ }
+ goto invalid_opc;
case 0x0B:
/* LDQ_U */
gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
break;
case 0x0C:
/* LDWU */
- if (!(ctx->amask & AMASK_BWX))
- goto invalid_opc;
- gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
- break;
+ if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
+ gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
+ break;
+ }
+ goto invalid_opc;
case 0x0D:
/* STW */
gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
@@ -1983,20 +2155,12 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
case 0x61:
/* AMASK */
if (likely(rc != 31)) {
- if (islit)
- tcg_gen_movi_i64(cpu_ir[rc], lit);
- else
- tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
- switch (ctx->env->implver) {
- case IMPLVER_2106x:
- /* EV4, EV45, LCA, LCA45 & EV5 */
- break;
- case IMPLVER_21164:
- case IMPLVER_21264:
- case IMPLVER_21364:
- tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
- ~(uint64_t)ctx->amask);
- break;
+ uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
+
+ if (islit) {
+ tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
+ } else {
+ tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
}
}
break;
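A concrete reading of the new AMASK code: the amask value recovered from the TB flags has a bit set for every implemented extension (bit 0 being BWX), and the instruction clears those bits in its operand, so on a BWX-capable CPU amask 1, v0 leaves 0 in v0 (feature present) while on an EV4-class CPU bit 0 survives (feature absent), matching the architectural convention that software tests the result for zero.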
@@ -2210,8 +2374,9 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
switch (fpfn) { /* fn11 & 0x3F */
case 0x04:
/* ITOFS */
- if (!(ctx->amask & AMASK_FIX))
+ if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
goto invalid_opc;
+ }
if (likely(rc != 31)) {
if (ra != 31) {
TCGv_i32 tmp = tcg_temp_new_i32();
@@ -2224,20 +2389,23 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x0A:
/* SQRTF */
- if (!(ctx->amask & AMASK_FIX))
- goto invalid_opc;
- gen_fsqrtf(rb, rc);
- break;
+ if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
+ gen_fsqrtf(rb, rc);
+ break;
+ }
+ goto invalid_opc;
case 0x0B:
/* SQRTS */
- if (!(ctx->amask & AMASK_FIX))
- goto invalid_opc;
- gen_fsqrts(ctx, rb, rc, fn11);
- break;
+ if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
+ gen_fsqrts(ctx, rb, rc, fn11);
+ break;
+ }
+ goto invalid_opc;
case 0x14:
/* ITOFF */
- if (!(ctx->amask & AMASK_FIX))
+ if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
goto invalid_opc;
+ }
if (likely(rc != 31)) {
if (ra != 31) {
TCGv_i32 tmp = tcg_temp_new_i32();
@@ -2250,8 +2418,9 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x24:
/* ITOFT */
- if (!(ctx->amask & AMASK_FIX))
+ if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
goto invalid_opc;
+ }
if (likely(rc != 31)) {
if (ra != 31)
tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
@@ -2261,16 +2430,18 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x2A:
/* SQRTG */
- if (!(ctx->amask & AMASK_FIX))
- goto invalid_opc;
- gen_fsqrtg(rb, rc);
- break;
+ if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
+ gen_fsqrtg(rb, rc);
+ break;
+ }
+ goto invalid_opc;
case 0x02B:
/* SQRTT */
- if (!(ctx->amask & AMASK_FIX))
- goto invalid_opc;
- gen_fsqrtt(ctx, rb, rc, fn11);
- break;
+ if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
+ gen_fsqrtt(ctx, rb, rc, fn11);
+ break;
+ }
+ goto invalid_opc;
default:
goto invalid_opc;
}
@@ -2571,18 +2742,13 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x19:
/* HW_MFPR (PALcode) */
-#if defined (CONFIG_USER_ONLY)
- goto invalid_opc;
-#else
- if (!ctx->pal_mode)
- goto invalid_opc;
- if (ra != 31) {
- TCGv tmp = tcg_const_i32(insn & 0xFF);
- gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
- tcg_temp_free(tmp);
+#ifndef CONFIG_USER_ONLY
+ if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
+ gen_mfpr(ra, insn & 0xffff);
+ break;
}
- break;
#endif
+ goto invalid_opc;
case 0x1A:
/* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
prediction stack action, which of course we don't implement. */
@@ -2598,13 +2764,15 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x1B:
/* HW_LD (PALcode) */
-#if defined (CONFIG_USER_ONLY)
- goto invalid_opc;
-#else
- if (!ctx->pal_mode)
- goto invalid_opc;
- if (ra != 31) {
- TCGv addr = tcg_temp_new();
+#ifndef CONFIG_USER_ONLY
+ if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
+ TCGv addr;
+
+ if (ra == 31) {
+ break;
+ }
+
+ addr = tcg_temp_new();
if (rb != 31)
tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
else
@@ -2612,27 +2780,26 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
switch ((insn >> 12) & 0xF) {
case 0x0:
/* Longword physical access (hw_ldl/p) */
- gen_helper_ldl_raw(cpu_ir[ra], addr);
+ gen_helper_ldl_phys(cpu_ir[ra], addr);
break;
case 0x1:
/* Quadword physical access (hw_ldq/p) */
- gen_helper_ldq_raw(cpu_ir[ra], addr);
+ gen_helper_ldq_phys(cpu_ir[ra], addr);
break;
case 0x2:
/* Longword physical access with lock (hw_ldl_l/p) */
- gen_helper_ldl_l_raw(cpu_ir[ra], addr);
+ gen_helper_ldl_l_phys(cpu_ir[ra], addr);
break;
case 0x3:
/* Quadword physical access with lock (hw_ldq_l/p) */
- gen_helper_ldq_l_raw(cpu_ir[ra], addr);
+ gen_helper_ldq_l_phys(cpu_ir[ra], addr);
break;
case 0x4:
/* Longword virtual PTE fetch (hw_ldl/v) */
- tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
- break;
+ goto invalid_opc;
case 0x5:
/* Quadword virtual PTE fetch (hw_ldq/v) */
- tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
+ goto invalid_opc;
break;
case 0x6:
/* Invalid */
@@ -2642,63 +2809,47 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
goto invalid_opc;
case 0x8:
/* Longword virtual access (hw_ldl) */
- gen_helper_st_virt_to_phys(addr, addr);
- gen_helper_ldl_raw(cpu_ir[ra], addr);
- break;
+ goto invalid_opc;
case 0x9:
/* Quadword virtual access (hw_ldq) */
- gen_helper_st_virt_to_phys(addr, addr);
- gen_helper_ldq_raw(cpu_ir[ra], addr);
- break;
+ goto invalid_opc;
case 0xA:
/* Longword virtual access with protection check (hw_ldl/w) */
- tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
+ tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
break;
case 0xB:
/* Quadword virtual access with protection check (hw_ldq/w) */
- tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
+ tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
break;
case 0xC:
/* Longword virtual access with alt access mode (hw_ldl/a)*/
- gen_helper_set_alt_mode();
- gen_helper_st_virt_to_phys(addr, addr);
- gen_helper_ldl_raw(cpu_ir[ra], addr);
- gen_helper_restore_mode();
- break;
+ goto invalid_opc;
case 0xD:
/* Quadword virtual access with alt access mode (hw_ldq/a) */
- gen_helper_set_alt_mode();
- gen_helper_st_virt_to_phys(addr, addr);
- gen_helper_ldq_raw(cpu_ir[ra], addr);
- gen_helper_restore_mode();
- break;
+ goto invalid_opc;
case 0xE:
/* Longword virtual access with alternate access mode and
- * protection checks (hw_ldl/wa)
- */
- gen_helper_set_alt_mode();
- gen_helper_ldl_data(cpu_ir[ra], addr);
- gen_helper_restore_mode();
+ protection checks (hw_ldl/wa) */
+ tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
break;
case 0xF:
/* Quadword virtual access with alternate access mode and
- * protection checks (hw_ldq/wa)
- */
- gen_helper_set_alt_mode();
- gen_helper_ldq_data(cpu_ir[ra], addr);
- gen_helper_restore_mode();
+ protection checks (hw_ldq/wa) */
+ tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
break;
}
tcg_temp_free(addr);
+ break;
}
- break;
#endif
+ goto invalid_opc;
case 0x1C:
switch (fn7) {
case 0x00:
/* SEXTB */
- if (!(ctx->amask & AMASK_BWX))
+ if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
goto invalid_opc;
+ }
if (likely(rc != 31)) {
if (islit)
tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
@@ -2708,138 +2859,164 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x01:
/* SEXTW */
- if (!(ctx->amask & AMASK_BWX))
- goto invalid_opc;
- if (likely(rc != 31)) {
- if (islit)
- tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
- else
- tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
+ if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
+ if (likely(rc != 31)) {
+ if (islit) {
+ tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
+ } else {
+ tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
+ }
+ }
+ break;
}
- break;
+ goto invalid_opc;
case 0x30:
/* CTPOP */
- if (!(ctx->amask & AMASK_CIX))
- goto invalid_opc;
- if (likely(rc != 31)) {
- if (islit)
- tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
- else
- gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
+ if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
+ if (likely(rc != 31)) {
+ if (islit) {
+ tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
+ } else {
+ gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
+ }
+ }
+ break;
}
- break;
+ goto invalid_opc;
case 0x31:
/* PERR */
- if (!(ctx->amask & AMASK_MVI))
- goto invalid_opc;
- gen_perr(ra, rb, rc, islit, lit);
- break;
+ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+ gen_perr(ra, rb, rc, islit, lit);
+ break;
+ }
+ goto invalid_opc;
case 0x32:
/* CTLZ */
- if (!(ctx->amask & AMASK_CIX))
- goto invalid_opc;
- if (likely(rc != 31)) {
- if (islit)
- tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
- else
- gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
+ if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
+ if (likely(rc != 31)) {
+ if (islit) {
+ tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
+ } else {
+ gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
+ }
+ }
+ break;
}
- break;
+ goto invalid_opc;
case 0x33:
/* CTTZ */
- if (!(ctx->amask & AMASK_CIX))
- goto invalid_opc;
- if (likely(rc != 31)) {
- if (islit)
- tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
- else
- gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
+ if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
+ if (likely(rc != 31)) {
+ if (islit) {
+ tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
+ } else {
+ gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
+ }
+ }
+ break;
}
- break;
+ goto invalid_opc;
case 0x34:
/* UNPKBW */
- if (!(ctx->amask & AMASK_MVI))
- goto invalid_opc;
- if (real_islit || ra != 31)
- goto invalid_opc;
- gen_unpkbw (rb, rc);
- break;
+ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+ if (real_islit || ra != 31) {
+ goto invalid_opc;
+ }
+ gen_unpkbw(rb, rc);
+ break;
+ }
+ goto invalid_opc;
case 0x35:
/* UNPKBL */
- if (!(ctx->amask & AMASK_MVI))
- goto invalid_opc;
- if (real_islit || ra != 31)
- goto invalid_opc;
- gen_unpkbl (rb, rc);
- break;
+ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+ if (real_islit || ra != 31) {
+ goto invalid_opc;
+ }
+ gen_unpkbl(rb, rc);
+ break;
+ }
+ goto invalid_opc;
case 0x36:
/* PKWB */
- if (!(ctx->amask & AMASK_MVI))
- goto invalid_opc;
- if (real_islit || ra != 31)
- goto invalid_opc;
- gen_pkwb (rb, rc);
- break;
+ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+ if (real_islit || ra != 31) {
+ goto invalid_opc;
+ }
+ gen_pkwb(rb, rc);
+ break;
+ }
+ goto invalid_opc;
case 0x37:
/* PKLB */
- if (!(ctx->amask & AMASK_MVI))
- goto invalid_opc;
- if (real_islit || ra != 31)
- goto invalid_opc;
- gen_pklb (rb, rc);
- break;
+ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+ if (real_islit || ra != 31) {
+ goto invalid_opc;
+ }
+ gen_pklb(rb, rc);
+ break;
+ }
+ goto invalid_opc;
case 0x38:
/* MINSB8 */
- if (!(ctx->amask & AMASK_MVI))
- goto invalid_opc;
- gen_minsb8 (ra, rb, rc, islit, lit);
- break;
+ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+ gen_minsb8(ra, rb, rc, islit, lit);
+ break;
+ }
+ goto invalid_opc;
case 0x39:
/* MINSW4 */
- if (!(ctx->amask & AMASK_MVI))
- goto invalid_opc;
- gen_minsw4 (ra, rb, rc, islit, lit);
- break;
+ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+ gen_minsw4(ra, rb, rc, islit, lit);
+ break;
+ }
+ goto invalid_opc;
case 0x3A:
/* MINUB8 */
- if (!(ctx->amask & AMASK_MVI))
- goto invalid_opc;
- gen_minub8 (ra, rb, rc, islit, lit);
- break;
+ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+ gen_minub8(ra, rb, rc, islit, lit);
+ break;
+ }
+ goto invalid_opc;
case 0x3B:
/* MINUW4 */
- if (!(ctx->amask & AMASK_MVI))
- goto invalid_opc;
- gen_minuw4 (ra, rb, rc, islit, lit);
- break;
+ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+ gen_minuw4(ra, rb, rc, islit, lit);
+ break;
+ }
+ goto invalid_opc;
case 0x3C:
/* MAXUB8 */
- if (!(ctx->amask & AMASK_MVI))
- goto invalid_opc;
- gen_maxub8 (ra, rb, rc, islit, lit);
- break;
+ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+ gen_maxub8(ra, rb, rc, islit, lit);
+ break;
+ }
+ goto invalid_opc;
case 0x3D:
/* MAXUW4 */
- if (!(ctx->amask & AMASK_MVI))
- goto invalid_opc;
- gen_maxuw4 (ra, rb, rc, islit, lit);
- break;
+ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+ gen_maxuw4(ra, rb, rc, islit, lit);
+ break;
+ }
+ goto invalid_opc;
case 0x3E:
/* MAXSB8 */
- if (!(ctx->amask & AMASK_MVI))
- goto invalid_opc;
- gen_maxsb8 (ra, rb, rc, islit, lit);
- break;
+ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+ gen_maxsb8(ra, rb, rc, islit, lit);
+ break;
+ }
+ goto invalid_opc;
case 0x3F:
/* MAXSW4 */
- if (!(ctx->amask & AMASK_MVI))
- goto invalid_opc;
- gen_maxsw4 (ra, rb, rc, islit, lit);
- break;
+ if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
+ gen_maxsw4(ra, rb, rc, islit, lit);
+ break;
+ }
+ goto invalid_opc;
case 0x70:
/* FTOIT */
- if (!(ctx->amask & AMASK_FIX))
+ if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
goto invalid_opc;
+ }
if (likely(rc != 31)) {
if (ra != 31)
tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
@@ -2849,8 +3026,9 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x78:
/* FTOIS */
- if (!(ctx->amask & AMASK_FIX))
+ if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
goto invalid_opc;
+ }
if (rc != 31) {
TCGv_i32 tmp1 = tcg_temp_new_i32();
if (ra != 31)
@@ -2870,57 +3048,37 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x1D:
/* HW_MTPR (PALcode) */
-#if defined (CONFIG_USER_ONLY)
- goto invalid_opc;
-#else
- if (!ctx->pal_mode)
- goto invalid_opc;
- else {
- TCGv tmp1 = tcg_const_i32(insn & 0xFF);
- if (ra != 31)
- gen_helper_mtpr(tmp1, cpu_ir[ra]);
- else {
- TCGv tmp2 = tcg_const_i64(0);
- gen_helper_mtpr(tmp1, tmp2);
- tcg_temp_free(tmp2);
- }
- tcg_temp_free(tmp1);
- ret = EXIT_PC_STALE;
+#ifndef CONFIG_USER_ONLY
+ if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
+ gen_mtpr(rb, insn & 0xffff);
+ break;
}
- break;
#endif
- case 0x1E:
- /* HW_REI (PALcode) */
-#if defined (CONFIG_USER_ONLY)
goto invalid_opc;
-#else
- if (!ctx->pal_mode)
- goto invalid_opc;
- if (rb == 31) {
- /* "Old" alpha */
- gen_helper_hw_rei();
- } else {
- TCGv tmp;
-
- if (ra != 31) {
- tmp = tcg_temp_new();
- tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
- } else
- tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
- gen_helper_hw_ret(tmp);
- tcg_temp_free(tmp);
+ case 0x1E:
+ /* HW_RET (PALcode) */
+#ifndef CONFIG_USER_ONLY
+ if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
+ if (rb == 31) {
+ /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
+ address from EXC_ADDR. This turns out to be useful for our
+ emulation PALcode, so continue to accept it. */
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUState, exc_addr));
+ gen_helper_hw_ret(tmp);
+ tcg_temp_free(tmp);
+ } else {
+ gen_helper_hw_ret(cpu_ir[rb]);
+ }
+ ret = EXIT_PC_UPDATED;
+ break;
}
- ret = EXIT_PC_UPDATED;
- break;
#endif
+ goto invalid_opc;
case 0x1F:
/* HW_ST (PALcode) */
-#if defined (CONFIG_USER_ONLY)
- goto invalid_opc;
-#else
- if (!ctx->pal_mode)
- goto invalid_opc;
- else {
+#ifndef CONFIG_USER_ONLY
+ if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
TCGv addr, val;
addr = tcg_temp_new();
if (rb != 31)
@@ -2936,30 +3094,26 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
switch ((insn >> 12) & 0xF) {
case 0x0:
/* Longword physical access */
- gen_helper_stl_raw(val, addr);
+ gen_helper_stl_phys(addr, val);
break;
case 0x1:
/* Quadword physical access */
- gen_helper_stq_raw(val, addr);
+ gen_helper_stq_phys(addr, val);
break;
case 0x2:
/* Longword physical access with lock */
- gen_helper_stl_c_raw(val, val, addr);
+ gen_helper_stl_c_phys(val, addr, val);
break;
case 0x3:
/* Quadword physical access with lock */
- gen_helper_stq_c_raw(val, val, addr);
+ gen_helper_stq_c_phys(val, addr, val);
break;
case 0x4:
/* Longword virtual access */
- gen_helper_st_virt_to_phys(addr, addr);
- gen_helper_stl_raw(val, addr);
- break;
+ goto invalid_opc;
case 0x5:
/* Quadword virtual access */
- gen_helper_st_virt_to_phys(addr, addr);
- gen_helper_stq_raw(val, addr);
- break;
+ goto invalid_opc;
case 0x6:
/* Invalid */
goto invalid_opc;
@@ -2980,18 +3134,10 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
goto invalid_opc;
case 0xC:
/* Longword virtual access with alternate access mode */
- gen_helper_set_alt_mode();
- gen_helper_st_virt_to_phys(addr, addr);
- gen_helper_stl_raw(val, addr);
- gen_helper_restore_mode();
- break;
+ goto invalid_opc;
case 0xD:
/* Quadword virtual access with alternate access mode */
- gen_helper_set_alt_mode();
- gen_helper_st_virt_to_phys(addr, addr);
- gen_helper_stl_raw(val, addr);
- gen_helper_restore_mode();
- break;
+ goto invalid_opc;
case 0xE:
/* Invalid */
goto invalid_opc;
@@ -3002,9 +3148,10 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
if (ra == 31)
tcg_temp_free(val);
tcg_temp_free(addr);
+ break;
}
- break;
#endif
+ goto invalid_opc;
case 0x20:
/* LDF */
gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
@@ -3155,13 +3302,7 @@ static inline void gen_intermediate_code_internal(CPUState *env,
ctx.tb = tb;
ctx.env = env;
ctx.pc = pc_start;
- ctx.amask = env->amask;
-#if defined (CONFIG_USER_ONLY)
- ctx.mem_idx = 0;
-#else
- ctx.mem_idx = ((env->ps >> 3) & 3);
- ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
-#endif
+ ctx.mem_idx = cpu_mmu_index(env);
/* ??? Every TB begins with unset rounding mode, to be initialized on
the first fp insn of the TB. Alternately we could define a proper
@@ -3211,18 +3352,15 @@ static inline void gen_intermediate_code_internal(CPUState *env,
ctx.pc += 4;
ret = translate_one(ctxp, insn);
- if (ret == NO_EXIT) {
- /* If we reach a page boundary, are single stepping,
- or exhaust instruction count, stop generation. */
- if (env->singlestep_enabled) {
- gen_excp(&ctx, EXCP_DEBUG, 0);
- ret = EXIT_PC_UPDATED;
- } else if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
- || gen_opc_ptr >= gen_opc_end
- || num_insns >= max_insns
- || singlestep) {
- ret = EXIT_PC_STALE;
- }
+ /* If we reach a page boundary, are single stepping,
+ or exhaust instruction count, stop generation. */
+ if (ret == NO_EXIT
+ && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
+ || gen_opc_ptr >= gen_opc_end
+ || num_insns >= max_insns
+ || singlestep
+ || env->singlestep_enabled)) {
+ ret = EXIT_PC_STALE;
}
} while (ret == NO_EXIT);
@@ -3238,7 +3376,11 @@ static inline void gen_intermediate_code_internal(CPUState *env,
tcg_gen_movi_i64(cpu_pc, ctx.pc);
/* FALLTHRU */
case EXIT_PC_UPDATED:
- tcg_gen_exit_tb(0);
+ if (env->singlestep_enabled) {
+ gen_excp_1(EXCP_DEBUG, 0);
+ } else {
+ tcg_gen_exit_tb(0);
+ }
break;
default:
abort();
@@ -3325,43 +3467,13 @@ CPUAlphaState * cpu_alpha_init (const char *cpu_model)
env->implver = implver;
env->amask = amask;
- env->ps = 0x1F00;
#if defined (CONFIG_USER_ONLY)
- env->ps |= 1 << 3;
+ env->ps = PS_USER_MODE;
cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
| FPCR_UNFD | FPCR_INED | FPCR_DNOD));
-#else
- pal_init(env);
#endif
env->lock_addr = -1;
-
- /* Initialize IPR */
-#if defined (CONFIG_USER_ONLY)
- env->ipr[IPR_EXC_ADDR] = 0;
- env->ipr[IPR_EXC_SUM] = 0;
- env->ipr[IPR_EXC_MASK] = 0;
-#else
- {
- // uint64_t hwpcb;
- // hwpcb = env->ipr[IPR_PCBB];
- env->ipr[IPR_ASN] = 0;
- env->ipr[IPR_ASTEN] = 0;
- env->ipr[IPR_ASTSR] = 0;
- env->ipr[IPR_DATFX] = 0;
- /* XXX: fix this */
- // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
- // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
- // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
- // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
- env->ipr[IPR_FEN] = 0;
- env->ipr[IPR_IPL] = 31;
- env->ipr[IPR_MCES] = 0;
- env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
- // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
- env->ipr[IPR_SISR] = 0;
- env->ipr[IPR_VIRBND] = -1ULL;
- }
-#endif
+ env->fen = 1;
qemu_init_vcpu(env);
return env;