author     Aurelien Jarno <aurelien@aurel32.net>   2011-05-23 22:33:39 +0200
committer  Aurelien Jarno <aurelien@aurel32.net>   2011-05-23 22:33:39 +0200
commit     05c8a1e423736006580e4dd2bd94d0faafc9afdc (patch)
tree       490e0cb3eb784244bba388cb1890584a32dbf160
parent     dcfd14b3741983c466ad92fa2ae91eeafce3e5d5 (diff)
parent     fb8b273579eaa1e6cee4017e4b23104e17a36f07 (diff)
Merge branch 's390-next' of git://repo.or.cz/qemu/agraf
* 's390-next' of git://repo.or.cz/qemu/agraf:
  s390x: complain when allocating ram fails
  s390x: fix memory detection for guests > 64GB
  s390x: change mapping base to allow guests > 2GB
  s390x: Fix debugging for unknown sigp order codes
  s390x: build s390x by default
  s390x: remove compatibility cc field
  s390x: Adjust GDB stub
  s390x: translate engine for s390x CPU
  s390x: Adjust internal kvm code
  s390x: Implement opcode helpers
  s390x: helper functions for system emulation
  s390x: Shift variables in CPUState for memset(0)
  s390x: keep hint on virtio managing size
  s390x: make kvm exported functions conditional on kvm
  s390x: s390x-linux-user support
  tcg: extend max tcg opcodes when using 64-on-32bit
  s390x: fix smp support for kvm
-rwxr-xr-x  configure                              |    2
-rw-r--r--  default-configs/s390x-linux-user.mak   |    1
-rw-r--r--  exec-all.h                             |    4
-rw-r--r--  exec.c                                 |   14
-rw-r--r--  gdbstub.c                              |    8
-rw-r--r--  hw/s390-virtio-bus.c                   |    3
-rw-r--r--  hw/s390-virtio-bus.h                   |    2
-rw-r--r--  hw/s390-virtio.c                       |   20
-rw-r--r--  linux-user/elfload.c                   |   19
-rw-r--r--  linux-user/main.c                      |   83
-rw-r--r--  linux-user/s390x/syscall.h             |   23
-rw-r--r--  linux-user/s390x/syscall_nr.h          |  349
-rw-r--r--  linux-user/s390x/target_signal.h       |   26
-rw-r--r--  linux-user/s390x/termbits.h            |  283
-rw-r--r--  linux-user/signal.c                    |  333
-rw-r--r--  linux-user/syscall.c                   |   16
-rw-r--r--  linux-user/syscall_defs.h              |   55
-rw-r--r--  scripts/qemu-binfmt-conf.sh            |    5
-rw-r--r--  target-s390x/cpu.h                     |   28
-rw-r--r--  target-s390x/helper.c                  |  565
-rw-r--r--  target-s390x/helpers.h                 |  151
-rw-r--r--  target-s390x/kvm.c                     |   48
-rw-r--r--  target-s390x/op_helper.c               | 2929
-rw-r--r--  target-s390x/translate.c               | 5167
24 files changed, 10059 insertions, 75 deletions
diff --git a/configure b/configure
index d7dba5d6ac..bc54293308 100755
--- a/configure
+++ b/configure
@@ -848,6 +848,7 @@ sh4-softmmu \
sh4eb-softmmu \
sparc-softmmu \
sparc64-softmmu \
+s390x-softmmu \
"
fi
# the following are Linux specific
@@ -873,6 +874,7 @@ sparc-linux-user \
sparc64-linux-user \
sparc32plus-linux-user \
unicore32-linux-user \
+s390x-linux-user \
"
fi
# the following are Darwin specific
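For reference, the lists above are only the defaults; the new targets can also be requested explicitly. A plausible invocation against a tree of this vintage, using configure's standard --target-list option:

    ./configure --target-list=s390x-softmmu,s390x-linux-user
    make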
diff --git a/default-configs/s390x-linux-user.mak b/default-configs/s390x-linux-user.mak
new file mode 100644
index 0000000000..a243c99874
--- /dev/null
+++ b/default-configs/s390x-linux-user.mak
@@ -0,0 +1 @@
+# Default configuration for s390x-linux-user
diff --git a/exec-all.h b/exec-all.h
index cf3a704e6d..026864e908 100644
--- a/exec-all.h
+++ b/exec-all.h
@@ -43,7 +43,11 @@ typedef ram_addr_t tb_page_addr_t;
typedef struct TranslationBlock TranslationBlock;
/* XXX: make safe guess about sizes */
+#if (HOST_LONG_BITS == 32) && (TARGET_LONG_BITS == 64)
+#define MAX_OP_PER_INSTR 128
+#else
#define MAX_OP_PER_INSTR 96
+#endif
#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
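The larger op budget is needed because, on a 32-bit host, each 64-bit guest operation is lowered to several word-sized TCG ops. A minimal standalone C sketch of that expansion (illustrative names, not TCG code):

    #include <stdint.h>

    /* One 64-bit guest add becomes two 32-bit adds plus explicit carry
     * propagation on a 32-bit host, roughly doubling the number of ops
     * a single guest instruction can emit. */
    static void add64_on_32(uint32_t a_lo, uint32_t a_hi,
                            uint32_t b_lo, uint32_t b_hi,
                            uint32_t *r_lo, uint32_t *r_hi)
    {
        uint32_t lo = a_lo + b_lo;     /* low halves */
        uint32_t carry = lo < a_lo;    /* carry out of the low add */

        *r_lo = lo;
        *r_hi = a_hi + b_hi + carry;   /* high halves plus the carry */
    }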
diff --git a/exec.c b/exec.c
index a6df2d6139..a4785b25f8 100644
--- a/exec.c
+++ b/exec.c
@@ -2910,10 +2910,18 @@ ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
#endif
} else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
- /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
- new_block->host = mmap((void*)0x1000000, size,
+ /* S390 KVM requires the topmost vma of the RAM to be smaller than
+ a system-defined value, which is at least 256GB. Larger systems
+ have larger values. We put the guest between the end of the data
+ segment (system break) and this value. We use 32GB as a base to
+ have enough room for the system break to grow. */
+ new_block->host = mmap((void*)0x800000000, size,
PROT_EXEC|PROT_READ|PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ if (new_block->host == MAP_FAILED) {
+ fprintf(stderr, "Allocating RAM failed\n");
+ abort();
+ }
#else
if (xen_mapcache_enabled()) {
xen_ram_alloc(new_block->offset, size);
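The placement policy above can be exercised on its own. A hedged standalone sketch (64-bit host only; the 0x800000000 base is the 32GB mark from the hunk, and note that MAP_FIXED silently replaces any existing mapping in the range, so the caller must know the region is unused):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    /* Pin guest RAM at 32GB so it stays below the s390 KVM vma limit
     * while leaving the low range free for the host heap to grow, and
     * fail loudly instead of silently falling back elsewhere. */
    static void *alloc_guest_ram(size_t size)
    {
        void *host = mmap((void *)0x800000000ULL, size,
                          PROT_EXEC | PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

        if (host == MAP_FAILED) {
            fprintf(stderr, "Allocating RAM failed\n");
            abort();
        }
        return host;
    }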
diff --git a/gdbstub.c b/gdbstub.c
index 0838948c5c..ae856f91d4 100644
--- a/gdbstub.c
+++ b/gdbstub.c
@@ -1436,7 +1436,11 @@ static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
/* XXX */
break;
case S390_PC_REGNUM: GET_REGL(env->psw.addr); break;
- case S390_CC_REGNUM: GET_REG32(env->cc); break;
+ case S390_CC_REGNUM:
+ env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
+ env->cc_vr);
+ GET_REG32(env->cc_op);
+ break;
}
return 0;
@@ -1462,7 +1466,7 @@ static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
/* XXX */
break;
case S390_PC_REGNUM: env->psw.addr = tmpl; break;
- case S390_CC_REGNUM: env->cc = tmp32; r=4; break;
+ case S390_CC_REGNUM: env->cc_op = tmp32; r=4; break;
}
return r;
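The cc field disappears (see the cpu.h hunk below) because condition codes are now evaluated lazily: the translator records the producing operation and its inputs in cc_op/cc_src/cc_dst/cc_vr and folds them to a 0..3 value only when something, such as the gdb stub above, actually reads it. A schematic of the pattern, with an illustrative op; the real calc_cc() lives in target-s390x:

    #include <stdint.h>

    /* When cc_op names a real operation, recompute cc from the saved
     * inputs; otherwise cc_op already holds the materialized value. */
    enum { CC_OP_CONST = 0, CC_OP_LTGT_64 };

    static uint32_t calc_cc_sketch(uint32_t cc_op, uint64_t src, uint64_t dst)
    {
        switch (cc_op) {
        case CC_OP_LTGT_64:            /* cc from a 64-bit signed compare */
            if (src == dst) {
                return 0;
            }
            return (int64_t)src < (int64_t)dst ? 1 : 2;
        default:                       /* already a constant cc value */
            return cc_op;
        }
    }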
diff --git a/hw/s390-virtio-bus.c b/hw/s390-virtio-bus.c
index bb49e393ec..d4a12f7531 100644
--- a/hw/s390-virtio-bus.c
+++ b/hw/s390-virtio-bus.c
@@ -60,6 +60,9 @@ static const VirtIOBindings virtio_s390_bindings;
static ram_addr_t s390_virtio_device_num_vq(VirtIOS390Device *dev);
+/* length of VirtIO device pages */
+const target_phys_addr_t virtio_size = S390_DEVICE_PAGES * TARGET_PAGE_SIZE;
+
VirtIOS390Bus *s390_virtio_bus_init(ram_addr_t *ram_size)
{
VirtIOS390Bus *bus;
diff --git a/hw/s390-virtio-bus.h b/hw/s390-virtio-bus.h
index edf6d04570..0c412d08e1 100644
--- a/hw/s390-virtio-bus.h
+++ b/hw/s390-virtio-bus.h
@@ -33,7 +33,7 @@
#define VIRTIO_VQCONFIG_LEN 24
#define VIRTIO_RING_LEN (TARGET_PAGE_SIZE * 3)
-#define S390_DEVICE_PAGES 256
+#define S390_DEVICE_PAGES 512
typedef struct VirtIOS390Device {
DeviceState qdev;
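Assuming the usual 4 KiB s390 target page size, doubling S390_DEVICE_PAGES from 256 to 512 grows the virtio device page area (the virtio_size constant exported in the previous hunk) from 512 * 4 KiB = 2 MiB down from 256 * 4 KiB = 1 MiB, leaving room for more device descriptors.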
diff --git a/hw/s390-virtio.c b/hw/s390-virtio.c
index 698ff6f345..3eba7ea1e8 100644
--- a/hw/s390-virtio.c
+++ b/hw/s390-virtio.c
@@ -131,7 +131,7 @@ int s390_virtio_hypercall(CPUState *env, uint64_t mem, uint64_t hypercall)
}
/* PC hardware initialisation */
-static void s390_init(ram_addr_t ram_size,
+static void s390_init(ram_addr_t my_ram_size,
const char *boot_device,
const char *kernel_filename,
const char *kernel_cmdline,
@@ -143,19 +143,29 @@ static void s390_init(ram_addr_t ram_size,
ram_addr_t kernel_size = 0;
ram_addr_t initrd_offset;
ram_addr_t initrd_size = 0;
+ int shift = 0;
uint8_t *storage_keys;
int i;
+ /* s390x ram size detection needs a 16-bit multiplier and an increment,
+ so guests > 64GB can be specified in 2MB steps etc. */
+ while ((my_ram_size >> (20 + shift)) > 65535) {
+ shift++;
+ }
+ my_ram_size = my_ram_size >> (20 + shift) << (20 + shift);
+
+ /* let's propagate the changed ram size into the global variable. */
+ ram_size = my_ram_size;
/* get a BUS */
- s390_bus = s390_virtio_bus_init(&ram_size);
+ s390_bus = s390_virtio_bus_init(&my_ram_size);
/* allocate RAM */
- ram_addr = qemu_ram_alloc(NULL, "s390.ram", ram_size);
- cpu_register_physical_memory(0, ram_size, ram_addr);
+ ram_addr = qemu_ram_alloc(NULL, "s390.ram", my_ram_size);
+ cpu_register_physical_memory(0, my_ram_size, ram_addr);
/* allocate storage keys */
- storage_keys = qemu_mallocz(ram_size / TARGET_PAGE_SIZE);
+ storage_keys = qemu_mallocz(my_ram_size / TARGET_PAGE_SIZE);
/* init CPUs */
if (cpu_model == NULL) {
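The rounding loop in the hunk above can be lifted out and tested in isolation; a minimal standalone version with the same arithmetic (illustrative name):

    #include <stdint.h>

    /* Round ram_size down so that (ram_size >> (20 + shift)) fits in
     * 16 bits: up to 64GB uses 1MB steps (shift 0), up to 128GB uses
     * 2MB steps (shift 1), and so on. */
    static uint64_t s390_align_ram(uint64_t ram_size)
    {
        int shift = 0;

        while ((ram_size >> (20 + shift)) > 65535) {
            shift++;
        }
        return ram_size >> (20 + shift) << (20 + shift);
    }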
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 4c399f8e33..dcfeb7a286 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -867,6 +867,25 @@ static inline void init_thread(struct target_pt_regs *regs,
#endif /* TARGET_ALPHA */
+#ifdef TARGET_S390X
+
+#define ELF_START_MMAP (0x20000000000ULL)
+
+#define elf_check_arch(x) ( (x) == ELF_ARCH )
+
+#define ELF_CLASS ELFCLASS64
+#define ELF_DATA ELFDATA2MSB
+#define ELF_ARCH EM_S390
+
+static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
+{
+ regs->psw.addr = infop->entry;
+ regs->psw.mask = PSW_MASK_64 | PSW_MASK_32;
+ regs->gprs[15] = infop->start_stack;
+}
+
+#endif /* TARGET_S390X */
+
#ifndef ELF_PLATFORM
#define ELF_PLATFORM (NULL)
#endif
diff --git a/linux-user/main.c b/linux-user/main.c
index 8336639d08..088def3cfd 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -2683,6 +2683,80 @@ void cpu_loop (CPUState *env)
}
#endif /* TARGET_ALPHA */
+#ifdef TARGET_S390X
+void cpu_loop(CPUS390XState *env)
+{
+ int trapnr;
+ target_siginfo_t info;
+
+ while (1) {
+ trapnr = cpu_s390x_exec (env);
+
+ switch (trapnr) {
+ case EXCP_INTERRUPT:
+ /* just indicate that signals should be handled asap */
+ break;
+ case EXCP_DEBUG:
+ {
+ int sig;
+
+ sig = gdb_handlesig (env, TARGET_SIGTRAP);
+ if (sig) {
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = TARGET_TRAP_BRKPT;
+ queue_signal(env, info.si_signo, &info);
+ }
+ }
+ break;
+ case EXCP_SVC:
+ {
+ int n = env->int_svc_code;
+ if (!n) {
+ /* syscalls > 255 */
+ n = env->regs[1];
+ }
+ env->psw.addr += env->int_svc_ilc;
+ env->regs[2] = do_syscall(env, n,
+ env->regs[2],
+ env->regs[3],
+ env->regs[4],
+ env->regs[5],
+ env->regs[6],
+ env->regs[7]);
+ }
+ break;
+ case EXCP_ADDR:
+ {
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* XXX: check env->error_code */
+ info.si_code = TARGET_SEGV_MAPERR;
+ info._sifields._sigfault._addr = env->__excp_addr;
+ queue_signal(env, info.si_signo, &info);
+ }
+ break;
+ case EXCP_SPEC:
+ {
+ fprintf(stderr,"specification exception insn 0x%08x%04x\n", ldl(env->psw.addr), lduw(env->psw.addr + 4));
+ info.si_signo = SIGILL;
+ info.si_errno = 0;
+ info.si_code = TARGET_ILL_ILLOPC;
+ info._sifields._sigfault._addr = env->__excp_addr;
+ queue_signal(env, info.si_signo, &info);
+ }
+ break;
+ default:
+ printf ("Unhandled trap: 0x%x\n", trapnr);
+ cpu_dump_state(env, stderr, fprintf, 0);
+ exit (1);
+ }
+ process_pending_signals (env);
+ }
+}
+
+#endif /* TARGET_S390X */
+
static void version(void)
{
printf("qemu-" TARGET_ARCH " version " QEMU_VERSION QEMU_PKGVERSION
@@ -3432,6 +3506,15 @@ int main(int argc, char **argv, char **envp)
env->regs[15] = regs->acr;
env->pc = regs->erp;
}
+#elif defined(TARGET_S390X)
+ {
+ int i;
+ for (i = 0; i < 16; i++) {
+ env->regs[i] = regs->gprs[i];
+ }
+ env->psw.mask = regs->psw.mask;
+ env->psw.addr = regs->psw.addr;
+ }
#else
#error unsupported target CPU
#endif
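The EXCP_SVC case reflects the s390x system-call encoding: "svc N" can only carry numbers 0..255 in its immediate field, so larger numbers travel in r1 via "svc 0"; arguments sit in r2..r7 and the result comes back in r2. A one-function sketch of the number selection performed above:

    /* Pick the syscall number the way the cpu_loop hunk does: prefer
     * the immediate from the svc instruction, fall back to r1 when the
     * immediate is zero (syscalls > 255). */
    static int svc_number_sketch(int svc_immediate, unsigned long r1)
    {
        return svc_immediate ? svc_immediate : (int)r1;
    }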
diff --git a/linux-user/s390x/syscall.h b/linux-user/s390x/syscall.h
new file mode 100644
index 0000000000..c2ea151ea5
--- /dev/null
+++ b/linux-user/s390x/syscall.h
@@ -0,0 +1,23 @@
+/* this typedef defines what a Program Status Word looks like */
+typedef struct {
+ abi_ulong mask;
+ abi_ulong addr;
+} __attribute__ ((aligned(8))) target_psw_t;
+
+/*
+ * The pt_regs struct defines the way the registers are stored on
+ * the stack during a system call.
+ */
+
+#define TARGET_NUM_GPRS 16
+
+struct target_pt_regs {
+ abi_ulong args[1];
+ target_psw_t psw;
+ abi_ulong gprs[TARGET_NUM_GPRS];
+ abi_ulong orig_gpr2;
+ unsigned short ilc;
+ unsigned short trap;
+};
+
+#define UNAME_MACHINE "s390x"
diff --git a/linux-user/s390x/syscall_nr.h b/linux-user/s390x/syscall_nr.h
new file mode 100644
index 0000000000..7cc6db2e1b
--- /dev/null
+++ b/linux-user/s390x/syscall_nr.h
@@ -0,0 +1,349 @@
+/*
+ * This file contains the system call numbers.
+ */
+
+#define TARGET_NR_exit 1
+#define TARGET_NR_fork 2
+#define TARGET_NR_read 3
+#define TARGET_NR_write 4
+#define TARGET_NR_open 5
+#define TARGET_NR_close 6
+#define TARGET_NR_restart_syscall 7
+#define TARGET_NR_creat 8
+#define TARGET_NR_link 9
+#define TARGET_NR_unlink 10
+#define TARGET_NR_execve 11
+#define TARGET_NR_chdir 12
+#define TARGET_NR_mknod 14
+#define TARGET_NR_chmod 15
+#define TARGET_NR_lseek 19
+#define TARGET_NR_getpid 20
+#define TARGET_NR_mount 21
+#define TARGET_NR_umount 22
+#define TARGET_NR_ptrace 26
+#define TARGET_NR_alarm 27
+#define TARGET_NR_pause 29
+#define TARGET_NR_utime 30
+#define TARGET_NR_access 33
+#define TARGET_NR_nice 34
+#define TARGET_NR_sync 36
+#define TARGET_NR_kill 37
+#define TARGET_NR_rename 38
+#define TARGET_NR_mkdir 39
+#define TARGET_NR_rmdir 40
+#define TARGET_NR_dup 41
+#define TARGET_NR_pipe 42
+#define TARGET_NR_times 43
+#define TARGET_NR_brk 45
+#define TARGET_NR_signal 48
+#define TARGET_NR_acct 51
+#define TARGET_NR_umount2 52
+#define TARGET_NR_ioctl 54
+#define TARGET_NR_fcntl 55
+#define TARGET_NR_setpgid 57
+#define TARGET_NR_umask 60
+#define TARGET_NR_chroot 61
+#define TARGET_NR_ustat 62
+#define TARGET_NR_dup2 63
+#define TARGET_NR_getppid 64
+#define TARGET_NR_getpgrp 65
+#define TARGET_NR_setsid 66
+#define TARGET_NR_sigaction 67
+#define TARGET_NR_sigsuspend 72
+#define TARGET_NR_sigpending 73
+#define TARGET_NR_sethostname 74
+#define TARGET_NR_setrlimit 75
+#define TARGET_NR_getrusage 77
+#define TARGET_NR_gettimeofday 78
+#define TARGET_NR_settimeofday 79
+#define TARGET_NR_symlink 83
+#define TARGET_NR_readlink 85
+#define TARGET_NR_uselib 86
+#define TARGET_NR_swapon 87
+#define TARGET_NR_reboot 88
+#define TARGET_NR_readdir 89
+#define TARGET_NR_mmap 90
+#define TARGET_NR_munmap 91
+#define TARGET_NR_truncate 92
+#define TARGET_NR_ftruncate 93
+#define TARGET_NR_fchmod 94
+#define TARGET_NR_getpriority 96
+#define TARGET_NR_setpriority 97
+#define TARGET_NR_statfs 99
+#define TARGET_NR_fstatfs 100
+#define TARGET_NR_socketcall 102
+#define TARGET_NR_syslog 103
+#define TARGET_NR_setitimer 104
+#define TARGET_NR_getitimer 105
+#define TARGET_NR_stat 106
+#define TARGET_NR_lstat 107
+#define TARGET_NR_fstat 108
+#define TARGET_NR_lookup_dcookie 110
+#define TARGET_NR_vhangup 111
+#define TARGET_NR_idle 112
+#define TARGET_NR_wait4 114
+#define TARGET_NR_swapoff 115
+#define TARGET_NR_sysinfo 116
+#define TARGET_NR_ipc 117
+#define TARGET_NR_fsync 118
+#define TARGET_NR_sigreturn 119
+#define TARGET_NR_clone 120
+#define TARGET_NR_setdomainname 121
+#define TARGET_NR_uname 122
+#define TARGET_NR_adjtimex 124
+#define TARGET_NR_mprotect 125
+#define TARGET_NR_sigprocmask 126
+#define TARGET_NR_create_module 127
+#define TARGET_NR_init_module 128
+#define TARGET_NR_delete_module 129
+#define TARGET_NR_get_kernel_syms 130
+#define TARGET_NR_quotactl 131
+#define TARGET_NR_getpgid 132
+#define TARGET_NR_fchdir 133
+#define TARGET_NR_bdflush 134
+#define TARGET_NR_sysfs 135
+#define TARGET_NR_personality 136
+#define TARGET_NR_afs_syscall 137 /* Syscall for Andrew File System */
+#define TARGET_NR_getdents 141
+#define TARGET_NR_flock 143
+#define TARGET_NR_msync 144
+#define TARGET_NR_readv 145
+#define TARGET_NR_writev 146
+#define TARGET_NR_getsid 147
+#define TARGET_NR_fdatasync 148
+#define TARGET_NR__sysctl 149
+#define TARGET_NR_mlock 150
+#define TARGET_NR_munlock 151
+#define TARGET_NR_mlockall 152
+#define TARGET_NR_munlockall 153
+#define TARGET_NR_sched_setparam 154
+#define TARGET_NR_sched_getparam 155
+#define TARGET_NR_sched_setscheduler 156
+#define TARGET_NR_sched_getscheduler 157
+#define TARGET_NR_sched_yield 158
+#define TARGET_NR_sched_get_priority_max 159
+#define TARGET_NR_sched_get_priority_min 160
+#define TARGET_NR_sched_rr_get_interval 161
+#define TARGET_NR_nanosleep 162
+#define TARGET_NR_mremap 163
+#define TARGET_NR_query_module 167
+#define TARGET_NR_poll 168
+#define TARGET_NR_nfsservctl 169
+#define TARGET_NR_prctl 172
+#define TARGET_NR_rt_sigreturn 173
+#define TARGET_NR_rt_sigaction 174
+#define TARGET_NR_rt_sigprocmask 175
+#define TARGET_NR_rt_sigpending 176
+#define TARGET_NR_rt_sigtimedwait 177
+#define TARGET_NR_rt_sigqueueinfo 178
+#define TARGET_NR_rt_sigsuspend 179
+#define TARGET_NR_pread64 180
+#define TARGET_NR_pwrite64 181
+#define TARGET_NR_getcwd 183
+#define TARGET_NR_capget 184
+#define TARGET_NR_capset 185
+#define TARGET_NR_sigaltstack 186
+#define TARGET_NR_sendfile 187
+#define TARGET_NR_getpmsg 188
+#define TARGET_NR_putpmsg 189
+#define TARGET_NR_vfork 190
+#define TARGET_NR_pivot_root 217
+#define TARGET_NR_mincore 218
+#define TARGET_NR_madvise 219
+#define TARGET_NR_getdents64 220
+#define TARGET_NR_readahead 222
+#define TARGET_NR_setxattr 224
+#define TARGET_NR_lsetxattr 225
+#define TARGET_NR_fsetxattr 226
+#define TARGET_NR_getxattr 227
+#define TARGET_NR_lgetxattr 228
+#define TARGET_NR_fgetxattr 229
+#define TARGET_NR_listxattr 230
+#define TARGET_NR_llistxattr 231
+#define TARGET_NR_flistxattr 232
+#define TARGET_NR_removexattr 233
+#define TARGET_NR_lremovexattr 234
+#define TARGET_NR_fremovexattr 235
+#define TARGET_NR_gettid 236
+#define TARGET_NR_tkill 237
+#define TARGET_NR_futex 238
+#define TARGET_NR_sched_setaffinity 239
+#define TARGET_NR_sched_getaffinity 240
+#define TARGET_NR_tgkill 241
+/* Number 242 is reserved for tux */
+#define TARGET_NR_io_setup 243
+#define TARGET_NR_io_destroy 244
+#define TARGET_NR_io_getevents 245
+#define TARGET_NR_io_submit 246
+#define TARGET_NR_io_cancel 247
+#define TARGET_NR_exit_group 248
+#define TARGET_NR_epoll_create 249
+#define TARGET_NR_epoll_ctl 250
+#define TARGET_NR_epoll_wait 251
+#define TARGET_NR_set_tid_address 252
+#define TARGET_NR_fadvise64 253
+#define TARGET_NR_timer_create 254
+#define TARGET_NR_timer_settime (TARGET_NR_timer_create+1)
+#define TARGET_NR_timer_gettime (TARGET_NR_timer_create+2)
+#define TARGET_NR_timer_getoverrun (TARGET_NR_timer_create+3)
+#define TARGET_NR_timer_delete (TARGET_NR_timer_create+4)
+#define TARGET_NR_clock_settime (TARGET_NR_timer_create+5)
+#define TARGET_NR_clock_gettime (TARGET_NR_timer_create+6)
+#define TARGET_NR_clock_getres (TARGET_NR_timer_create+7)
+#define TARGET_NR_clock_nanosleep (TARGET_NR_timer_create+8)
+/* Number 263 is reserved for vserver */
+#define TARGET_NR_statfs64 265
+#define TARGET_NR_fstatfs64 266
+#define TARGET_NR_remap_file_pages 267
+/* Number 268 is reserved for new sys_mbind */
+/* Number 269 is reserved for new sys_get_mempolicy */
+/* Number 270 is reserved for new sys_set_mempolicy */
+#define TARGET_NR_mq_open 271
+#define TARGET_NR_mq_unlink 272
+#define TARGET_NR_mq_timedsend 273
+#define TARGET_NR_mq_timedreceive 274
+#define TARGET_NR_mq_notify 275
+#define TARGET_NR_mq_getsetattr 276
+#define TARGET_NR_kexec_load 277
+#define TARGET_NR_add_key 278
+#define TARGET_NR_request_key 279
+#define TARGET_NR_keyctl 280
+#define TARGET_NR_waitid 281
+#define TARGET_NR_ioprio_set 282
+#define TARGET_NR_ioprio_get 283
+#define TARGET_NR_inotify_init 284
+#define TARGET_NR_inotify_add_watch 285
+#define TARGET_NR_inotify_rm_watch 286
+/* Number 287 is reserved for new sys_migrate_pages */
+#define TARGET_NR_openat 288
+#define TARGET_NR_mkdirat 289
+#define TARGET_NR_mknodat 290
+#define TARGET_NR_fchownat 291
+#define TARGET_NR_futimesat 292
+#define TARGET_NR_unlinkat 294
+#define TARGET_NR_renameat 295
+#define TARGET_NR_linkat 296
+#define TARGET_NR_symlinkat 297
+#define TARGET_NR_readlinkat 298
+#define TARGET_NR_fchmodat 299
+#define TARGET_NR_faccessat 300
+#define TARGET_NR_pselect6 301
+#define TARGET_NR_ppoll 302
+#define TARGET_NR_unshare 303
+#define TARGET_NR_set_robust_list 304
+#define TARGET_NR_get_robust_list 305
+#define TARGET_NR_splice 306
+#define TARGET_NR_sync_file_range 307
+#define TARGET_NR_tee 308
+#define TARGET_NR_vmsplice 309
+/* Number 310 is reserved for new sys_move_pages */
+#define TARGET_NR_getcpu 311
+#define TARGET_NR_epoll_pwait 312
+#define TARGET_NR_utimes 313
+#define TARGET_NR_fallocate 314
+#define TARGET_NR_utimensat 315
+#define TARGET_NR_signalfd 316
+#define TARGET_NR_timerfd 317
+#define TARGET_NR_eventfd 318
+#define TARGET_NR_timerfd_create 319
+#define TARGET_NR_timerfd_settime 320
+#define TARGET_NR_timerfd_gettime 321
+#define TARGET_NR_signalfd4 322
+#define TARGET_NR_eventfd2 323
+#define TARGET_NR_inotify_init1 324
+#define TARGET_NR_pipe2 325
+#define TARGET_NR_dup3 326
+#define TARGET_NR_epoll_create1 327
+#undef NR_syscalls
+#define NR_syscalls 328
+
+/*
+ * There are some system calls that are not present on 64 bit, some
+ * have a different name although they do the same (e.g. TARGET_NR_chown32
+ * is TARGET_NR_chown on 64 bit).
+ */
+#ifndef TARGET_S390X
+
+#define TARGET_NR_time 13
+#define TARGET_NR_lchown 16
+#define TARGET_NR_setuid 23
+#define TARGET_NR_getuid 24
+#define TARGET_NR_stime 25
+#define TARGET_NR_setgid 46
+#define TARGET_NR_getgid 47
+#define TARGET_NR_geteuid 49
+#define TARGET_NR_getegid 50
+#define TARGET_NR_setreuid 70
+#define TARGET_NR_setregid 71
+#define TARGET_NR_getrlimit 76
+#define TARGET_NR_getgroups 80
+#define TARGET_NR_setgroups 81
+#define TARGET_NR_fchown 95
+#define TARGET_NR_ioperm 101
+#define TARGET_NR_setfsuid 138
+#define TARGET_NR_setfsgid 139
+#define TARGET_NR__llseek 140
+#define TARGET_NR__newselect 142
+#define TARGET_NR_setresuid 164
+#define TARGET_NR_getresuid 165
+#define TARGET_NR_setresgid 170
+#define TARGET_NR_getresgid 171
+#define TARGET_NR_chown 182
+#define TARGET_NR_ugetrlimit 191 /* SuS compliant getrlimit */
+#define TARGET_NR_mmap2 192
+#define TARGET_NR_truncate64 193
+#define TARGET_NR_ftruncate64 194
+#define TARGET_NR_stat64 195
+#define TARGET_NR_lstat64 196
+#define TARGET_NR_fstat64 197
+#define TARGET_NR_lchown32 198
+#define TARGET_NR_getuid32 199
+#define TARGET_NR_getgid32 200
+#define TARGET_NR_geteuid32 201
+#define TARGET_NR_getegid32 202
+#define TARGET_NR_setreuid32 203
+#define TARGET_NR_setregid32 204
+#define TARGET_NR_getgroups32 205
+#define TARGET_NR_setgroups32 206
+#define TARGET_NR_fchown32 207
+#define TARGET_NR_setresuid32 208
+#define TARGET_NR_getresuid32 209
+#define TARGET_NR_setresgid32 210
+#define TARGET_NR_getresgid32 211
+#define TARGET_NR_chown32 212
+#define TARGET_NR_setuid32 213
+#define TARGET_NR_setgid32 214
+#define TARGET_NR_setfsuid32 215
+#define TARGET_NR_setfsgid32 216
+#define TARGET_NR_fcntl64 221
+#define TARGET_NR_sendfile64 223
+#define TARGET_NR_fadvise64_64 264
+#define TARGET_NR_fstatat64 293
+
+#else
+
+#define TARGET_NR_select 142
+#define TARGET_NR_getrlimit 191 /* SuS compliant getrlimit */
+#define TARGET_NR_lchown 198
+#define TARGET_NR_getuid 199
+#define TARGET_NR_getgid 200
+#define TARGET_NR_geteuid 201
+#define TARGET_NR_getegid 202
+#define TARGET_NR_setreuid 203
+#define TARGET_NR_setregid 204
+#define TARGET_NR_getgroups 205
+#define TARGET_NR_setgroups 206
+#define TARGET_NR_fchown 207
+#define TARGET_NR_setresuid 208
+#define TARGET_NR_getresuid 209
+#define TARGET_NR_setresgid 210
+#define TARGET_NR_getresgid 211
+#define TARGET_NR_chown 212
+#define TARGET_NR_setuid 213
+#define TARGET_NR_setgid 214
+#define TARGET_NR_setfsuid 215
+#define TARGET_NR_setfsgid 216
+#define TARGET_NR_newfstatat 293
+
+#endif
+
diff --git a/linux-user/s390x/target_signal.h b/linux-user/s390x/target_signal.h
new file mode 100644
index 0000000000..b4816b040f
--- /dev/null
+++ b/linux-user/s390x/target_signal.h
@@ -0,0 +1,26 @@
+#ifndef TARGET_SIGNAL_H
+#define TARGET_SIGNAL_H
+
+#include "cpu.h"
+
+typedef struct target_sigaltstack {
+ abi_ulong ss_sp;
+ int ss_flags;
+ abi_ulong ss_size;
+} target_stack_t;
+
+/*
+ * sigaltstack controls
+ */
+#define TARGET_SS_ONSTACK 1
+#define TARGET_SS_DISABLE 2
+
+#define TARGET_MINSIGSTKSZ 2048
+#define TARGET_SIGSTKSZ 8192
+
+static inline abi_ulong get_sp_from_cpustate(CPUS390XState *state)
+{
+ return state->regs[15];
+}
+
+#endif /* TARGET_SIGNAL_H */
diff --git a/linux-user/s390x/termbits.h b/linux-user/s390x/termbits.h
new file mode 100644
index 0000000000..2a78a05594
--- /dev/null
+++ b/linux-user/s390x/termbits.h
@@ -0,0 +1,283 @@
+/*
+ * include/asm-s390/termbits.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/termbits.h"
+ */
+
+#define TARGET_NCCS 19
+struct target_termios {
+ unsigned int c_iflag; /* input mode flags */
+ unsigned int c_oflag; /* output mode flags */
+ unsigned int c_cflag; /* control mode flags */
+ unsigned int c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[TARGET_NCCS]; /* control characters */
+};
+
+struct target_termios2 {
+ unsigned int c_iflag; /* input mode flags */
+ unsigned int c_oflag; /* output mode flags */
+ unsigned int c_cflag; /* control mode flags */
+ unsigned int c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[TARGET_NCCS]; /* control characters */
+ unsigned int c_ispeed; /* input speed */
+ unsigned int c_ospeed; /* output speed */
+};
+
+struct target_ktermios {
+ unsigned int c_iflag; /* input mode flags */
+ unsigned int c_oflag; /* output mode flags */
+ unsigned int c_cflag; /* control mode flags */
+ unsigned int c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[TARGET_NCCS]; /* control characters */
+ unsigned int c_ispeed; /* input speed */
+ unsigned int c_ospeed; /* output speed */
+};
+
+/* c_cc characters */
+#define TARGET_VINTR 0
+#define TARGET_VQUIT 1
+#define TARGET_VERASE 2
+#define TARGET_VKILL 3
+#define TARGET_VEOF 4
+#define TARGET_VTIME 5
+#define TARGET_VMIN 6
+#define TARGET_VSWTC 7
+#define TARGET_VSTART 8
+#define TARGET_VSTOP 9
+#define TARGET_VSUSP 10
+#define TARGET_VEOL 11
+#define TARGET_VREPRINT 12
+#define TARGET_VDISCARD 13
+#define TARGET_VWERASE 14
+#define TARGET_VLNEXT 15
+#define TARGET_VEOL2 16
+
+/* c_iflag bits */
+#define TARGET_IGNBRK 0000001
+#define TARGET_BRKINT 0000002
+#define TARGET_IGNPAR 0000004
+#define TARGET_PARMRK 0000010
+#define TARGET_INPCK 0000020
+#define TARGET_ISTRIP 0000040
+#define TARGET_INLCR 0000100
+#define TARGET_IGNCR 0000200
+#define TARGET_ICRNL 0000400
+#define TARGET_IUCLC 0001000
+#define TARGET_IXON 0002000
+#define TARGET_IXANY 0004000
+#define TARGET_IXOFF 0010000
+#define TARGET_IMAXBEL 0020000
+#define TARGET_IUTF8 0040000
+
+/* c_oflag bits */
+#define TARGET_OPOST 0000001
+#define TARGET_OLCUC 0000002
+#define TARGET_ONLCR 0000004
+#define TARGET_OCRNL 0000010
+#define TARGET_ONOCR 0000020
+#define TARGET_ONLRET 0000040
+#define TARGET_OFILL 0000100
+#define TARGET_OFDEL 0000200
+#define TARGET_NLDLY 0000400
+#define TARGET_NL0 0000000
+#define TARGET_NL1 0000400
+#define TARGET_CRDLY 0003000
+#define TARGET_CR0 0000000
+#define TARGET_CR1 0001000
+#define TARGET_CR2 0002000
+#define TARGET_CR3 0003000
+#define TARGET_TABDLY 0014000
+#define TARGET_TAB0 0000000
+#define TARGET_TAB1 0004000
+#define TARGET_TAB2 0010000
+#define TARGET_TAB3 0014000
+#define TARGET_XTABS 0014000
+#define TARGET_BSDLY 0020000
+#define TARGET_BS0 0000000
+#define TARGET_BS1 0020000
+#define TARGET_VTDLY 0040000
+#define TARGET_VT0 0000000
+#define TARGET_VT1 0040000
+#define TARGET_FFDLY 0100000
+#define TARGET_FF0 0000000
+#define TARGET_FF1 0100000
+
+/* c_cflag bit meaning */
+#define TARGET_CBAUD 0010017
+#define TARGET_B0 0000000 /* hang up */
+#define TARGET_B50 0000001
+#define TARGET_B75 0000002
+#define TARGET_B110 0000003
+#define TARGET_B134 0000004
+#define TARGET_B150 0000005
+#define TARGET_B200 0000006
+#define TARGET_B300 0000007
+#define TARGET_B600 0000010
+#define TARGET_B1200 0000011
+#define TARGET_B1800 0000012
+#define TARGET_B2400 0000013
+#define TARGET_B4800 0000014
+#define TARGET_B9600 0000015
+#define TARGET_B19200 0000016
+#define TARGET_B38400 0000017
+#define TARGET_EXTA B19200
+#define TARGET_EXTB B38400
+#define TARGET_CSIZE 0000060
+#define TARGET_CS5 0000000
+#define TARGET_CS6 0000020
+#define TARGET_CS7 0000040
+#define TARGET_CS8 0000060
+#define TARGET_CSTOPB 0000100
+#define TARGET_CREAD 0000200
+#define TARGET_PARENB 0000400
+#define TARGET_PARODD 0001000
+#define TARGET_HUPCL 0002000
+#define TARGET_CLOCAL 0004000
+#define TARGET_CBAUDEX 0010000
+#define TARGET_BOTHER 0010000
+#define TARGET_B57600 0010001
+#define TARGET_B115200 0010002
+#define TARGET_B230400 0010003
+#define TARGET_B460800 0010004
+#define TARGET_B500000 0010005
+#define TARGET_B576000 0010006
+#define TARGET_B921600 0010007
+#define TARGET_B1000000 0010010
+#define TARGET_B1152000 0010011
+#define TARGET_B1500000 0010012
+#define TARGET_B2000000 0010013
+#define TARGET_B2500000 0010014
+#define TARGET_B3000000 0010015
+#define TARGET_B3500000 0010016
+#define TARGET_B4000000 0010017
+#define TARGET_CIBAUD 002003600000 /* input baud rate */
+#define TARGET_CMSPAR 010000000000 /* mark or space (stick) parity */
+#define TARGET_CRTSCTS 020000000000 /* flow control */
+
+#define TARGET_IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+
+/* c_lflag bits */
+#define TARGET_ISIG 0000001
+#define TARGET_ICANON 0000002
+#define TARGET_XCASE 0000004
+#define TARGET_ECHO 0000010
+#define TARGET_ECHOE 0000020
+#define TARGET_ECHOK 0000040
+#define TARGET_ECHONL 0000100
+#define TARGET_NOFLSH 0000200
+#define TARGET_TOSTOP 0000400
+#define TARGET_ECHOCTL 0001000
+#define TARGET_ECHOPRT 0002000
+#define TARGET_ECHOKE 0004000
+#define TARGET_FLUSHO 0010000
+#define TARGET_PENDIN 0040000
+#define TARGET_IEXTEN 0100000
+
+/* tcflow() and TCXONC use these */
+#define TARGET_TCOOFF 0
+#define TARGET_TCOON 1
+#define TARGET_TCIOFF 2
+#define TARGET_TCION 3
+
+/* tcflush() and TCFLSH use these */
+#define TARGET_TCIFLUSH 0
+#define TARGET_TCOFLUSH 1
+#define TARGET_TCIOFLUSH 2
+
+/* tcsetattr uses these */
+#define TARGET_TCSANOW 0
+#define TARGET_TCSADRAIN 1
+#define TARGET_TCSAFLUSH 2
+
+/*
+ * include/asm-s390/ioctls.h
+ *
+ * S390 version
+ *
+ * Derived from "include/asm-i386/ioctls.h"
+ */
+
+/* 0x54 is just a magic number to make these relatively unique ('T') */
+
+#define TARGET_TCGETS 0x5401
+#define TARGET_TCSETS 0x5402
+#define TARGET_TCSETSW 0x5403
+#define TARGET_TCSETSF 0x5404
+#define TARGET_TCGETA 0x5405
+#define TARGET_TCSETA 0x5406
+#define TARGET_TCSETAW 0x5407
+#define TARGET_TCSETAF 0x5408
+#define TARGET_TCSBRK 0x5409
+#define TARGET_TCXONC 0x540A
+#define TARGET_TCFLSH 0x540B
+#define TARGET_TIOCEXCL 0x540C
+#define TARGET_TIOCNXCL 0x540D
+#define TARGET_TIOCSCTTY 0x540E
+#define TARGET_TIOCGPGRP 0x540F
+#define TARGET_TIOCSPGRP 0x5410
+#define TARGET_TIOCOUTQ 0x5411
+#define TARGET_TIOCSTI 0x5412
+#define TARGET_TIOCGWINSZ 0x5413
+#define TARGET_TIOCSWINSZ 0x5414
+#define TARGET_TIOCMGET 0x5415
+#define TARGET_TIOCMBIS 0x5416
+#define TARGET_TIOCMBIC 0x5417
+#define TARGET_TIOCMSET 0x5418
+#define TARGET_TIOCGSOFTCAR 0x5419
+#define TARGET_TIOCSSOFTCAR 0x541A
+#define TARGET_FIONREAD 0x541B
+#define TARGET_TIOCINQ TARGET_FIONREAD
+#define TARGET_TIOCLINUX 0x541C
+#define TARGET_TIOCCONS 0x541D
+#define TARGET_TIOCGSERIAL 0x541E
+#define TARGET_TIOCSSERIAL 0x541F
+#define TARGET_TIOCPKT 0x5420
+#define TARGET_FIONBIO 0x5421
+#define TARGET_TIOCNOTTY 0x5422
+#define TARGET_TIOCSETD 0x5423
+#define TARGET_TIOCGETD 0x5424
+#define TARGET_TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
+#define TARGET_TIOCSBRK 0x5427 /* BSD compatibility */
+#define TARGET_TIOCCBRK 0x5428 /* BSD compatibility */
+#define TARGET_TIOCGSID 0x5429 /* Return the session ID of FD */
+#define TARGET_TCGETS2 _IOR('T',0x2A, struct termios2)
+#define TARGET_TCSETS2 _IOW('T',0x2B, struct termios2)
+#define TARGET_TCSETSW2 _IOW('T',0x2C, struct termios2)
+#define TARGET_TCSETSF2 _IOW('T',0x2D, struct termios2)
+#define TARGET_TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define TARGET_TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
+#define TARGET_TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */
+
+#define TARGET_FIONCLEX 0x5450 /* these numbers need to be adjusted. */
+#define TARGET_FIOCLEX 0x5451
+#define TARGET_FIOASYNC 0x5452
+#define TARGET_TIOCSERCONFIG 0x5453
+#define TARGET_TIOCSERGWILD 0x5454
+#define TARGET_TIOCSERSWILD 0x5455
+#define TARGET_TIOCGLCKTRMIOS 0x5456
+#define TARGET_TIOCSLCKTRMIOS 0x5457
+#define TARGET_TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TARGET_TIOCSERGETLSR 0x5459 /* Get line status register */
+#define TARGET_TIOCSERGETMULTI 0x545A /* Get multiport config */
+#define TARGET_TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TARGET_TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
+#define TARGET_TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+#define TARGET_FIOQSIZE 0x545E
+
+/* Used for packet mode */
+#define TARGET_TIOCPKT_DATA 0
+#define TARGET_TIOCPKT_FLUSHREAD 1
+#define TARGET_TIOCPKT_FLUSHWRITE 2
+#define TARGET_TIOCPKT_STOP 4
+#define TARGET_TIOCPKT_START 8
+#define TARGET_TIOCPKT_NOSTOP 16
+#define TARGET_TIOCPKT_DOSTOP 32
+
+#define TARGET_TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+
diff --git a/linux-user/signal.c b/linux-user/signal.c
index 6fe086b4cd..c7a375fe0e 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -3614,6 +3614,339 @@ long do_rt_sigreturn(CPUState *env)
return -TARGET_ENOSYS;
}
+#elif defined(TARGET_S390X)
+
+#define __NUM_GPRS 16
+#define __NUM_FPRS 16
+#define __NUM_ACRS 16
+
+#define S390_SYSCALL_SIZE 2
+#define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */
+
+#define _SIGCONTEXT_NSIG 64
+#define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */
+#define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
+#define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
+#define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */
+#define S390_SYSCALL_OPCODE ((uint16_t)0x0a00)
+
+typedef struct {
+ target_psw_t psw;
+ target_ulong gprs[__NUM_GPRS];
+ unsigned int acrs[__NUM_ACRS];
+} target_s390_regs_common;
+
+typedef struct {
+ unsigned int fpc;
+ double fprs[__NUM_FPRS];
+} target_s390_fp_regs;
+
+typedef struct {
+ target_s390_regs_common regs;
+ target_s390_fp_regs fpregs;
+} target_sigregs;
+
+struct target_sigcontext {
+ target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS];
+ target_sigregs *sregs;
+};
+
+typedef struct {
+ uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
+ struct target_sigcontext sc;
+ target_sigregs sregs;
+ int signo;
+ uint8_t retcode[S390_SYSCALL_SIZE];
+} sigframe;
+
+struct target_ucontext {
+ target_ulong uc_flags;
+ struct target_ucontext *uc_link;
+ target_stack_t uc_stack;
+ target_sigregs uc_mcontext;
+ target_sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+typedef struct {
+ uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
+ uint8_t retcode[S390_SYSCALL_SIZE];
+ struct target_siginfo info;
+ struct target_ucontext uc;
+} rt_sigframe;
+
+static inline abi_ulong
+get_sigframe(struct target_sigaction *ka, CPUState *env, size_t frame_size)
+{
+ abi_ulong sp;
+
+ /* Default to using normal stack */
+ sp = env->regs[15];
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ if (ka->sa_flags & TARGET_SA_ONSTACK) {
+ if (!sas_ss_flags(sp)) {
+ sp = target_sigaltstack_used.ss_sp +
+ target_sigaltstack_used.ss_size;
+ }
+ }
+
+ /* This is the legacy signal stack switching. */
+ else if (/* FIXME !user_mode(regs) */ 0 &&
+ !(ka->sa_flags & TARGET_SA_RESTORER) &&
+ ka->sa_restorer) {
+ sp = (abi_ulong) ka->sa_restorer;
+ }
+
+ return (sp - frame_size) & -8ul;
+}
+
+static void save_sigregs(CPUState *env, target_sigregs *sregs)
+{
+ int i;
+ //save_access_regs(current->thread.acrs); FIXME
+
+ /* Copy a 'clean' PSW mask to the user to avoid leaking
+ information about whether PER is currently on. */
+ __put_user(env->psw.mask, &sregs->regs.psw.mask);
+ __put_user(env->psw.addr, &sregs->regs.psw.addr);
+ for (i = 0; i < 16; i++) {
+ __put_user(env->regs[i], &sregs->regs.gprs[i]);
+ }
+ for (i = 0; i < 16; i++) {
+ __put_user(env->aregs[i], &sregs->regs.acrs[i]);
+ }
+ /*
+ * We have to store the fp registers to current->thread.fp_regs
+ * to merge them with the emulated registers.
+ */
+ //save_fp_regs(&current->thread.fp_regs); FIXME
+ for (i = 0; i < 16; i++) {
+ __put_user(env->fregs[i].ll, &sregs->fpregs.fprs[i]);
+ }
+}
+
+static void setup_frame(int sig, struct target_sigaction *ka,
+ target_sigset_t *set, CPUState *env)
+{
+ sigframe *frame;
+ abi_ulong frame_addr;
+
+ frame_addr = get_sigframe(ka, env, sizeof(*frame));
+ qemu_log("%s: frame_addr 0x%llx\n", __FUNCTION__,
+ (unsigned long long)frame_addr);
+ if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
+ goto give_sigsegv;
+ }
+
+ qemu_log("%s: 1\n", __FUNCTION__);
+ if (__put_user(set->sig[0], &frame->sc.oldmask[0])) {
+ goto give_sigsegv;
+ }
+
+ save_sigregs(env, &frame->sregs);
+
+ __put_user((abi_ulong)(unsigned long)&frame->sregs,
+ (abi_ulong *)&frame->sc.sregs);
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ if (ka->sa_flags & TARGET_SA_RESTORER) {
+ env->regs[14] = (unsigned long)
+ ka->sa_restorer | PSW_ADDR_AMODE;
+ } else {
+ env->regs[14] = (unsigned long)
+ frame->retcode | PSW_ADDR_AMODE;
+ if (__put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
+ (uint16_t *)(frame->retcode)))
+ goto give_sigsegv;
+ }
+
+ /* Set up backchain. */
+ if (__put_user(env->regs[15], (abi_ulong *) frame)) {
+ goto give_sigsegv;
+ }
+
+ /* Set up registers for signal handler */
+ env->regs[15] = (target_ulong)(unsigned long) frame;
+ env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
+
+ env->regs[2] = sig; //map_signal(sig);
+ env->regs[3] = (target_ulong)(unsigned long) &frame->sc;
+
+ /* We forgot to include these in the sigcontext.
+ To avoid breaking binary compatibility, they are passed as args. */
+ env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no;
+ env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
+
+ /* Place signal number on stack to allow backtrace from handler. */
+ if (__put_user(env->regs[2], (int *) &frame->signo)) {
+ goto give_sigsegv;
+ }
+ unlock_user_struct(frame, frame_addr, 1);
+ return;
+
+give_sigsegv:
+ qemu_log("%s: give_sigsegv\n", __FUNCTION__);
+ unlock_user_struct(frame, frame_addr, 1);
+ force_sig(TARGET_SIGSEGV);
+}
+
+static void setup_rt_frame(int sig, struct target_sigaction *ka,
+ target_siginfo_t *info,
+ target_sigset_t *set, CPUState *env)
+{
+ int i;
+ rt_sigframe *frame;
+ abi_ulong frame_addr;
+
+ frame_addr = get_sigframe(ka, env, sizeof *frame);
+ qemu_log("%s: frame_addr 0x%llx\n", __FUNCTION__,
+ (unsigned long long)frame_addr);
+ if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
+ goto give_sigsegv;
+ }
+
+ qemu_log("%s: 1\n", __FUNCTION__);
+ if (copy_siginfo_to_user(&frame->info, info)) {
+ goto give_sigsegv;
+ }
+
+ /* Create the ucontext. */
+ __put_user(0, &frame->uc.uc_flags);
+ __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.uc_link);
+ __put_user(target_sigaltstack_used.ss_sp, &frame->uc.uc_stack.ss_sp);
+ __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
+ &frame->uc.uc_stack.ss_flags);
+ __put_user(target_sigaltstack_used.ss_size, &frame->uc.uc_stack.ss_size);
+ save_sigregs(env, &frame->uc.uc_mcontext);
+ for (i = 0; i < TARGET_NSIG_WORDS; i++) {
+ __put_user((abi_ulong)set->sig[i],
+ (abi_ulong *)&frame->uc.uc_sigmask.sig[i]);
+ }
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ if (ka->sa_flags & TARGET_SA_RESTORER) {
+ env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE;
+ } else {
+ env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE;
+ if (__put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
+ (uint16_t *)(frame->retcode))) {
+ goto give_sigsegv;
+ }
+ }
+
+ /* Set up backchain. */
+ if (__put_user(env->regs[15], (abi_ulong *) frame)) {
+ goto give_sigsegv;
+ }
+
+ /* Set up registers for signal handler */
+ env->regs[15] = (target_ulong)(unsigned long) frame;
+ env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
+
+ env->regs[2] = sig; //map_signal(sig);
+ env->regs[3] = (target_ulong)(unsigned long) &frame->info;
+ env->regs[4] = (target_ulong)(unsigned long) &frame->uc;
+ return;
+
+give_sigsegv:
+ qemu_log("%s: give_sigsegv\n", __FUNCTION__);
+ unlock_user_struct(frame, frame_addr, 1);
+ force_sig(TARGET_SIGSEGV);
+}
+
+static int
+restore_sigregs(CPUState *env, target_sigregs *sc)
+{
+ int err = 0;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ err |= __get_user(env->regs[i], &sc->regs.gprs[i]);
+ }
+
+ err |= __get_user(env->psw.mask, &sc->regs.psw.mask);
+ qemu_log("%s: sc->regs.psw.addr 0x%llx env->psw.addr 0x%llx\n",
+ __FUNCTION__, (unsigned long long)sc->regs.psw.addr,
+ (unsigned long long)env->psw.addr);
+ err |= __get_user(env->psw.addr, &sc->regs.psw.addr);
+ /* FIXME: 31-bit -> | PSW_ADDR_AMODE */
+
+ for (i = 0; i < 16; i++) {
+ err |= __get_user(env->aregs[i], &sc->regs.acrs[i]);
+ }
+ for (i = 0; i < 16; i++) {
+ err |= __get_user(env->fregs[i].ll, &sc->fpregs.fprs[i]);
+ }
+
+ return err;
+}
+
+long do_sigreturn(CPUState *env)
+{
+ sigframe *frame;
+ abi_ulong frame_addr = env->regs[15];
+ qemu_log("%s: frame_addr 0x%llx\n", __FUNCTION__,
+ (unsigned long long)frame_addr);
+ target_sigset_t target_set;
+ sigset_t set;
+
+ if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
+ goto badframe;
+ }
+ if (__get_user(target_set.sig[0], &frame->sc.oldmask[0])) {
+ goto badframe;
+ }
+
+ target_to_host_sigset_internal(&set, &target_set);
+ sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */
+
+ if (restore_sigregs(env, &frame->sregs)) {
+ goto badframe;
+ }
+
+ unlock_user_struct(frame, frame_addr, 0);
+ return env->regs[2];
+
+badframe:
+ unlock_user_struct(frame, frame_addr, 0);
+ force_sig(TARGET_SIGSEGV);
+ return 0;
+}
+
+long do_rt_sigreturn(CPUState *env)
+{
+ rt_sigframe *frame;
+ abi_ulong frame_addr = env->regs[15];
+ qemu_log("%s: frame_addr 0x%llx\n", __FUNCTION__,
+ (unsigned long long)frame_addr);
+ sigset_t set;
+
+ if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
+ goto badframe;
+ }
+ target_to_host_sigset(&set, &frame->uc.uc_sigmask);
+
+ sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */
+
+ if (restore_sigregs(env, &frame->uc.uc_mcontext)) {
+ goto badframe;
+ }
+
+ if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.uc_stack), 0,
+ get_sp_from_cpustate(env)) == -EFAULT) {
+ goto badframe;
+ }
+ unlock_user_struct(frame, frame_addr, 0);
+ return env->regs[2];
+
+badframe:
+ unlock_user_struct(frame, frame_addr, 0);
+ force_sig(TARGET_SIGSEGV);
+ return 0;
+}
+
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* FIXME: Many of the structures are defined for both PPC and PPC64, but
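The frame placement in get_sigframe() above boils down to one expression. A hedged standalone form (illustrative name; same 8-byte alignment the s390x ABI requires):

    #include <stddef.h>
    #include <stdint.h>

    /* Start from the current stack pointer (or the sigaltstack top
     * when SA_ONSTACK applies and we are not already on it), make room
     * for the frame, and round down to 8-byte alignment. */
    static uint64_t place_frame(uint64_t sp, size_t frame_size)
    {
        return (sp - frame_size) & ~7ULL;   /* same as "& -8ul" above */
    }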
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 6e7d88ec08..5cb27c7f9f 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -5548,7 +5548,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
ret = get_errno(settimeofday(&tv, NULL));
}
break;
-#ifdef TARGET_NR_select
+#if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
case TARGET_NR_select:
{
struct target_sel_arg_struct *sel;
@@ -5659,7 +5659,9 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
#endif
#ifdef TARGET_NR_mmap
case TARGET_NR_mmap:
-#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE)
+#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
+ defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
+ || defined(TARGET_S390X)
{
abi_ulong *v;
abi_ulong v1, v2, v3, v4, v5, v6;
@@ -6155,6 +6157,8 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#elif defined(TARGET_CRIS)
ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
+#elif defined(TARGET_S390X)
+ ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#endif
@@ -6363,8 +6367,12 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
}
break;
#endif /* TARGET_NR_getdents64 */
-#ifdef TARGET_NR__newselect
+#if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
+#ifdef TARGET_S390X
+ case TARGET_NR_select:
+#else
case TARGET_NR__newselect:
+#endif
ret = do_select(arg1, arg2, arg3, arg4, arg5);
break;
#endif
@@ -6681,7 +6689,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
case TARGET_NR_sigaltstack:
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
- defined(TARGET_M68K)
+ defined(TARGET_M68K) || defined(TARGET_S390X)
ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
break;
#else
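The swapped do_fork() arguments in the clone path are, presumably, down to the s390 kernel calling convention: judging from the swap shown above, the new stack pointer comes before the flags word and the child-tid/tls pair is likewise reversed relative to QEMU's default ordering. Sketched as a comment (orderings are an assumption inferred from the hunk, not taken from the source):

    /* QEMU's default do_fork() ordering (x86-style):
     *   do_fork(env, flags, new_sp, parent_tidptr, tls, child_tidptr)
     * apparent s390 guest-register ordering:
     *   clone(new_sp, flags, parent_tidptr, child_tidptr, tls)
     * hence do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4) above. */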
diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h
index e05ddf9120..04c268de7c 100644
--- a/linux-user/syscall_defs.h
+++ b/linux-user/syscall_defs.h
@@ -58,7 +58,8 @@
#endif
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SH4) \
- || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_UNICORE32)
+ || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_UNICORE32) \
+ || defined(TARGET_S390X)
#define TARGET_IOC_SIZEBITS 14
#define TARGET_IOC_DIRBITS 2
@@ -321,7 +322,8 @@ int do_sigaction(int sig, const struct target_sigaction *act,
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SPARC) \
|| defined(TARGET_PPC) || defined(TARGET_MIPS) || defined(TARGET_SH4) \
|| defined(TARGET_M68K) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) \
- || defined(TARGET_MICROBLAZE) || defined(TARGET_UNICORE32)
+ || defined(TARGET_MICROBLAZE) || defined(TARGET_UNICORE32) \
+ || defined(TARGET_S390X)
#if defined(TARGET_SPARC)
#define TARGET_SA_NOCLDSTOP 8u
@@ -1688,6 +1690,27 @@ struct target_stat {
abi_long __unused[3];
};
+#elif defined(TARGET_S390X)
+struct target_stat {
+ abi_ulong st_dev;
+ abi_ulong st_ino;
+ abi_ulong st_nlink;
+ unsigned int st_mode;
+ unsigned int st_uid;
+ unsigned int st_gid;
+ unsigned int __pad1;
+ abi_ulong st_rdev;
+ abi_ulong st_size;
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
+ abi_ulong st_blksize;
+ abi_long st_blocks;
+ abi_ulong __unused[3];
+};
#else
#error unsupported CPU
#endif
@@ -1774,6 +1797,34 @@ struct target_statfs64 {
abi_long f_frsize;
abi_long f_spare[5];
};
+#elif defined(TARGET_S390X)
+struct target_statfs {
+ int32_t f_type;
+ int32_t f_bsize;
+ abi_long f_blocks;
+ abi_long f_bfree;
+ abi_long f_bavail;
+ abi_long f_files;
+ abi_long f_ffree;
+ kernel_fsid_t f_fsid;
+ int32_t f_namelen;
+ int32_t f_frsize;
+ int32_t f_spare[5];
+};
+
+struct target_statfs64 {
+ int32_t f_type;
+ int32_t f_bsize;
+ abi_long f_blocks;
+ abi_long f_bfree;
+ abi_long f_bavail;
+ abi_long f_files;
+ abi_long f_ffree;
+ kernel_fsid_t f_fsid;
+ int32_t f_namelen;
+ int32_t f_frsize;
+ int32_t f_spare[5];
+};
#else
struct target_statfs {
uint32_t f_type;
diff --git a/scripts/qemu-binfmt-conf.sh b/scripts/qemu-binfmt-conf.sh
index c50beb7337..83a44d8e2a 100644
--- a/scripts/qemu-binfmt-conf.sh
+++ b/scripts/qemu-binfmt-conf.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# enable automatic i386/ARM/M68K/MIPS/SPARC/PPC program execution by the kernel
+# enable automatic i386/ARM/M68K/MIPS/SPARC/PPC/s390 program execution by the kernel
# load the binfmt_misc module
if [ ! -d /proc/sys/fs/binfmt_misc ]; then
@@ -63,4 +63,7 @@ fi
if [ $cpu != "sh" ] ; then
echo ':sh4:M::\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x2a\x00:\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/local/bin/qemu-sh4:' > /proc/sys/fs/binfmt_misc/register
echo ':sh4eb:M::\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x2a:\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-sh4eb:' > /proc/sys/fs/binfmt_misc/register
+fi
+if [ $cpu != "s390x" ] ; then
+ echo ':s390x:M::\x7fELF\x02\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x16:\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-s390x:' > /proc/sys/fs/binfmt_misc/register
fi
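The registration string can be decoded field by field against the ELF header: \x7fELF is the magic, the following \x02\x02\x01 select ELFCLASS64, ELFDATA2MSB (big-endian) and EI_VERSION 1, the \x00\x02 at offset 16 is e_type ET_EXEC, and the trailing \x00\x16 is e_machine 22, i.e. EM_S390, matching ELF_CLASS/ELF_DATA/ELF_ARCH in the elfload.c hunk above. The \xfe at offset 17 of the mask lets the low bit of e_type float, so both ET_EXEC (2) and ET_DYN (3) binaries match.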
diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h
index a84b3ee184..4e5c3917d3 100644
--- a/target-s390x/cpu.h
+++ b/target-s390x/cpu.h
@@ -67,7 +67,6 @@ typedef struct CPUS390XState {
PSW psw;
- uint32_t cc;
uint32_t cc_op;
uint64_t cc_src;
uint64_t cc_dst;
@@ -87,9 +86,12 @@ typedef struct CPUS390XState {
int pending_int;
ExtQueue ext_queue[MAX_EXT_QUEUE];
+ int ext_index;
+
+ CPU_COMMON
+
/* reset does memset(0) up to here */
- int ext_index;
int cpu_num;
uint8_t *storage_keys;
@@ -98,8 +100,6 @@ typedef struct CPUS390XState {
QEMUTimer *tod_timer;
QEMUTimer *cpu_timer;
-
- CPU_COMMON
} CPUS390XState;
#if defined(CONFIG_USER_ONLY)
@@ -287,12 +287,32 @@ int cpu_s390x_handle_mmu_fault (CPUS390XState *env, target_ulong address, int rw
#ifndef CONFIG_USER_ONLY
int s390_virtio_hypercall(CPUState *env, uint64_t mem, uint64_t hypercall);
+#ifdef CONFIG_KVM
void kvm_s390_interrupt(CPUState *env, int type, uint32_t code);
void kvm_s390_virtio_irq(CPUState *env, int config_change, uint64_t token);
void kvm_s390_interrupt_internal(CPUState *env, int type, uint32_t parm,
uint64_t parm64, int vm);
+#else
+static inline void kvm_s390_interrupt(CPUState *env, int type, uint32_t code)
+{
+}
+
+static inline void kvm_s390_virtio_irq(CPUState *env, int config_change,
+ uint64_t token)
+{
+}
+
+static inline void kvm_s390_interrupt_internal(CPUState *env, int type,
+ uint32_t parm, uint64_t parm64,
+ int vm)
+{
+}
+#endif
CPUState *s390_cpu_addr2state(uint16_t cpu_addr);
+/* from s390-virtio-bus */
+extern const target_phys_addr_t virtio_size;
+
#ifndef KVM_S390_SIGP_STOP
#define KVM_S390_SIGP_STOP 0
#define KVM_S390_PROGRAM_INT 0
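The field shuffle above exists so that cpu_reset() can clear all volatile state with a single memset up to the "reset does memset(0) up to here" marker, presumably via offsetof. A hedged sketch of the idiom with illustrative field names:

    #include <string.h>
    #include <stddef.h>

    /* Volatile per-run state sits at the top of the struct; persistent
     * configuration (cpu_num, storage_keys, timers, ...) below the
     * marker survives reset. */
    struct cpu_sketch {
        unsigned long psw_mask, psw_addr;
        unsigned long regs[16];
        int ext_index;
        /* end of reset region */
        int cpu_num;
        unsigned char *storage_keys;
    };

    static void reset_sketch(struct cpu_sketch *env)
    {
        memset(env, 0, offsetof(struct cpu_sketch, cpu_num));
    }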
diff --git a/target-s390x/helper.c b/target-s390x/helper.c
index 629dfd9708..c79af46693 100644
--- a/target-s390x/helper.c
+++ b/target-s390x/helper.c
@@ -2,6 +2,7 @@
* S/390 helpers
*
* Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2011 Alexander Graf
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -25,27 +26,107 @@
#include "exec-all.h"
#include "gdbstub.h"
#include "qemu-common.h"
+#include "qemu-timer.h"
+#if !defined(CONFIG_USER_ONLY)
#include <linux/kvm.h>
#include "kvm.h"
+#endif
+
+//#define DEBUG_S390
+//#define DEBUG_S390_PTE
+//#define DEBUG_S390_STDOUT
+
+#ifdef DEBUG_S390
+#ifdef DEBUG_S390_STDOUT
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, fmt, ## __VA_ARGS__); \
+ qemu_log(fmt, ##__VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
+#endif
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+#ifdef DEBUG_S390_PTE
+#define PTE_DPRINTF DPRINTF
+#else
+#define PTE_DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+#ifndef CONFIG_USER_ONLY
+static void s390x_tod_timer(void *opaque)
+{
+ CPUState *env = opaque;
+
+ env->pending_int |= INTERRUPT_TOD;
+ cpu_interrupt(env, CPU_INTERRUPT_HARD);
+}
+
+static void s390x_cpu_timer(void *opaque)
+{
+ CPUState *env = opaque;
+
+ env->pending_int |= INTERRUPT_CPUTIMER;
+ cpu_interrupt(env, CPU_INTERRUPT_HARD);
+}
+#endif
CPUS390XState *cpu_s390x_init(const char *cpu_model)
{
CPUS390XState *env;
+#if !defined (CONFIG_USER_ONLY)
+ struct tm tm;
+#endif
static int inited = 0;
+ static int cpu_num = 0;
env = qemu_mallocz(sizeof(CPUS390XState));
cpu_exec_init(env);
if (!inited) {
inited = 1;
+ s390x_translate_init();
}
+#if !defined(CONFIG_USER_ONLY)
+ qemu_get_timedate(&tm, 0);
+ env->tod_offset = TOD_UNIX_EPOCH +
+ (time2tod(mktimegm(&tm)) * 1000000000ULL);
+ env->tod_basetime = 0;
+ env->tod_timer = qemu_new_timer_ns(vm_clock, s390x_tod_timer, env);
+ env->cpu_timer = qemu_new_timer_ns(vm_clock, s390x_cpu_timer, env);
+#endif
env->cpu_model_str = cpu_model;
+ env->cpu_num = cpu_num++;
+ env->ext_index = -1;
cpu_reset(env);
qemu_init_vcpu(env);
return env;
}
+#if defined(CONFIG_USER_ONLY)
+
+void do_interrupt (CPUState *env)
+{
+ env->exception_index = -1;
+}
+
+int cpu_s390x_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
+ int mmu_idx, int is_softmmu)
+{
+ /* fprintf(stderr,"%s: address 0x%lx rw %d mmu_idx %d is_softmmu %d\n",
+ __FUNCTION__, address, rw, mmu_idx, is_softmmu); */
+ env->exception_index = EXCP_ADDR;
+ env->__excp_addr = address; /* FIXME: find out how this works on a real machine */
+ return 1;
+}
+
+#endif /* CONFIG_USER_ONLY */
+
void cpu_reset(CPUS390XState *env)
{
if (qemu_loglevel_mask(CPU_LOG_RESET)) {
@@ -58,31 +139,495 @@ void cpu_reset(CPUS390XState *env)
tlb_flush(env, 1);
}
-target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
+#ifndef CONFIG_USER_ONLY
+
+/* Ensure to exit the TB after this call! */
+static void trigger_pgm_exception(CPUState *env, uint32_t code, uint32_t ilc)
+{
+ env->exception_index = EXCP_PGM;
+ env->int_pgm_code = code;
+ env->int_pgm_ilc = ilc;
+}
+
+static int trans_bits(CPUState *env, uint64_t mode)
+{
+ int bits = 0;
+
+ switch (mode) {
+ case PSW_ASC_PRIMARY:
+ bits = 1;
+ break;
+ case PSW_ASC_SECONDARY:
+ bits = 2;
+ break;
+ case PSW_ASC_HOME:
+ bits = 3;
+ break;
+ default:
+ cpu_abort(env, "unknown asc mode\n");
+ break;
+ }
+
+ return bits;
+}
+
+static void trigger_prot_fault(CPUState *env, target_ulong vaddr, uint64_t mode)
+{
+ int ilc = ILC_LATER_INC_2;
+ int bits = trans_bits(env, mode) | 4;
+
+ DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __FUNCTION__, vaddr, bits);
+
+ stq_phys(env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits);
+ trigger_pgm_exception(env, PGM_PROTECTION, ilc);
+}
+
+static void trigger_page_fault(CPUState *env, target_ulong vaddr, uint32_t type,
+ uint64_t asc, int rw)
+{
+ int ilc = ILC_LATER;
+ int bits = trans_bits(env, asc);
+
+ if (rw == 2) {
+ /* code accesses have an undefined ilc */
+ ilc = 2;
+ }
+
+ DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __FUNCTION__, vaddr, bits);
+
+ stq_phys(env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits);
+ trigger_pgm_exception(env, type, ilc);
+}
+
+static int mmu_translate_asce(CPUState *env, target_ulong vaddr, uint64_t asc,
+ uint64_t asce, int level, target_ulong *raddr,
+ int *flags, int rw)
{
+ uint64_t offs = 0;
+ uint64_t origin;
+ uint64_t new_asce;
+
+ PTE_DPRINTF("%s: 0x%" PRIx64 "\n", __FUNCTION__, asce);
+
+ if (((level != _ASCE_TYPE_SEGMENT) && (asce & _REGION_ENTRY_INV)) ||
+ ((level == _ASCE_TYPE_SEGMENT) && (asce & _SEGMENT_ENTRY_INV))) {
+ /* XXX different regions have different faults */
+ DPRINTF("%s: invalid region\n", __FUNCTION__);
+ trigger_page_fault(env, vaddr, PGM_SEGMENT_TRANS, asc, rw);
+ return -1;
+ }
+
+ if ((level <= _ASCE_TYPE_MASK) && ((asce & _ASCE_TYPE_MASK) != level)) {
+ trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
+ return -1;
+ }
+
+ if (asce & _ASCE_REAL_SPACE) {
+ /* direct mapping */
+
+ *raddr = vaddr;
+ return 0;
+ }
+
+ origin = asce & _ASCE_ORIGIN;
+
+ switch (level) {
+ case _ASCE_TYPE_REGION1 + 4:
+ offs = (vaddr >> 50) & 0x3ff8;
+ break;
+ case _ASCE_TYPE_REGION1:
+ offs = (vaddr >> 39) & 0x3ff8;
+ break;
+ case _ASCE_TYPE_REGION2:
+ offs = (vaddr >> 28) & 0x3ff8;
+ break;
+ case _ASCE_TYPE_REGION3:
+ offs = (vaddr >> 17) & 0x3ff8;
+ break;
+ case _ASCE_TYPE_SEGMENT:
+ offs = (vaddr >> 9) & 0x07f8;
+ origin = asce & _SEGMENT_ENTRY_ORIGIN;
+ break;
+ }
+
+ /* XXX region protection flags */
+ /* *flags &= ~PAGE_WRITE */
+
+ new_asce = ldq_phys(origin + offs);
+ PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
+ __FUNCTION__, origin, offs, new_asce);
+
+ if (level != _ASCE_TYPE_SEGMENT) {
+ /* yet another region */
+ return mmu_translate_asce(env, vaddr, asc, new_asce, level - 4, raddr,
+ flags, rw);
+ }
+
+ /* PTE */
+ if (new_asce & _PAGE_INVALID) {
+ DPRINTF("%s: PTE=0x%" PRIx64 " invalid\n", __FUNCTION__, new_asce);
+ trigger_page_fault(env, vaddr, PGM_PAGE_TRANS, asc, rw);
+ return -1;
+ }
+
+ if (new_asce & _PAGE_RO) {
+ *flags &= ~PAGE_WRITE;
+ }
+
+ *raddr = new_asce & _ASCE_ORIGIN;
+
+ PTE_DPRINTF("%s: PTE=0x%" PRIx64 "\n", __FUNCTION__, new_asce);
+
return 0;
}
-#ifndef CONFIG_USER_ONLY
+static int mmu_translate_asc(CPUState *env, target_ulong vaddr, uint64_t asc,
+ target_ulong *raddr, int *flags, int rw)
+{
+ uint64_t asce = 0;
+ int level, new_level;
+ int r;
-int cpu_s390x_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
+ switch (asc) {
+ case PSW_ASC_PRIMARY:
+ PTE_DPRINTF("%s: asc=primary\n", __FUNCTION__);
+ asce = env->cregs[1];
+ break;
+ case PSW_ASC_SECONDARY:
+ PTE_DPRINTF("%s: asc=secondary\n", __FUNCTION__);
+ asce = env->cregs[7];
+ break;
+ case PSW_ASC_HOME:
+ PTE_DPRINTF("%s: asc=home\n", __FUNCTION__);
+ asce = env->cregs[13];
+ break;
+ }
+
+ switch (asce & _ASCE_TYPE_MASK) {
+ case _ASCE_TYPE_REGION1:
+ break;
+ case _ASCE_TYPE_REGION2:
+ if (vaddr & 0xffe0000000000000ULL) {
+ DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
+ " 0xffe0000000000000ULL\n", __FUNCTION__,
+ vaddr);
+ trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
+ return -1;
+ }
+ break;
+ case _ASCE_TYPE_REGION3:
+ if (vaddr & 0xfffffc0000000000ULL) {
+ DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
+ " 0xfffffc0000000000ULL\n", __FUNCTION__,
+ vaddr);
+ trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
+ return -1;
+ }
+ break;
+ case _ASCE_TYPE_SEGMENT:
+ if (vaddr & 0xffffffff80000000ULL) {
+ DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
+ " 0xffffffff80000000ULL\n", __FUNCTION__,
+ vaddr);
+ trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
+ return -1;
+ }
+ break;
+ }
+
+ /* fake level above current */
+ level = asce & _ASCE_TYPE_MASK;
+ new_level = level + 4;
+ asce = (asce & ~_ASCE_TYPE_MASK) | (new_level & _ASCE_TYPE_MASK);
+
+ r = mmu_translate_asce(env, vaddr, asc, asce, new_level, raddr, flags, rw);
+
+ if ((rw == 1) && !(*flags & PAGE_WRITE)) {
+ trigger_prot_fault(env, vaddr, asc);
+ return -1;
+ }
+
+ return r;
+}
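+
+/* a worked example of the "fake level above current" biasing above (a
+ sketch, assuming the usual s390 DT encoding where _ASCE_TYPE_SEGMENT <
+ _ASCE_TYPE_REGION3 < _ASCE_TYPE_REGION2 < _ASCE_TYPE_REGION1 in steps
+ of 4): a segment-table ASCE enters mmu_translate_asce() with new_level
+ == _ASCE_TYPE_REGION3, whose case extracts the segment index
+ ((vaddr >> 17) & 0x3ff8); the recursion with level - 4 then descends
+ exactly one table per call until the page table entry is reached. */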
+
+int mmu_translate(CPUState *env, target_ulong vaddr, int rw, uint64_t asc,
+ target_ulong *raddr, int *flags)
+{
+ int r = -1;
+
+ *flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ vaddr &= TARGET_PAGE_MASK;
+
+ if (!(env->psw.mask & PSW_MASK_DAT)) {
+ *raddr = vaddr;
+ r = 0;
+ goto out;
+ }
+
+ switch (asc) {
+ case PSW_ASC_PRIMARY:
+ case PSW_ASC_HOME:
+ r = mmu_translate_asc(env, vaddr, asc, raddr, flags, rw);
+ break;
+ case PSW_ASC_SECONDARY:
+ /*
+ * Instruction: Primary
+ * Data: Secondary
+ */
+ if (rw == 2) {
+ r = mmu_translate_asc(env, vaddr, PSW_ASC_PRIMARY, raddr, flags,
+ rw);
+ *flags &= ~(PAGE_READ | PAGE_WRITE);
+ } else {
+ r = mmu_translate_asc(env, vaddr, PSW_ASC_SECONDARY, raddr, flags,
+ rw);
+ *flags &= ~(PAGE_EXEC);
+ }
+ break;
+ case PSW_ASC_ACCREG:
+ default:
+ hw_error("guest switched to unknown asc mode\n");
+ break;
+ }
+
+out:
+ /* Convert real address -> absolute address */
+ if (*raddr < 0x2000) {
+ *raddr = *raddr + env->psa;
+ }
+
+ return r;
+}
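+
+/* prefixing example for the real -> absolute conversion above: with
+ env->psa == 0x10000, real address 0x1000 becomes absolute 0x11000.
+ note that this is only the forward half of architectural prefixing,
+ which also maps [psa, psa + 0x2000) back down to [0, 0x2000); the
+ reverse direction is not handled here. */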
+
+int cpu_s390x_handle_mmu_fault (CPUState *env, target_ulong _vaddr, int rw,
int mmu_idx, int is_softmmu)
{
- target_ulong phys;
+ uint64_t asc = env->psw.mask & PSW_MASK_ASC;
+ target_ulong vaddr, raddr;
int prot;
- /* XXX: implement mmu */
+ DPRINTF("%s: address 0x%" PRIx64 " rw %d mmu_idx %d is_softmmu %d\n",
+ __FUNCTION__, _vaddr, rw, mmu_idx, is_softmmu);
+
+ _vaddr &= TARGET_PAGE_MASK;
+ vaddr = _vaddr;
+
+ /* 31-Bit mode */
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ vaddr &= 0x7fffffff;
+ }
+
+ if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot)) {
+ /* Translation ended in exception */
+ return 1;
+ }
- phys = address;
- prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ /* check out of RAM access */
+ if (raddr > (ram_size + virtio_size)) {
+ DPRINTF("%s: aaddr %" PRIx64 " > ram_size %" PRIx64 "\n", __FUNCTION__,
+ (uint64_t)aaddr, (uint64_t)ram_size);
+ trigger_pgm_exception(env, PGM_ADDRESSING, ILC_LATER);
+ return 1;
+ }
- tlb_set_page(env, address & TARGET_PAGE_MASK,
- phys & TARGET_PAGE_MASK, prot,
+ DPRINTF("%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n", __FUNCTION__,
+ (uint64_t)vaddr, (uint64_t)raddr, prot);
+
+ tlb_set_page(env, _vaddr, raddr, prot,
mmu_idx, TARGET_PAGE_SIZE);
+
return 0;
}
-#endif /* CONFIG_USER_ONLY */
+
+target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong vaddr)
+{
+ target_ulong raddr;
+ int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ int old_exc = env->exception_index;
+ uint64_t asc = env->psw.mask & PSW_MASK_ASC;
+
+ /* 31-Bit mode */
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ vaddr &= 0x7fffffff;
+ }
+
+ mmu_translate(env, vaddr, 2, asc, &raddr, &prot);
+ env->exception_index = old_exc;
+
+ return raddr;
+}
+
+void load_psw(CPUState *env, uint64_t mask, uint64_t addr)
+{
+ if (mask & PSW_MASK_WAIT) {
+ env->halted = 1;
+ env->exception_index = EXCP_HLT;
+ if (!(mask & (PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK))) {
+ /* XXX disabled wait state - CPU is dead */
+ }
+ }
+
+ env->psw.addr = addr;
+ env->psw.mask = mask;
+ env->cc_op = (mask >> 44) & 3;
+}
+
+static uint64_t get_psw_mask(CPUState *env)
+{
+ uint64_t r = env->psw.mask;
+
+ env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, env->cc_vr);
+
+ r &= ~(3ULL << 44);
+ assert(!(env->cc_op & ~3));
+ r |= (uint64_t)env->cc_op << 44;
+
+ return r;
+}
+
+static void do_svc_interrupt(CPUState *env)
+{
+ uint64_t mask, addr;
+ LowCore *lowcore;
+ target_phys_addr_t len = TARGET_PAGE_SIZE;
+
+ lowcore = cpu_physical_memory_map(env->psa, &len, 1);
+
+ lowcore->svc_code = cpu_to_be16(env->int_svc_code);
+ lowcore->svc_ilc = cpu_to_be16(env->int_svc_ilc);
+ lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+ lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + (env->int_svc_ilc));
+ mask = be64_to_cpu(lowcore->svc_new_psw.mask);
+ addr = be64_to_cpu(lowcore->svc_new_psw.addr);
+
+ cpu_physical_memory_unmap(lowcore, len, 1, len);
+
+ load_psw(env, mask, addr);
+}
+
+static void do_program_interrupt(CPUState *env)
+{
+ uint64_t mask, addr;
+ LowCore *lowcore;
+ target_phys_addr_t len = TARGET_PAGE_SIZE;
+ int ilc = env->int_pgm_ilc;
+
+ switch (ilc) {
+ case ILC_LATER:
+ ilc = get_ilc(ldub_code(env->psw.addr));
+ break;
+ case ILC_LATER_INC:
+ ilc = get_ilc(ldub_code(env->psw.addr));
+ env->psw.addr += ilc * 2;
+ break;
+ case ILC_LATER_INC_2:
+ ilc = get_ilc(ldub_code(env->psw.addr)) * 2;
+ env->psw.addr += ilc;
+ break;
+ }
+
+ qemu_log("%s: code=0x%x ilc=%d\n", __FUNCTION__, env->int_pgm_code, ilc);
+
+ lowcore = cpu_physical_memory_map(env->psa, &len, 1);
+
+ lowcore->pgm_ilc = cpu_to_be16(ilc);
+ lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
+ lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+ lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
+ mask = be64_to_cpu(lowcore->program_new_psw.mask);
+ addr = be64_to_cpu(lowcore->program_new_psw.addr);
+
+ cpu_physical_memory_unmap(lowcore, len, 1, len);
+
+ DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __FUNCTION__,
+ env->int_pgm_code, ilc, env->psw.mask,
+ env->psw.addr);
+
+ load_psw(env, mask, addr);
+}
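+
+/* ilc sketch: s390 encodes the instruction length in the two top bits of
+ the first opcode byte (00 -> 1 halfword, 01/10 -> 2, 11 -> 3), so,
+ assuming get_ilc() returns that halfword count, a program exception on
+ the 4-byte opcode 0xb2xx with ILC_LATER_INC stores ilc == 2 and advances
+ psw.addr by ilc * 2 == 4 bytes past the failing instruction. */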
+
+#define VIRTIO_SUBCODE_64 0x0D00
+
+static void do_ext_interrupt(CPUState *env)
+{
+ uint64_t mask, addr;
+ LowCore *lowcore;
+ target_phys_addr_t len = TARGET_PAGE_SIZE;
+ ExtQueue *q;
+
+ if (!(env->psw.mask & PSW_MASK_EXT)) {
+ cpu_abort(env, "Ext int w/o ext mask\n");
+ }
+
+ if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
+ cpu_abort(env, "Ext queue overrun: %d\n", env->ext_index);
+ }
+
+ q = &env->ext_queue[env->ext_index];
+ lowcore = cpu_physical_memory_map(env->psa, &len, 1);
+
+ lowcore->ext_int_code = cpu_to_be16(q->code);
+ lowcore->ext_params = cpu_to_be32(q->param);
+ lowcore->ext_params2 = cpu_to_be64(q->param64);
+ lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+ lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
+ lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
+ mask = be64_to_cpu(lowcore->external_new_psw.mask);
+ addr = be64_to_cpu(lowcore->external_new_psw.addr);
+
+ cpu_physical_memory_unmap(lowcore, len, 1, len);
+
+ env->ext_index--;
+ if (env->ext_index == -1) {
+ env->pending_int &= ~INTERRUPT_EXT;
+ }
+
+ DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __FUNCTION__,
+ env->psw.mask, env->psw.addr);
+
+ load_psw(env, mask, addr);
+}
void do_interrupt (CPUState *env)
{
+ qemu_log("%s: %d at pc=%" PRIx64 "\n", __FUNCTION__, env->exception_index,
+ env->psw.addr);
+
+ /* handle external interrupts */
+ if ((env->psw.mask & PSW_MASK_EXT) &&
+ env->exception_index == -1) {
+ if (env->pending_int & INTERRUPT_EXT) {
+ /* code is already in env */
+ env->exception_index = EXCP_EXT;
+ } else if (env->pending_int & INTERRUPT_TOD) {
+ cpu_inject_ext(env, 0x1004, 0, 0);
+ env->exception_index = EXCP_EXT;
+ env->pending_int &= ~INTERRUPT_EXT;
+ env->pending_int &= ~INTERRUPT_TOD;
+ } else if (env->pending_int & INTERRUPT_CPUTIMER) {
+ cpu_inject_ext(env, 0x1005, 0, 0);
+ env->exception_index = EXCP_EXT;
+ env->pending_int &= ~INTERRUPT_EXT;
+ env->pending_int &= ~INTERRUPT_CPUTIMER;
+ }
+ }
+
+ switch (env->exception_index) {
+ case EXCP_PGM:
+ do_program_interrupt(env);
+ break;
+ case EXCP_SVC:
+ do_svc_interrupt(env);
+ break;
+ case EXCP_EXT:
+ do_ext_interrupt(env);
+ break;
+ }
+ env->exception_index = -1;
+
+ if (!env->pending_int) {
+ env->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ }
}
+
+#endif /* CONFIG_USER_ONLY */
diff --git a/target-s390x/helpers.h b/target-s390x/helpers.h
new file mode 100644
index 0000000000..6ca48ebe00
--- /dev/null
+++ b/target-s390x/helpers.h
@@ -0,0 +1,151 @@
+#include "def-helper.h"
+
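+/* roughly, def-helper.h expands every DEF_HELPER_n(name, ret, args...)
+ entry below into a helper_name() prototype for op_helper.c and, when
+ re-included from the translator with GEN_HELPER defined, into a
+ gen_helper_name() wrapper that emits the TCG call; e.g.
+ DEF_HELPER_2(ipm, void, i32, i32) yields
+ void helper_ipm(uint32_t cc, uint32_t r1). (a sketch of the common
+ QEMU convention; see def-helper.h for the exact macros.) */
+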
+DEF_HELPER_1(exception, void, i32)
+DEF_HELPER_3(nc, i32, i32, i64, i64)
+DEF_HELPER_3(oc, i32, i32, i64, i64)
+DEF_HELPER_3(xc, i32, i32, i64, i64)
+DEF_HELPER_3(mvc, void, i32, i64, i64)
+DEF_HELPER_3(clc, i32, i32, i64, i64)
+DEF_HELPER_2(mvcl, i32, i32, i32)
+DEF_HELPER_FLAGS_1(set_cc_comp_s32, TCG_CALL_PURE|TCG_CALL_CONST, i32, s32)
+DEF_HELPER_FLAGS_1(set_cc_comp_s64, TCG_CALL_PURE|TCG_CALL_CONST, i32, s64)
+DEF_HELPER_FLAGS_2(set_cc_icm, TCG_CALL_PURE|TCG_CALL_CONST, i32, i32, i32)
+DEF_HELPER_3(clm, i32, i32, i32, i64)
+DEF_HELPER_3(stcm, void, i32, i32, i64)
+DEF_HELPER_2(mlg, void, i32, i64)
+DEF_HELPER_2(dlg, void, i32, i64)
+DEF_HELPER_FLAGS_3(set_cc_add64, TCG_CALL_PURE|TCG_CALL_CONST, i32, s64, s64, s64)
+DEF_HELPER_FLAGS_3(set_cc_addu64, TCG_CALL_PURE|TCG_CALL_CONST, i32, i64, i64, i64)
+DEF_HELPER_FLAGS_3(set_cc_add32, TCG_CALL_PURE|TCG_CALL_CONST, i32, s32, s32, s32)
+DEF_HELPER_FLAGS_3(set_cc_addu32, TCG_CALL_PURE|TCG_CALL_CONST, i32, i32, i32, i32)
+DEF_HELPER_FLAGS_3(set_cc_sub64, TCG_CALL_PURE|TCG_CALL_CONST, i32, s64, s64, s64)
+DEF_HELPER_FLAGS_3(set_cc_subu64, TCG_CALL_PURE|TCG_CALL_CONST, i32, i64, i64, i64)
+DEF_HELPER_FLAGS_3(set_cc_sub32, TCG_CALL_PURE|TCG_CALL_CONST, i32, s32, s32, s32)
+DEF_HELPER_FLAGS_3(set_cc_subu32, TCG_CALL_PURE|TCG_CALL_CONST, i32, i32, i32, i32)
+DEF_HELPER_3(srst, i32, i32, i32, i32)
+DEF_HELPER_3(clst, i32, i32, i32, i32)
+DEF_HELPER_3(mvpg, void, i64, i64, i64)
+DEF_HELPER_3(mvst, void, i32, i32, i32)
+DEF_HELPER_3(csg, i32, i32, i64, i32)
+DEF_HELPER_3(cdsg, i32, i32, i64, i32)
+DEF_HELPER_3(cs, i32, i32, i64, i32)
+DEF_HELPER_4(ex, i32, i32, i64, i64, i64)
+DEF_HELPER_FLAGS_1(abs_i32, TCG_CALL_PURE|TCG_CALL_CONST, i32, s32)
+DEF_HELPER_FLAGS_1(nabs_i32, TCG_CALL_PURE|TCG_CALL_CONST, s32, s32)
+DEF_HELPER_FLAGS_1(abs_i64, TCG_CALL_PURE|TCG_CALL_CONST, i64, s64)
+DEF_HELPER_FLAGS_1(nabs_i64, TCG_CALL_PURE|TCG_CALL_CONST, s64, s64)
+DEF_HELPER_3(stcmh, void, i32, i64, i32)
+DEF_HELPER_3(icmh, i32, i32, i64, i32)
+DEF_HELPER_2(ipm, void, i32, i32)
+DEF_HELPER_FLAGS_3(addc_u32, TCG_CALL_PURE|TCG_CALL_CONST, i32, i32, i32, i32)
+DEF_HELPER_FLAGS_3(set_cc_addc_u64, TCG_CALL_PURE|TCG_CALL_CONST, i32, i64, i64, i64)
+DEF_HELPER_3(stam, void, i32, i64, i32)
+DEF_HELPER_3(lam, void, i32, i64, i32)
+DEF_HELPER_3(mvcle, i32, i32, i64, i32)
+DEF_HELPER_3(clcle, i32, i32, i64, i32)
+DEF_HELPER_3(slb, i32, i32, i32, i32)
+DEF_HELPER_4(slbg, i32, i32, i32, i64, i64)
+DEF_HELPER_2(cefbr, void, i32, s32)
+DEF_HELPER_2(cdfbr, void, i32, s32)
+DEF_HELPER_2(cxfbr, void, i32, s32)
+DEF_HELPER_2(cegbr, void, i32, s64)
+DEF_HELPER_2(cdgbr, void, i32, s64)
+DEF_HELPER_2(cxgbr, void, i32, s64)
+DEF_HELPER_2(adbr, i32, i32, i32)
+DEF_HELPER_2(aebr, i32, i32, i32)
+DEF_HELPER_2(sebr, i32, i32, i32)
+DEF_HELPER_2(sdbr, i32, i32, i32)
+DEF_HELPER_2(debr, void, i32, i32)
+DEF_HELPER_2(dxbr, void, i32, i32)
+DEF_HELPER_2(mdbr, void, i32, i32)
+DEF_HELPER_2(mxbr, void, i32, i32)
+DEF_HELPER_2(ldebr, void, i32, i32)
+DEF_HELPER_2(ldxbr, void, i32, i32)
+DEF_HELPER_2(lxdbr, void, i32, i32)
+DEF_HELPER_2(ledbr, void, i32, i32)
+DEF_HELPER_2(lexbr, void, i32, i32)
+DEF_HELPER_2(lpebr, i32, i32, i32)
+DEF_HELPER_2(lpdbr, i32, i32, i32)
+DEF_HELPER_2(lpxbr, i32, i32, i32)
+DEF_HELPER_2(ltebr, i32, i32, i32)
+DEF_HELPER_2(ltdbr, i32, i32, i32)
+DEF_HELPER_2(ltxbr, i32, i32, i32)
+DEF_HELPER_2(lcebr, i32, i32, i32)
+DEF_HELPER_2(lcdbr, i32, i32, i32)
+DEF_HELPER_2(lcxbr, i32, i32, i32)
+DEF_HELPER_2(aeb, void, i32, i32)
+DEF_HELPER_2(deb, void, i32, i32)
+DEF_HELPER_2(meeb, void, i32, i32)
+DEF_HELPER_2(cdb, i32, i32, i64)
+DEF_HELPER_2(adb, i32, i32, i64)
+DEF_HELPER_2(seb, void, i32, i32)
+DEF_HELPER_2(sdb, i32, i32, i64)
+DEF_HELPER_2(mdb, void, i32, i64)
+DEF_HELPER_2(ddb, void, i32, i64)
+DEF_HELPER_FLAGS_2(cebr, TCG_CALL_PURE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(cdbr, TCG_CALL_PURE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(cxbr, TCG_CALL_PURE, i32, i32, i32)
+DEF_HELPER_3(cgebr, i32, i32, i32, i32)
+DEF_HELPER_3(cgdbr, i32, i32, i32, i32)
+DEF_HELPER_3(cgxbr, i32, i32, i32, i32)
+DEF_HELPER_1(lzer, void, i32)
+DEF_HELPER_1(lzdr, void, i32)
+DEF_HELPER_1(lzxr, void, i32)
+DEF_HELPER_3(cfebr, i32, i32, i32, i32)
+DEF_HELPER_3(cfdbr, i32, i32, i32, i32)
+DEF_HELPER_3(cfxbr, i32, i32, i32, i32)
+DEF_HELPER_2(axbr, i32, i32, i32)
+DEF_HELPER_2(sxbr, i32, i32, i32)
+DEF_HELPER_2(meebr, void, i32, i32)
+DEF_HELPER_2(ddbr, void, i32, i32)
+DEF_HELPER_3(madb, void, i32, i64, i32)
+DEF_HELPER_3(maebr, void, i32, i32, i32)
+DEF_HELPER_3(madbr, void, i32, i32, i32)
+DEF_HELPER_3(msdbr, void, i32, i32, i32)
+DEF_HELPER_2(lxdb, void, i32, i64)
+DEF_HELPER_FLAGS_2(tceb, TCG_CALL_PURE, i32, i32, i64)
+DEF_HELPER_FLAGS_2(tcdb, TCG_CALL_PURE, i32, i32, i64)
+DEF_HELPER_FLAGS_2(tcxb, TCG_CALL_PURE, i32, i32, i64)
+DEF_HELPER_2(flogr, i32, i32, i64)
+DEF_HELPER_2(sqdbr, void, i32, i32)
+DEF_HELPER_FLAGS_1(cvd, TCG_CALL_PURE|TCG_CALL_CONST, i64, s32)
+DEF_HELPER_3(unpk, void, i32, i64, i64)
+DEF_HELPER_3(tr, void, i32, i64, i64)
+
+DEF_HELPER_2(servc, i32, i32, i64)
+DEF_HELPER_3(diag, i64, i32, i64, i64)
+DEF_HELPER_2(load_psw, void, i64, i64)
+DEF_HELPER_1(program_interrupt, void, i32)
+DEF_HELPER_FLAGS_1(stidp, TCG_CALL_CONST, void, i64)
+DEF_HELPER_FLAGS_1(spx, TCG_CALL_CONST, void, i64)
+DEF_HELPER_FLAGS_1(sck, TCG_CALL_CONST, i32, i64)
+DEF_HELPER_1(stck, i32, i64)
+DEF_HELPER_1(stcke, i32, i64)
+DEF_HELPER_FLAGS_1(sckc, TCG_CALL_CONST, void, i64)
+DEF_HELPER_FLAGS_1(stckc, TCG_CALL_CONST, void, i64)
+DEF_HELPER_FLAGS_1(spt, TCG_CALL_CONST, void, i64)
+DEF_HELPER_FLAGS_1(stpt, TCG_CALL_CONST, void, i64)
+DEF_HELPER_3(stsi, i32, i64, i32, i32)
+DEF_HELPER_3(lctl, void, i32, i64, i32)
+DEF_HELPER_3(lctlg, void, i32, i64, i32)
+DEF_HELPER_3(stctl, void, i32, i64, i32)
+DEF_HELPER_3(stctg, void, i32, i64, i32)
+DEF_HELPER_FLAGS_2(tprot, TCG_CALL_CONST, i32, i64, i64)
+DEF_HELPER_FLAGS_1(iske, TCG_CALL_PURE|TCG_CALL_CONST, i64, i64)
+DEF_HELPER_FLAGS_2(sske, TCG_CALL_CONST, void, i32, i64)
+DEF_HELPER_FLAGS_2(rrbe, TCG_CALL_CONST, i32, i32, i64)
+DEF_HELPER_2(csp, i32, i32, i32)
+DEF_HELPER_3(mvcs, i32, i64, i64, i64)
+DEF_HELPER_3(mvcp, i32, i64, i64, i64)
+DEF_HELPER_3(sigp, i32, i64, i32, i64)
+DEF_HELPER_1(sacf, void, i64)
+DEF_HELPER_FLAGS_2(ipte, TCG_CALL_CONST, void, i64, i64)
+DEF_HELPER_FLAGS_0(ptlb, TCG_CALL_CONST, void)
+DEF_HELPER_2(lra, i32, i64, i32)
+DEF_HELPER_2(stura, void, i64, i32)
+DEF_HELPER_2(cksm, void, i32, i32)
+
+DEF_HELPER_FLAGS_4(calc_cc, TCG_CALL_PURE|TCG_CALL_CONST,
+ i32, i32, i64, i64, i64)
+
+#include "def-helper.h"
diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c
index 2643460722..4beb794cca 100644
--- a/target-s390x/kvm.c
+++ b/target-s390x/kvm.c
@@ -49,13 +49,6 @@
#define DIAG_KVM_HYPERCALL 0x500
#define DIAG_KVM_BREAKPOINT 0x501
-#define SCP_LENGTH 0x00
-#define SCP_FUNCTION_CODE 0x02
-#define SCP_CONTROL_MASK 0x03
-#define SCP_RESPONSE_CODE 0x06
-#define SCP_MEM_CODE 0x08
-#define SCP_INCREMENT 0x0a
-
#define ICPT_INSTRUCTION 0x04
#define ICPT_WAITPSW 0x1c
#define ICPT_SOFT_INTERCEPT 0x24
@@ -179,7 +172,7 @@ void kvm_arch_post_run(CPUState *env, struct kvm_run *run)
int kvm_arch_process_async_events(CPUState *env)
{
- return 0;
+ return env->halted;
}
void kvm_s390_interrupt_internal(CPUState *env, int type, uint32_t parm,
@@ -228,9 +221,9 @@ static void enter_pgmcheck(CPUState *env, uint16_t code)
kvm_s390_interrupt(env, KVM_S390_PROGRAM_INT, code);
}
-static void setcc(CPUState *env, uint64_t cc)
+static inline void setcc(CPUState *env, uint64_t cc)
{
- env->kvm_run->psw_mask &= ~(3ul << 44);
+ env->kvm_run->psw_mask &= ~(3ull << 44);
env->kvm_run->psw_mask |= (cc & 3) << 44;
env->psw.mask &= ~(3ul << 44);
@@ -248,35 +241,11 @@ static int kvm_sclp_service_call(CPUState *env, struct kvm_run *run,
sccb = env->regs[ipbh0 & 0xf];
code = env->regs[(ipbh0 & 0xf0) >> 4];
- dprintf("sclp(0x%x, 0x%lx)\n", sccb, code);
-
- if (sccb & ~0x7ffffff8ul) {
- fprintf(stderr, "KVM: invalid sccb address 0x%x\n", sccb);
- r = -1;
- goto out;
- }
-
- switch(code) {
- case SCLP_CMDW_READ_SCP_INFO:
- case SCLP_CMDW_READ_SCP_INFO_FORCED:
- stw_phys(sccb + SCP_MEM_CODE, ram_size >> 20);
- stb_phys(sccb + SCP_INCREMENT, 1);
- stw_phys(sccb + SCP_RESPONSE_CODE, 0x10);
- setcc(env, 0);
-
- kvm_s390_interrupt_internal(env, KVM_S390_INT_SERVICE,
- sccb & ~3, 0, 1);
- break;
- default:
- dprintf("KVM: invalid sclp call 0x%x / 0x%lx\n", sccb, code);
- r = -1;
- break;
- }
-
-out:
- if (r < 0) {
+ r = sclp_service_call(env, sccb, code);
+ if (r) {
setcc(env, 3);
}
+
return 0;
}
@@ -408,7 +377,7 @@ static int handle_sigp(CPUState *env, struct kvm_run *run, uint8_t ipa1)
r = s390_cpu_initial_reset(target_env);
break;
default:
- fprintf(stderr, "KVM: unknown SIGP: 0x%x\n", ipa1);
+ fprintf(stderr, "KVM: unknown SIGP: 0x%x\n", order_code);
break;
}
@@ -449,7 +418,8 @@ static int handle_intercept(CPUState *env)
int icpt_code = run->s390_sieic.icptcode;
int r = 0;
- dprintf("intercept: 0x%x (at 0x%lx)\n", icpt_code, env->kvm_run->psw_addr);
+ dprintf("intercept: 0x%x (at 0x%lx)\n", icpt_code,
+ (long)env->kvm_run->psw_addr);
switch (icpt_code) {
case ICPT_INSTRUCTION:
r = handle_instruction(env, run);
diff --git a/target-s390x/op_helper.c b/target-s390x/op_helper.c
index be455b9de2..49760a40a2 100644
--- a/target-s390x/op_helper.c
+++ b/target-s390x/op_helper.c
@@ -1,6 +1,7 @@
/*
* S/390 helper routines
*
+ * Copyright (c) 2009 Ulrich Hecht
* Copyright (c) 2009 Alexander Graf
*
* This library is free software; you can redistribute it and/or
@@ -18,6 +19,11 @@
*/
#include "exec.h"
+#include "host-utils.h"
+#include "helpers.h"
+#include <string.h>
+#include "kvm.h"
+#include "qemu-timer.h"
/*****************************************************************************/
/* Softmmu support */
@@ -64,10 +70,2929 @@ void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
cpu_restore_state(tb, env, pc);
}
}
- /* XXX */
- /* helper_raise_exception_err(env->exception_index, env->error_code); */
+ cpu_loop_exit();
}
env = saved_env;
}
#endif
+
+/* #define DEBUG_HELPER */
+#ifdef DEBUG_HELPER
+#define HELPER_LOG(x...) qemu_log(x)
+#else
+#define HELPER_LOG(x...)
+#endif
+
+/* raise an exception */
+void HELPER(exception)(uint32_t excp)
+{
+ HELPER_LOG("%s: exception %d\n", __FUNCTION__, excp);
+ env->exception_index = excp;
+ cpu_loop_exit();
+}
+
+#ifndef CONFIG_USER_ONLY
+static void mvc_fast_memset(CPUState *env, uint32_t l, uint64_t dest,
+ uint8_t byte)
+{
+ target_phys_addr_t dest_phys;
+ target_phys_addr_t len = l;
+ void *dest_p;
+ uint64_t asc = env->psw.mask & PSW_MASK_ASC;
+ int flags;
+
+ if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
+ stb(dest, byte);
+ cpu_abort(env, "should never reach here");
+ }
+ dest_phys |= dest & ~TARGET_PAGE_MASK;
+
+ dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
+
+ memset(dest_p, byte, len);
+
+ cpu_physical_memory_unmap(dest_p, len, 1, len);
+}
+
+static void mvc_fast_memmove(CPUState *env, uint32_t l, uint64_t dest,
+ uint64_t src)
+{
+ target_phys_addr_t dest_phys;
+ target_phys_addr_t src_phys;
+ target_phys_addr_t len = l;
+ void *dest_p;
+ void *src_p;
+ uint64_t asc = env->psw.mask & PSW_MASK_ASC;
+ int flags;
+
+ if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
+ stb(dest, 0);
+ cpu_abort(env, "should never reach here");
+ }
+ dest_phys |= dest & ~TARGET_PAGE_MASK;
+
+ if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
+ ldub(src);
+ cpu_abort(env, "should never reach here");
+ }
+ src_phys |= src & ~TARGET_PAGE_MASK;
+
+ dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
+ src_p = cpu_physical_memory_map(src_phys, &len, 0);
+
+ memmove(dest_p, src_p, len);
+
+ cpu_physical_memory_unmap(dest_p, len, 1, len);
+ cpu_physical_memory_unmap(src_p, len, 0, len);
+}
+#endif
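+
+/* note on the fast paths above: callers only take them after checking that
+ source and destination stay within a single TARGET_PAGE_SIZE page (see
+ the TARGET_PAGE_MASK comparisons below), so the cpu_physical_memory_map()
+ of len bytes should never cross a page boundary and a single contiguous
+ host pointer suffices. */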
+
+/* and on array */
+uint32_t HELPER(nc)(uint32_t l, uint64_t dest, uint64_t src)
+{
+ int i;
+ unsigned char x;
+ uint32_t cc = 0;
+
+ HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
+ __FUNCTION__, l, dest, src);
+ for (i = 0; i <= l; i++) {
+ x = ldub(dest + i) & ldub(src + i);
+ if (x) {
+ cc = 1;
+ }
+ stb(dest + i, x);
+ }
+ return cc;
+}
+
+/* xor on array */
+uint32_t HELPER(xc)(uint32_t l, uint64_t dest, uint64_t src)
+{
+ int i;
+ unsigned char x;
+ uint32_t cc = 0;
+
+ HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
+ __FUNCTION__, l, dest, src);
+
+#ifndef CONFIG_USER_ONLY
+ /* xor with itself is the same as memset(0) */
+ if ((l > 32) && (src == dest) &&
+ (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK)) {
+ mvc_fast_memset(env, l + 1, dest, 0);
+ return 0;
+ }
+#else
+ if (src == dest) {
+ memset(g2h(dest), 0, l + 1);
+ return 0;
+ }
+#endif
+
+ for (i = 0; i <= l; i++) {
+ x = ldub(dest + i) ^ ldub(src + i);
+ if (x) {
+ cc = 1;
+ }
+ stb(dest + i, x);
+ }
+ return cc;
+}
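+
+/* the src == dest shortcut above matches the classic s390 idiom for
+ clearing storage, e.g. "XC 0(256,R1),0(R1)": xor-ing a field with itself
+ yields all-zero bytes and cc 0, which is exactly memset(dest, 0, l + 1). */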
+
+/* or on array */
+uint32_t HELPER(oc)(uint32_t l, uint64_t dest, uint64_t src)
+{
+ int i;
+ unsigned char x;
+ uint32_t cc = 0;
+
+ HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
+ __FUNCTION__, l, dest, src);
+ for (i = 0; i <= l; i++) {
+ x = ldub(dest + i) | ldub(src + i);
+ if (x) {
+ cc = 1;
+ }
+ stb(dest + i, x);
+ }
+ return cc;
+}
+
+/* memmove */
+void HELPER(mvc)(uint32_t l, uint64_t dest, uint64_t src)
+{
+ int i = 0;
+ int x = 0;
+ uint32_t l_64 = (l + 1) / 8;
+
+ HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
+ __FUNCTION__, l, dest, src);
+
+#ifndef CONFIG_USER_ONLY
+ if ((l > 32) &&
+ (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) &&
+ (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) {
+ if (dest == (src + 1)) {
+ mvc_fast_memset(env, l + 1, dest, ldub(src));
+ return;
+ } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
+ mvc_fast_memmove(env, l + 1, dest, src);
+ return;
+ }
+ }
+#else
+ if (dest == (src + 1)) {
+ memset(g2h(dest), ldub(src), l + 1);
+ return;
+ } else {
+ memmove(g2h(dest), g2h(src), l + 1);
+ return;
+ }
+#endif
+
+ /* handle the parts that fit into 8-byte loads/stores */
+ if (dest != (src + 1)) {
+ for (i = 0; i < l_64; i++) {
+ stq(dest + x, ldq(src + x));
+ x += 8;
+ }
+ }
+
+ /* slow version crossing pages with byte accesses */
+ for (i = x; i <= l; i++) {
+ stb(dest + i, ldub(src + i));
+ }
+}
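+
+/* the dest == src + 1 special case above matches the traditional s390
+ storage-fill idiom, e.g. "MVC 1(255,R1),0(R1)": since MVC is defined to
+ move byte by byte from left to right, the byte at 0(R1) propagates
+ through the whole destination, i.e. memset(dest, ldub(src), l + 1). */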
+
+/* compare unsigned byte arrays */
+uint32_t HELPER(clc)(uint32_t l, uint64_t s1, uint64_t s2)
+{
+ int i;
+ unsigned char x, y;
+ uint32_t cc;
+ HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
+ __FUNCTION__, l, s1, s2);
+ for (i = 0; i <= l; i++) {
+ x = ldub(s1 + i);
+ y = ldub(s2 + i);
+ HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
+ if (x < y) {
+ cc = 1;
+ goto done;
+ } else if (x > y) {
+ cc = 2;
+ goto done;
+ }
+ }
+ cc = 0;
+done:
+ HELPER_LOG("\n");
+ return cc;
+}
+
+/* compare logical under mask */
+uint32_t HELPER(clm)(uint32_t r1, uint32_t mask, uint64_t addr)
+{
+ uint8_t r, d;
+ uint32_t cc;
+ HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __FUNCTION__, r1,
+ mask, addr);
+ cc = 0;
+ while (mask) {
+ if (mask & 8) {
+ d = ldub(addr);
+ r = (r1 & 0xff000000UL) >> 24;
+ HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
+ addr);
+ if (r < d) {
+ cc = 1;
+ break;
+ } else if (r > d) {
+ cc = 2;
+ break;
+ }
+ addr++;
+ }
+ mask = (mask << 1) & 0xf;
+ r1 <<= 8;
+ }
+ HELPER_LOG("\n");
+ return cc;
+}
+
+/* store character under mask */
+void HELPER(stcm)(uint32_t r1, uint32_t mask, uint64_t addr)
+{
+ uint8_t r;
+ HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%lx\n", __FUNCTION__, r1, mask,
+ addr);
+ while (mask) {
+ if (mask & 8) {
+ r = (r1 & 0xff000000UL) >> 24;
+ stb(addr, r);
+ HELPER_LOG("mask 0x%x %02x (0x%lx) ", mask, r, addr);
+ addr++;
+ }
+ mask = (mask << 1) & 0xf;
+ r1 <<= 8;
+ }
+ HELPER_LOG("\n");
+}
+
+/* 64/64 -> 128 unsigned multiplication */
+void HELPER(mlg)(uint32_t r1, uint64_t v2)
+{
+#if HOST_LONG_BITS == 64 && defined(__GNUC__)
+ /* assuming 64-bit hosts have __uint128_t */
+ __uint128_t res = (__uint128_t)env->regs[r1 + 1];
+ res *= (__uint128_t)v2;
+ env->regs[r1] = (uint64_t)(res >> 64);
+ env->regs[r1 + 1] = (uint64_t)res;
+#else
+ mulu64(&env->regs[r1 + 1], &env->regs[r1], env->regs[r1 + 1], v2);
+#endif
+}
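+
+/* mulu64() example for the fallback above: it returns the full 128-bit
+ product in two halves, e.g. 0xffffffffffffffffULL * 2 leaves the high
+ word 1 in regs[r1] and the low word 0xfffffffffffffffeULL in
+ regs[r1 + 1]. */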
+
+/* 128 -> 64/64 unsigned division */
+void HELPER(dlg)(uint32_t r1, uint64_t v2)
+{
+ uint64_t divisor = v2;
+
+ if (!env->regs[r1]) {
+ /* 64 -> 64/64 case */
+ env->regs[r1] = env->regs[r1+1] % divisor;
+ env->regs[r1+1] = env->regs[r1+1] / divisor;
+ return;
+ } else {
+
+#if HOST_LONG_BITS == 64 && defined(__GNUC__)
+ /* assuming 64-bit hosts have __uint128_t */
+ __uint128_t dividend = (((__uint128_t)env->regs[r1]) << 64) |
+ (env->regs[r1+1]);
+ __uint128_t quotient = dividend / divisor;
+ env->regs[r1+1] = quotient;
+ __uint128_t remainder = dividend % divisor;
+ env->regs[r1] = remainder;
+#else
+ /* 32-bit hosts would need special wrapper functionality - just abort if
+ we encounter such a case; it's very unlikely anyway. */
+ cpu_abort(env, "128 -> 64/64 division not implemented\n");
+#endif
+ }
+}
+
+static inline uint64_t get_address(int x2, int b2, int d2)
+{
+ uint64_t r = d2;
+
+ if (x2) {
+ r += env->regs[x2];
+ }
+
+ if (b2) {
+ r += env->regs[b2];
+ }
+
+ /* 31-Bit mode */
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ r &= 0x7fffffff;
+ }
+
+ return r;
+}
+
+static inline uint64_t get_address_31fix(int reg)
+{
+ uint64_t r = env->regs[reg];
+
+ /* 31-Bit mode */
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ r &= 0x7fffffff;
+ }
+
+ return r;
+}
+
+/* search string (c is the byte to search for, r2 the start and r1 the end of the string) */
+uint32_t HELPER(srst)(uint32_t c, uint32_t r1, uint32_t r2)
+{
+ uint64_t i;
+ uint32_t cc = 2;
+ uint64_t str = get_address_31fix(r2);
+ uint64_t end = get_address_31fix(r1);
+
+ HELPER_LOG("%s: c %d *r1 0x%" PRIx64 " *r2 0x%" PRIx64 "\n", __FUNCTION__,
+ c, env->regs[r1], env->regs[r2]);
+
+ for (i = str; i != end; i++) {
+ if (ldub(i) == c) {
+ env->regs[r1] = i;
+ cc = 1;
+ break;
+ }
+ }
+
+ return cc;
+}
+
+/* unsigned string compare (c is string terminator) */
+uint32_t HELPER(clst)(uint32_t c, uint32_t r1, uint32_t r2)
+{
+ uint64_t s1 = get_address_31fix(r1);
+ uint64_t s2 = get_address_31fix(r2);
+ uint8_t v1, v2;
+ uint32_t cc;
+ c = c & 0xff;
+#ifdef CONFIG_USER_ONLY
+ if (!c) {
+ HELPER_LOG("%s: comparing '%s' and '%s'\n",
+ __FUNCTION__, (char*)g2h(s1), (char*)g2h(s2));
+ }
+#endif
+ for (;;) {
+ v1 = ldub(s1);
+ v2 = ldub(s2);
+ if ((v1 == c || v2 == c) || (v1 != v2)) {
+ break;
+ }
+ s1++;
+ s2++;
+ }
+
+ if (v1 == v2) {
+ cc = 0;
+ } else {
+ cc = (v1 < v2) ? 1 : 2;
+ /* FIXME: 31-bit mode! */
+ env->regs[r1] = s1;
+ env->regs[r2] = s2;
+ }
+ return cc;
+}
+
+/* move page */
+void HELPER(mvpg)(uint64_t r0, uint64_t r1, uint64_t r2)
+{
+ /* XXX missing r0 handling */
+#ifdef CONFIG_USER_ONLY
+ int i;
+
+ for (i = 0; i < TARGET_PAGE_SIZE; i++) {
+ stb(r1 + i, ldub(r2 + i));
+ }
+#else
+ mvc_fast_memmove(env, TARGET_PAGE_SIZE, r1, r2);
+#endif
+}
+
+/* string copy (c is string terminator) */
+void HELPER(mvst)(uint32_t c, uint32_t r1, uint32_t r2)
+{
+ uint64_t dest = get_address_31fix(r1);
+ uint64_t src = get_address_31fix(r2);
+ uint8_t v;
+ c = c & 0xff;
+#ifdef CONFIG_USER_ONLY
+ if (!c) {
+ HELPER_LOG("%s: copy '%s' to 0x%lx\n", __FUNCTION__, (char*)g2h(src),
+ dest);
+ }
+#endif
+ for (;;) {
+ v = ldub(src);
+ stb(dest, v);
+ if (v == c) {
+ break;
+ }
+ src++;
+ dest++;
+ }
+ env->regs[r1] = dest; /* FIXME: 31-bit mode! */
+}
+
+/* compare and swap 64-bit */
+uint32_t HELPER(csg)(uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ /* FIXME: locking? */
+ uint32_t cc;
+ uint64_t v2 = ldq(a2);
+ if (env->regs[r1] == v2) {
+ cc = 0;
+ stq(a2, env->regs[r3]);
+ } else {
+ cc = 1;
+ env->regs[r1] = v2;
+ }
+ return cc;
+}
+
+/* compare double and swap 64-bit */
+uint32_t HELPER(cdsg)(uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ /* FIXME: locking? */
+ uint32_t cc;
+ uint64_t v2_hi = ldq(a2);
+ uint64_t v2_lo = ldq(a2 + 8);
+ uint64_t v1_hi = env->regs[r1];
+ uint64_t v1_lo = env->regs[r1 + 1];
+
+ if ((v1_hi == v2_hi) && (v1_lo == v2_lo)) {
+ cc = 0;
+ stq(a2, env->regs[r3]);
+ stq(a2 + 8, env->regs[r3 + 1]);
+ } else {
+ cc = 1;
+ env->regs[r1] = v2_hi;
+ env->regs[r1 + 1] = v2_lo;
+ }
+
+ return cc;
+}
+
+/* compare and swap 32-bit */
+uint32_t HELPER(cs)(uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ /* FIXME: locking? */
+ uint32_t cc;
+ HELPER_LOG("%s: r1 %d a2 0x%lx r3 %d\n", __FUNCTION__, r1, a2, r3);
+ uint32_t v2 = ldl(a2);
+ if (((uint32_t)env->regs[r1]) == v2) {
+ cc = 0;
+ stl(a2, (uint32_t)env->regs[r3]);
+ } else {
+ cc = 1;
+ env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | v2;
+ }
+ return cc;
+}
+
+static uint32_t helper_icm(uint32_t r1, uint64_t address, uint32_t mask)
+{
+ int pos = 24; /* top of the lower half of r1 */
+ uint64_t rmask = 0xff000000ULL;
+ uint8_t val = 0;
+ int ccd = 0;
+ uint32_t cc = 0;
+
+ while (mask) {
+ if (mask & 8) {
+ env->regs[r1] &= ~rmask;
+ val = ldub(address);
+ if ((val & 0x80) && !ccd) {
+ cc = 1;
+ }
+ ccd = 1;
+ if (val && cc == 0) {
+ cc = 2;
+ }
+ env->regs[r1] |= (uint64_t)val << pos;
+ address++;
+ }
+ mask = (mask << 1) & 0xf;
+ pos -= 8;
+ rmask >>= 8;
+ }
+
+ return cc;
+}
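+
+/* icm worked example: mask 0xc (binary 1100) with bytes 0x12 0x34 at
+ address replaces bits 31-24 and 23-16 of r1 with 0x12 and 0x34 while the
+ unselected byte positions keep their old contents; cc is 2 here (field
+ non-zero, leftmost inserted bit zero), would be 1 if the first byte had
+ its 0x80 bit set, and 0 if all inserted bytes were zero. */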
+
+/* execute instruction
+ this instruction executes an insn modified with the contents of r1.
+ it does not change the executed instruction in memory;
+ it does not change the program counter.
+ in other words: tricky...
+ currently implemented by interpreting the cases it is most commonly used in.
+ */
+uint32_t HELPER(ex)(uint32_t cc, uint64_t v1, uint64_t addr, uint64_t ret)
+{
+ uint16_t insn = lduw_code(addr);
+ HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __FUNCTION__, v1, addr,
+ insn);
+ if ((insn & 0xf0ff) == 0xd000) {
+ uint32_t l, insn2, b1, b2, d1, d2;
+ l = v1 & 0xff;
+ insn2 = ldl_code(addr + 2);
+ b1 = (insn2 >> 28) & 0xf;
+ b2 = (insn2 >> 12) & 0xf;
+ d1 = (insn2 >> 16) & 0xfff;
+ d2 = insn2 & 0xfff;
+ switch (insn & 0xf00) {
+ case 0x200:
+ helper_mvc(l, get_address(0, b1, d1), get_address(0, b2, d2));
+ break;
+ case 0x500:
+ cc = helper_clc(l, get_address(0, b1, d1), get_address(0, b2, d2));
+ break;
+ case 0x700:
+ cc = helper_xc(l, get_address(0, b1, d1), get_address(0, b2, d2));
+ break;
+ default:
+ goto abort;
+ break;
+ }
+ } else if ((insn & 0xff00) == 0x0a00) {
+ /* supervisor call */
+ HELPER_LOG("%s: svc %ld via execute\n", __FUNCTION__, (insn|v1) & 0xff);
+ env->psw.addr = ret - 4;
+ env->int_svc_code = (insn|v1) & 0xff;
+ env->int_svc_ilc = 4;
+ helper_exception(EXCP_SVC);
+ } else if ((insn & 0xff00) == 0xbf00) {
+ uint32_t insn2, r1, r3, b2, d2;
+ insn2 = ldl_code(addr + 2);
+ r1 = (insn2 >> 20) & 0xf;
+ r3 = (insn2 >> 16) & 0xf;
+ b2 = (insn2 >> 12) & 0xf;
+ d2 = insn2 & 0xfff;
+ cc = helper_icm(r1, get_address(0, b2, d2), r3);
+ } else {
+abort:
+ cpu_abort(env, "EXECUTE on instruction prefix 0x%x not implemented\n",
+ insn);
+ }
+ return cc;
+}
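+
+/* example: "EX R1,target" where target is an MVC with a zero length byte
+ (opcode 0xd2, so (insn & 0xf0ff) == 0xd000 matches) takes l from the low
+ byte of r1; with r1 == 0x0f this runs helper_mvc for 16 bytes.
+ architecturally EX ORs bits 8-15 of the target with the low byte of r1,
+ which is equivalent here only because the guard requires the stored
+ length byte to be zero. */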
+
+/* absolute value 32-bit */
+uint32_t HELPER(abs_i32)(int32_t val)
+{
+ if (val < 0) {
+ return -val;
+ } else {
+ return val;
+ }
+}
+
+/* negative absolute value 32-bit */
+int32_t HELPER(nabs_i32)(int32_t val)
+{
+ if (val < 0) {
+ return val;
+ } else {
+ return -val;
+ }
+}
+
+/* absolute value 64-bit */
+uint64_t HELPER(abs_i64)(int64_t val)
+{
+ HELPER_LOG("%s: val 0x%" PRIx64 "\n", __FUNCTION__, val);
+
+ if (val < 0) {
+ return -val;
+ } else {
+ return val;
+ }
+}
+
+/* negative absolute value 64-bit */
+int64_t HELPER(nabs_i64)(int64_t val)
+{
+ if (val < 0) {
+ return val;
+ } else {
+ return -val;
+ }
+}
+
+/* add with carry 32-bit unsigned */
+uint32_t HELPER(addc_u32)(uint32_t cc, uint32_t v1, uint32_t v2)
+{
+ uint32_t res;
+
+ res = v1 + v2;
+ if (cc & 2) {
+ res++;
+ }
+
+ return res;
+}
+
+/* store character under mask high; same as stcm, but operates on the
+ upper half of r1 */
+void HELPER(stcmh)(uint32_t r1, uint64_t address, uint32_t mask)
+{
+ int pos = 56; /* top of the upper half of r1 */
+
+ while (mask) {
+ if (mask & 8) {
+ stb(address, (env->regs[r1] >> pos) & 0xff);
+ address++;
+ }
+ mask = (mask << 1) & 0xf;
+ pos -= 8;
+ }
+}
+
+/* insert character under mask high; same as icm, but operates on the
+ upper half of r1 */
+uint32_t HELPER(icmh)(uint32_t r1, uint64_t address, uint32_t mask)
+{
+ int pos = 56; /* top of the upper half of r1 */
+ uint64_t rmask = 0xff00000000000000ULL;
+ uint8_t val = 0;
+ int ccd = 0;
+ uint32_t cc = 0;
+
+ while (mask) {
+ if (mask & 8) {
+ env->regs[r1] &= ~rmask;
+ val = ldub(address);
+ if ((val & 0x80) && !ccd) {
+ cc = 1;
+ }
+ ccd = 1;
+ if (val && cc == 0) {
+ cc = 2;
+ }
+ env->regs[r1] |= (uint64_t)val << pos;
+ address++;
+ }
+ mask = (mask << 1) & 0xf;
+ pos -= 8;
+ rmask >>= 8;
+ }
+
+ return cc;
+}
+
+/* insert psw mask and condition code into r1 */
+void HELPER(ipm)(uint32_t cc, uint32_t r1)
+{
+ uint64_t r = env->regs[r1];
+
+ r &= 0xffffffff00ffffffULL;
+ r |= (cc << 28) | (((env->psw.mask >> 40) & 0xf) << 24);
+ env->regs[r1] = r;
+ HELPER_LOG("%s: cc %d psw.mask 0x%lx r1 0x%lx\n", __FUNCTION__,
+ cc, env->psw.mask, r);
+}
+
+/* load access registers r1 to r3 from memory at a2 */
+void HELPER(lam)(uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ int i;
+
+ for (i = r1;; i = (i + 1) % 16) {
+ env->aregs[i] = ldl(a2);
+ a2 += 4;
+
+ if (i == r3) {
+ break;
+ }
+ }
+}
+
+/* store access registers r1 to r3 in memory at a2 */
+void HELPER(stam)(uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ int i;
+
+ for (i = r1;; i = (i + 1) % 16) {
+ stl(a2, env->aregs[i]);
+ a2 += 4;
+
+ if (i == r3) {
+ break;
+ }
+ }
+}
+
+/* move long */
+uint32_t HELPER(mvcl)(uint32_t r1, uint32_t r2)
+{
+ uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
+ uint64_t dest = get_address_31fix(r1);
+ uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
+ uint64_t src = get_address_31fix(r2);
+ uint8_t pad = env->regs[r2 + 1] >> 24;
+ uint8_t v;
+ uint32_t cc;
+
+ if (destlen == srclen) {
+ cc = 0;
+ } else if (destlen < srclen) {
+ cc = 1;
+ } else {
+ cc = 2;
+ }
+
+ if (srclen > destlen) {
+ srclen = destlen;
+ }
+
+ for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
+ v = ldub(src);
+ stb(dest, v);
+ }
+
+ for (; destlen; dest++, destlen--) {
+ stb(dest, pad);
+ }
+
+ env->regs[r1 + 1] = destlen;
+ /* can't use srclen here, we trunc'ed it */
+ env->regs[r2 + 1] -= src - env->regs[r2];
+ env->regs[r1] = dest;
+ env->regs[r2] = src;
+
+ return cc;
+}
+
+/* move long extended: another memcopy insn with more bells and whistles */
+uint32_t HELPER(mvcle)(uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ uint64_t destlen = env->regs[r1 + 1];
+ uint64_t dest = env->regs[r1];
+ uint64_t srclen = env->regs[r3 + 1];
+ uint64_t src = env->regs[r3];
+ uint8_t pad = a2 & 0xff;
+ uint8_t v;
+ uint32_t cc;
+
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ destlen = (uint32_t)destlen;
+ srclen = (uint32_t)srclen;
+ dest &= 0x7fffffff;
+ src &= 0x7fffffff;
+ }
+
+ if (destlen == srclen) {
+ cc = 0;
+ } else if (destlen < srclen) {
+ cc = 1;
+ } else {
+ cc = 2;
+ }
+
+ if (srclen > destlen) {
+ srclen = destlen;
+ }
+
+ for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
+ v = ldub(src);
+ stb(dest, v);
+ }
+
+ for (; destlen; dest++, destlen--) {
+ stb(dest, pad);
+ }
+
+ env->regs[r1 + 1] = destlen;
+ /* can't use srclen here, we trunc'ed it */
+ /* FIXME: 31-bit mode! */
+ env->regs[r3 + 1] -= src - env->regs[r3];
+ env->regs[r1] = dest;
+ env->regs[r3] = src;
+
+ return cc;
+}
+
+/* compare logical long extended: memcompare insn with padding */
+uint32_t HELPER(clcle)(uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ uint64_t destlen = env->regs[r1 + 1];
+ uint64_t dest = get_address_31fix(r1);
+ uint64_t srclen = env->regs[r3 + 1];
+ uint64_t src = get_address_31fix(r3);
+ uint8_t pad = a2 & 0xff;
+ uint8_t v1 = 0, v2 = 0;
+ uint32_t cc = 0;
+
+ if (!(destlen || srclen)) {
+ return cc;
+ }
+
+ if (srclen > destlen) {
+ srclen = destlen;
+ }
+
+ for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
+ v1 = srclen ? ldub(src) : pad;
+ v2 = destlen ? ldub(dest) : pad;
+ if (v1 != v2) {
+ cc = (v1 < v2) ? 1 : 2;
+ break;
+ }
+ }
+
+ env->regs[r1 + 1] = destlen;
+ /* can't use srclen here, we trunc'ed it */
+ env->regs[r3 + 1] -= src - env->regs[r3];
+ env->regs[r1] = dest;
+ env->regs[r3] = src;
+
+ return cc;
+}
+
+/* subtract unsigned v2 from v1 with borrow */
+uint32_t HELPER(slb)(uint32_t cc, uint32_t r1, uint32_t v2)
+{
+ uint32_t v1 = env->regs[r1];
+ uint32_t res = v1 + (~v2) + (cc >> 1);
+
+ env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | res;
+ if (cc & 2) {
+ /* borrow */
+ return v1 ? 1 : 0;
+ } else {
+ return v1 ? 3 : 2;
+ }
+}
+
+/* subtract unsigned v2 from v1 with borrow */
+uint32_t HELPER(slbg)(uint32_t cc, uint32_t r1, uint64_t v1, uint64_t v2)
+{
+ uint64_t res = v1 + (~v2) + (cc >> 1);
+
+ env->regs[r1] = res;
+ if (cc & 2) {
+ /* borrow */
+ return v1 ? 1 : 0;
+ } else {
+ return v1 ? 3 : 2;
+ }
+}
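+
+/* borrow arithmetic sketch: in two's complement v1 - v2 == v1 + ~v2 + 1,
+ and the s390 condition code keeps "no borrow" in bit 1 (cc 2/3), so
+ (cc >> 1) supplies the carry-in: with cc == 2, 5 - 3 is computed as
+ 5 + ~3 + 1 == 2, while a pending borrow (cc 0/1) drops the + 1. */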
+
+static inline int float_comp_to_cc(int float_compare)
+{
+ switch (float_compare) {
+ case float_relation_equal:
+ return 0;
+ case float_relation_less:
+ return 1;
+ case float_relation_greater:
+ return 2;
+ case float_relation_unordered:
+ return 3;
+ default:
+ cpu_abort(env, "unknown return value for float compare\n");
+ }
+}
+
+/* condition codes for binary FP ops */
+static uint32_t set_cc_f32(float32 v1, float32 v2)
+{
+ return float_comp_to_cc(float32_compare_quiet(v1, v2, &env->fpu_status));
+}
+
+static uint32_t set_cc_f64(float64 v1, float64 v2)
+{
+ return float_comp_to_cc(float64_compare_quiet(v1, v2, &env->fpu_status));
+}
+
+/* condition codes for unary FP ops */
+static uint32_t set_cc_nz_f32(float32 v)
+{
+ if (float32_is_any_nan(v)) {
+ return 3;
+ } else if (float32_is_zero(v)) {
+ return 0;
+ } else if (float32_is_neg(v)) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+static uint32_t set_cc_nz_f64(float64 v)
+{
+ if (float64_is_any_nan(v)) {
+ return 3;
+ } else if (float64_is_zero(v)) {
+ return 0;
+ } else if (float64_is_neg(v)) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+static uint32_t set_cc_nz_f128(float128 v)
+{
+ if (float128_is_any_nan(v)) {
+ return 3;
+ } else if (float128_is_zero(v)) {
+ return 0;
+ } else if (float128_is_neg(v)) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+/* convert 32-bit int to 64-bit float */
+void HELPER(cdfbr)(uint32_t f1, int32_t v2)
+{
+ HELPER_LOG("%s: converting %d to f%d\n", __FUNCTION__, v2, f1);
+ env->fregs[f1].d = int32_to_float64(v2, &env->fpu_status);
+}
+
+/* convert 32-bit int to 128-bit float */
+void HELPER(cxfbr)(uint32_t f1, int32_t v2)
+{
+ CPU_QuadU v1;
+ v1.q = int32_to_float128(v2, &env->fpu_status);
+ env->fregs[f1].ll = v1.ll.upper;
+ env->fregs[f1 + 2].ll = v1.ll.lower;
+}
+
+/* convert 64-bit int to 32-bit float */
+void HELPER(cegbr)(uint32_t f1, int64_t v2)
+{
+ HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
+ env->fregs[f1].l.upper = int64_to_float32(v2, &env->fpu_status);
+}
+
+/* convert 64-bit int to 64-bit float */
+void HELPER(cdgbr)(uint32_t f1, int64_t v2)
+{
+ HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
+ env->fregs[f1].d = int64_to_float64(v2, &env->fpu_status);
+}
+
+/* convert 64-bit int to 128-bit float */
+void HELPER(cxgbr)(uint32_t f1, int64_t v2)
+{
+ CPU_QuadU x1;
+ x1.q = int64_to_float128(v2, &env->fpu_status);
+ HELPER_LOG("%s: converted %ld to 0x%lx and 0x%lx\n", __FUNCTION__, v2,
+ x1.ll.upper, x1.ll.lower);
+ env->fregs[f1].ll = x1.ll.upper;
+ env->fregs[f1 + 2].ll = x1.ll.lower;
+}
+
+/* convert 32-bit int to 32-bit float */
+void HELPER(cefbr)(uint32_t f1, int32_t v2)
+{
+ env->fregs[f1].l.upper = int32_to_float32(v2, &env->fpu_status);
+ HELPER_LOG("%s: converting %d to 0x%d in f%d\n", __FUNCTION__, v2,
+ env->fregs[f1].l.upper, f1);
+}
+
+/* 32-bit FP addition RR */
+uint32_t HELPER(aebr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
+ env->fregs[f2].l.upper,
+ &env->fpu_status);
+ HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
+ env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);
+
+ return set_cc_nz_f32(env->fregs[f1].l.upper);
+}
+
+/* 64-bit FP addition RR */
+uint32_t HELPER(adbr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].d = float64_add(env->fregs[f1].d, env->fregs[f2].d,
+ &env->fpu_status);
+ HELPER_LOG("%s: adding 0x%ld resulting in 0x%ld in f%d\n", __FUNCTION__,
+ env->fregs[f2].d, env->fregs[f1].d, f1);
+
+ return set_cc_nz_f64(env->fregs[f1].d);
+}
+
+/* 32-bit FP subtraction RR */
+uint32_t HELPER(sebr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].l.upper = float32_sub(env->fregs[f1].l.upper,
+ env->fregs[f2].l.upper,
+ &env->fpu_status);
+ HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
+ env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);
+
+ return set_cc_nz_f32(env->fregs[f1].l.upper);
+}
+
+/* 64-bit FP subtraction RR */
+uint32_t HELPER(sdbr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].d = float64_sub(env->fregs[f1].d, env->fregs[f2].d,
+ &env->fpu_status);
+ HELPER_LOG("%s: subtracting 0x%ld resulting in 0x%ld in f%d\n",
+ __FUNCTION__, env->fregs[f2].d, env->fregs[f1].d, f1);
+
+ return set_cc_nz_f64(env->fregs[f1].d);
+}
+
+/* 32-bit FP division RR */
+void HELPER(debr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].l.upper = float32_div(env->fregs[f1].l.upper,
+ env->fregs[f2].l.upper,
+ &env->fpu_status);
+}
+
+/* 128-bit FP division RR */
+void HELPER(dxbr)(uint32_t f1, uint32_t f2)
+{
+ CPU_QuadU v1;
+ v1.ll.upper = env->fregs[f1].ll;
+ v1.ll.lower = env->fregs[f1 + 2].ll;
+ CPU_QuadU v2;
+ v2.ll.upper = env->fregs[f2].ll;
+ v2.ll.lower = env->fregs[f2 + 2].ll;
+ CPU_QuadU res;
+ res.q = float128_div(v1.q, v2.q, &env->fpu_status);
+ env->fregs[f1].ll = res.ll.upper;
+ env->fregs[f1 + 2].ll = res.ll.lower;
+}
+
+/* 64-bit FP multiplication RR */
+void HELPER(mdbr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].d = float64_mul(env->fregs[f1].d, env->fregs[f2].d,
+ &env->fpu_status);
+}
+
+/* 128-bit FP multiplication RR */
+void HELPER(mxbr)(uint32_t f1, uint32_t f2)
+{
+ CPU_QuadU v1;
+ v1.ll.upper = env->fregs[f1].ll;
+ v1.ll.lower = env->fregs[f1 + 2].ll;
+ CPU_QuadU v2;
+ v2.ll.upper = env->fregs[f2].ll;
+ v2.ll.lower = env->fregs[f2 + 2].ll;
+ CPU_QuadU res;
+ res.q = float128_mul(v1.q, v2.q, &env->fpu_status);
+ env->fregs[f1].ll = res.ll.upper;
+ env->fregs[f1 + 2].ll = res.ll.lower;
+}
+
+/* convert 32-bit float to 64-bit float */
+void HELPER(ldebr)(uint32_t r1, uint32_t r2)
+{
+ env->fregs[r1].d = float32_to_float64(env->fregs[r2].l.upper,
+ &env->fpu_status);
+}
+
+/* convert 128-bit float to 64-bit float */
+void HELPER(ldxbr)(uint32_t f1, uint32_t f2)
+{
+ CPU_QuadU x2;
+ x2.ll.upper = env->fregs[f2].ll;
+ x2.ll.lower = env->fregs[f2 + 2].ll;
+ env->fregs[f1].d = float128_to_float64(x2.q, &env->fpu_status);
+ HELPER_LOG("%s: to 0x%ld\n", __FUNCTION__, env->fregs[f1].d);
+}
+
+/* convert 64-bit float to 128-bit float */
+void HELPER(lxdbr)(uint32_t f1, uint32_t f2)
+{
+ CPU_QuadU res;
+ res.q = float64_to_float128(env->fregs[f2].d, &env->fpu_status);
+ env->fregs[f1].ll = res.ll.upper;
+ env->fregs[f1 + 2].ll = res.ll.lower;
+}
+
+/* convert 64-bit float to 32-bit float */
+void HELPER(ledbr)(uint32_t f1, uint32_t f2)
+{
+ float64 d2 = env->fregs[f2].d;
+ env->fregs[f1].l.upper = float64_to_float32(d2, &env->fpu_status);
+}
+
+/* convert 128-bit float to 32-bit float */
+void HELPER(lexbr)(uint32_t f1, uint32_t f2)
+{
+ CPU_QuadU x2;
+ x2.ll.upper = env->fregs[f2].ll;
+ x2.ll.lower = env->fregs[f2 + 2].ll;
+ env->fregs[f1].l.upper = float128_to_float32(x2.q, &env->fpu_status);
+ HELPER_LOG("%s: to 0x%d\n", __FUNCTION__, env->fregs[f1].l.upper);
+}
+
+/* absolute value of 32-bit float */
+uint32_t HELPER(lpebr)(uint32_t f1, uint32_t f2)
+{
+ float32 v1;
+ float32 v2 = env->fregs[f2].l.upper;
+ v1 = float32_abs(v2);
+ env->fregs[f1].l.upper = v1;
+ return set_cc_nz_f32(v1);
+}
+
+/* absolute value of 64-bit float */
+uint32_t HELPER(lpdbr)(uint32_t f1, uint32_t f2)
+{
+ float64 v1;
+ float64 v2 = env->fregs[f2].d;
+ v1 = float64_abs(v2);
+ env->fregs[f1].d = v1;
+ return set_cc_nz_f64(v1);
+}
+
+/* absolute value of 128-bit float */
+uint32_t HELPER(lpxbr)(uint32_t f1, uint32_t f2)
+{
+ CPU_QuadU v1;
+ CPU_QuadU v2;
+ v2.ll.upper = env->fregs[f2].ll;
+ v2.ll.lower = env->fregs[f2 + 2].ll;
+ v1.q = float128_abs(v2.q);
+ env->fregs[f1].ll = v1.ll.upper;
+ env->fregs[f1 + 2].ll = v1.ll.lower;
+ return set_cc_nz_f128(v1.q);
+}
+
+/* load and test 64-bit float */
+uint32_t HELPER(ltdbr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].d = env->fregs[f2].d;
+ return set_cc_nz_f64(env->fregs[f1].d);
+}
+
+/* load and test 32-bit float */
+uint32_t HELPER(ltebr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].l.upper = env->fregs[f2].l.upper;
+ return set_cc_nz_f32(env->fregs[f1].l.upper);
+}
+
+/* load and test 128-bit float */
+uint32_t HELPER(ltxbr)(uint32_t f1, uint32_t f2)
+{
+ CPU_QuadU x;
+ x.ll.upper = env->fregs[f2].ll;
+ x.ll.lower = env->fregs[f2 + 2].ll;
+ env->fregs[f1].ll = x.ll.upper;
+ env->fregs[f1 + 2].ll = x.ll.lower;
+ return set_cc_nz_f128(x.q);
+}
+
+/* load complement of 32-bit float */
+uint32_t HELPER(lcebr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].l.upper = float32_chs(env->fregs[f2].l.upper);
+
+ return set_cc_nz_f32(env->fregs[f1].l.upper);
+}
+
+/* load complement of 64-bit float */
+uint32_t HELPER(lcdbr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].d = float64_chs(env->fregs[f2].d);
+
+ return set_cc_nz_f64(env->fregs[f1].d);
+}
+
+/* load complement of 128-bit float */
+uint32_t HELPER(lcxbr)(uint32_t f1, uint32_t f2)
+{
+ CPU_QuadU x1, x2;
+ x2.ll.upper = env->fregs[f2].ll;
+ x2.ll.lower = env->fregs[f2 + 2].ll;
+ x1.q = float128_chs(x2.q);
+ env->fregs[f1].ll = x1.ll.upper;
+ env->fregs[f1 + 2].ll = x1.ll.lower;
+ return set_cc_nz_f128(x1.q);
+}
+
+/* 32-bit FP addition RM */
+void HELPER(aeb)(uint32_t f1, uint32_t val)
+{
+ float32 v1 = env->fregs[f1].l.upper;
+ CPU_FloatU v2;
+ v2.l = val;
+ HELPER_LOG("%s: adding 0x%d from f%d and 0x%d\n", __FUNCTION__,
+ v1, f1, v2.f);
+ env->fregs[f1].l.upper = float32_add(v1, v2.f, &env->fpu_status);
+}
+
+/* 32-bit FP division RM */
+void HELPER(deb)(uint32_t f1, uint32_t val)
+{
+ float32 v1 = env->fregs[f1].l.upper;
+ CPU_FloatU v2;
+ v2.l = val;
+ HELPER_LOG("%s: dividing 0x%d from f%d by 0x%d\n", __FUNCTION__,
+ v1, f1, v2.f);
+ env->fregs[f1].l.upper = float32_div(v1, v2.f, &env->fpu_status);
+}
+
+/* 32-bit FP multiplication RM */
+void HELPER(meeb)(uint32_t f1, uint32_t val)
+{
+ float32 v1 = env->fregs[f1].l.upper;
+ CPU_FloatU v2;
+ v2.l = val;
+ HELPER_LOG("%s: multiplying 0x%d from f%d and 0x%d\n", __FUNCTION__,
+ v1, f1, v2.f);
+ env->fregs[f1].l.upper = float32_mul(v1, v2.f, &env->fpu_status);
+}
+
+/* 32-bit FP compare RR */
+uint32_t HELPER(cebr)(uint32_t f1, uint32_t f2)
+{
+ float32 v1 = env->fregs[f1].l.upper;
+ float32 v2 = env->fregs[f2].l.upper;
+ HELPER_LOG("%s: comparing 0x%d from f%d and 0x%d\n", __FUNCTION__,
+ v1, f1, v2);
+ return set_cc_f32(v1, v2);
+}
+
+/* 64-bit FP compare RR */
+uint32_t HELPER(cdbr)(uint32_t f1, uint32_t f2)
+{
+ float64 v1 = env->fregs[f1].d;
+ float64 v2 = env->fregs[f2].d;
+ HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%ld\n", __FUNCTION__,
+ v1, f1, v2);
+ return set_cc_f64(v1, v2);
+}
+
+/* 128-bit FP compare RR */
+uint32_t HELPER(cxbr)(uint32_t f1, uint32_t f2)
+{
+ CPU_QuadU v1;
+ v1.ll.upper = env->fregs[f1].ll;
+ v1.ll.lower = env->fregs[f1 + 2].ll;
+ CPU_QuadU v2;
+ v2.ll.upper = env->fregs[f2].ll;
+ v2.ll.lower = env->fregs[f2 + 2].ll;
+
+ return float_comp_to_cc(float128_compare_quiet(v1.q, v2.q,
+ &env->fpu_status));
+}
+
+/* 64-bit FP compare RM */
+uint32_t HELPER(cdb)(uint32_t f1, uint64_t a2)
+{
+ float64 v1 = env->fregs[f1].d;
+ CPU_DoubleU v2;
+ v2.ll = ldq(a2);
+ HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%lx\n", __FUNCTION__, v1,
+ f1, v2.d);
+ return set_cc_f64(v1, v2.d);
+}
+
+/* 64-bit FP addition RM */
+uint32_t HELPER(adb)(uint32_t f1, uint64_t a2)
+{
+ float64 v1 = env->fregs[f1].d;
+ CPU_DoubleU v2;
+ v2.ll = ldq(a2);
+ HELPER_LOG("%s: adding 0x%lx from f%d and 0x%lx\n", __FUNCTION__,
+ v1, f1, v2.d);
+ env->fregs[f1].d = v1 = float64_add(v1, v2.d, &env->fpu_status);
+ return set_cc_nz_f64(v1);
+}
+
+/* 32-bit FP subtraction RM */
+void HELPER(seb)(uint32_t f1, uint32_t val)
+{
+ float32 v1 = env->fregs[f1].l.upper;
+ CPU_FloatU v2;
+ v2.l = val;
+ env->fregs[f1].l.upper = float32_sub(v1, v2.f, &env->fpu_status);
+}
+
+/* 64-bit FP subtraction RM */
+uint32_t HELPER(sdb)(uint32_t f1, uint64_t a2)
+{
+ float64 v1 = env->fregs[f1].d;
+ CPU_DoubleU v2;
+ v2.ll = ldq(a2);
+ env->fregs[f1].d = v1 = float64_sub(v1, v2.d, &env->fpu_status);
+ return set_cc_nz_f64(v1);
+}
+
+/* 64-bit FP multiplication RM */
+void HELPER(mdb)(uint32_t f1, uint64_t a2)
+{
+ float64 v1 = env->fregs[f1].d;
+ CPU_DoubleU v2;
+ v2.ll = ldq(a2);
+ HELPER_LOG("%s: multiplying 0x%lx from f%d and 0x%ld\n", __FUNCTION__,
+ v1, f1, v2.d);
+ env->fregs[f1].d = float64_mul(v1, v2.d, &env->fpu_status);
+}
+
+/* 64-bit FP division RM */
+void HELPER(ddb)(uint32_t f1, uint64_t a2)
+{
+ float64 v1 = env->fregs[f1].d;
+ CPU_DoubleU v2;
+ v2.ll = ldq(a2);
+ HELPER_LOG("%s: dividing 0x%lx from f%d by 0x%ld\n", __FUNCTION__,
+ v1, f1, v2.d);
+ env->fregs[f1].d = float64_div(v1, v2.d, &env->fpu_status);
+}
+
+static void set_round_mode(int m3)
+{
+ switch (m3) {
+ case 0:
+ /* current mode */
+ break;
+ case 1:
+ /* biased round to nearest */
+ case 4:
+ /* round to nearest */
+ set_float_rounding_mode(float_round_nearest_even, &env->fpu_status);
+ break;
+ case 5:
+ /* round to zero */
+ set_float_rounding_mode(float_round_to_zero, &env->fpu_status);
+ break;
+ case 6:
+ /* round to +inf */
+ set_float_rounding_mode(float_round_up, &env->fpu_status);
+ break;
+ case 7:
+ /* round to -inf */
+ set_float_rounding_mode(float_round_down, &env->fpu_status);
+ break;
+ }
+}
+
+/* convert 32-bit float to 64-bit int */
+uint32_t HELPER(cgebr)(uint32_t r1, uint32_t f2, uint32_t m3)
+{
+ float32 v2 = env->fregs[f2].l.upper;
+ set_round_mode(m3);
+ env->regs[r1] = float32_to_int64(v2, &env->fpu_status);
+ return set_cc_nz_f32(v2);
+}
+
+/* convert 64-bit float to 64-bit int */
+uint32_t HELPER(cgdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
+{
+ float64 v2 = env->fregs[f2].d;
+ set_round_mode(m3);
+ env->regs[r1] = float64_to_int64(v2, &env->fpu_status);
+ return set_cc_nz_f64(v2);
+}
+
+/* convert 128-bit float to 64-bit int */
+uint32_t HELPER(cgxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
+{
+ CPU_QuadU v2;
+ v2.ll.upper = env->fregs[f2].ll;
+ v2.ll.lower = env->fregs[f2 + 2].ll;
+ set_round_mode(m3);
+ env->regs[r1] = float128_to_int64(v2.q, &env->fpu_status);
+ if (float128_is_any_nan(v2.q)) {
+ return 3;
+ } else if (float128_is_zero(v2.q)) {
+ return 0;
+ } else if (float128_is_neg(v2.q)) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+/* convert 32-bit float to 32-bit int */
+uint32_t HELPER(cfebr)(uint32_t r1, uint32_t f2, uint32_t m3)
+{
+ float32 v2 = env->fregs[f2].l.upper;
+ set_round_mode(m3);
+ env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
+ float32_to_int32(v2, &env->fpu_status);
+ return set_cc_nz_f32(v2);
+}
+
+/* convert 64-bit float to 32-bit int */
+uint32_t HELPER(cfdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
+{
+ float64 v2 = env->fregs[f2].d;
+ set_round_mode(m3);
+ env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
+ float64_to_int32(v2, &env->fpu_status);
+ return set_cc_nz_f64(v2);
+}
+
+/* convert 128-bit float to 32-bit int */
+uint32_t HELPER(cfxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
+{
+ CPU_QuadU v2;
+ v2.ll.upper = env->fregs[f2].ll;
+ v2.ll.lower = env->fregs[f2 + 2].ll;
+ env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
+ float128_to_int32(v2.q, &env->fpu_status);
+ return set_cc_nz_f128(v2.q);
+}
+
+/* load 32-bit FP zero */
+void HELPER(lzer)(uint32_t f1)
+{
+ env->fregs[f1].l.upper = float32_zero;
+}
+
+/* load 64-bit FP zero */
+void HELPER(lzdr)(uint32_t f1)
+{
+ env->fregs[f1].d = float64_zero;
+}
+
+/* load 128-bit FP zero */
+void HELPER(lzxr)(uint32_t f1)
+{
+ CPU_QuadU x;
+ x.q = float64_to_float128(float64_zero, &env->fpu_status);
+ env->fregs[f1].ll = x.ll.upper;
+ env->fregs[f1 + 2].ll = x.ll.lower;
+}
+
+/* 128-bit FP subtraction RR */
+uint32_t HELPER(sxbr)(uint32_t f1, uint32_t f2)
+{
+ CPU_QuadU v1;
+ v1.ll.upper = env->fregs[f1].ll;
+ v1.ll.lower = env->fregs[f1 + 2].ll;
+ CPU_QuadU v2;
+ v2.ll.upper = env->fregs[f2].ll;
+ v2.ll.lower = env->fregs[f2 + 2].ll;
+ CPU_QuadU res;
+ res.q = float128_sub(v1.q, v2.q, &env->fpu_status);
+ env->fregs[f1].ll = res.ll.upper;
+ env->fregs[f1 + 2].ll = res.ll.lower;
+ return set_cc_nz_f128(res.q);
+}
+
+/* 128-bit FP addition RR */
+uint32_t HELPER(axbr)(uint32_t f1, uint32_t f2)
+{
+ CPU_QuadU v1;
+ v1.ll.upper = env->fregs[f1].ll;
+ v1.ll.lower = env->fregs[f1 + 2].ll;
+ CPU_QuadU v2;
+ v2.ll.upper = env->fregs[f2].ll;
+ v2.ll.lower = env->fregs[f2 + 2].ll;
+ CPU_QuadU res;
+ res.q = float128_add(v1.q, v2.q, &env->fpu_status);
+ env->fregs[f1].ll = res.ll.upper;
+ env->fregs[f1 + 2].ll = res.ll.lower;
+ return set_cc_nz_f128(res.q);
+}
+
+/* 32-bit FP multiplication RR */
+void HELPER(meebr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].l.upper = float32_mul(env->fregs[f1].l.upper,
+ env->fregs[f2].l.upper,
+ &env->fpu_status);
+}
+
+/* 64-bit FP division RR */
+void HELPER(ddbr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].d = float64_div(env->fregs[f1].d, env->fregs[f2].d,
+ &env->fpu_status);
+}
+
+/* 64-bit FP multiply and add RM */
+void HELPER(madb)(uint32_t f1, uint64_t a2, uint32_t f3)
+{
+ HELPER_LOG("%s: f1 %d a2 0x%lx f3 %d\n", __FUNCTION__, f1, a2, f3);
+ CPU_DoubleU v2;
+ v2.ll = ldq(a2);
+ env->fregs[f1].d = float64_add(env->fregs[f1].d,
+ float64_mul(v2.d, env->fregs[f3].d,
+ &env->fpu_status),
+ &env->fpu_status);
+}
+
+/* 64-bit FP multiply and add RR */
+void HELPER(madbr)(uint32_t f1, uint32_t f3, uint32_t f2)
+{
+ HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
+ env->fregs[f1].d = float64_add(float64_mul(env->fregs[f2].d,
+ env->fregs[f3].d,
+ &env->fpu_status),
+ env->fregs[f1].d, &env->fpu_status);
+}
+
+/* 64-bit FP multiply and subtract RR */
+void HELPER(msdbr)(uint32_t f1, uint32_t f3, uint32_t f2)
+{
+ HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
+ env->fregs[f1].d = float64_sub(float64_mul(env->fregs[f2].d,
+ env->fregs[f3].d,
+ &env->fpu_status),
+ env->fregs[f1].d, &env->fpu_status);
+}
+
+/* 32-bit FP multiply and add RR */
+void HELPER(maebr)(uint32_t f1, uint32_t f3, uint32_t f2)
+{
+ env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
+ float32_mul(env->fregs[f2].l.upper,
+ env->fregs[f3].l.upper,
+ &env->fpu_status),
+ &env->fpu_status);
+}
+
+/* convert 64-bit float to 128-bit float */
+void HELPER(lxdb)(uint32_t f1, uint64_t a2)
+{
+ CPU_DoubleU v2;
+ v2.ll = ldq(a2);
+ CPU_QuadU v1;
+ v1.q = float64_to_float128(v2.d, &env->fpu_status);
+ env->fregs[f1].ll = v1.ll.upper;
+ env->fregs[f1 + 2].ll = v1.ll.lower;
+}
+
+/* test data class 32-bit */
+uint32_t HELPER(tceb)(uint32_t f1, uint64_t m2)
+{
+ float32 v1 = env->fregs[f1].l.upper;
+ int neg = float32_is_neg(v1);
+ uint32_t cc = 0;
+
+ HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, (long)v1, m2, neg);
+ if ((float32_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
+ (float32_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
+ (float32_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
+ (float32_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
+ cc = 1;
+ } else if (m2 & (1 << (9-neg))) {
+ /* assume normalized number */
+ cc = 1;
+ }
+
+ /* FIXME: denormalized? */
+ return cc;
+}
+
+/* test data class 64-bit */
+uint32_t HELPER(tcdb)(uint32_t f1, uint64_t m2)
+{
+ float64 v1 = env->fregs[f1].d;
+ int neg = float64_is_neg(v1);
+ uint32_t cc = 0;
+
+ HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, v1, m2, neg);
+ if ((float64_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
+ (float64_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
+ (float64_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
+ (float64_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
+ cc = 1;
+ } else if (m2 & (1 << (9-neg))) {
+ /* assume normalized number */
+ cc = 1;
+ }
+ /* FIXME: denormalized? */
+ return cc;
+}
+
+/* test data class 128-bit */
+uint32_t HELPER(tcxb)(uint32_t f1, uint64_t m2)
+{
+ CPU_QuadU v1;
+ uint32_t cc = 0;
+ v1.ll.upper = env->fregs[f1].ll;
+ v1.ll.lower = env->fregs[f1 + 2].ll;
+
+ int neg = float128_is_neg(v1.q);
+ if ((float128_is_zero(v1.q) && (m2 & (1 << (11-neg)))) ||
+ (float128_is_infinity(v1.q) && (m2 & (1 << (5-neg)))) ||
+ (float128_is_any_nan(v1.q) && (m2 & (1 << (3-neg)))) ||
+ (float128_is_signaling_nan(v1.q) && (m2 & (1 << (1-neg))))) {
+ cc = 1;
+ } else if (m2 & (1 << (9-neg))) {
+ /* assume normalized number */
+ cc = 1;
+ }
+ /* FIXME: denormalized? */
+ return cc;
+}
+
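+/*
+ * FIND LEFTMOST ONE example: v2 = 0x30 has its leftmost one after
+ * 58 leading zero bits, so r1 becomes 58 and r1 + 1 becomes 0x30
+ * with that one cleared, i.e. 0x10, with cc 2.  v2 = 0 gives
+ * r1 = 64, r1 + 1 = 0 and cc 0.
+ */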
+/* find leftmost one */
+uint32_t HELPER(flogr)(uint32_t r1, uint64_t v2)
+{
+ uint64_t res = 0;
+ uint64_t ov2 = v2;
+
+ while (!(v2 & 0x8000000000000000ULL) && v2) {
+ v2 <<= 1;
+ res++;
+ }
+
+ if (!v2) {
+ env->regs[r1] = 64;
+ env->regs[r1 + 1] = 0;
+ return 0;
+ } else {
+ env->regs[r1] = res;
+ env->regs[r1 + 1] = ov2 & ~(0x8000000000000000ULL >> res);
+ return 2;
+ }
+}
+
+/* square root 64-bit RR */
+void HELPER(sqdbr)(uint32_t f1, uint32_t f2)
+{
+ env->fregs[f1].d = float64_sqrt(env->fregs[f2].d, &env->fpu_status);
+}
+
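+/*
+ * CHECKSUM accumulates 32-bit words with end-around carry:
+ * e.g. 0xffffffff + 0x00000002 overflows to 0x100000001 and
+ * cksm_overflow() folds it back to 0x00000002.
+ */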
+static inline uint64_t cksm_overflow(uint64_t cksm)
+{
+ if (cksm > 0xffffffffULL) {
+ cksm &= 0xffffffffULL;
+ cksm++;
+ }
+ return cksm;
+}
+
+/* checksum */
+void HELPER(cksm)(uint32_t r1, uint32_t r2)
+{
+ uint64_t src = get_address_31fix(r2);
+ uint64_t src_len = env->regs[(r2 + 1) & 15];
+ uint64_t cksm = 0;
+
+ while (src_len >= 4) {
+ cksm += ldl(src);
+ cksm = cksm_overflow(cksm);
+
+ /* move to next word */
+ src_len -= 4;
+ src += 4;
+ }
+
+ switch (src_len) {
+ case 0:
+ break;
+ case 1:
+ cksm += ldub(src);
+ cksm = cksm_overflow(cksm);
+ break;
+ case 2:
+ cksm += lduw(src);
+ cksm = cksm_overflow(cksm);
+ break;
+ case 3:
+ /* XXX check if this really is correct */
+ cksm += lduw(src) << 8;
+ cksm += ldub(src + 2);
+ cksm = cksm_overflow(cksm);
+ break;
+ }
+
+ /* indicate we've processed everything */
+ env->regs[(r2 + 1) & 15] = 0;
+
+ /* store result */
+ env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | (uint32_t)cksm;
+}
+
+static inline uint32_t cc_calc_ltgt_32(CPUState *env, int32_t src,
+ int32_t dst)
+{
+ if (src == dst) {
+ return 0;
+ } else if (src < dst) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+static inline uint32_t cc_calc_ltgt0_32(CPUState *env, int32_t dst)
+{
+ return cc_calc_ltgt_32(env, dst, 0);
+}
+
+static inline uint32_t cc_calc_ltgt_64(CPUState *env, int64_t src,
+ int64_t dst)
+{
+ if (src == dst) {
+ return 0;
+ } else if (src < dst) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+static inline uint32_t cc_calc_ltgt0_64(CPUState *env, int64_t dst)
+{
+ return cc_calc_ltgt_64(env, dst, 0);
+}
+
+static inline uint32_t cc_calc_ltugtu_32(CPUState *env, uint32_t src,
+ uint32_t dst)
+{
+ if (src == dst) {
+ return 0;
+ } else if (src < dst) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+static inline uint32_t cc_calc_ltugtu_64(CPUState *env, uint64_t src,
+ uint64_t dst)
+{
+ if (src == dst) {
+ return 0;
+ } else if (src < dst) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
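+/*
+ * Test under mask: cc 0 = selected bits all zero, cc 3 = all one,
+ * otherwise mixed.  The 32-bit variant leaves any mixed result as
+ * cc 1, the 64-bit variant also checks the leftmost selected bit
+ * (zero -> cc 1, one -> cc 2).  E.g. val = 0x4, mask = 0xc is mixed
+ * with the leftmost selected bit zero, hence cc 1.
+ */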
+static inline uint32_t cc_calc_tm_32(CPUState *env, uint32_t val, uint32_t mask)
+{
+ HELPER_LOG("%s: val 0x%x mask 0x%x\n", __FUNCTION__, val, mask);
+ uint16_t r = val & mask;
+ if (r == 0 || mask == 0) {
+ return 0;
+ } else if (r == mask) {
+ return 3;
+ } else {
+ return 1;
+ }
+}
+
+/* set condition code for test under mask */
+static inline uint32_t cc_calc_tm_64(CPUState *env, uint64_t val, uint32_t mask)
+{
+ uint16_t r = val & mask;
+ HELPER_LOG("%s: val 0x%lx mask 0x%x r 0x%x\n", __FUNCTION__, val, mask, r);
+ if (r == 0 || mask == 0) {
+ return 0;
+ } else if (r == mask) {
+ return 3;
+ } else {
+ while (!(mask & 0x8000)) {
+ mask <<= 1;
+ val <<= 1;
+ }
+ if (val & 0x8000) {
+ return 2;
+ } else {
+ return 1;
+ }
+ }
+}
+
+static inline uint32_t cc_calc_nz(CPUState *env, uint64_t dst)
+{
+ return !!dst;
+}
+
+static inline uint32_t cc_calc_add_64(CPUState *env, int64_t a1, int64_t a2,
+ int64_t ar)
+{
+ if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
+ return 3; /* overflow */
+ } else {
+ if (ar < 0) {
+ return 1;
+ } else if (ar > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+ }
+}
+
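+/*
+ * cc for logical (unsigned) add: 0 = zero, no carry; 1 = not zero,
+ * no carry; 2 = zero, carry; 3 = not zero, carry.  E.g.
+ * 1 + 0xffffffffffffffffULL yields 0 with a carry out, hence cc 2.
+ */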
+static inline uint32_t cc_calc_addu_64(CPUState *env, uint64_t a1, uint64_t a2,
+ uint64_t ar)
+{
+ if (ar == 0) {
+ if (a1) {
+ return 2;
+ } else {
+ return 0;
+ }
+ } else {
+ if (ar < a1 || ar < a2) {
+ return 3;
+ } else {
+ return 1;
+ }
+ }
+}
+
+static inline uint32_t cc_calc_sub_64(CPUState *env, int64_t a1, int64_t a2,
+ int64_t ar)
+{
+ if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
+ return 3; /* overflow */
+ } else {
+ if (ar < 0) {
+ return 1;
+ } else if (ar > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+ }
+}
+
+static inline uint32_t cc_calc_subu_64(CPUState *env, uint64_t a1, uint64_t a2,
+ uint64_t ar)
+{
+ if (ar == 0) {
+ return 2;
+ } else {
+ if (a2 > a1) {
+ return 1;
+ } else {
+ return 3;
+ }
+ }
+}
+
+static inline uint32_t cc_calc_abs_64(CPUState *env, int64_t dst)
+{
+ if ((uint64_t)dst == 0x8000000000000000ULL) {
+ return 3;
+ } else if (dst) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+static inline uint32_t cc_calc_nabs_64(CPUState *env, int64_t dst)
+{
+ return !!dst;
+}
+
+static inline uint32_t cc_calc_comp_64(CPUState *env, int64_t dst)
+{
+ if ((uint64_t)dst == 0x8000000000000000ULL) {
+ return 3;
+ } else if (dst < 0) {
+ return 1;
+ } else if (dst > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+}
+
+static inline uint32_t cc_calc_add_32(CPUState *env, int32_t a1, int32_t a2,
+ int32_t ar)
+{
+ if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
+ return 3; /* overflow */
+ } else {
+ if (ar < 0) {
+ return 1;
+ } else if (ar > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+ }
+}
+
+static inline uint32_t cc_calc_addu_32(CPUState *env, uint32_t a1, uint32_t a2,
+ uint32_t ar)
+{
+ if (ar == 0) {
+ if (a1) {
+ return 2;
+ } else {
+ return 0;
+ }
+ } else {
+ if (ar < a1 || ar < a2) {
+ return 3;
+ } else {
+ return 1;
+ }
+ }
+}
+
+static inline uint32_t cc_calc_sub_32(CPUState *env, int32_t a1, int32_t a2,
+ int32_t ar)
+{
+ if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
+ return 3; /* overflow */
+ } else {
+ if (ar < 0) {
+ return 1;
+ } else if (ar > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+ }
+}
+
+static inline uint32_t cc_calc_subu_32(CPUState *env, uint32_t a1, uint32_t a2,
+ uint32_t ar)
+{
+ if (ar == 0) {
+ return 2;
+ } else {
+ if (a2 > a1) {
+ return 1;
+ } else {
+ return 3;
+ }
+ }
+}
+
+static inline uint32_t cc_calc_abs_32(CPUState *env, int32_t dst)
+{
+ if ((uint32_t)dst == 0x80000000UL) {
+ return 3;
+ } else if (dst) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+static inline uint32_t cc_calc_nabs_32(CPUState *env, int32_t dst)
+{
+ return !!dst;
+}
+
+static inline uint32_t cc_calc_comp_32(CPUState *env, int32_t dst)
+{
+ if ((uint32_t)dst == 0x80000000UL) {
+ return 3;
+ } else if (dst < 0) {
+ return 1;
+ } else if (dst > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+}
+
+/* calculate condition code for insert character under mask insn */
+static inline uint32_t cc_calc_icm_32(CPUState *env, uint32_t mask, uint32_t val)
+{
+ HELPER_LOG("%s: mask 0x%x val %d\n", __FUNCTION__, mask, val);
+ uint32_t cc;
+
+ if (mask == 0xf) {
+ if (!val) {
+ return 0;
+ } else if (val & 0x80000000) {
+ return 1;
+ } else {
+ return 2;
+ }
+ }
+
+ if (!val || !mask) {
+ cc = 0;
+ } else {
+ while (mask != 1) {
+ mask >>= 1;
+ val >>= 8;
+ }
+ if (val & 0x80) {
+ cc = 1;
+ } else {
+ cc = 2;
+ }
+ }
+ return cc;
+}
+
+static inline uint32_t cc_calc_slag(CPUState *env, uint64_t src, uint64_t shift)
+{
+ uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift);
+ uint64_t match, r;
+
+ /* check if the sign bit stays the same */
+ if (src & (1ULL << 63)) {
+ match = mask;
+ } else {
+ match = 0;
+ }
+
+ if ((src & mask) != match) {
+ /* overflow */
+ return 3;
+ }
+
+ r = ((src << shift) & ((1ULL << 63) - 1)) | (src & (1ULL << 63));
+
+ if ((int64_t)r == 0) {
+ return 0;
+ } else if ((int64_t)r < 0) {
+ return 1;
+ }
+
+ return 2;
+}
+
+static inline uint32_t do_calc_cc(CPUState *env, uint32_t cc_op, uint64_t src,
+ uint64_t dst, uint64_t vr)
+{
+ uint32_t r = 0;
+
+ switch (cc_op) {
+ case CC_OP_CONST0:
+ case CC_OP_CONST1:
+ case CC_OP_CONST2:
+ case CC_OP_CONST3:
+ /* cc_op value _is_ cc */
+ r = cc_op;
+ break;
+ case CC_OP_LTGT0_32:
+ r = cc_calc_ltgt0_32(env, dst);
+ break;
+ case CC_OP_LTGT0_64:
+ r = cc_calc_ltgt0_64(env, dst);
+ break;
+ case CC_OP_LTGT_32:
+ r = cc_calc_ltgt_32(env, src, dst);
+ break;
+ case CC_OP_LTGT_64:
+ r = cc_calc_ltgt_64(env, src, dst);
+ break;
+ case CC_OP_LTUGTU_32:
+ r = cc_calc_ltugtu_32(env, src, dst);
+ break;
+ case CC_OP_LTUGTU_64:
+ r = cc_calc_ltugtu_64(env, src, dst);
+ break;
+ case CC_OP_TM_32:
+ r = cc_calc_tm_32(env, src, dst);
+ break;
+ case CC_OP_TM_64:
+ r = cc_calc_tm_64(env, src, dst);
+ break;
+ case CC_OP_NZ:
+ r = cc_calc_nz(env, dst);
+ break;
+ case CC_OP_ADD_64:
+ r = cc_calc_add_64(env, src, dst, vr);
+ break;
+ case CC_OP_ADDU_64:
+ r = cc_calc_addu_64(env, src, dst, vr);
+ break;
+ case CC_OP_SUB_64:
+ r = cc_calc_sub_64(env, src, dst, vr);
+ break;
+ case CC_OP_SUBU_64:
+ r = cc_calc_subu_64(env, src, dst, vr);
+ break;
+ case CC_OP_ABS_64:
+ r = cc_calc_abs_64(env, dst);
+ break;
+ case CC_OP_NABS_64:
+ r = cc_calc_nabs_64(env, dst);
+ break;
+ case CC_OP_COMP_64:
+ r = cc_calc_comp_64(env, dst);
+ break;
+
+ case CC_OP_ADD_32:
+ r = cc_calc_add_32(env, src, dst, vr);
+ break;
+ case CC_OP_ADDU_32:
+ r = cc_calc_addu_32(env, src, dst, vr);
+ break;
+ case CC_OP_SUB_32:
+ r = cc_calc_sub_32(env, src, dst, vr);
+ break;
+ case CC_OP_SUBU_32:
+ r = cc_calc_subu_32(env, src, dst, vr);
+ break;
+ case CC_OP_ABS_32:
+ r = cc_calc_abs_32(env, dst);
+ break;
+ case CC_OP_NABS_32:
+ r = cc_calc_nabs_32(env, dst);
+ break;
+ case CC_OP_COMP_32:
+ r = cc_calc_comp_32(env, dst);
+ break;
+
+ case CC_OP_ICM:
+ r = cc_calc_icm_32(env, src, dst);
+ break;
+ case CC_OP_SLAG:
+ r = cc_calc_slag(env, src, dst);
+ break;
+
+ case CC_OP_LTGT_F32:
+ r = set_cc_f32(src, dst);
+ break;
+ case CC_OP_LTGT_F64:
+ r = set_cc_f64(src, dst);
+ break;
+ case CC_OP_NZ_F32:
+ r = set_cc_nz_f32(dst);
+ break;
+ case CC_OP_NZ_F64:
+ r = set_cc_nz_f64(dst);
+ break;
+
+ default:
+ cpu_abort(env, "Unknown CC operation: %s\n", cc_name(cc_op));
+ }
+
+ HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __FUNCTION__,
+ cc_name(cc_op), src, dst, vr, r);
+ return r;
+}
+
+uint32_t calc_cc(CPUState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
+ uint64_t vr)
+{
+ return do_calc_cc(env, cc_op, src, dst, vr);
+}
+
+uint32_t HELPER(calc_cc)(uint32_t cc_op, uint64_t src, uint64_t dst,
+ uint64_t vr)
+{
+ return do_calc_cc(env, cc_op, src, dst, vr);
+}
+
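+/*
+ * CONVERT TO DECIMAL packs a signed binary value into decimal: the
+ * low nibble holds the sign, 0xc for plus and 0xd for minus, with
+ * the BCD digits above it, e.g. cvd(-123) == 0x000000000000123d.
+ */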
+uint64_t HELPER(cvd)(int32_t bin)
+{
+ /* positive 0 */
+ uint64_t dec = 0x0c;
+ int shift = 4;
+
+ if (bin < 0) {
+ bin = -bin;
+ dec = 0x0d;
+ }
+
+ for (shift = 4; (shift < 64) && bin; shift += 4) {
+ int current_number = bin % 10;
+
+ dec |= (uint64_t)current_number << shift; /* shift can exceed 31 */
+ bin /= 10;
+ }
+
+ return dec;
+}
+
+void HELPER(unpk)(uint32_t len, uint64_t dest, uint64_t src)
+{
+ int len_dest = len >> 4;
+ int len_src = len & 0xf;
+ uint8_t b;
+ int second_nibble = 0;
+
+ dest += len_dest;
+ src += len_src;
+
+ /* last byte is special, it only flips the nibbles */
+ b = ldub(src);
+ stb(dest, (b << 4) | (b >> 4));
+ src--;
+ len_src--;
+
+ /* now pad every nibble with 0xf0 */
+
+ while (len_dest > 0) {
+ uint8_t cur_byte = 0;
+
+ if (len_src > 0) {
+ cur_byte = ldub(src);
+ }
+
+ len_dest--;
+ dest--;
+
+ /* only advance one nibble at a time */
+ if (second_nibble) {
+ cur_byte >>= 4;
+ len_src--;
+ src--;
+ }
+ second_nibble = !second_nibble;
+
+ /* digit */
+ cur_byte = (cur_byte & 0xf);
+ /* zone bits */
+ cur_byte |= 0xf0;
+
+ stb(dest, cur_byte);
+ }
+}
+
+void HELPER(tr)(uint32_t len, uint64_t array, uint64_t trans)
+{
+ int i;
+
+ for (i = 0; i <= len; i++) {
+ uint8_t byte = ldub(array + i);
+ uint8_t new_byte = ldub(trans + byte);
+ stb(array + i, new_byte);
+ }
+}
+
+#ifndef CONFIG_USER_ONLY
+
+void HELPER(load_psw)(uint64_t mask, uint64_t addr)
+{
+ load_psw(env, mask, addr);
+ cpu_loop_exit();
+}
+
+static void program_interrupt(CPUState *env, uint32_t code, int ilc)
+{
+ qemu_log("program interrupt at %#" PRIx64 "\n", env->psw.addr);
+
+ if (kvm_enabled()) {
+ kvm_s390_interrupt(env, KVM_S390_PROGRAM_INT, code);
+ } else {
+ env->int_pgm_code = code;
+ env->int_pgm_ilc = ilc;
+ env->exception_index = EXCP_PGM;
+ cpu_loop_exit();
+ }
+}
+
+static void ext_interrupt(CPUState *env, int type, uint32_t param,
+ uint64_t param64)
+{
+ cpu_inject_ext(env, type, param, param64);
+}
+
+int sclp_service_call(CPUState *env, uint32_t sccb, uint64_t code)
+{
+ int r = 0;
+ int shift = 0;
+
+#ifdef DEBUG_HELPER
+ printf("sclp(0x%x, 0x%" PRIx64 ")\n", sccb, code);
+#endif
+
+ if (sccb & ~0x7ffffff8ul) {
+ fprintf(stderr, "KVM: invalid sccb address 0x%x\n", sccb);
+ r = -1;
+ goto out;
+ }
+
+ switch(code) {
+ case SCLP_CMDW_READ_SCP_INFO:
+ case SCLP_CMDW_READ_SCP_INFO_FORCED:
+ while ((ram_size >> (20 + shift)) > 65535) {
+ shift++;
+ }
+ stw_phys(sccb + SCP_MEM_CODE, ram_size >> (20 + shift));
+ stb_phys(sccb + SCP_INCREMENT, 1 << shift);
+ stw_phys(sccb + SCP_RESPONSE_CODE, 0x10);
+
+ if (kvm_enabled()) {
+#ifdef CONFIG_KVM
+ kvm_s390_interrupt_internal(env, KVM_S390_INT_SERVICE,
+ sccb & ~3, 0, 1);
+#endif
+ } else {
+ env->psw.addr += 4;
+ ext_interrupt(env, EXT_SERVICE, sccb & ~3, 0);
+ }
+ break;
+ default:
+#ifdef DEBUG_HELPER
+ printf("KVM: invalid sclp call 0x%x / 0x%" PRIx64 "x\n", sccb, code);
+#endif
+ r = -1;
+ break;
+ }
+
+out:
+ return r;
+}
+
+/* SCLP service call */
+uint32_t HELPER(servc)(uint32_t r1, uint64_t r2)
+{
+ if (sclp_service_call(env, r1, r2)) {
+ return 3;
+ }
+
+ return 0;
+}
+
+/* DIAG */
+uint64_t HELPER(diag)(uint32_t num, uint64_t mem, uint64_t code)
+{
+ uint64_t r;
+
+ switch (num) {
+ case 0x500:
+ /* KVM hypercall */
+ r = s390_virtio_hypercall(env, mem, code);
+ break;
+ case 0x44:
+ /* yield */
+ r = 0;
+ break;
+ case 0x308:
+ /* ipl */
+ r = 0;
+ break;
+ default:
+ r = -1;
+ break;
+ }
+
+ if (r) {
+ program_interrupt(env, PGM_OPERATION, ILC_LATER_INC);
+ }
+
+ return r;
+}
+
+/* Store CPU ID */
+void HELPER(stidp)(uint64_t a1)
+{
+ stq(a1, env->cpu_num);
+}
+
+/* Set Prefix */
+void HELPER(spx)(uint64_t a1)
+{
+ uint32_t prefix;
+
+ prefix = ldl(a1);
+ env->psa = prefix & 0xfffff000;
+ qemu_log("prefix: %#x\n", prefix);
+ tlb_flush_page(env, 0);
+ tlb_flush_page(env, TARGET_PAGE_SIZE);
+}
+
+/* Set Clock */
+uint32_t HELPER(sck)(uint64_t a1)
+{
+ /* XXX not implemented - is it necessary? */
+
+ return 0;
+}
+
+static inline uint64_t clock_value(CPUState *env)
+{
+ uint64_t time;
+
+ time = env->tod_offset +
+ time2tod(qemu_get_clock_ns(vm_clock) - env->tod_basetime);
+
+ return time;
+}
+
+/* Store Clock */
+uint32_t HELPER(stck)(uint64_t a1)
+{
+ stq(a1, clock_value(env));
+
+ return 0;
+}
+
+/* Store Clock Extended */
+uint32_t HELPER(stcke)(uint64_t a1)
+{
+ stb(a1, 0);
+ /* basically the same value as stck */
+ stq(a1 + 1, clock_value(env) | env->cpu_num);
+ /* more fine grained than stck */
+ stq(a1 + 9, 0);
+ /* XXX programmable fields */
+ stw(a1 + 17, 0);
+
+ return 0;
+}
+
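+/*
+ * One TOD clock unit is 2^-12 microseconds (bit 51 ticks every
+ * microsecond), so TOD units convert to nanoseconds as
+ * ns = tod * 1000 / 4096 = (tod * 125) >> 9, as used below.
+ */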
+/* Set Clock Comparator */
+void HELPER(sckc)(uint64_t a1)
+{
+ uint64_t time = ldq(a1);
+
+ if (time == -1ULL) {
+ return;
+ }
+
+ /* difference between now and then */
+ time -= clock_value(env);
+ /* nanoseconds */
+ time = (time * 125) >> 9;
+
+ qemu_mod_timer(env->tod_timer, qemu_get_clock_ns(vm_clock) + time);
+}
+
+/* Store Clock Comparator */
+void HELPER(stckc)(uint64_t a1)
+{
+ /* XXX implement */
+ stq(a1, 0);
+}
+
+/* Set CPU Timer */
+void HELPER(spt)(uint64_t a1)
+{
+ uint64_t time = ldq(a1);
+
+ if (time == -1ULL) {
+ return;
+ }
+
+ /* nanoseconds */
+ time = (time * 125) >> 9;
+
+ qemu_mod_timer(env->cpu_timer, qemu_get_clock_ns(vm_clock) + time);
+}
+
+/* Store CPU Timer */
+void HELPER(stpt)(uint64_t a1)
+{
+ /* XXX implement */
+ stq(a1, 0);
+}
+
+/* Store System Information */
+uint32_t HELPER(stsi)(uint64_t a0, uint32_t r0, uint32_t r1)
+{
+ int cc = 0;
+ int sel1, sel2;
+
+ if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
+ ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
+ /* valid function code, invalid reserved bits */
+ program_interrupt(env, PGM_SPECIFICATION, 2);
+ }
+
+ sel1 = r0 & STSI_R0_SEL1_MASK;
+ sel2 = r1 & STSI_R1_SEL2_MASK;
+
+ /* XXX: spec exception if sysib is not 4k-aligned */
+
+ switch (r0 & STSI_LEVEL_MASK) {
+ case STSI_LEVEL_1:
+ if ((sel1 == 1) && (sel2 == 1)) {
+ /* Basic Machine Configuration */
+ struct sysib_111 sysib;
+
+ memset(&sysib, 0, sizeof(sysib));
+ ebcdic_put(sysib.manuf, "QEMU ", 16);
+ /* same as machine type number in STORE CPU ID */
+ ebcdic_put(sysib.type, "QEMU", 4);
+ /* same as model number in STORE CPU ID */
+ ebcdic_put(sysib.model, "QEMU ", 16);
+ ebcdic_put(sysib.sequence, "QEMU ", 16);
+ ebcdic_put(sysib.plant, "QEMU", 4);
+ cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
+ } else if ((sel1 == 2) && (sel2 == 1)) {
+ /* Basic Machine CPU */
+ struct sysib_121 sysib;
+
+ memset(&sysib, 0, sizeof(sysib));
+ /* XXX make different for different CPUs? */
+ ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
+ ebcdic_put(sysib.plant, "QEMU", 4);
+ stw_p(&sysib.cpu_addr, env->cpu_num);
+ cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
+ } else if ((sel1 == 2) && (sel2 == 2)) {
+ /* Basic Machine CPUs */
+ struct sysib_122 sysib;
+
+ memset(&sysib, 0, sizeof(sysib));
+ stl_p(&sysib.capability, 0x443afc29);
+ /* XXX change when SMP comes */
+ stw_p(&sysib.total_cpus, 1);
+ stw_p(&sysib.active_cpus, 1);
+ stw_p(&sysib.standby_cpus, 0);
+ stw_p(&sysib.reserved_cpus, 0);
+ cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
+ } else {
+ cc = 3;
+ }
+ break;
+ case STSI_LEVEL_2:
+ {
+ if ((sel1 == 2) && (sel2 == 1)) {
+ /* LPAR CPU */
+ struct sysib_221 sysib;
+
+ memset(&sysib, 0, sizeof(sysib));
+ /* XXX make different for different CPUs? */
+ ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
+ ebcdic_put(sysib.plant, "QEMU", 4);
+ stw_p(&sysib.cpu_addr, env->cpu_num);
+ stw_p(&sysib.cpu_id, 0);
+ cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
+ } else if ((sel1 == 2) && (sel2 == 2)) {
+ /* LPAR CPUs */
+ struct sysib_222 sysib;
+
+ memset(&sysib, 0, sizeof(sysib));
+ stw_p(&sysib.lpar_num, 0);
+ sysib.lcpuc = 0;
+ /* XXX change when SMP comes */
+ stw_p(&sysib.total_cpus, 1);
+ stw_p(&sysib.conf_cpus, 1);
+ stw_p(&sysib.standby_cpus, 0);
+ stw_p(&sysib.reserved_cpus, 0);
+ ebcdic_put(sysib.name, "QEMU ", 8);
+ stl_p(&sysib.caf, 1000);
+ stw_p(&sysib.dedicated_cpus, 0);
+ stw_p(&sysib.shared_cpus, 0);
+ cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
+ } else {
+ cc = 3;
+ }
+ break;
+ }
+ case STSI_LEVEL_3:
+ {
+ if ((sel1 == 2) && (sel2 == 2)) {
+ /* VM CPUs */
+ struct sysib_322 sysib;
+
+ memset(&sysib, 0, sizeof(sysib));
+ sysib.count = 1;
+ /* XXX change when SMP comes */
+ stw_p(&sysib.vm[0].total_cpus, 1);
+ stw_p(&sysib.vm[0].conf_cpus, 1);
+ stw_p(&sysib.vm[0].standby_cpus, 0);
+ stw_p(&sysib.vm[0].reserved_cpus, 0);
+ ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
+ stl_p(&sysib.vm[0].caf, 1000);
+ ebcdic_put(sysib.vm[0].cpi, "KVM/Linux ", 16);
+ cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
+ } else {
+ cc = 3;
+ }
+ break;
+ }
+ case STSI_LEVEL_CURRENT:
+ env->regs[0] = STSI_LEVEL_3;
+ break;
+ default:
+ cc = 3;
+ break;
+ }
+
+ return cc;
+}
+
+void HELPER(lctlg)(uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ int i;
+ uint64_t src = a2;
+
+ for (i = r1;; i = (i + 1) % 16) {
+ env->cregs[i] = ldq(src);
+ HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
+ i, src, env->cregs[i]);
+ src += sizeof(uint64_t);
+
+ if (i == r3) {
+ break;
+ }
+ }
+
+ tlb_flush(env, 1);
+}
+
+void HELPER(lctl)(uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ int i;
+ uint64_t src = a2;
+
+ for (i = r1;; i = (i + 1) % 16) {
+ env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) |
+ (uint32_t)ldl(src);
+ src += sizeof(uint32_t);
+
+ if (i == r3) {
+ break;
+ }
+ }
+
+ tlb_flush(env, 1);
+}
+
+void HELPER(stctg)(uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ int i;
+ uint64_t dest = a2;
+
+ for (i = r1;; i = (i + 1) % 16) {
+ stq(dest, env->cregs[i]);
+ dest += sizeof(uint64_t);
+
+ if (i == r3) {
+ break;
+ }
+ }
+}
+
+void HELPER(stctl)(uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ int i;
+ uint64_t dest = a2;
+
+ for (i = r1;; i = (i + 1) % 16) {
+ stl(dest, env->cregs[i]);
+ dest += sizeof(uint32_t);
+
+ if (i == r3) {
+ break;
+ }
+ }
+}
+
+uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
+{
+ /* XXX implement */
+
+ return 0;
+}
+
+/* insert storage key extended */
+uint64_t HELPER(iske)(uint64_t r2)
+{
+ uint64_t addr = get_address(0, 0, r2);
+
+ if (addr >= ram_size) {
+ return 0;
+ }
+
+ /* XXX maybe use qemu's internal keys? */
+ return env->storage_keys[addr / TARGET_PAGE_SIZE];
+}
+
+/* set storage key extended */
+void HELPER(sske)(uint32_t r1, uint64_t r2)
+{
+ uint64_t addr = get_address(0, 0, r2);
+
+ if (addr >= ram_size) {
+ return;
+ }
+
+ env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;
+}
+
+/* reset reference bit extended */
+uint32_t HELPER(rrbe)(uint32_t r1, uint64_t r2)
+{
+ if (r2 >= ram_size) {
+ return 0;
+ }
+
+ /* XXX implement */
+#if 0
+ env->storage_keys[r2 / TARGET_PAGE_SIZE] &= ~SK_REFERENCED;
+#endif
+
+ /*
+ * cc
+ *
+ * 0 Reference bit zero; change bit zero
+ * 1 Reference bit zero; change bit one
+ * 2 Reference bit one; change bit zero
+ * 3 Reference bit one; change bit one
+ */
+ return 0;
+}
+
+/* compare and swap and purge */
+uint32_t HELPER(csp)(uint32_t r1, uint32_t r2)
+{
+ uint32_t cc;
+ uint32_t o1 = env->regs[r1];
+ uint64_t a2 = get_address_31fix(r2) & ~3ULL;
+ uint32_t o2 = ldl(a2);
+
+ if (o1 == o2) {
+ stl(a2, env->regs[(r1 + 1) & 15]);
+ if (env->regs[r2] & 0x3) {
+ /* flush TLB / ALB */
+ tlb_flush(env, 1);
+ }
+ cc = 0;
+ } else {
+ env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
+ cc = 1;
+ }
+
+ return cc;
+}
+
+static uint32_t mvc_asc(int64_t l, uint64_t a1, uint64_t mode1, uint64_t a2,
+ uint64_t mode2)
+{
+ target_ulong src, dest;
+ int flags, cc = 0, i;
+
+ if (!l) {
+ return 0;
+ } else if (l > 256) {
+ /* max 256 */
+ l = 256;
+ cc = 3;
+ }
+
+ if (mmu_translate(env, a1 & TARGET_PAGE_MASK, 1, mode1, &dest, &flags)) {
+ cpu_loop_exit();
+ }
+ dest |= a1 & ~TARGET_PAGE_MASK;
+
+ if (mmu_translate(env, a2 & TARGET_PAGE_MASK, 0, mode2, &src, &flags)) {
+ cpu_loop_exit();
+ }
+ src |= a2 & ~TARGET_PAGE_MASK;
+
+ /* XXX replace w/ memcpy */
+ for (i = 0; i < l; i++) {
+ /* XXX be more clever */
+ if ((((dest + i) & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) ||
+ (((src + i) & TARGET_PAGE_MASK) != (src & TARGET_PAGE_MASK))) {
+ mvc_asc(l - i, a1 + i, mode1, a2 + i, mode2);
+ break;
+ }
+ stb_phys(dest + i, ldub_phys(src + i));
+ }
+
+ return cc;
+}
+
+uint32_t HELPER(mvcs)(uint64_t l, uint64_t a1, uint64_t a2)
+{
+ HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
+ __FUNCTION__, l, a1, a2);
+
+ return mvc_asc(l, a1, PSW_ASC_SECONDARY, a2, PSW_ASC_PRIMARY);
+}
+
+uint32_t HELPER(mvcp)(uint64_t l, uint64_t a1, uint64_t a2)
+{
+ HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
+ __FUNCTION__, l, a1, a2);
+
+ return mvc_asc(l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);
+}
+
+uint32_t HELPER(sigp)(uint64_t order_code, uint32_t r1, uint64_t cpu_addr)
+{
+ int cc = 0;
+
+ HELPER_LOG("%s: %016" PRIx64 " %08x %016" PRIx64 "\n",
+ __FUNCTION__, order_code, r1, cpu_addr);
+
+ /* Remember: Use "R1 or R1+1, whichever is the odd-numbered register"
+ as parameter (input). Status (output) is always R1. */
+
+ switch (order_code) {
+ case SIGP_SET_ARCH:
+ /* switch arch */
+ break;
+ case SIGP_SENSE:
+ /* enumerate CPU status */
+ if (cpu_addr) {
+ /* XXX implement when SMP comes */
+ return 3;
+ }
+ env->regs[r1] &= 0xffffffff00000000ULL;
+ cc = 1;
+ break;
+ default:
+ /* unknown sigp */
+ fprintf(stderr, "XXX unknown sigp: 0x%" PRIx64 "\n", order_code);
+ cc = 3;
+ }
+
+ return cc;
+}
+
+void HELPER(sacf)(uint64_t a1)
+{
+ HELPER_LOG("%s: %16" PRIx64 "\n", __FUNCTION__, a1);
+
+ switch (a1 & 0xf00) {
+ case 0x000:
+ env->psw.mask &= ~PSW_MASK_ASC;
+ env->psw.mask |= PSW_ASC_PRIMARY;
+ break;
+ case 0x100:
+ env->psw.mask &= ~PSW_MASK_ASC;
+ env->psw.mask |= PSW_ASC_SECONDARY;
+ break;
+ case 0x300:
+ env->psw.mask &= ~PSW_MASK_ASC;
+ env->psw.mask |= PSW_ASC_HOME;
+ break;
+ default:
+ qemu_log("unknown sacf mode: %" PRIx64 "\n", a1);
+ program_interrupt(env, PGM_SPECIFICATION, 2);
+ break;
+ }
+}
+
+/* invalidate pte */
+void HELPER(ipte)(uint64_t pte_addr, uint64_t vaddr)
+{
+ uint64_t page = vaddr & TARGET_PAGE_MASK;
+ uint64_t pte = 0;
+
+ /* XXX broadcast to other CPUs */
+
+ /* XXX Linux is nice enough to give us the exact pte address.
+ According to spec we'd have to find it out ourselves */
+ /* XXX Linux is fine with overwriting the pte, the spec requires
+ us to only set the invalid bit */
+ stq_phys(pte_addr, pte | _PAGE_INVALID);
+
+ /* XXX we exploit the fact that Linux passes the exact virtual
+ address here - it's not obliged to! */
+ tlb_flush_page(env, page);
+}
+
+/* flush local tlb */
+void HELPER(ptlb)(void)
+{
+ tlb_flush(env, 1);
+}
+
+/* store using real address */
+void HELPER(stura)(uint64_t addr, uint32_t v1)
+{
+ stl_phys(get_address(0, 0, addr), v1); /* STURA stores a 32-bit word */
+}
+
+/* load real address */
+uint32_t HELPER(lra)(uint64_t addr, uint32_t r1)
+{
+ uint32_t cc = 0;
+ int old_exc = env->exception_index;
+ uint64_t asc = env->psw.mask & PSW_MASK_ASC;
+ uint64_t ret;
+ int flags;
+
+ /* XXX incomplete - has more corner cases */
+ if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
+ program_interrupt(env, PGM_SPECIAL_OP, 2);
+ }
+
+ env->exception_index = old_exc;
+ if (mmu_translate(env, addr, 0, asc, &ret, &flags)) {
+ cc = 3;
+ }
+ if (env->exception_index == EXCP_PGM) {
+ ret = env->int_pgm_code | 0x80000000;
+ } else {
+ ret |= addr & ~TARGET_PAGE_MASK;
+ }
+ env->exception_index = old_exc;
+
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | (ret & 0xffffffffULL);
+ } else {
+ env->regs[r1] = ret;
+ }
+
+ return cc;
+}
+
+#endif
diff --git a/target-s390x/translate.c b/target-s390x/translate.c
index 4d45e32616..8e71df3d26 100644
--- a/target-s390x/translate.c
+++ b/target-s390x/translate.c
@@ -2,6 +2,7 @@
* S/390 translation
*
* Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2010 Alexander Graf
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -16,6 +17,22 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+
+/* #define DEBUG_ILLEGAL_INSTRUCTIONS */
+/* #define DEBUG_INLINE_BRANCHES */
+#define S390X_DEBUG_DISAS
+/* #define S390X_DEBUG_DISAS_VERBOSE */
+
+#ifdef S390X_DEBUG_DISAS_VERBOSE
+# define LOG_DISAS(...) qemu_log(__VA_ARGS__)
+#else
+# define LOG_DISAS(...) do { } while (0)
+#endif
#include "cpu.h"
#include "exec-all.h"
@@ -23,18 +40,60 @@
#include "tcg-op.h"
#include "qemu-log.h"
+/* global register indexes */
+static TCGv_ptr cpu_env;
+
+#include "gen-icount.h"
+#include "helpers.h"
+#define GEN_HELPER 1
+#include "helpers.h"
+
+typedef struct DisasContext DisasContext;
+struct DisasContext {
+ uint64_t pc;
+ int is_jmp;
+ enum cc_op cc_op;
+ struct TranslationBlock *tb;
+};
+
+#define DISAS_EXCP 4
+
+static void gen_op_calc_cc(DisasContext *s);
+
+#ifdef DEBUG_INLINE_BRANCHES
+static uint64_t inline_branch_hit[CC_OP_MAX];
+static uint64_t inline_branch_miss[CC_OP_MAX];
+#endif
+
+static inline void debug_insn(uint64_t insn)
+{
+ LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
+}
+
+static inline uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
+{
+ if (!(s->tb->flags & FLAG_MASK_64)) {
+ if (s->tb->flags & FLAG_MASK_32) {
+ return pc | 0x80000000;
+ }
+ }
+ return pc;
+}
+
void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
int flags)
{
int i;
+
for (i = 0; i < 16; i++) {
- cpu_fprintf(f, "R%02d=%016lx", i, env->regs[i]);
+ cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
if ((i % 4) == 3) {
cpu_fprintf(f, "\n");
} else {
cpu_fprintf(f, " ");
}
}
+
for (i = 0; i < 16; i++) {
cpu_fprintf(f, "F%02d=%016" PRIx64, i, *(uint64_t *)&env->fregs[i]);
if ((i % 4) == 3) {
@@ -43,18 +102,5122 @@ void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
cpu_fprintf(f, " ");
}
}
- cpu_fprintf(f, "PSW=mask %016lx addr %016lx cc %02x\n", env->psw.mask, env->psw.addr, env->cc);
+
+ cpu_fprintf(f, "\n");
+
+#ifndef CONFIG_USER_ONLY
+ for (i = 0; i < 16; i++) {
+ cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
+ if ((i % 4) == 3) {
+ cpu_fprintf(f, "\n");
+ } else {
+ cpu_fprintf(f, " ");
+ }
+ }
+#endif
+
+ cpu_fprintf(f, "\n");
+
+ if (env->cc_op > 3) {
+ cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
+ env->psw.mask, env->psw.addr, cc_name(env->cc_op));
+ } else {
+ cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
+ env->psw.mask, env->psw.addr, env->cc_op);
+ }
+
+#ifdef DEBUG_INLINE_BRANCHES
+ for (i = 0; i < CC_OP_MAX; i++) {
+ cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
+ inline_branch_miss[i], inline_branch_hit[i]);
+ }
+#endif
+}
+
+static TCGv_i64 psw_addr;
+static TCGv_i64 psw_mask;
+
+static TCGv_i32 cc_op;
+static TCGv_i64 cc_src;
+static TCGv_i64 cc_dst;
+static TCGv_i64 cc_vr;
+
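+/* "r0".."r9" take 3 bytes each including the NUL, "r10".."r15" take 4 */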
+static char cpu_reg_names[10*3 + 6*4];
+static TCGv_i64 regs[16];
+
+static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
+
+void s390x_translate_init(void)
+{
+ int i;
+ size_t cpu_reg_names_size = sizeof(cpu_reg_names);
+ char *p;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ psw_addr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, psw.addr),
+ "psw_addr");
+ psw_mask = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, psw.mask),
+ "psw_mask");
+
+ cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUState, cc_op),
+ "cc_op");
+ cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, cc_src),
+ "cc_src");
+ cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, cc_dst),
+ "cc_dst");
+ cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, cc_vr),
+ "cc_vr");
+
+ p = cpu_reg_names;
+ for (i = 0; i < 16; i++) {
+ snprintf(p, cpu_reg_names_size, "r%d", i);
+ regs[i] = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUState, regs[i]), p);
+ p += (i < 10) ? 3 : 4;
+ cpu_reg_names_size -= (i < 10) ? 3 : 4;
+ }
+}
+
+static inline TCGv_i64 load_reg(int reg)
+{
+ TCGv_i64 r = tcg_temp_new_i64();
+ tcg_gen_mov_i64(r, regs[reg]);
+ return r;
+}
+
+static inline TCGv_i64 load_freg(int reg)
+{
+ TCGv_i64 r = tcg_temp_new_i64();
+ tcg_gen_ld_i64(r, cpu_env, offsetof(CPUState, fregs[reg].d));
+ return r;
+}
+
+static inline TCGv_i32 load_freg32(int reg)
+{
+ TCGv_i32 r = tcg_temp_new_i32();
+ tcg_gen_ld_i32(r, cpu_env, offsetof(CPUState, fregs[reg].l.upper));
+ return r;
+}
+
+static inline TCGv_i32 load_reg32(int reg)
+{
+ TCGv_i32 r = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(r, regs[reg]);
+ return r;
+}
+
+static inline TCGv_i64 load_reg32_i64(int reg)
+{
+ TCGv_i64 r = tcg_temp_new_i64();
+ tcg_gen_ext32s_i64(r, regs[reg]);
+ return r;
+}
+
+static inline void store_reg(int reg, TCGv_i64 v)
+{
+ tcg_gen_mov_i64(regs[reg], v);
+}
+
+static inline void store_freg(int reg, TCGv_i64 v)
+{
+ tcg_gen_st_i64(v, cpu_env, offsetof(CPUState, fregs[reg].d));
+}
+
+static inline void store_reg32(int reg, TCGv_i32 v)
+{
+#if HOST_LONG_BITS == 32
+ tcg_gen_mov_i32(TCGV_LOW(regs[reg]), v);
+#else
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(tmp, v);
+ /* 32 bit register writes keep the upper half */
+ tcg_gen_deposit_i64(regs[reg], regs[reg], tmp, 0, 32);
+ tcg_temp_free_i64(tmp);
+#endif
+}
+
+static inline void store_reg32_i64(int reg, TCGv_i64 v)
+{
+ /* 32 bit register writes keep the upper half */
+#if HOST_LONG_BITS == 32
+ tcg_gen_mov_i32(TCGV_LOW(regs[reg]), TCGV_LOW(v));
+#else
+ tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
+#endif
+}
+
+static inline void store_reg16(int reg, TCGv_i32 v)
+{
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(tmp, v);
+ /* 16 bit register writes keep the upper bytes */
+ tcg_gen_deposit_i64(regs[reg], regs[reg], tmp, 0, 16);
+ tcg_temp_free_i64(tmp);
+}
+
+static inline void store_reg8(int reg, TCGv_i64 v)
+{
+ /* 8 bit register writes keep the upper bytes */
+ tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 8);
+}
+
+static inline void store_freg32(int reg, TCGv_i32 v)
+{
+ tcg_gen_st_i32(v, cpu_env, offsetof(CPUState, fregs[reg].l.upper));
+}
+
+static inline void update_psw_addr(DisasContext *s)
+{
+ /* psw.addr */
+ tcg_gen_movi_i64(psw_addr, s->pc);
+}
+
+static inline void potential_page_fault(DisasContext *s)
+{
+#ifndef CONFIG_USER_ONLY
+ update_psw_addr(s);
+ gen_op_calc_cc(s);
+#endif
+}
+
+static inline uint64_t ld_code2(uint64_t pc)
+{
+ return (uint64_t)lduw_code(pc);
+}
+
+static inline uint64_t ld_code4(uint64_t pc)
+{
+ return (uint64_t)ldl_code(pc);
+}
+
+static inline uint64_t ld_code6(uint64_t pc)
+{
+ uint64_t opc;
+ opc = (uint64_t)lduw_code(pc) << 32;
+ opc |= (uint64_t)(uint32_t)ldl_code(pc+2);
+ return opc;
+}
+
+static inline int get_mem_index(DisasContext *s)
+{
+ switch (s->tb->flags & FLAG_MASK_ASC) {
+ case PSW_ASC_PRIMARY >> 32:
+ return 0;
+ case PSW_ASC_SECONDARY >> 32:
+ return 1;
+ case PSW_ASC_HOME >> 32:
+ return 2;
+ default:
+ tcg_abort();
+ break;
+ }
+}
+
+static inline void gen_debug(DisasContext *s)
+{
+ TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
+ update_psw_addr(s);
+ gen_op_calc_cc(s);
+ gen_helper_exception(tmp);
+ tcg_temp_free_i32(tmp);
+ s->is_jmp = DISAS_EXCP;
+}
+
+#ifdef CONFIG_USER_ONLY
+
+static void gen_illegal_opcode(DisasContext *s, int ilc)
+{
+ TCGv_i32 tmp = tcg_const_i32(EXCP_SPEC);
+ update_psw_addr(s);
+ gen_op_calc_cc(s);
+ gen_helper_exception(tmp);
+ tcg_temp_free_i32(tmp);
+ s->is_jmp = DISAS_EXCP;
+}
+
+#else /* CONFIG_USER_ONLY */
+
+static void debug_print_inst(DisasContext *s, int ilc)
+{
+#ifdef DEBUG_ILLEGAL_INSTRUCTIONS
+ uint64_t inst = 0;
+
+ switch (ilc & 3) {
+ case 1:
+ inst = ld_code2(s->pc);
+ break;
+ case 2:
+ inst = ld_code4(s->pc);
+ break;
+ case 3:
+ inst = ld_code6(s->pc);
+ break;
+ }
+
+ fprintf(stderr, "Illegal instruction [%d at %016" PRIx64 "]: 0x%016"
+ PRIx64 "\n", ilc, s->pc, inst);
+#endif
+}
+
+static void gen_program_exception(DisasContext *s, int ilc, int code)
+{
+ TCGv_i32 tmp;
+
+ debug_print_inst(s, ilc);
+
+ /* remember which program exception this was */
+ tmp = tcg_const_i32(code);
+ tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, int_pgm_code));
+ tcg_temp_free_i32(tmp);
+
+ tmp = tcg_const_i32(ilc);
+ tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, int_pgm_ilc));
+ tcg_temp_free_i32(tmp);
+
+ /* advance past instruction */
+ s->pc += (ilc * 2);
+ update_psw_addr(s);
+
+ /* save off cc */
+ gen_op_calc_cc(s);
+
+ /* trigger exception */
+ tmp = tcg_const_i32(EXCP_PGM);
+ gen_helper_exception(tmp);
+ tcg_temp_free_i32(tmp);
+
+ /* end TB here */
+ s->is_jmp = DISAS_EXCP;
+}
+
+static void gen_illegal_opcode(DisasContext *s, int ilc)
+{
+ gen_program_exception(s, ilc, PGM_SPECIFICATION);
+}
+
+static void gen_privileged_exception(DisasContext *s, int ilc)
+{
+ gen_program_exception(s, ilc, PGM_PRIVILEGED);
+}
+
+static void check_privileged(DisasContext *s, int ilc)
+{
+ if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
+ gen_privileged_exception(s, ilc);
+ }
+}
+
+#endif /* CONFIG_USER_ONLY */
+
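+/*
+ * Compute an effective address from index, base and displacement,
+ * e.g. x2 = 0, b2 = 12, d2 = 0x123 yields regs[12] + 0x123; outside
+ * of 64-bit mode the result is truncated to 31 bits.
+ */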
+static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
+{
+ TCGv_i64 tmp;
+
+ /* 31-bitify the immediate part; register contents are dealt with below */
+ if (!(s->tb->flags & FLAG_MASK_64)) {
+ d2 &= 0x7fffffffUL;
+ }
+
+ if (x2) {
+ if (d2) {
+ tmp = tcg_const_i64(d2);
+ tcg_gen_add_i64(tmp, tmp, regs[x2]);
+ } else {
+ tmp = load_reg(x2);
+ }
+ if (b2) {
+ tcg_gen_add_i64(tmp, tmp, regs[b2]);
+ }
+ } else if (b2) {
+ if (d2) {
+ tmp = tcg_const_i64(d2);
+ tcg_gen_add_i64(tmp, tmp, regs[b2]);
+ } else {
+ tmp = load_reg(b2);
+ }
+ } else {
+ tmp = tcg_const_i64(d2);
+ }
+
+ /* 31-bit mode mask if there are values loaded from registers */
+ if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
+ tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
+ }
+
+ return tmp;
+}
+
+static void gen_op_movi_cc(DisasContext *s, uint32_t val)
+{
+ s->cc_op = CC_OP_CONST0 + val;
+}
+
+static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
+{
+ tcg_gen_discard_i64(cc_src);
+ tcg_gen_mov_i64(cc_dst, dst);
+ tcg_gen_discard_i64(cc_vr);
+ s->cc_op = op;
+}
+
+static void gen_op_update1_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 dst)
+{
+ tcg_gen_discard_i64(cc_src);
+ tcg_gen_extu_i32_i64(cc_dst, dst);
+ tcg_gen_discard_i64(cc_vr);
+ s->cc_op = op;
+}
+
+static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
+ TCGv_i64 dst)
+{
+ tcg_gen_mov_i64(cc_src, src);
+ tcg_gen_mov_i64(cc_dst, dst);
+ tcg_gen_discard_i64(cc_vr);
+ s->cc_op = op;
+}
+
+static void gen_op_update2_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
+ TCGv_i32 dst)
+{
+ tcg_gen_extu_i32_i64(cc_src, src);
+ tcg_gen_extu_i32_i64(cc_dst, dst);
+ tcg_gen_discard_i64(cc_vr);
+ s->cc_op = op;
+}
+
+static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
+ TCGv_i64 dst, TCGv_i64 vr)
+{
+ tcg_gen_mov_i64(cc_src, src);
+ tcg_gen_mov_i64(cc_dst, dst);
+ tcg_gen_mov_i64(cc_vr, vr);
+ s->cc_op = op;
+}
+
+static void gen_op_update3_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
+ TCGv_i32 dst, TCGv_i32 vr)
+{
+ tcg_gen_extu_i32_i64(cc_src, src);
+ tcg_gen_extu_i32_i64(cc_dst, dst);
+ tcg_gen_extu_i32_i64(cc_vr, vr);
+ s->cc_op = op;
+}
+
+static inline void set_cc_nz_u32(DisasContext *s, TCGv_i32 val)
+{
+ gen_op_update1_cc_i32(s, CC_OP_NZ, val);
+}
+
+static inline void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
+{
+ gen_op_update1_cc_i64(s, CC_OP_NZ, val);
+}
+
+static inline void cmp_32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
+ enum cc_op cond)
+{
+ gen_op_update2_cc_i32(s, cond, v1, v2);
+}
+
+static inline void cmp_64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
+ enum cc_op cond)
+{
+ gen_op_update2_cc_i64(s, cond, v1, v2);
+}
+
+static inline void cmp_s32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
+{
+ cmp_32(s, v1, v2, CC_OP_LTGT_32);
+}
+
+static inline void cmp_u32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
+{
+ cmp_32(s, v1, v2, CC_OP_LTUGTU_32);
+}
+
+static inline void cmp_s32c(DisasContext *s, TCGv_i32 v1, int32_t v2)
+{
+ /* XXX optimize for the constant? put it in s? */
+ TCGv_i32 tmp = tcg_const_i32(v2);
+ cmp_32(s, v1, tmp, CC_OP_LTGT_32);
+ tcg_temp_free_i32(tmp);
+}
+
+static inline void cmp_u32c(DisasContext *s, TCGv_i32 v1, uint32_t v2)
+{
+ TCGv_i32 tmp = tcg_const_i32(v2);
+ cmp_32(s, v1, tmp, CC_OP_LTUGTU_32);
+ tcg_temp_free_i32(tmp);
+}
+
+static inline void cmp_s64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
+{
+ cmp_64(s, v1, v2, CC_OP_LTGT_64);
+}
+
+static inline void cmp_u64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
+{
+ cmp_64(s, v1, v2, CC_OP_LTUGTU_64);
+}
+
+static inline void cmp_s64c(DisasContext *s, TCGv_i64 v1, int64_t v2)
+{
+ TCGv_i64 tmp = tcg_const_i64(v2);
+ cmp_s64(s, v1, tmp);
+ tcg_temp_free_i64(tmp);
+}
+
+static inline void cmp_u64c(DisasContext *s, TCGv_i64 v1, uint64_t v2)
+{
+ TCGv_i64 tmp = tcg_const_i64(v2);
+ cmp_u64(s, v1, tmp);
+ tcg_temp_free_i64(tmp);
+}
+
+static inline void set_cc_s32(DisasContext *s, TCGv_i32 val)
+{
+ gen_op_update1_cc_i32(s, CC_OP_LTGT0_32, val);
+}
+
+static inline void set_cc_s64(DisasContext *s, TCGv_i64 val)
+{
+ gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, val);
+}
+
+static void set_cc_add64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2, TCGv_i64 vr)
+{
+ gen_op_update3_cc_i64(s, CC_OP_ADD_64, v1, v2, vr);
+}
+
+static void set_cc_addu64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
+ TCGv_i64 vr)
+{
+ gen_op_update3_cc_i64(s, CC_OP_ADDU_64, v1, v2, vr);
+}
+
+static void set_cc_sub64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2, TCGv_i64 vr)
+{
+ gen_op_update3_cc_i64(s, CC_OP_SUB_64, v1, v2, vr);
+}
+
+static void set_cc_subu64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
+ TCGv_i64 vr)
+{
+ gen_op_update3_cc_i64(s, CC_OP_SUBU_64, v1, v2, vr);
+}
+
+static void set_cc_abs64(DisasContext *s, TCGv_i64 v1)
+{
+ gen_op_update1_cc_i64(s, CC_OP_ABS_64, v1);
+}
+
+static void set_cc_nabs64(DisasContext *s, TCGv_i64 v1)
+{
+ gen_op_update1_cc_i64(s, CC_OP_NABS_64, v1);
+}
+
+static void set_cc_add32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2, TCGv_i32 vr)
+{
+ gen_op_update3_cc_i32(s, CC_OP_ADD_32, v1, v2, vr);
+}
+
+static void set_cc_addu32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
+ TCGv_i32 vr)
+{
+ gen_op_update3_cc_i32(s, CC_OP_ADDU_32, v1, v2, vr);
+}
+
+static void set_cc_sub32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2, TCGv_i32 vr)
+{
+ gen_op_update3_cc_i32(s, CC_OP_SUB_32, v1, v2, vr);
+}
+
+static void set_cc_subu32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
+ TCGv_i32 vr)
+{
+ gen_op_update3_cc_i32(s, CC_OP_SUBU_32, v1, v2, vr);
+}
+
+static void set_cc_abs32(DisasContext *s, TCGv_i32 v1)
+{
+ gen_op_update1_cc_i32(s, CC_OP_ABS_32, v1);
+}
+
+static void set_cc_nabs32(DisasContext *s, TCGv_i32 v1)
+{
+ gen_op_update1_cc_i32(s, CC_OP_NABS_32, v1);
+}
+
+static void set_cc_comp32(DisasContext *s, TCGv_i32 v1)
+{
+ gen_op_update1_cc_i32(s, CC_OP_COMP_32, v1);
+}
+
+static void set_cc_comp64(DisasContext *s, TCGv_i64 v1)
+{
+ gen_op_update1_cc_i64(s, CC_OP_COMP_64, v1);
+}
+
+static void set_cc_icm(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
+{
+ gen_op_update2_cc_i32(s, CC_OP_ICM, v1, v2);
+}
+
+static void set_cc_cmp_f32_i64(DisasContext *s, TCGv_i32 v1, TCGv_i64 v2)
+{
+ tcg_gen_extu_i32_i64(cc_src, v1);
+ tcg_gen_mov_i64(cc_dst, v2);
+ tcg_gen_discard_i64(cc_vr);
+ s->cc_op = CC_OP_LTGT_F32;
+}
+
+static void set_cc_nz_f32(DisasContext *s, TCGv_i32 v1)
+{
+ gen_op_update1_cc_i32(s, CC_OP_NZ_F32, v1);
+}
+
+static inline void set_cc_nz_f64(DisasContext *s, TCGv_i64 v1)
+{
+ gen_op_update1_cc_i64(s, CC_OP_NZ_F64, v1);
+}
+
+/* CC value is in env->cc_op */
+static inline void set_cc_static(DisasContext *s)
+{
+ tcg_gen_discard_i64(cc_src);
+ tcg_gen_discard_i64(cc_dst);
+ tcg_gen_discard_i64(cc_vr);
+ s->cc_op = CC_OP_STATIC;
+}
+
+static inline void gen_op_set_cc_op(DisasContext *s)
+{
+ if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
+ tcg_gen_movi_i32(cc_op, s->cc_op);
+ }
+}
+
+static inline void gen_update_cc_op(DisasContext *s)
+{
+ gen_op_set_cc_op(s);
+}
+
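+/*
+ * Condition codes are handled lazily: most instructions only record
+ * the operation in s->cc_op and its operands in cc_src/cc_dst/cc_vr.
+ * The cc value itself is only materialized, via the calc_cc helper,
+ * when something consumes it - conditional branches try to test the
+ * recorded operands directly instead (see gen_jcc).
+ */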
+/* calculates cc into cc_op */
+static void gen_op_calc_cc(DisasContext *s)
+{
+ TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
+ TCGv_i64 dummy = tcg_const_i64(0);
+
+ switch (s->cc_op) {
+ case CC_OP_CONST0:
+ case CC_OP_CONST1:
+ case CC_OP_CONST2:
+ case CC_OP_CONST3:
+ /* s->cc_op is the cc value */
+ tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
+ break;
+ case CC_OP_STATIC:
+ /* env->cc_op already is the cc value */
+ break;
+ case CC_OP_NZ:
+ case CC_OP_ABS_64:
+ case CC_OP_NABS_64:
+ case CC_OP_ABS_32:
+ case CC_OP_NABS_32:
+ case CC_OP_LTGT0_32:
+ case CC_OP_LTGT0_64:
+ case CC_OP_COMP_32:
+ case CC_OP_COMP_64:
+ case CC_OP_NZ_F32:
+ case CC_OP_NZ_F64:
+ /* 1 argument */
+ gen_helper_calc_cc(cc_op, local_cc_op, dummy, cc_dst, dummy);
+ break;
+ case CC_OP_ICM:
+ case CC_OP_LTGT_32:
+ case CC_OP_LTGT_64:
+ case CC_OP_LTUGTU_32:
+ case CC_OP_LTUGTU_64:
+ case CC_OP_TM_32:
+ case CC_OP_TM_64:
+ case CC_OP_LTGT_F32:
+ case CC_OP_LTGT_F64:
+ case CC_OP_SLAG:
+ /* 2 arguments */
+ gen_helper_calc_cc(cc_op, local_cc_op, cc_src, cc_dst, dummy);
+ break;
+ case CC_OP_ADD_64:
+ case CC_OP_ADDU_64:
+ case CC_OP_SUB_64:
+ case CC_OP_SUBU_64:
+ case CC_OP_ADD_32:
+ case CC_OP_ADDU_32:
+ case CC_OP_SUB_32:
+ case CC_OP_SUBU_32:
+ /* 3 arguments */
+ gen_helper_calc_cc(cc_op, local_cc_op, cc_src, cc_dst, cc_vr);
+ break;
+ case CC_OP_DYNAMIC:
+ /* unknown operation - assume 3 arguments and cc_op in env */
+ gen_helper_calc_cc(cc_op, cc_op, cc_src, cc_dst, cc_vr);
+ break;
+ default:
+ tcg_abort();
+ }
+
+ tcg_temp_free_i32(local_cc_op);
+
+ /* We now have cc in cc_op as constant */
+ set_cc_static(s);
+}
+
+static inline void decode_rr(DisasContext *s, uint64_t insn, int *r1, int *r2)
+{
+ debug_insn(insn);
+
+ *r1 = (insn >> 4) & 0xf;
+ *r2 = insn & 0xf;
+}
+
+static inline TCGv_i64 decode_rx(DisasContext *s, uint64_t insn, int *r1,
+ int *x2, int *b2, int *d2)
+{
+ debug_insn(insn);
+
+ *r1 = (insn >> 20) & 0xf;
+ *x2 = (insn >> 16) & 0xf;
+ *b2 = (insn >> 12) & 0xf;
+ *d2 = insn & 0xfff;
+
+ return get_address(s, *x2, *b2, *d2);
+}
+
+static inline void decode_rs(DisasContext *s, uint64_t insn, int *r1, int *r3,
+ int *b2, int *d2)
+{
+ debug_insn(insn);
+
+ *r1 = (insn >> 20) & 0xf;
+ /* aka m3 */
+ *r3 = (insn >> 16) & 0xf;
+ *b2 = (insn >> 12) & 0xf;
+ *d2 = insn & 0xfff;
+}
+
+static inline TCGv_i64 decode_si(DisasContext *s, uint64_t insn, int *i2,
+ int *b1, int *d1)
+{
+ debug_insn(insn);
+
+ *i2 = (insn >> 16) & 0xff;
+ *b1 = (insn >> 12) & 0xf;
+ *d1 = insn & 0xfff;
+
+ return get_address(s, 0, *b1, *d1);
+}
+
+static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc)
+{
+ TranslationBlock *tb;
+
+ gen_update_cc_op(s);
+
+ tb = s->tb;
+ /* NOTE: we handle the case where the TB spans two pages here */
+ if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
+ (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
+ /* jump to same page: we can use a direct jump */
+ tcg_gen_goto_tb(tb_num);
+ tcg_gen_movi_i64(psw_addr, pc);
+ tcg_gen_exit_tb((long)tb + tb_num);
+ } else {
+ /* jump to another page: currently not optimized */
+ tcg_gen_movi_i64(psw_addr, pc);
+ tcg_gen_exit_tb(0);
+ }
+}
+
+static inline void account_noninline_branch(DisasContext *s, int cc_op)
+{
+#ifdef DEBUG_INLINE_BRANCHES
+ inline_branch_miss[cc_op]++;
+#endif
+}
+
+static inline void account_inline_branch(DisasContext *s)
+{
+#ifdef DEBUG_INLINE_BRANCHES
+ inline_branch_hit[s->cc_op]++;
+#endif
+}
+
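+/*
+ * Branch mask semantics: mask bit 8 selects cc 0, bit 4 cc 1, bit 2
+ * cc 2 and bit 1 cc 3; the branch is taken iff mask & (8 >> cc).
+ * E.g. after a compare, mask 0x8 means "branch on equal" and 0x6
+ * "branch on not equal".  gen_jcc emits a branch to "skip" for the
+ * cases where the jump is NOT taken.
+ */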
+static void gen_jcc(DisasContext *s, uint32_t mask, int skip)
+{
+ TCGv_i32 tmp, tmp2, r;
+ TCGv_i64 tmp64;
+ int old_cc_op;
+
+ switch (s->cc_op) {
+ case CC_OP_LTGT0_32:
+ tmp = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(tmp, cc_dst);
+ switch (mask) {
+ case 0x8 | 0x4: /* dst <= 0 */
+ tcg_gen_brcondi_i32(TCG_COND_GT, tmp, 0, skip);
+ break;
+ case 0x8 | 0x2: /* dst >= 0 */
+ tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, skip);
+ break;
+ case 0x8: /* dst == 0 */
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, skip);
+ break;
+ case 0x7: /* dst != 0 */
+ case 0x6: /* dst != 0 */
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, skip);
+ break;
+ case 0x4: /* dst < 0 */
+ tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, skip);
+ break;
+ case 0x2: /* dst > 0 */
+ tcg_gen_brcondi_i32(TCG_COND_LE, tmp, 0, skip);
+ break;
+ default:
+ tcg_temp_free_i32(tmp);
+ goto do_dynamic;
+ }
+ account_inline_branch(s);
+ tcg_temp_free_i32(tmp);
+ break;
+ case CC_OP_LTGT0_64:
+ switch (mask) {
+ case 0x8 | 0x4: /* dst <= 0 */
+ tcg_gen_brcondi_i64(TCG_COND_GT, cc_dst, 0, skip);
+ break;
+ case 0x8 | 0x2: /* dst >= 0 */
+ tcg_gen_brcondi_i64(TCG_COND_LT, cc_dst, 0, skip);
+ break;
+ case 0x8: /* dst == 0 */
+ tcg_gen_brcondi_i64(TCG_COND_NE, cc_dst, 0, skip);
+ break;
+ case 0x7: /* dst != 0 */
+ case 0x6: /* dst != 0 */
+ tcg_gen_brcondi_i64(TCG_COND_EQ, cc_dst, 0, skip);
+ break;
+ case 0x4: /* dst < 0 */
+ tcg_gen_brcondi_i64(TCG_COND_GE, cc_dst, 0, skip);
+ break;
+ case 0x2: /* dst > 0 */
+ tcg_gen_brcondi_i64(TCG_COND_LE, cc_dst, 0, skip);
+ break;
+ default:
+ goto do_dynamic;
+ }
+ account_inline_branch(s);
+ break;
+ case CC_OP_LTGT_32:
+ tmp = tcg_temp_new_i32();
+ tmp2 = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(tmp, cc_src);
+ tcg_gen_trunc_i64_i32(tmp2, cc_dst);
+ switch (mask) {
+ case 0x8 | 0x4: /* src <= dst */
+ tcg_gen_brcond_i32(TCG_COND_GT, tmp, tmp2, skip);
+ break;
+ case 0x8 | 0x2: /* src >= dst */
+ tcg_gen_brcond_i32(TCG_COND_LT, tmp, tmp2, skip);
+ break;
+ case 0x8: /* src == dst */
+ tcg_gen_brcond_i32(TCG_COND_NE, tmp, tmp2, skip);
+ break;
+ case 0x7: /* src != dst */
+ case 0x6: /* src != dst */
+ tcg_gen_brcond_i32(TCG_COND_EQ, tmp, tmp2, skip);
+ break;
+ case 0x4: /* src < dst */
+ tcg_gen_brcond_i32(TCG_COND_GE, tmp, tmp2, skip);
+ break;
+ case 0x2: /* src > dst */
+ tcg_gen_brcond_i32(TCG_COND_LE, tmp, tmp2, skip);
+ break;
+ default:
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ goto do_dynamic;
+ }
+ account_inline_branch(s);
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ break;
+ case CC_OP_LTGT_64:
+ switch (mask) {
+ case 0x8 | 0x4: /* src <= dst */
+ tcg_gen_brcond_i64(TCG_COND_GT, cc_src, cc_dst, skip);
+ break;
+ case 0x8 | 0x2: /* src >= dst */
+ tcg_gen_brcond_i64(TCG_COND_LT, cc_src, cc_dst, skip);
+ break;
+ case 0x8: /* src == dst */
+ tcg_gen_brcond_i64(TCG_COND_NE, cc_src, cc_dst, skip);
+ break;
+ case 0x7: /* src != dst */
+ case 0x6: /* src != dst */
+ tcg_gen_brcond_i64(TCG_COND_EQ, cc_src, cc_dst, skip);
+ break;
+ case 0x4: /* src < dst */
+ tcg_gen_brcond_i64(TCG_COND_GE, cc_src, cc_dst, skip);
+ break;
+ case 0x2: /* src > dst */
+ tcg_gen_brcond_i64(TCG_COND_LE, cc_src, cc_dst, skip);
+ break;
+ default:
+ goto do_dynamic;
+ }
+ account_inline_branch(s);
+ break;
+ case CC_OP_LTUGTU_32:
+ tmp = tcg_temp_new_i32();
+ tmp2 = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(tmp, cc_src);
+ tcg_gen_trunc_i64_i32(tmp2, cc_dst);
+ switch (mask) {
+ case 0x8 | 0x4: /* src <= dst */
+ tcg_gen_brcond_i32(TCG_COND_GTU, tmp, tmp2, skip);
+ break;
+ case 0x8 | 0x2: /* src >= dst */
+ tcg_gen_brcond_i32(TCG_COND_LTU, tmp, tmp2, skip);
+ break;
+ case 0x8: /* src == dst */
+ tcg_gen_brcond_i32(TCG_COND_NE, tmp, tmp2, skip);
+ break;
+ case 0x7: /* src != dst */
+ case 0x6: /* src != dst */
+ tcg_gen_brcond_i32(TCG_COND_EQ, tmp, tmp2, skip);
+ break;
+ case 0x4: /* src < dst */
+ tcg_gen_brcond_i32(TCG_COND_GEU, tmp, tmp2, skip);
+ break;
+ case 0x2: /* src > dst */
+ tcg_gen_brcond_i32(TCG_COND_LEU, tmp, tmp2, skip);
+ break;
+ default:
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ goto do_dynamic;
+ }
+ account_inline_branch(s);
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ break;
+ case CC_OP_LTUGTU_64:
+ switch (mask) {
+ case 0x8 | 0x4: /* src <= dst */
+ tcg_gen_brcond_i64(TCG_COND_GTU, cc_src, cc_dst, skip);
+ break;
+ case 0x8 | 0x2: /* src >= dst */
+ tcg_gen_brcond_i64(TCG_COND_LTU, cc_src, cc_dst, skip);
+ break;
+ case 0x8: /* src == dst */
+ tcg_gen_brcond_i64(TCG_COND_NE, cc_src, cc_dst, skip);
+ break;
+ case 0x7: /* src != dst */
+ case 0x6: /* src != dst */
+ tcg_gen_brcond_i64(TCG_COND_EQ, cc_src, cc_dst, skip);
+ break;
+ case 0x4: /* src < dst */
+ tcg_gen_brcond_i64(TCG_COND_GEU, cc_src, cc_dst, skip);
+ break;
+ case 0x2: /* src > dst */
+ tcg_gen_brcond_i64(TCG_COND_LEU, cc_src, cc_dst, skip);
+ break;
+ default:
+ goto do_dynamic;
+ }
+ account_inline_branch(s);
+ break;
+ case CC_OP_NZ:
+ switch (mask) {
+ /* dst == 0 || dst != 0 */
+ case 0x8 | 0x4:
+ case 0x8 | 0x4 | 0x2:
+ case 0x8 | 0x4 | 0x2 | 0x1:
+ case 0x8 | 0x4 | 0x1:
+ break;
+ /* dst == 0 */
+ case 0x8:
+ case 0x8 | 0x2:
+ case 0x8 | 0x2 | 0x1:
+ case 0x8 | 0x1:
+ tcg_gen_brcondi_i64(TCG_COND_NE, cc_dst, 0, skip);
+ break;
+ /* dst != 0 */
+ case 0x4:
+ case 0x4 | 0x2:
+ case 0x4 | 0x2 | 0x1:
+ case 0x4 | 0x1:
+ tcg_gen_brcondi_i64(TCG_COND_EQ, cc_dst, 0, skip);
+ break;
+ default:
+ goto do_dynamic;
+ }
+ account_inline_branch(s);
+ break;
+ case CC_OP_TM_32:
+ tmp = tcg_temp_new_i32();
+ tmp2 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_i64_i32(tmp, cc_src);
+ tcg_gen_trunc_i64_i32(tmp2, cc_dst);
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ switch (mask) {
+ case 0x8: /* val & mask == 0 */
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, skip);
+ break;
+ case 0x4 | 0x2 | 0x1: /* val & mask != 0 */
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, skip);
+ break;
+ default:
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ goto do_dynamic;
+ }
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ account_inline_branch(s);
+ break;
+ case CC_OP_TM_64:
+ tmp64 = tcg_temp_new_i64();
+
+ tcg_gen_and_i64(tmp64, cc_src, cc_dst);
+ switch (mask) {
+ case 0x8: /* val & mask == 0 */
+ tcg_gen_brcondi_i64(TCG_COND_NE, tmp64, 0, skip);
+ break;
+ case 0x4 | 0x2 | 0x1: /* val & mask != 0 */
+ tcg_gen_brcondi_i64(TCG_COND_EQ, tmp64, 0, skip);
+ break;
+ default:
+ goto do_dynamic;
+ }
+ tcg_temp_free_i64(tmp64);
+ account_inline_branch(s);
+ break;
+ case CC_OP_ICM:
+ switch (mask) {
+ case 0x8: /* val == 0 */
+ tcg_gen_brcondi_i64(TCG_COND_NE, cc_dst, 0, skip);
+ break;
+ case 0x4 | 0x2 | 0x1: /* val != 0 */
+ case 0x4 | 0x2: /* val != 0 */
+ tcg_gen_brcondi_i64(TCG_COND_EQ, cc_dst, 0, skip);
+ break;
+ default:
+ goto do_dynamic;
+ }
+ account_inline_branch(s);
+ break;
+ case CC_OP_STATIC:
+ old_cc_op = s->cc_op;
+ goto do_dynamic_nocccalc;
+ case CC_OP_DYNAMIC:
+ default:
+do_dynamic:
+ old_cc_op = s->cc_op;
+ /* calculate cc value */
+ gen_op_calc_cc(s);
+
+do_dynamic_nocccalc:
+ /* jump based on cc */
+ account_noninline_branch(s, old_cc_op);
+
+ switch (mask) {
+ case 0x8 | 0x4 | 0x2 | 0x1:
+ /* always true */
+ break;
+ case 0x8 | 0x4 | 0x2: /* cc != 3 */
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cc_op, 3, skip);
+ break;
+ case 0x8 | 0x4 | 0x1: /* cc != 2 */
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cc_op, 2, skip);
+ break;
+ case 0x8 | 0x2 | 0x1: /* cc != 1 */
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cc_op, 1, skip);
+ break;
+ case 0x8 | 0x2: /* cc == 0 || cc == 2 */
+ tmp = tcg_temp_new_i32();
+ tcg_gen_andi_i32(tmp, cc_op, 1);
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, skip);
+ tcg_temp_free_i32(tmp);
+ break;
+ case 0x8 | 0x4: /* cc < 2 */
+ tcg_gen_brcondi_i32(TCG_COND_GEU, cc_op, 2, skip);
+ break;
+ case 0x8: /* cc == 0 */
+ tcg_gen_brcondi_i32(TCG_COND_NE, cc_op, 0, skip);
+ break;
+ case 0x4 | 0x2 | 0x1: /* cc != 0 */
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cc_op, 0, skip);
+ break;
+ case 0x4 | 0x1: /* cc == 1 || cc == 3 */
+ tmp = tcg_temp_new_i32();
+ tcg_gen_andi_i32(tmp, cc_op, 1);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, skip);
+ tcg_temp_free_i32(tmp);
+ break;
+ case 0x4: /* cc == 1 */
+ tcg_gen_brcondi_i32(TCG_COND_NE, cc_op, 1, skip);
+ break;
+ case 0x2 | 0x1: /* cc > 1 */
+ tcg_gen_brcondi_i32(TCG_COND_LEU, cc_op, 1, skip);
+ break;
+ case 0x2: /* cc == 2 */
+ tcg_gen_brcondi_i32(TCG_COND_NE, cc_op, 2, skip);
+ break;
+ case 0x1: /* cc == 3 */
+ tcg_gen_brcondi_i32(TCG_COND_NE, cc_op, 3, skip);
+ break;
+ default: /* cc is masked by something else */
+ tmp = tcg_const_i32(3);
+ /* 3 - cc */
+ tcg_gen_sub_i32(tmp, tmp, cc_op);
+ tmp2 = tcg_const_i32(1);
+ /* 1 << (3 - cc) */
+ tcg_gen_shl_i32(tmp2, tmp2, tmp);
+ r = tcg_const_i32(mask);
+ /* mask & (1 << (3 - cc)) */
+ tcg_gen_and_i32(r, r, tmp2);
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+
+ tcg_gen_brcondi_i32(TCG_COND_EQ, r, 0, skip);
+ tcg_temp_free_i32(r);
+ break;
+ }
+ break;
+ }
+}
+
+static void gen_bcr(DisasContext *s, uint32_t mask, TCGv_i64 target,
+ uint64_t offset)
+{
+ int skip;
+
+ if (mask == 0xf) {
+ /* unconditional */
+ tcg_gen_mov_i64(psw_addr, target);
+ tcg_gen_exit_tb(0);
+ } else if (mask == 0) {
+ /* ignore cc and never match */
+ gen_goto_tb(s, 0, offset + 2);
+ } else {
+ TCGv_i64 new_addr = tcg_temp_local_new_i64();
+
+ tcg_gen_mov_i64(new_addr, target);
+ skip = gen_new_label();
+ gen_jcc(s, mask, skip);
+ tcg_gen_mov_i64(psw_addr, new_addr);
+ tcg_gen_exit_tb(0);
+ gen_set_label(skip);
+ /* only free once - freeing a TCG temp twice is invalid */
+ tcg_temp_free_i64(new_addr);
+ gen_goto_tb(s, 1, offset + 2);
+ }
+}
+
+static void gen_brc(uint32_t mask, DisasContext *s, int32_t offset)
+{
+ int skip;
+
+ if (mask == 0xf) {
+ /* unconditional */
+ gen_goto_tb(s, 0, s->pc + offset);
+ } else if (mask == 0) {
+ /* ignore cc and never match */
+ gen_goto_tb(s, 0, s->pc + 4);
+ } else {
+ skip = gen_new_label();
+ gen_jcc(s, mask, skip);
+ gen_goto_tb(s, 0, s->pc + offset);
+ gen_set_label(skip);
+ gen_goto_tb(s, 1, s->pc + 4);
+ }
+ s->is_jmp = DISAS_TB_JUMP;
+}
+
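+/*
+ * MVC with dest == src + 1 is the classic memset idiom: the byte at
+ * src is propagated through the whole destination field, e.g. l = 255
+ * with dest = p + 1, src = p fills p+1..p+256 with the byte at p.
+ * The l_memset path below detects this and emits a fill instead of a
+ * byte-by-byte copy.
+ */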
+static void gen_op_mvc(DisasContext *s, int l, TCGv_i64 s1, TCGv_i64 s2)
+{
+ TCGv_i64 tmp, tmp2;
+ int i;
+ int l_memset = gen_new_label();
+ int l_out = gen_new_label();
+ TCGv_i64 dest = tcg_temp_local_new_i64();
+ TCGv_i64 src = tcg_temp_local_new_i64();
+ TCGv_i32 vl;
+
+ /* Find out if we should use the inline version of mvc */
+ switch (l) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ case 11:
+ case 15:
+ /* use inline */
+ break;
+ default:
+ /* Fall back to helper */
+ vl = tcg_const_i32(l);
+ potential_page_fault(s);
+ gen_helper_mvc(vl, s1, s2);
+ tcg_temp_free_i32(vl);
+ return;
+ }
+
+ tcg_gen_mov_i64(dest, s1);
+ tcg_gen_mov_i64(src, s2);
+
+ if (!(s->tb->flags & FLAG_MASK_64)) {
+ /* XXX what if we overflow while moving? */
+ tcg_gen_andi_i64(dest, dest, 0x7fffffffUL);
+ tcg_gen_andi_i64(src, src, 0x7fffffffUL);
+ }
+
+ tmp = tcg_temp_new_i64();
+ tcg_gen_addi_i64(tmp, src, 1);
+ tcg_gen_brcond_i64(TCG_COND_EQ, dest, tmp, l_memset);
+ tcg_temp_free_i64(tmp);
+
+ switch (l) {
+ case 0:
+ tmp = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld8u(tmp, src, get_mem_index(s));
+ tcg_gen_qemu_st8(tmp, dest, get_mem_index(s));
+
+ tcg_temp_free_i64(tmp);
+ break;
+ case 1:
+ tmp = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld16u(tmp, src, get_mem_index(s));
+ tcg_gen_qemu_st16(tmp, dest, get_mem_index(s));
+
+ tcg_temp_free_i64(tmp);
+ break;
+ case 3:
+ tmp = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld32u(tmp, src, get_mem_index(s));
+ tcg_gen_qemu_st32(tmp, dest, get_mem_index(s));
+
+ tcg_temp_free_i64(tmp);
+ break;
+ case 4:
+ tmp = tcg_temp_new_i64();
+ tmp2 = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld32u(tmp, src, get_mem_index(s));
+ tcg_gen_addi_i64(src, src, 4);
+ tcg_gen_qemu_ld8u(tmp2, src, get_mem_index(s));
+ tcg_gen_qemu_st32(tmp, dest, get_mem_index(s));
+ tcg_gen_addi_i64(dest, dest, 4);
+ tcg_gen_qemu_st8(tmp2, dest, get_mem_index(s));
+
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 7:
+ tmp = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld64(tmp, src, get_mem_index(s));
+ tcg_gen_qemu_st64(tmp, dest, get_mem_index(s));
+
+ tcg_temp_free_i64(tmp);
+ break;
+ default:
+        /* The inline version would grow too large for arbitrary lengths,
+           so only unroll it for the known-good lengths above */
+ tmp = tcg_temp_new_i64();
+ tmp2 = tcg_const_i64(8);
+ for (i = 0; (i + 7) <= l; i += 8) {
+ tcg_gen_qemu_ld64(tmp, src, get_mem_index(s));
+ tcg_gen_qemu_st64(tmp, dest, get_mem_index(s));
+
+ tcg_gen_add_i64(src, src, tmp2);
+ tcg_gen_add_i64(dest, dest, tmp2);
+ }
+
+ tcg_temp_free_i64(tmp2);
+ tmp2 = tcg_const_i64(1);
+
+ for (; i <= l; i++) {
+ tcg_gen_qemu_ld8u(tmp, src, get_mem_index(s));
+ tcg_gen_qemu_st8(tmp, dest, get_mem_index(s));
+
+ tcg_gen_add_i64(src, src, tmp2);
+ tcg_gen_add_i64(dest, dest, tmp2);
+ }
+
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp);
+ break;
+ }
+
+ tcg_gen_br(l_out);
+
+ gen_set_label(l_memset);
+ /* memset case (dest == (src + 1)) */
+
+ tmp = tcg_temp_new_i64();
+ tmp2 = tcg_temp_new_i64();
+ /* fill tmp with the byte */
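+    /* e.g. 0x00ab -> 0xabab -> 0xabababab -> 0xabababababababab */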
+ tcg_gen_qemu_ld8u(tmp, src, get_mem_index(s));
+ tcg_gen_shli_i64(tmp2, tmp, 8);
+ tcg_gen_or_i64(tmp, tmp, tmp2);
+ tcg_gen_shli_i64(tmp2, tmp, 16);
+ tcg_gen_or_i64(tmp, tmp, tmp2);
+ tcg_gen_shli_i64(tmp2, tmp, 32);
+ tcg_gen_or_i64(tmp, tmp, tmp2);
+ tcg_temp_free_i64(tmp2);
+
+ for (i = 0; (i + 7) <= l; i += 8) {
+ tcg_gen_qemu_st64(tmp, dest, get_mem_index(s));
+ tcg_gen_addi_i64(dest, dest, 8);
+ }
+
+ for (; i <= l; i++) {
+ tcg_gen_qemu_st8(tmp, dest, get_mem_index(s));
+ tcg_gen_addi_i64(dest, dest, 1);
+ }
+
+ tcg_temp_free_i64(tmp);
+
+ gen_set_label(l_out);
+
+ tcg_temp_free(dest);
+ tcg_temp_free(src);
+}
+
+static void gen_op_clc(DisasContext *s, int l, TCGv_i64 s1, TCGv_i64 s2)
+{
+ TCGv_i64 tmp;
+ TCGv_i64 tmp2;
+ TCGv_i32 vl;
+
+ /* check for simple 32bit or 64bit match */
+ switch (l) {
+ case 0:
+ tmp = tcg_temp_new_i64();
+ tmp2 = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld8u(tmp, s1, get_mem_index(s));
+ tcg_gen_qemu_ld8u(tmp2, s2, get_mem_index(s));
+ cmp_u64(s, tmp, tmp2);
+
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ return;
+ case 1:
+ tmp = tcg_temp_new_i64();
+ tmp2 = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld16u(tmp, s1, get_mem_index(s));
+ tcg_gen_qemu_ld16u(tmp2, s2, get_mem_index(s));
+ cmp_u64(s, tmp, tmp2);
+
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ return;
+ case 3:
+ tmp = tcg_temp_new_i64();
+ tmp2 = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld32u(tmp, s1, get_mem_index(s));
+ tcg_gen_qemu_ld32u(tmp2, s2, get_mem_index(s));
+ cmp_u64(s, tmp, tmp2);
+
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ return;
+ case 7:
+ tmp = tcg_temp_new_i64();
+ tmp2 = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld64(tmp, s1, get_mem_index(s));
+ tcg_gen_qemu_ld64(tmp2, s2, get_mem_index(s));
+ cmp_u64(s, tmp, tmp2);
+
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ return;
+ }
+
+ potential_page_fault(s);
+ vl = tcg_const_i32(l);
+ gen_helper_clc(cc_op, vl, s1, s2);
+ tcg_temp_free_i32(vl);
+ set_cc_static(s);
+}
+
+static void disas_e3(DisasContext* s, int op, int r1, int x2, int b2, int d2)
+{
+ TCGv_i64 addr, tmp, tmp2, tmp3, tmp4;
+ TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
+
+ LOG_DISAS("disas_e3: op 0x%x r1 %d x2 %d b2 %d d2 %d\n",
+ op, r1, x2, b2, d2);
+ addr = get_address(s, x2, b2, d2);
+ switch (op) {
+ case 0x2: /* LTG R1,D2(X2,B2) [RXY] */
+ case 0x4: /* lg r1,d2(x2,b2) */
+ tcg_gen_qemu_ld64(regs[r1], addr, get_mem_index(s));
+ if (op == 0x2) {
+ set_cc_s64(s, regs[r1]);
+ }
+ break;
+ case 0x12: /* LT R1,D2(X2,B2) [RXY] */
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld32s(tmp2, addr, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
+ store_reg32(r1, tmp32_1);
+ set_cc_s32(s, tmp32_1);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0xc: /* MSG R1,D2(X2,B2) [RXY] */
+ case 0x1c: /* MSGF R1,D2(X2,B2) [RXY] */
+ tmp2 = tcg_temp_new_i64();
+ if (op == 0xc) {
+ tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
+ } else {
+ tcg_gen_qemu_ld32s(tmp2, addr, get_mem_index(s));
+ }
+ tcg_gen_mul_i64(regs[r1], regs[r1], tmp2);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0xd: /* DSG R1,D2(X2,B2) [RXY] */
+ case 0x1d: /* DSGF R1,D2(X2,B2) [RXY] */
+ tmp2 = tcg_temp_new_i64();
+ if (op == 0x1d) {
+ tcg_gen_qemu_ld32s(tmp2, addr, get_mem_index(s));
+ } else {
+ tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
+ }
+ tmp4 = load_reg(r1 + 1);
+ tmp3 = tcg_temp_new_i64();
+ tcg_gen_div_i64(tmp3, tmp4, tmp2);
+ store_reg(r1 + 1, tmp3);
+ tcg_gen_rem_i64(tmp3, tmp4, tmp2);
+ store_reg(r1, tmp3);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ tcg_temp_free_i64(tmp4);
+ break;
+ case 0x8: /* AG R1,D2(X2,B2) [RXY] */
+ case 0xa: /* ALG R1,D2(X2,B2) [RXY] */
+ case 0x18: /* AGF R1,D2(X2,B2) [RXY] */
+ case 0x1a: /* ALGF R1,D2(X2,B2) [RXY] */
+ if (op == 0x1a) {
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
+ } else if (op == 0x18) {
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32s(tmp2, addr, get_mem_index(s));
+ } else {
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
+ }
+ tmp4 = load_reg(r1);
+ tmp3 = tcg_temp_new_i64();
+ tcg_gen_add_i64(tmp3, tmp4, tmp2);
+ store_reg(r1, tmp3);
+ switch (op) {
+ case 0x8:
+ case 0x18:
+ set_cc_add64(s, tmp4, tmp2, tmp3);
+ break;
+ case 0xa:
+ case 0x1a:
+ set_cc_addu64(s, tmp4, tmp2, tmp3);
+ break;
+ default:
+ tcg_abort();
+ }
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ tcg_temp_free_i64(tmp4);
+ break;
+ case 0x9: /* SG R1,D2(X2,B2) [RXY] */
+ case 0xb: /* SLG R1,D2(X2,B2) [RXY] */
+ case 0x19: /* SGF R1,D2(X2,B2) [RXY] */
+ case 0x1b: /* SLGF R1,D2(X2,B2) [RXY] */
+ tmp2 = tcg_temp_new_i64();
+ if (op == 0x19) {
+ tcg_gen_qemu_ld32s(tmp2, addr, get_mem_index(s));
+ } else if (op == 0x1b) {
+ tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
+ } else {
+ tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
+ }
+ tmp4 = load_reg(r1);
+ tmp3 = tcg_temp_new_i64();
+ tcg_gen_sub_i64(tmp3, tmp4, tmp2);
+ store_reg(r1, tmp3);
+ switch (op) {
+ case 0x9:
+ case 0x19:
+ set_cc_sub64(s, tmp4, tmp2, tmp3);
+ break;
+ case 0xb:
+ case 0x1b:
+ set_cc_subu64(s, tmp4, tmp2, tmp3);
+ break;
+ default:
+ tcg_abort();
+ }
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ tcg_temp_free_i64(tmp4);
+ break;
+    case 0xf: /* LRVG R1,D2(X2,B2) [RXY] */
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
+ tcg_gen_bswap64_i64(tmp2, tmp2);
+ store_reg(r1, tmp2);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x14: /* LGF R1,D2(X2,B2) [RXY] */
+ case 0x16: /* LLGF R1,D2(X2,B2) [RXY] */
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
+ if (op == 0x14) {
+ tcg_gen_ext32s_i64(tmp2, tmp2);
+ }
+ store_reg(r1, tmp2);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x15: /* LGH R1,D2(X2,B2) [RXY] */
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld16s(tmp2, addr, get_mem_index(s));
+ store_reg(r1, tmp2);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x17: /* LLGT R1,D2(X2,B2) [RXY] */
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
+ tcg_gen_andi_i64(tmp2, tmp2, 0x7fffffffULL);
+ store_reg(r1, tmp2);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x1e: /* LRV R1,D2(X2,B2) [RXY] */
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
+ tcg_temp_free_i64(tmp2);
+ tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x1f: /* LRVH R1,D2(X2,B2) [RXY] */
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld16u(tmp2, addr, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
+ tcg_temp_free_i64(tmp2);
+ tcg_gen_bswap16_i32(tmp32_1, tmp32_1);
+ store_reg16(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x20: /* CG R1,D2(X2,B2) [RXY] */
+    case 0x21: /* CLG R1,D2(X2,B2) [RXY] */
+ case 0x30: /* CGF R1,D2(X2,B2) [RXY] */
+ case 0x31: /* CLGF R1,D2(X2,B2) [RXY] */
+ tmp2 = tcg_temp_new_i64();
+ switch (op) {
+ case 0x20:
+ case 0x21:
+ tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
+ break;
+ case 0x30:
+ tcg_gen_qemu_ld32s(tmp2, addr, get_mem_index(s));
+ break;
+ case 0x31:
+ tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
+ break;
+ default:
+ tcg_abort();
+ }
+ switch (op) {
+ case 0x20:
+ case 0x30:
+ cmp_s64(s, regs[r1], tmp2);
+ break;
+ case 0x21:
+ case 0x31:
+ cmp_u64(s, regs[r1], tmp2);
+ break;
+ default:
+ tcg_abort();
+ }
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x24: /* stg r1, d2(x2,b2) */
+ tcg_gen_qemu_st64(regs[r1], addr, get_mem_index(s));
+ break;
+ case 0x3e: /* STRV R1,D2(X2,B2) [RXY] */
+ tmp32_1 = load_reg32(r1);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
+ tcg_gen_extu_i32_i64(tmp2, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_gen_qemu_st32(tmp2, addr, get_mem_index(s));
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x50: /* STY R1,D2(X2,B2) [RXY] */
+ tmp32_1 = load_reg32(r1);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(tmp2, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_gen_qemu_st32(tmp2, addr, get_mem_index(s));
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x57: /* XY R1,D2(X2,B2) [RXY] */
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_temp_new_i32();
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
+ tcg_temp_free_i64(tmp2);
+ tcg_gen_xor_i32(tmp32_2, tmp32_1, tmp32_2);
+ store_reg32(r1, tmp32_2);
+ set_cc_nz_u32(s, tmp32_2);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x58: /* LY R1,D2(X2,B2) [RXY] */
+ tmp3 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32u(tmp3, addr, get_mem_index(s));
+ store_reg32_i64(r1, tmp3);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x5a: /* AY R1,D2(X2,B2) [RXY] */
+ case 0x5b: /* SY R1,D2(X2,B2) [RXY] */
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_temp_new_i32();
+ tmp32_3 = tcg_temp_new_i32();
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32s(tmp2, addr, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
+ tcg_temp_free_i64(tmp2);
+ switch (op) {
+ case 0x5a:
+ tcg_gen_add_i32(tmp32_3, tmp32_1, tmp32_2);
+ break;
+ case 0x5b:
+ tcg_gen_sub_i32(tmp32_3, tmp32_1, tmp32_2);
+ break;
+ default:
+ tcg_abort();
+ }
+ store_reg32(r1, tmp32_3);
+ switch (op) {
+ case 0x5a:
+ set_cc_add32(s, tmp32_1, tmp32_2, tmp32_3);
+ break;
+ case 0x5b:
+ set_cc_sub32(s, tmp32_1, tmp32_2, tmp32_3);
+ break;
+ default:
+ tcg_abort();
+ }
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ break;
+ case 0x71: /* LAY R1,D2(X2,B2) [RXY] */
+ store_reg(r1, addr);
+ break;
+ case 0x72: /* STCY R1,D2(X2,B2) [RXY] */
+ tmp32_1 = load_reg32(r1);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(tmp2, tmp32_1);
+ tcg_gen_qemu_st8(tmp2, addr, get_mem_index(s));
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x73: /* ICY R1,D2(X2,B2) [RXY] */
+ tmp3 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld8u(tmp3, addr, get_mem_index(s));
+ store_reg8(r1, tmp3);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x76: /* LB R1,D2(X2,B2) [RXY] */
+ case 0x77: /* LGB R1,D2(X2,B2) [RXY] */
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld8s(tmp2, addr, get_mem_index(s));
+ switch (op) {
+ case 0x76:
+ tcg_gen_ext8s_i64(tmp2, tmp2);
+ store_reg32_i64(r1, tmp2);
+ break;
+ case 0x77:
+ tcg_gen_ext8s_i64(tmp2, tmp2);
+ store_reg(r1, tmp2);
+ break;
+ default:
+ tcg_abort();
+ }
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x78: /* LHY R1,D2(X2,B2) [RXY] */
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld16s(tmp2, addr, get_mem_index(s));
+ store_reg32_i64(r1, tmp2);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x80: /* NG R1,D2(X2,B2) [RXY] */
+ case 0x81: /* OG R1,D2(X2,B2) [RXY] */
+ case 0x82: /* XG R1,D2(X2,B2) [RXY] */
+ tmp3 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld64(tmp3, addr, get_mem_index(s));
+ switch (op) {
+ case 0x80:
+ tcg_gen_and_i64(regs[r1], regs[r1], tmp3);
+ break;
+ case 0x81:
+ tcg_gen_or_i64(regs[r1], regs[r1], tmp3);
+ break;
+ case 0x82:
+ tcg_gen_xor_i64(regs[r1], regs[r1], tmp3);
+ break;
+ default:
+ tcg_abort();
+ }
+ set_cc_nz_u64(s, regs[r1]);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x86: /* MLG R1,D2(X2,B2) [RXY] */
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = tcg_const_i32(r1);
+ tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
+ gen_helper_mlg(tmp32_1, tmp2);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x87: /* DLG R1,D2(X2,B2) [RXY] */
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = tcg_const_i32(r1);
+ tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
+ gen_helper_dlg(tmp32_1, tmp2);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x88: /* ALCG R1,D2(X2,B2) [RXY] */
+ tmp2 = tcg_temp_new_i64();
+ tmp3 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
+ /* XXX possible optimization point */
+ gen_op_calc_cc(s);
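+        /* for logical adds the carry is bit 1 of the cc (cc 2 and 3 mean
+           carry), hence the shift-and-mask below */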
+ tcg_gen_extu_i32_i64(tmp3, cc_op);
+ tcg_gen_shri_i64(tmp3, tmp3, 1);
+ tcg_gen_andi_i64(tmp3, tmp3, 1);
+ tcg_gen_add_i64(tmp3, tmp2, tmp3);
+ tcg_gen_add_i64(tmp3, regs[r1], tmp3);
+        /* latch the cc inputs before r1 is overwritten with the result */
+        set_cc_addu64(s, regs[r1], tmp2, tmp3);
+        store_reg(r1, tmp3);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x89: /* SLBG R1,D2(X2,B2) [RXY] */
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = tcg_const_i32(r1);
+ tcg_gen_qemu_ld64(tmp2, addr, get_mem_index(s));
+ /* XXX possible optimization point */
+ gen_op_calc_cc(s);
+ gen_helper_slbg(cc_op, cc_op, tmp32_1, regs[r1], tmp2);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x90: /* LLGC R1,D2(X2,B2) [RXY] */
+ tcg_gen_qemu_ld8u(regs[r1], addr, get_mem_index(s));
+ break;
+ case 0x91: /* LLGH R1,D2(X2,B2) [RXY] */
+ tcg_gen_qemu_ld16u(regs[r1], addr, get_mem_index(s));
+ break;
+ case 0x94: /* LLC R1,D2(X2,B2) [RXY] */
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld8u(tmp2, addr, get_mem_index(s));
+ store_reg32_i64(r1, tmp2);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x95: /* LLH R1,D2(X2,B2) [RXY] */
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld16u(tmp2, addr, get_mem_index(s));
+ store_reg32_i64(r1, tmp2);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x96: /* ML R1,D2(X2,B2) [RXY] */
+ tmp2 = tcg_temp_new_i64();
+ tmp3 = load_reg((r1 + 1) & 15);
+ tcg_gen_ext32u_i64(tmp3, tmp3);
+ tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
+ tcg_gen_mul_i64(tmp2, tmp2, tmp3);
+ store_reg32_i64((r1 + 1) & 15, tmp2);
+ tcg_gen_shri_i64(tmp2, tmp2, 32);
+ store_reg32_i64(r1, tmp2);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x97: /* DL R1,D2(X2,B2) [RXY] */
+ /* reg(r1) = reg(r1, r1+1) % ld32(addr) */
+ /* reg(r1+1) = reg(r1, r1+1) / ld32(addr) */
+ tmp = load_reg(r1);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
+ tmp3 = load_reg((r1 + 1) & 15);
+ tcg_gen_ext32u_i64(tmp2, tmp2);
+ tcg_gen_ext32u_i64(tmp3, tmp3);
+ tcg_gen_shli_i64(tmp, tmp, 32);
+ tcg_gen_or_i64(tmp, tmp, tmp3);
+
+ tcg_gen_rem_i64(tmp3, tmp, tmp2);
+ tcg_gen_div_i64(tmp, tmp, tmp2);
+ store_reg32_i64((r1 + 1) & 15, tmp);
+ store_reg32_i64(r1, tmp3);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x98: /* ALC R1,D2(X2,B2) [RXY] */
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_temp_new_i32();
+ tmp32_3 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
+ /* XXX possible optimization point */
+ gen_op_calc_cc(s);
+ gen_helper_addc_u32(tmp32_3, cc_op, tmp32_1, tmp32_2);
+ set_cc_addu32(s, tmp32_1, tmp32_2, tmp32_3);
+ store_reg32(r1, tmp32_3);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ break;
+ case 0x99: /* SLB R1,D2(X2,B2) [RXY] */
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld32u(tmp2, addr, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
+ /* XXX possible optimization point */
+ gen_op_calc_cc(s);
+ gen_helper_slb(cc_op, cc_op, tmp32_1, tmp32_2);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ default:
+ LOG_DISAS("illegal e3 operation 0x%x\n", op);
+ gen_illegal_opcode(s, 3);
+ break;
+ }
+ tcg_temp_free_i64(addr);
+}
+
+#ifndef CONFIG_USER_ONLY
+static void disas_e5(DisasContext* s, uint64_t insn)
+{
+ TCGv_i64 tmp, tmp2;
+ int op = (insn >> 32) & 0xff;
+
+ tmp = get_address(s, 0, (insn >> 28) & 0xf, (insn >> 16) & 0xfff);
+ tmp2 = get_address(s, 0, (insn >> 12) & 0xf, insn & 0xfff);
+
+ LOG_DISAS("disas_e5: insn %" PRIx64 "\n", insn);
+ switch (op) {
+ case 0x01: /* TPROT D1(B1),D2(B2) [SSE] */
+ /* Test Protection */
+ potential_page_fault(s);
+ gen_helper_tprot(cc_op, tmp, tmp2);
+ set_cc_static(s);
+ break;
+ default:
+ LOG_DISAS("illegal e5 operation 0x%x\n", op);
+ gen_illegal_opcode(s, 3);
+ break;
+ }
+
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+}
+#endif
+
+static void disas_eb(DisasContext *s, int op, int r1, int r3, int b2, int d2)
+{
+ TCGv_i64 tmp, tmp2, tmp3, tmp4;
+ TCGv_i32 tmp32_1, tmp32_2;
+ int i, stm_len;
+ int ilc = 3;
+
+ LOG_DISAS("disas_eb: op 0x%x r1 %d r3 %d b2 %d d2 0x%x\n",
+ op, r1, r3, b2, d2);
+ switch (op) {
+ case 0xc: /* SRLG R1,R3,D2(B2) [RSY] */
+ case 0xd: /* SLLG R1,R3,D2(B2) [RSY] */
+ case 0xa: /* SRAG R1,R3,D2(B2) [RSY] */
+ case 0xb: /* SLAG R1,R3,D2(B2) [RSY] */
+ case 0x1c: /* RLLG R1,R3,D2(B2) [RSY] */
+ if (b2) {
+ tmp = get_address(s, 0, b2, d2);
+ tcg_gen_andi_i64(tmp, tmp, 0x3f);
+ } else {
+ tmp = tcg_const_i64(d2 & 0x3f);
+ }
+ switch (op) {
+ case 0xc:
+ tcg_gen_shr_i64(regs[r1], regs[r3], tmp);
+ break;
+ case 0xd:
+ tcg_gen_shl_i64(regs[r1], regs[r3], tmp);
+ break;
+ case 0xa:
+ tcg_gen_sar_i64(regs[r1], regs[r3], tmp);
+ break;
+ case 0xb:
+ tmp2 = tcg_temp_new_i64();
+ tmp3 = tcg_temp_new_i64();
+ gen_op_update2_cc_i64(s, CC_OP_SLAG, regs[r3], tmp);
+ tcg_gen_shl_i64(tmp2, regs[r3], tmp);
+ /* override sign bit with source sign */
+ tcg_gen_andi_i64(tmp2, tmp2, ~0x8000000000000000ULL);
+ tcg_gen_andi_i64(tmp3, regs[r3], 0x8000000000000000ULL);
+ tcg_gen_or_i64(regs[r1], tmp2, tmp3);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x1c:
+ tcg_gen_rotl_i64(regs[r1], regs[r3], tmp);
+ break;
+ default:
+ tcg_abort();
+ break;
+ }
+ if (op == 0xa) {
+ set_cc_s64(s, regs[r1]);
+ }
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x1d: /* RLL R1,R3,D2(B2) [RSY] */
+ if (b2) {
+ tmp = get_address(s, 0, b2, d2);
+ tcg_gen_andi_i64(tmp, tmp, 0x3f);
+ } else {
+ tmp = tcg_const_i64(d2 & 0x3f);
+ }
+ tmp32_1 = tcg_temp_new_i32();
+ tmp32_2 = load_reg32(r3);
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp);
+ switch (op) {
+ case 0x1d:
+ tcg_gen_rotl_i32(tmp32_1, tmp32_2, tmp32_1);
+ break;
+ default:
+ tcg_abort();
+ break;
+ }
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x4: /* LMG R1,R3,D2(B2) [RSE] */
+ case 0x24: /* STMG R1,R3,D2(B2) [RSE] */
+ stm_len = 8;
+ goto do_mh;
+ case 0x26: /* STMH R1,R3,D2(B2) [RSE] */
+ case 0x96: /* LMH R1,R3,D2(B2) [RSE] */
+ stm_len = 4;
+do_mh:
+ /* Apparently, unrolling lmg/stmg of any size gains performance -
+ even for very long ones... */
+ tmp = get_address(s, 0, b2, d2);
+ tmp3 = tcg_const_i64(stm_len);
+ tmp4 = tcg_const_i64(32);
+ for (i = r1;; i = (i + 1) % 16) {
+ switch (op) {
+ case 0x4:
+ tcg_gen_qemu_ld64(regs[i], tmp, get_mem_index(s));
+ break;
+ case 0x96:
+ tmp2 = tcg_temp_new_i64();
+#if HOST_LONG_BITS == 32
+ tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(TCGV_HIGH(regs[i]), tmp2);
+#else
+            tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
+            tcg_gen_shl_i64(tmp2, tmp2, 32);
+ tcg_gen_ext32u_i64(regs[i], regs[i]);
+ tcg_gen_or_i64(regs[i], regs[i], tmp2);
+#endif
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x24:
+ tcg_gen_qemu_st64(regs[i], tmp, get_mem_index(s));
+ break;
+ case 0x26:
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_shr_i64(tmp2, regs[i], tmp4);
+ tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
+ tcg_temp_free_i64(tmp2);
+ break;
+ default:
+ tcg_abort();
+ }
+ if (i == r3) {
+ break;
+ }
+ tcg_gen_add_i64(tmp, tmp, tmp3);
+ }
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp4);
+ break;
+ case 0x2c: /* STCMH R1,M3,D2(B2) [RSY] */
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ gen_helper_stcmh(tmp32_1, tmp, tmp32_2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+#ifndef CONFIG_USER_ONLY
+ case 0x2f: /* LCTLG R1,R3,D2(B2) [RSE] */
+ /* Load Control */
+ check_privileged(s, ilc);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ gen_helper_lctlg(tmp32_1, tmp, tmp32_2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x25: /* STCTG R1,R3,D2(B2) [RSE] */
+ /* Store Control */
+ check_privileged(s, ilc);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ gen_helper_stctg(tmp32_1, tmp, tmp32_2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+#endif
+ case 0x30: /* CSG R1,R3,D2(B2) [RSY] */
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ /* XXX rewrite in tcg */
+ gen_helper_csg(cc_op, tmp32_1, tmp, tmp32_2);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x3e: /* CDSG R1,R3,D2(B2) [RSY] */
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ /* XXX rewrite in tcg */
+ gen_helper_cdsg(cc_op, tmp32_1, tmp, tmp32_2);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x51: /* TMY D1(B1),I2 [SIY] */
+        tmp = get_address(s, 0, b2, d2); /* SIY -> this is the 1st operand */
+ tmp2 = tcg_const_i64((r1 << 4) | r3);
+ tcg_gen_qemu_ld8u(tmp, tmp, get_mem_index(s));
+        /* yes, this is a 32 bit operation with 64 bit tcg registers, because
+           that incurs fewer conversions */
+ cmp_64(s, tmp, tmp2, CC_OP_TM_32);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x52: /* MVIY D1(B1),I2 [SIY] */
+ tmp = get_address(s, 0, b2, d2); /* SIY -> this is the destination */
+ tmp2 = tcg_const_i64((r1 << 4) | r3);
+ tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x55: /* CLIY D1(B1),I2 [SIY] */
+ tmp3 = get_address(s, 0, b2, d2); /* SIY -> this is the 1st operand */
+ tmp = tcg_temp_new_i64();
+ tmp32_1 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld8u(tmp, tmp3, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp);
+ cmp_u32c(s, tmp32_1, (r1 << 4) | r3);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp3);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x80: /* ICMH R1,M3,D2(B2) [RSY] */
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ /* XXX split CC calculation out */
+ gen_helper_icmh(cc_op, tmp32_1, tmp, tmp32_2);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ default:
+ LOG_DISAS("illegal eb operation 0x%x\n", op);
+ gen_illegal_opcode(s, ilc);
+ break;
+ }
+}
+
+static void disas_ed(DisasContext *s, int op, int r1, int x2, int b2, int d2,
+ int r1b)
+{
+ TCGv_i32 tmp_r1, tmp32;
+ TCGv_i64 addr, tmp;
+ addr = get_address(s, x2, b2, d2);
+ tmp_r1 = tcg_const_i32(r1);
+ switch (op) {
+ case 0x5: /* LXDB R1,D2(X2,B2) [RXE] */
+ potential_page_fault(s);
+ gen_helper_lxdb(tmp_r1, addr);
+ break;
+ case 0x9: /* CEB R1,D2(X2,B2) [RXE] */
+ tmp = tcg_temp_new_i64();
+ tmp32 = load_freg32(r1);
+ tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
+ set_cc_cmp_f32_i64(s, tmp32, tmp);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32);
+ break;
+ case 0xa: /* AEB R1,D2(X2,B2) [RXE] */
+ tmp = tcg_temp_new_i64();
+ tmp32 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32, tmp);
+ gen_helper_aeb(tmp_r1, tmp32);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32);
+
+ tmp32 = load_freg32(r1);
+ set_cc_nz_f32(s, tmp32);
+ tcg_temp_free_i32(tmp32);
+ break;
+ case 0xb: /* SEB R1,D2(X2,B2) [RXE] */
+ tmp = tcg_temp_new_i64();
+ tmp32 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32, tmp);
+ gen_helper_seb(tmp_r1, tmp32);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32);
+
+ tmp32 = load_freg32(r1);
+ set_cc_nz_f32(s, tmp32);
+ tcg_temp_free_i32(tmp32);
+ break;
+ case 0xd: /* DEB R1,D2(X2,B2) [RXE] */
+ tmp = tcg_temp_new_i64();
+ tmp32 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32, tmp);
+ gen_helper_deb(tmp_r1, tmp32);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32);
+ break;
+ case 0x10: /* TCEB R1,D2(X2,B2) [RXE] */
+ potential_page_fault(s);
+ gen_helper_tceb(cc_op, tmp_r1, addr);
+ set_cc_static(s);
+ break;
+ case 0x11: /* TCDB R1,D2(X2,B2) [RXE] */
+ potential_page_fault(s);
+ gen_helper_tcdb(cc_op, tmp_r1, addr);
+ set_cc_static(s);
+ break;
+ case 0x12: /* TCXB R1,D2(X2,B2) [RXE] */
+ potential_page_fault(s);
+ gen_helper_tcxb(cc_op, tmp_r1, addr);
+ set_cc_static(s);
+ break;
+ case 0x17: /* MEEB R1,D2(X2,B2) [RXE] */
+ tmp = tcg_temp_new_i64();
+ tmp32 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld32u(tmp, addr, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32, tmp);
+ gen_helper_meeb(tmp_r1, tmp32);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32);
+ break;
+ case 0x19: /* CDB R1,D2(X2,B2) [RXE] */
+ potential_page_fault(s);
+ gen_helper_cdb(cc_op, tmp_r1, addr);
+ set_cc_static(s);
+ break;
+ case 0x1a: /* ADB R1,D2(X2,B2) [RXE] */
+ potential_page_fault(s);
+ gen_helper_adb(cc_op, tmp_r1, addr);
+ set_cc_static(s);
+ break;
+ case 0x1b: /* SDB R1,D2(X2,B2) [RXE] */
+ potential_page_fault(s);
+ gen_helper_sdb(cc_op, tmp_r1, addr);
+ set_cc_static(s);
+ break;
+ case 0x1c: /* MDB R1,D2(X2,B2) [RXE] */
+ potential_page_fault(s);
+ gen_helper_mdb(tmp_r1, addr);
+ break;
+ case 0x1d: /* DDB R1,D2(X2,B2) [RXE] */
+ potential_page_fault(s);
+ gen_helper_ddb(tmp_r1, addr);
+ break;
+ case 0x1e: /* MADB R1,R3,D2(X2,B2) [RXF] */
+ /* for RXF insns, r1 is R3 and r1b is R1 */
+ tmp32 = tcg_const_i32(r1b);
+ potential_page_fault(s);
+ gen_helper_madb(tmp32, addr, tmp_r1);
+ tcg_temp_free_i32(tmp32);
+ break;
+ default:
+ LOG_DISAS("illegal ed operation 0x%x\n", op);
+ gen_illegal_opcode(s, 3);
+ return;
+ }
+ tcg_temp_free_i32(tmp_r1);
+ tcg_temp_free_i64(addr);
+}
+
+static void disas_a5(DisasContext *s, int op, int r1, int i2)
+{
+ TCGv_i64 tmp, tmp2;
+ TCGv_i32 tmp32;
+ LOG_DISAS("disas_a5: op 0x%x r1 %d i2 0x%x\n", op, r1, i2);
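+    /* Ops 0x0-0x3 insert into the four halfwords of R1, named HH/HL/LH/LL
+       from the most significant end; the deposit offsets 48/32/16/0 below
+       are the corresponding little-endian bit positions. */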
+ switch (op) {
+ case 0x0: /* IIHH R1,I2 [RI] */
+ tmp = tcg_const_i64(i2);
+ tcg_gen_deposit_i64(regs[r1], regs[r1], tmp, 48, 16);
+ break;
+ case 0x1: /* IIHL R1,I2 [RI] */
+ tmp = tcg_const_i64(i2);
+ tcg_gen_deposit_i64(regs[r1], regs[r1], tmp, 32, 16);
+ break;
+ case 0x2: /* IILH R1,I2 [RI] */
+ tmp = tcg_const_i64(i2);
+ tcg_gen_deposit_i64(regs[r1], regs[r1], tmp, 16, 16);
+ break;
+ case 0x3: /* IILL R1,I2 [RI] */
+ tmp = tcg_const_i64(i2);
+ tcg_gen_deposit_i64(regs[r1], regs[r1], tmp, 0, 16);
+ break;
+ case 0x4: /* NIHH R1,I2 [RI] */
+ case 0x8: /* OIHH R1,I2 [RI] */
+ tmp = load_reg(r1);
+ tmp32 = tcg_temp_new_i32();
+ switch (op) {
+ case 0x4:
+ tmp2 = tcg_const_i64((((uint64_t)i2) << 48)
+ | 0x0000ffffffffffffULL);
+ tcg_gen_and_i64(tmp, tmp, tmp2);
+ break;
+ case 0x8:
+ tmp2 = tcg_const_i64(((uint64_t)i2) << 48);
+ tcg_gen_or_i64(tmp, tmp, tmp2);
+ break;
+ default:
+ tcg_abort();
+ }
+ store_reg(r1, tmp);
+ tcg_gen_shri_i64(tmp2, tmp, 48);
+ tcg_gen_trunc_i64_i32(tmp32, tmp2);
+ set_cc_nz_u32(s, tmp32);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32);
+ break;
+ case 0x5: /* NIHL R1,I2 [RI] */
+ case 0x9: /* OIHL R1,I2 [RI] */
+ tmp = load_reg(r1);
+ tmp32 = tcg_temp_new_i32();
+ switch (op) {
+ case 0x5:
+ tmp2 = tcg_const_i64((((uint64_t)i2) << 32)
+ | 0xffff0000ffffffffULL);
+ tcg_gen_and_i64(tmp, tmp, tmp2);
+ break;
+ case 0x9:
+ tmp2 = tcg_const_i64(((uint64_t)i2) << 32);
+ tcg_gen_or_i64(tmp, tmp, tmp2);
+ break;
+ default:
+ tcg_abort();
+ }
+ store_reg(r1, tmp);
+ tcg_gen_shri_i64(tmp2, tmp, 32);
+ tcg_gen_trunc_i64_i32(tmp32, tmp2);
+ tcg_gen_andi_i32(tmp32, tmp32, 0xffff);
+ set_cc_nz_u32(s, tmp32);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32);
+ break;
+ case 0x6: /* NILH R1,I2 [RI] */
+ case 0xa: /* OILH R1,I2 [RI] */
+ tmp = load_reg(r1);
+ tmp32 = tcg_temp_new_i32();
+ switch (op) {
+ case 0x6:
+ tmp2 = tcg_const_i64((((uint64_t)i2) << 16)
+ | 0xffffffff0000ffffULL);
+ tcg_gen_and_i64(tmp, tmp, tmp2);
+ break;
+ case 0xa:
+ tmp2 = tcg_const_i64(((uint64_t)i2) << 16);
+ tcg_gen_or_i64(tmp, tmp, tmp2);
+ break;
+ default:
+ tcg_abort();
+ }
+ store_reg(r1, tmp);
+ tcg_gen_shri_i64(tmp, tmp, 16);
+ tcg_gen_trunc_i64_i32(tmp32, tmp);
+ tcg_gen_andi_i32(tmp32, tmp32, 0xffff);
+ set_cc_nz_u32(s, tmp32);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32);
+ break;
+ case 0x7: /* NILL R1,I2 [RI] */
+ case 0xb: /* OILL R1,I2 [RI] */
+ tmp = load_reg(r1);
+ tmp32 = tcg_temp_new_i32();
+ switch (op) {
+ case 0x7:
+ tmp2 = tcg_const_i64(i2 | 0xffffffffffff0000ULL);
+ tcg_gen_and_i64(tmp, tmp, tmp2);
+ break;
+ case 0xb:
+ tmp2 = tcg_const_i64(i2);
+ tcg_gen_or_i64(tmp, tmp, tmp2);
+ break;
+ default:
+ tcg_abort();
+ }
+ store_reg(r1, tmp);
+ tcg_gen_trunc_i64_i32(tmp32, tmp);
+ tcg_gen_andi_i32(tmp32, tmp32, 0xffff);
+ set_cc_nz_u32(s, tmp32); /* signedness should not matter here */
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32);
+ break;
+ case 0xc: /* LLIHH R1,I2 [RI] */
+        tmp = tcg_const_i64(((uint64_t)i2) << 48);
+ store_reg(r1, tmp);
+ break;
+ case 0xd: /* LLIHL R1,I2 [RI] */
+        tmp = tcg_const_i64(((uint64_t)i2) << 32);
+ store_reg(r1, tmp);
+ break;
+ case 0xe: /* LLILH R1,I2 [RI] */
+        tmp = tcg_const_i64(((uint64_t)i2) << 16);
+ store_reg(r1, tmp);
+ break;
+ case 0xf: /* LLILL R1,I2 [RI] */
+ tmp = tcg_const_i64(i2);
+ store_reg(r1, tmp);
+ break;
+ default:
+ LOG_DISAS("illegal a5 operation 0x%x\n", op);
+ gen_illegal_opcode(s, 2);
+ return;
+ }
+ tcg_temp_free_i64(tmp);
+}
+
+static void disas_a7(DisasContext *s, int op, int r1, int i2)
+{
+ TCGv_i64 tmp, tmp2;
+ TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
+ int l1;
+
+ LOG_DISAS("disas_a7: op 0x%x r1 %d i2 0x%x\n", op, r1, i2);
+ switch (op) {
+ case 0x0: /* TMLH or TMH R1,I2 [RI] */
+ case 0x1: /* TMLL or TML R1,I2 [RI] */
+ case 0x2: /* TMHH R1,I2 [RI] */
+ case 0x3: /* TMHL R1,I2 [RI] */
+ tmp = load_reg(r1);
+ tmp2 = tcg_const_i64((uint16_t)i2);
+ switch (op) {
+ case 0x0:
+ tcg_gen_shri_i64(tmp, tmp, 16);
+ break;
+ case 0x1:
+ break;
+ case 0x2:
+ tcg_gen_shri_i64(tmp, tmp, 48);
+ break;
+ case 0x3:
+ tcg_gen_shri_i64(tmp, tmp, 32);
+ break;
+ }
+ tcg_gen_andi_i64(tmp, tmp, 0xffff);
+ cmp_64(s, tmp, tmp2, CC_OP_TM_64);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x4: /* brc m1, i2 */
+ gen_brc(r1, s, i2 * 2LL);
+ return;
+ case 0x5: /* BRAS R1,I2 [RI] */
+ tmp = tcg_const_i64(pc_to_link_info(s, s->pc + 4));
+ store_reg(r1, tmp);
+ tcg_temp_free_i64(tmp);
+ gen_goto_tb(s, 0, s->pc + i2 * 2LL);
+ s->is_jmp = DISAS_TB_JUMP;
+ break;
+ case 0x6: /* BRCT R1,I2 [RI] */
+ tmp32_1 = load_reg32(r1);
+ tcg_gen_subi_i32(tmp32_1, tmp32_1, 1);
+ store_reg32(r1, tmp32_1);
+ gen_update_cc_op(s);
+ l1 = gen_new_label();
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp32_1, 0, l1);
+ gen_goto_tb(s, 0, s->pc + (i2 * 2LL));
+ gen_set_label(l1);
+ gen_goto_tb(s, 1, s->pc + 4);
+ s->is_jmp = DISAS_TB_JUMP;
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x7: /* BRCTG R1,I2 [RI] */
+ tmp = load_reg(r1);
+ tcg_gen_subi_i64(tmp, tmp, 1);
+ store_reg(r1, tmp);
+ gen_update_cc_op(s);
+ l1 = gen_new_label();
+ tcg_gen_brcondi_i64(TCG_COND_EQ, tmp, 0, l1);
+ gen_goto_tb(s, 0, s->pc + (i2 * 2LL));
+ gen_set_label(l1);
+ gen_goto_tb(s, 1, s->pc + 4);
+ s->is_jmp = DISAS_TB_JUMP;
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x8: /* lhi r1, i2 */
+ tmp32_1 = tcg_const_i32(i2);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x9: /* lghi r1, i2 */
+ tmp = tcg_const_i64(i2);
+ store_reg(r1, tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0xa: /* AHI R1,I2 [RI] */
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_temp_new_i32();
+ tmp32_3 = tcg_const_i32(i2);
+
+ if (i2 < 0) {
+ tcg_gen_subi_i32(tmp32_2, tmp32_1, -i2);
+ } else {
+ tcg_gen_add_i32(tmp32_2, tmp32_1, tmp32_3);
+ }
+
+ store_reg32(r1, tmp32_2);
+ set_cc_add32(s, tmp32_1, tmp32_3, tmp32_2);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ break;
+ case 0xb: /* aghi r1, i2 */
+ tmp = load_reg(r1);
+ tmp2 = tcg_const_i64(i2);
+
+ if (i2 < 0) {
+ tcg_gen_subi_i64(regs[r1], tmp, -i2);
+ } else {
+ tcg_gen_add_i64(regs[r1], tmp, tmp2);
+ }
+ set_cc_add64(s, tmp, tmp2, regs[r1]);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0xc: /* MHI R1,I2 [RI] */
+ tmp32_1 = load_reg32(r1);
+ tcg_gen_muli_i32(tmp32_1, tmp32_1, i2);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0xd: /* MGHI R1,I2 [RI] */
+ tmp = load_reg(r1);
+ tcg_gen_muli_i64(tmp, tmp, i2);
+ store_reg(r1, tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0xe: /* CHI R1,I2 [RI] */
+ tmp32_1 = load_reg32(r1);
+ cmp_s32c(s, tmp32_1, i2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0xf: /* CGHI R1,I2 [RI] */
+ tmp = load_reg(r1);
+ cmp_s64c(s, tmp, i2);
+ tcg_temp_free_i64(tmp);
+ break;
+ default:
+ LOG_DISAS("illegal a7 operation 0x%x\n", op);
+ gen_illegal_opcode(s, 2);
+ return;
+ }
+}
+
+static void disas_b2(DisasContext *s, int op, uint32_t insn)
+{
+ TCGv_i64 tmp, tmp2, tmp3;
+ TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
+ int r1, r2;
+ int ilc = 2;
+#ifndef CONFIG_USER_ONLY
+ int r3, d2, b2;
+#endif
+
+ r1 = (insn >> 4) & 0xf;
+ r2 = insn & 0xf;
+
+ LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2);
+
+ switch (op) {
+ case 0x22: /* IPM R1 [RRE] */
+ tmp32_1 = tcg_const_i32(r1);
+ gen_op_calc_cc(s);
+ gen_helper_ipm(cc_op, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x41: /* CKSM R1,R2 [RRE] */
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r2);
+ potential_page_fault(s);
+ gen_helper_cksm(tmp32_1, tmp32_2);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ gen_op_movi_cc(s, 0);
+ break;
+ case 0x4e: /* SAR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r2);
+ tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUState, aregs[r1]));
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x4f: /* EAR R1,R2 [RRE] */
+ tmp32_1 = tcg_temp_new_i32();
+ tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUState, aregs[r2]));
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x52: /* MSR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = load_reg32(r2);
+ tcg_gen_mul_i32(tmp32_1, tmp32_1, tmp32_2);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x54: /* MVPG R1,R2 [RRE] */
+ tmp = load_reg(0);
+ tmp2 = load_reg(r1);
+ tmp3 = load_reg(r2);
+ potential_page_fault(s);
+ gen_helper_mvpg(tmp, tmp2, tmp3);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ /* XXX check CCO bit and set CC accordingly */
+ gen_op_movi_cc(s, 0);
+ break;
+ case 0x55: /* MVST R1,R2 [RRE] */
+ tmp32_1 = load_reg32(0);
+ tmp32_2 = tcg_const_i32(r1);
+ tmp32_3 = tcg_const_i32(r2);
+ potential_page_fault(s);
+ gen_helper_mvst(tmp32_1, tmp32_2, tmp32_3);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ gen_op_movi_cc(s, 1);
+ break;
+ case 0x5d: /* CLST R1,R2 [RRE] */
+ tmp32_1 = load_reg32(0);
+ tmp32_2 = tcg_const_i32(r1);
+ tmp32_3 = tcg_const_i32(r2);
+ potential_page_fault(s);
+ gen_helper_clst(cc_op, tmp32_1, tmp32_2, tmp32_3);
+ set_cc_static(s);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ break;
+ case 0x5e: /* SRST R1,R2 [RRE] */
+ tmp32_1 = load_reg32(0);
+ tmp32_2 = tcg_const_i32(r1);
+ tmp32_3 = tcg_const_i32(r2);
+ potential_page_fault(s);
+ gen_helper_srst(cc_op, tmp32_1, tmp32_2, tmp32_3);
+ set_cc_static(s);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ break;
+
+#ifndef CONFIG_USER_ONLY
+ case 0x02: /* STIDP D2(B2) [S] */
+ /* Store CPU ID */
+ check_privileged(s, ilc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ potential_page_fault(s);
+ gen_helper_stidp(tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x04: /* SCK D2(B2) [S] */
+ /* Set Clock */
+ check_privileged(s, ilc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ potential_page_fault(s);
+ gen_helper_sck(cc_op, tmp);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x05: /* STCK D2(B2) [S] */
+ /* Store Clock */
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ potential_page_fault(s);
+ gen_helper_stck(cc_op, tmp);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x06: /* SCKC D2(B2) [S] */
+ /* Set Clock Comparator */
+ check_privileged(s, ilc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ potential_page_fault(s);
+ gen_helper_sckc(tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x07: /* STCKC D2(B2) [S] */
+ /* Store Clock Comparator */
+ check_privileged(s, ilc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ potential_page_fault(s);
+ gen_helper_stckc(tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x08: /* SPT D2(B2) [S] */
+ /* Set CPU Timer */
+ check_privileged(s, ilc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ potential_page_fault(s);
+ gen_helper_spt(tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x09: /* STPT D2(B2) [S] */
+ /* Store CPU Timer */
+ check_privileged(s, ilc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ potential_page_fault(s);
+ gen_helper_stpt(tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x0a: /* SPKA D2(B2) [S] */
+ /* Set PSW Key from Address */
+ check_privileged(s, ilc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_andi_i64(tmp2, psw_mask, ~PSW_MASK_KEY);
+ tcg_gen_shli_i64(tmp, tmp, PSW_SHIFT_KEY - 4);
+ tcg_gen_or_i64(psw_mask, tmp2, tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x0d: /* PTLB [S] */
+ /* Purge TLB */
+ check_privileged(s, ilc);
+ gen_helper_ptlb();
+ break;
+ case 0x10: /* SPX D2(B2) [S] */
+ /* Set Prefix Register */
+ check_privileged(s, ilc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ potential_page_fault(s);
+ gen_helper_spx(tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x11: /* STPX D2(B2) [S] */
+ /* Store Prefix */
+ check_privileged(s, ilc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_ld_i64(tmp2, cpu_env, offsetof(CPUState, psa));
+ tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x12: /* STAP D2(B2) [S] */
+ /* Store CPU Address */
+ check_privileged(s, ilc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = tcg_temp_new_i32();
+ tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUState, cpu_num));
+ tcg_gen_extu_i32_i64(tmp2, tmp32_1);
+ tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x21: /* IPTE R1,R2 [RRE] */
+ /* Invalidate PTE */
+ check_privileged(s, ilc);
+ r1 = (insn >> 4) & 0xf;
+ r2 = insn & 0xf;
+ tmp = load_reg(r1);
+ tmp2 = load_reg(r2);
+ gen_helper_ipte(tmp, tmp2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x29: /* ISKE R1,R2 [RRE] */
+ /* Insert Storage Key Extended */
+ check_privileged(s, ilc);
+ r1 = (insn >> 4) & 0xf;
+ r2 = insn & 0xf;
+ tmp = load_reg(r2);
+ tmp2 = tcg_temp_new_i64();
+ gen_helper_iske(tmp2, tmp);
+ store_reg(r1, tmp2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x2a: /* RRBE R1,R2 [RRE] */
+        /* Reset Reference Bit Extended */
+ check_privileged(s, ilc);
+ r1 = (insn >> 4) & 0xf;
+ r2 = insn & 0xf;
+ tmp32_1 = load_reg32(r1);
+ tmp = load_reg(r2);
+ gen_helper_rrbe(cc_op, tmp32_1, tmp);
+ set_cc_static(s);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x2b: /* SSKE R1,R2 [RRE] */
+ /* Set Storage Key Extended */
+ check_privileged(s, ilc);
+ r1 = (insn >> 4) & 0xf;
+ r2 = insn & 0xf;
+ tmp32_1 = load_reg32(r1);
+ tmp = load_reg(r2);
+ gen_helper_sske(tmp32_1, tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i64(tmp);
+ break;
+    case 0x34: /* STSCH ? */
+ /* Store Subchannel */
+ check_privileged(s, ilc);
+ gen_op_movi_cc(s, 3);
+ break;
+ case 0x46: /* STURA R1,R2 [RRE] */
+ /* Store Using Real Address */
+ check_privileged(s, ilc);
+ r1 = (insn >> 4) & 0xf;
+ r2 = insn & 0xf;
+ tmp32_1 = load_reg32(r1);
+ tmp = load_reg(r2);
+ potential_page_fault(s);
+ gen_helper_stura(tmp, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x50: /* CSP R1,R2 [RRE] */
+ /* Compare And Swap And Purge */
+ check_privileged(s, ilc);
+ r1 = (insn >> 4) & 0xf;
+ r2 = insn & 0xf;
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r2);
+ gen_helper_csp(cc_op, tmp32_1, tmp32_2);
+ set_cc_static(s);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x5f: /* CHSC ? */
+ /* Channel Subsystem Call */
+ check_privileged(s, ilc);
+ gen_op_movi_cc(s, 3);
+ break;
+ case 0x78: /* STCKE D2(B2) [S] */
+ /* Store Clock Extended */
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ potential_page_fault(s);
+ gen_helper_stcke(cc_op, tmp);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x79: /* SACF D2(B2) [S] */
+        /* Set Address Space Control Fast */
+ check_privileged(s, ilc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ potential_page_fault(s);
+ gen_helper_sacf(tmp);
+ tcg_temp_free_i64(tmp);
+ /* addressing mode has changed, so end the block */
+ s->pc += ilc * 2;
+ update_psw_addr(s);
+ s->is_jmp = DISAS_EXCP;
+ break;
+    case 0x7d: /* STSI D2(B2) [S] */
+ check_privileged(s, ilc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = load_reg32(0);
+ tmp32_2 = load_reg32(1);
+ potential_page_fault(s);
+ gen_helper_stsi(cc_op, tmp, tmp32_1, tmp32_2);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x9d: /* LFPC D2(B2) [S] */
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
+ tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUState, fpc));
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0xb1: /* STFL D2(B2) [S] */
+ /* Store Facility List (CPU features) at 200 */
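+        /* 0xc0000000 advertises just the first two facility bits; real
+           address 200 is the architected facility-list location in low
+           core. */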
+ check_privileged(s, ilc);
+ tmp2 = tcg_const_i64(0xc0000000);
+ tmp = tcg_const_i64(200);
+ tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0xb2: /* LPSWE D2(B2) [S] */
+ /* Load PSW Extended */
+ check_privileged(s, ilc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp3 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
+ tcg_gen_addi_i64(tmp, tmp, 8);
+ tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s));
+ gen_helper_load_psw(tmp2, tmp3);
+ /* we need to keep cc_op intact */
+ s->is_jmp = DISAS_JUMP;
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i64(tmp3);
+ break;
+ case 0x20: /* SERVC R1,R2 [RRE] */
+ /* SCLP Service call (PV hypercall) */
+ check_privileged(s, ilc);
+ potential_page_fault(s);
+ tmp32_1 = load_reg32(r2);
+ tmp = load_reg(r1);
+ gen_helper_servc(cc_op, tmp32_1, tmp);
+ set_cc_static(s);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i64(tmp);
+ break;
+#endif
+ default:
+ LOG_DISAS("illegal b2 operation 0x%x\n", op);
+ gen_illegal_opcode(s, ilc);
+ break;
+ }
+}
+
+static void disas_b3(DisasContext *s, int op, int m3, int r1, int r2)
+{
+ TCGv_i64 tmp;
+ TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
+ LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op, m3, r1, r2);
+#define FP_HELPER(i) \
+ tmp32_1 = tcg_const_i32(r1); \
+ tmp32_2 = tcg_const_i32(r2); \
+ gen_helper_ ## i (tmp32_1, tmp32_2); \
+ tcg_temp_free_i32(tmp32_1); \
+ tcg_temp_free_i32(tmp32_2);
+
+#define FP_HELPER_CC(i) \
+ tmp32_1 = tcg_const_i32(r1); \
+ tmp32_2 = tcg_const_i32(r2); \
+ gen_helper_ ## i (cc_op, tmp32_1, tmp32_2); \
+ set_cc_static(s); \
+ tcg_temp_free_i32(tmp32_1); \
+ tcg_temp_free_i32(tmp32_2);
+
+ switch (op) {
+ case 0x0: /* LPEBR R1,R2 [RRE] */
+ FP_HELPER_CC(lpebr);
+ break;
+ case 0x2: /* LTEBR R1,R2 [RRE] */
+ FP_HELPER_CC(ltebr);
+ break;
+ case 0x3: /* LCEBR R1,R2 [RRE] */
+ FP_HELPER_CC(lcebr);
+ break;
+ case 0x4: /* LDEBR R1,R2 [RRE] */
+ FP_HELPER(ldebr);
+ break;
+ case 0x5: /* LXDBR R1,R2 [RRE] */
+ FP_HELPER(lxdbr);
+ break;
+ case 0x9: /* CEBR R1,R2 [RRE] */
+ FP_HELPER_CC(cebr);
+ break;
+ case 0xa: /* AEBR R1,R2 [RRE] */
+ FP_HELPER_CC(aebr);
+ break;
+ case 0xb: /* SEBR R1,R2 [RRE] */
+ FP_HELPER_CC(sebr);
+ break;
+ case 0xd: /* DEBR R1,R2 [RRE] */
+ FP_HELPER(debr);
+ break;
+ case 0x10: /* LPDBR R1,R2 [RRE] */
+ FP_HELPER_CC(lpdbr);
+ break;
+ case 0x12: /* LTDBR R1,R2 [RRE] */
+ FP_HELPER_CC(ltdbr);
+ break;
+ case 0x13: /* LCDBR R1,R2 [RRE] */
+ FP_HELPER_CC(lcdbr);
+ break;
+    case 0x15: /* SQDBR R1,R2 [RRE] */
+ FP_HELPER(sqdbr);
+ break;
+ case 0x17: /* MEEBR R1,R2 [RRE] */
+ FP_HELPER(meebr);
+ break;
+ case 0x19: /* CDBR R1,R2 [RRE] */
+ FP_HELPER_CC(cdbr);
+ break;
+ case 0x1a: /* ADBR R1,R2 [RRE] */
+ FP_HELPER_CC(adbr);
+ break;
+ case 0x1b: /* SDBR R1,R2 [RRE] */
+ FP_HELPER_CC(sdbr);
+ break;
+ case 0x1c: /* MDBR R1,R2 [RRE] */
+ FP_HELPER(mdbr);
+ break;
+ case 0x1d: /* DDBR R1,R2 [RRE] */
+ FP_HELPER(ddbr);
+ break;
+ case 0xe: /* MAEBR R1,R3,R2 [RRF] */
+ case 0x1e: /* MADBR R1,R3,R2 [RRF] */
+ case 0x1f: /* MSDBR R1,R3,R2 [RRF] */
+ /* for RRF insns, m3 is R1, r1 is R3, and r2 is R2 */
+ tmp32_1 = tcg_const_i32(m3);
+ tmp32_2 = tcg_const_i32(r2);
+ tmp32_3 = tcg_const_i32(r1);
+ switch (op) {
+ case 0xe:
+ gen_helper_maebr(tmp32_1, tmp32_3, tmp32_2);
+ break;
+ case 0x1e:
+ gen_helper_madbr(tmp32_1, tmp32_3, tmp32_2);
+ break;
+ case 0x1f:
+ gen_helper_msdbr(tmp32_1, tmp32_3, tmp32_2);
+ break;
+ default:
+ tcg_abort();
+ }
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ break;
+ case 0x40: /* LPXBR R1,R2 [RRE] */
+ FP_HELPER_CC(lpxbr);
+ break;
+ case 0x42: /* LTXBR R1,R2 [RRE] */
+ FP_HELPER_CC(ltxbr);
+ break;
+ case 0x43: /* LCXBR R1,R2 [RRE] */
+ FP_HELPER_CC(lcxbr);
+ break;
+ case 0x44: /* LEDBR R1,R2 [RRE] */
+ FP_HELPER(ledbr);
+ break;
+ case 0x45: /* LDXBR R1,R2 [RRE] */
+ FP_HELPER(ldxbr);
+ break;
+ case 0x46: /* LEXBR R1,R2 [RRE] */
+ FP_HELPER(lexbr);
+ break;
+ case 0x49: /* CXBR R1,R2 [RRE] */
+ FP_HELPER_CC(cxbr);
+ break;
+ case 0x4a: /* AXBR R1,R2 [RRE] */
+ FP_HELPER_CC(axbr);
+ break;
+ case 0x4b: /* SXBR R1,R2 [RRE] */
+ FP_HELPER_CC(sxbr);
+ break;
+ case 0x4c: /* MXBR R1,R2 [RRE] */
+ FP_HELPER(mxbr);
+ break;
+ case 0x4d: /* DXBR R1,R2 [RRE] */
+ FP_HELPER(dxbr);
+ break;
+ case 0x65: /* LXR R1,R2 [RRE] */
+ tmp = load_freg(r2);
+ store_freg(r1, tmp);
+ tcg_temp_free_i64(tmp);
+ tmp = load_freg(r2 + 2);
+ store_freg(r1 + 2, tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x74: /* LZER R1 [RRE] */
+ tmp32_1 = tcg_const_i32(r1);
+ gen_helper_lzer(tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x75: /* LZDR R1 [RRE] */
+ tmp32_1 = tcg_const_i32(r1);
+ gen_helper_lzdr(tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x76: /* LZXR R1 [RRE] */
+ tmp32_1 = tcg_const_i32(r1);
+ gen_helper_lzxr(tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x84: /* SFPC R1 [RRE] */
+ tmp32_1 = load_reg32(r1);
+ tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUState, fpc));
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x8c: /* EFPC R1 [RRE] */
+ tmp32_1 = tcg_temp_new_i32();
+ tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUState, fpc));
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x94: /* CEFBR R1,R2 [RRE] */
+ case 0x95: /* CDFBR R1,R2 [RRE] */
+ case 0x96: /* CXFBR R1,R2 [RRE] */
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = load_reg32(r2);
+ switch (op) {
+ case 0x94:
+ gen_helper_cefbr(tmp32_1, tmp32_2);
+ break;
+ case 0x95:
+ gen_helper_cdfbr(tmp32_1, tmp32_2);
+ break;
+ case 0x96:
+ gen_helper_cxfbr(tmp32_1, tmp32_2);
+ break;
+ default:
+ tcg_abort();
+ }
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x98: /* CFEBR R1,R2 [RRE] */
+ case 0x99: /* CFDBR R1,R2 [RRE] */
+ case 0x9a: /* CFXBR R1,R2 [RRE] */
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r2);
+ tmp32_3 = tcg_const_i32(m3);
+ switch (op) {
+ case 0x98:
+ gen_helper_cfebr(cc_op, tmp32_1, tmp32_2, tmp32_3);
+ break;
+ case 0x99:
+ gen_helper_cfdbr(cc_op, tmp32_1, tmp32_2, tmp32_3);
+ break;
+ case 0x9a:
+ gen_helper_cfxbr(cc_op, tmp32_1, tmp32_2, tmp32_3);
+ break;
+ default:
+ tcg_abort();
+ }
+ set_cc_static(s);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ break;
+ case 0xa4: /* CEGBR R1,R2 [RRE] */
+ case 0xa5: /* CDGBR R1,R2 [RRE] */
+ tmp32_1 = tcg_const_i32(r1);
+ tmp = load_reg(r2);
+ switch (op) {
+ case 0xa4:
+ gen_helper_cegbr(tmp32_1, tmp);
+ break;
+ case 0xa5:
+ gen_helper_cdgbr(tmp32_1, tmp);
+ break;
+ default:
+ tcg_abort();
+ }
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0xa6: /* CXGBR R1,R2 [RRE] */
+ tmp32_1 = tcg_const_i32(r1);
+ tmp = load_reg(r2);
+ gen_helper_cxgbr(tmp32_1, tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0xa8: /* CGEBR R1,R2 [RRE] */
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r2);
+ tmp32_3 = tcg_const_i32(m3);
+ gen_helper_cgebr(cc_op, tmp32_1, tmp32_2, tmp32_3);
+ set_cc_static(s);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ break;
+ case 0xa9: /* CGDBR R1,R2 [RRE] */
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r2);
+ tmp32_3 = tcg_const_i32(m3);
+ gen_helper_cgdbr(cc_op, tmp32_1, tmp32_2, tmp32_3);
+ set_cc_static(s);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ break;
+ case 0xaa: /* CGXBR R1,R2 [RRE] */
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r2);
+ tmp32_3 = tcg_const_i32(m3);
+ gen_helper_cgxbr(cc_op, tmp32_1, tmp32_2, tmp32_3);
+ set_cc_static(s);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ break;
+ default:
+ LOG_DISAS("illegal b3 operation 0x%x\n", op);
+ gen_illegal_opcode(s, 2);
+ break;
+ }
+
+#undef FP_HELPER_CC
+#undef FP_HELPER
+}
+
+static void disas_b9(DisasContext *s, int op, int r1, int r2)
+{
+ TCGv_i64 tmp, tmp2, tmp3;
+ TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
+
+ LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op, r1, r2);
+ switch (op) {
+ case 0x0: /* LPGR R1,R2 [RRE] */
+ case 0x1: /* LNGR R1,R2 [RRE] */
+ case 0x2: /* LTGR R1,R2 [RRE] */
+ case 0x3: /* LCGR R1,R2 [RRE] */
+ case 0x10: /* LPGFR R1,R2 [RRE] */
+ case 0x11: /* LNFGR R1,R2 [RRE] */
+ case 0x12: /* LTGFR R1,R2 [RRE] */
+ case 0x13: /* LCGFR R1,R2 [RRE] */
+ if (op & 0x10) {
+ tmp = load_reg32_i64(r2);
+ } else {
+ tmp = load_reg(r2);
+ }
+ switch (op & 0xf) {
+ case 0x0: /* LP?GR */
+ set_cc_abs64(s, tmp);
+ gen_helper_abs_i64(tmp, tmp);
+ store_reg(r1, tmp);
+ break;
+ case 0x1: /* LN?GR */
+ set_cc_nabs64(s, tmp);
+ gen_helper_nabs_i64(tmp, tmp);
+ store_reg(r1, tmp);
+ break;
+ case 0x2: /* LT?GR */
+ if (r1 != r2) {
+ store_reg(r1, tmp);
+ }
+ set_cc_s64(s, tmp);
+ break;
+ case 0x3: /* LC?GR */
+ tcg_gen_neg_i64(regs[r1], tmp);
+ set_cc_comp64(s, regs[r1]);
+ break;
+ }
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x4: /* LGR R1,R2 [RRE] */
+ store_reg(r1, regs[r2]);
+ break;
+ case 0x6: /* LGBR R1,R2 [RRE] */
+ tmp2 = load_reg(r2);
+ tcg_gen_ext8s_i64(tmp2, tmp2);
+ store_reg(r1, tmp2);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x8: /* AGR R1,R2 [RRE] */
+ case 0xa: /* ALGR R1,R2 [RRE] */
+ tmp = load_reg(r1);
+ tmp2 = load_reg(r2);
+ tmp3 = tcg_temp_new_i64();
+ tcg_gen_add_i64(tmp3, tmp, tmp2);
+ store_reg(r1, tmp3);
+ switch (op) {
+ case 0x8:
+ set_cc_add64(s, tmp, tmp2, tmp3);
+ break;
+ case 0xa:
+ set_cc_addu64(s, tmp, tmp2, tmp3);
+ break;
+ default:
+ tcg_abort();
+ }
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x9: /* SGR R1,R2 [RRE] */
+ case 0xb: /* SLGR R1,R2 [RRE] */
+ case 0x1b: /* SLGFR R1,R2 [RRE] */
+ case 0x19: /* SGFR R1,R2 [RRE] */
+ tmp = load_reg(r1);
+ switch (op) {
+ case 0x1b:
+ tmp32_1 = load_reg32(r2);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(tmp2, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x19:
+ tmp32_1 = load_reg32(r2);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(tmp2, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ default:
+ tmp2 = load_reg(r2);
+ break;
+ }
+ tmp3 = tcg_temp_new_i64();
+ tcg_gen_sub_i64(tmp3, tmp, tmp2);
+ store_reg(r1, tmp3);
+ switch (op) {
+ case 0x9:
+ case 0x19:
+ set_cc_sub64(s, tmp, tmp2, tmp3);
+ break;
+ case 0xb:
+ case 0x1b:
+ set_cc_subu64(s, tmp, tmp2, tmp3);
+ break;
+ default:
+ tcg_abort();
+ }
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0xc: /* MSGR R1,R2 [RRE] */
+ case 0x1c: /* MSGFR R1,R2 [RRE] */
+ tmp = load_reg(r1);
+ tmp2 = load_reg(r2);
+ if (op == 0x1c) {
+ tcg_gen_ext32s_i64(tmp2, tmp2);
+ }
+ tcg_gen_mul_i64(tmp, tmp, tmp2);
+ store_reg(r1, tmp);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0xd: /* DSGR R1,R2 [RRE] */
+ case 0x1d: /* DSGFR R1,R2 [RRE] */
+ tmp = load_reg(r1 + 1);
+ if (op == 0xd) {
+ tmp2 = load_reg(r2);
+ } else {
+ tmp32_1 = load_reg32(r2);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(tmp2, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ }
+ tmp3 = tcg_temp_new_i64();
+ tcg_gen_div_i64(tmp3, tmp, tmp2);
+ store_reg(r1 + 1, tmp3);
+ tcg_gen_rem_i64(tmp3, tmp, tmp2);
+ store_reg(r1, tmp3);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x14: /* LGFR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r2);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(tmp, tmp32_1);
+ store_reg(r1, tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x16: /* LLGFR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r2);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(tmp, tmp32_1);
+ store_reg(r1, tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x17: /* LLGTR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r2);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_andi_i32(tmp32_1, tmp32_1, 0x7fffffffUL);
+ tcg_gen_extu_i32_i64(tmp, tmp32_1);
+ store_reg(r1, tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x18: /* AGFR R1,R2 [RRE] */
+ case 0x1a: /* ALGFR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r2);
+ tmp2 = tcg_temp_new_i64();
+ if (op == 0x18) {
+ tcg_gen_ext_i32_i64(tmp2, tmp32_1);
+ } else {
+ tcg_gen_extu_i32_i64(tmp2, tmp32_1);
+ }
+ tcg_temp_free_i32(tmp32_1);
+ tmp = load_reg(r1);
+ tmp3 = tcg_temp_new_i64();
+ tcg_gen_add_i64(tmp3, tmp, tmp2);
+ store_reg(r1, tmp3);
+ if (op == 0x18) {
+ set_cc_add64(s, tmp, tmp2, tmp3);
+ } else {
+ set_cc_addu64(s, tmp, tmp2, tmp3);
+ }
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x1f: /* LRVR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r2);
+ tcg_gen_bswap32_i32(tmp32_1, tmp32_1);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x20: /* CGR R1,R2 [RRE] */
+ case 0x30: /* CGFR R1,R2 [RRE] */
+ tmp2 = load_reg(r2);
+ if (op == 0x30) {
+ tcg_gen_ext32s_i64(tmp2, tmp2);
+ }
+ tmp = load_reg(r1);
+ cmp_s64(s, tmp, tmp2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x21: /* CLGR R1,R2 [RRE] */
+ case 0x31: /* CLGFR R1,R2 [RRE] */
+ tmp2 = load_reg(r2);
+ if (op == 0x31) {
+ tcg_gen_ext32u_i64(tmp2, tmp2);
+ }
+ tmp = load_reg(r1);
+ cmp_u64(s, tmp, tmp2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x26: /* LBR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r2);
+ tcg_gen_ext8s_i32(tmp32_1, tmp32_1);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x27: /* LHR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r2);
+ tcg_gen_ext16s_i32(tmp32_1, tmp32_1);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x80: /* NGR R1,R2 [RRE] */
+ case 0x81: /* OGR R1,R2 [RRE] */
+ case 0x82: /* XGR R1,R2 [RRE] */
+ tmp = load_reg(r1);
+ tmp2 = load_reg(r2);
+ switch (op) {
+ case 0x80:
+ tcg_gen_and_i64(tmp, tmp, tmp2);
+ break;
+ case 0x81:
+ tcg_gen_or_i64(tmp, tmp, tmp2);
+ break;
+ case 0x82:
+ tcg_gen_xor_i64(tmp, tmp, tmp2);
+ break;
+ default:
+ tcg_abort();
+ }
+ store_reg(r1, tmp);
+ set_cc_nz_u64(s, tmp);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x83: /* FLOGR R1,R2 [RRE] */
+ tmp = load_reg(r2);
+ tmp32_1 = tcg_const_i32(r1);
+ gen_helper_flogr(cc_op, tmp32_1, tmp);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x84: /* LLGCR R1,R2 [RRE] */
+ tmp = load_reg(r2);
+ tcg_gen_andi_i64(tmp, tmp, 0xff);
+ store_reg(r1, tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x85: /* LLGHR R1,R2 [RRE] */
+ tmp = load_reg(r2);
+ tcg_gen_andi_i64(tmp, tmp, 0xffff);
+ store_reg(r1, tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x87: /* DLGR R1,R2 [RRE] */
+ tmp32_1 = tcg_const_i32(r1);
+ tmp = load_reg(r2);
+ gen_helper_dlg(tmp32_1, tmp);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x88: /* ALCGR R1,R2 [RRE] */
+ tmp = load_reg(r1);
+ tmp2 = load_reg(r2);
+ tmp3 = tcg_temp_new_i64();
+ gen_op_calc_cc(s);
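+        /* CC values 2 and 3 indicate a carry-out, so the carry-in is
+           bit 1 of the current CC: (cc >> 1) & 1. */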
+ tcg_gen_extu_i32_i64(tmp3, cc_op);
+ tcg_gen_shri_i64(tmp3, tmp3, 1);
+ tcg_gen_andi_i64(tmp3, tmp3, 1);
+ tcg_gen_add_i64(tmp3, tmp2, tmp3);
+ tcg_gen_add_i64(tmp3, tmp, tmp3);
+ store_reg(r1, tmp3);
+ set_cc_addu64(s, tmp, tmp2, tmp3);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x89: /* SLBGR R1,R2 [RRE] */
+ tmp = load_reg(r1);
+ tmp2 = load_reg(r2);
+ tmp32_1 = tcg_const_i32(r1);
+ gen_op_calc_cc(s);
+ gen_helper_slbg(cc_op, cc_op, tmp32_1, tmp, tmp2);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x94: /* LLCR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r2);
+ tcg_gen_andi_i32(tmp32_1, tmp32_1, 0xff);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x95: /* LLHR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r2);
+ tcg_gen_andi_i32(tmp32_1, tmp32_1, 0xffff);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x96: /* MLR R1,R2 [RRE] */
+ /* reg(r1, r1+1) = reg(r1+1) * reg(r2) */
+ tmp2 = load_reg(r2);
+ tmp3 = load_reg((r1 + 1) & 15);
+ tcg_gen_ext32u_i64(tmp2, tmp2);
+ tcg_gen_ext32u_i64(tmp3, tmp3);
+ tcg_gen_mul_i64(tmp2, tmp2, tmp3);
+ store_reg32_i64((r1 + 1) & 15, tmp2);
+ tcg_gen_shri_i64(tmp2, tmp2, 32);
+ store_reg32_i64(r1, tmp2);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x97: /* DLR R1,R2 [RRE] */
+ /* reg(r1) = reg(r1, r1+1) % reg(r2) */
+ /* reg(r1+1) = reg(r1, r1+1) / reg(r2) */
+ tmp = load_reg(r1);
+ tmp2 = load_reg(r2);
+ tmp3 = load_reg((r1 + 1) & 15);
+ tcg_gen_ext32u_i64(tmp2, tmp2);
+ tcg_gen_ext32u_i64(tmp3, tmp3);
+ tcg_gen_shli_i64(tmp, tmp, 32);
+ tcg_gen_or_i64(tmp, tmp, tmp3);
+
+ tcg_gen_rem_i64(tmp3, tmp, tmp2);
+ tcg_gen_div_i64(tmp, tmp, tmp2);
+ store_reg32_i64((r1 + 1) & 15, tmp);
+ store_reg32_i64(r1, tmp3);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x98: /* ALCR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = load_reg32(r2);
+ tmp32_3 = tcg_temp_new_i32();
+ /* XXX possible optimization point */
+ gen_op_calc_cc(s);
+ gen_helper_addc_u32(tmp32_3, cc_op, tmp32_1, tmp32_2);
+ set_cc_addu32(s, tmp32_1, tmp32_2, tmp32_3);
+ store_reg32(r1, tmp32_3);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ break;
+ case 0x99: /* SLBR R1,R2 [RRE] */
+ tmp32_1 = load_reg32(r2);
+ tmp32_2 = tcg_const_i32(r1);
+ gen_op_calc_cc(s);
+ gen_helper_slb(cc_op, cc_op, tmp32_2, tmp32_1);
+ set_cc_static(s);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ default:
+ LOG_DISAS("illegal b9 operation 0x%x\n", op);
+ gen_illegal_opcode(s, 2);
+ break;
+ }
+}
+
+static void disas_c0(DisasContext *s, int op, int r1, int i2)
+{
+ TCGv_i64 tmp;
+ TCGv_i32 tmp32_1, tmp32_2;
+ uint64_t target = s->pc + i2 * 2LL;
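+    /* Relative-addressing operands (LARL, BRCL, BRASL) count halfwords. */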
+ int l1;
+
+ LOG_DISAS("disas_c0: op 0x%x r1 %d i2 %d\n", op, r1, i2);
+
+ switch (op) {
+    case 0: /* LARL R1,I2 [RIL] */
+ tmp = tcg_const_i64(target);
+ store_reg(r1, tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x1: /* LGFI R1,I2 [RIL] */
+ tmp = tcg_const_i64((int64_t)i2);
+ store_reg(r1, tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x4: /* BRCL M1,I2 [RIL] */
+ /* m1 & (1 << (3 - cc)) */
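+        /* e.g. m1 = 8 (binary 1000) tests 1 << (3 - 0), so the branch
+           is taken only when CC == 0. */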
+ tmp32_1 = tcg_const_i32(3);
+ tmp32_2 = tcg_const_i32(1);
+ gen_op_calc_cc(s);
+ tcg_gen_sub_i32(tmp32_1, tmp32_1, cc_op);
+ tcg_gen_shl_i32(tmp32_2, tmp32_2, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ tmp32_1 = tcg_const_i32(r1); /* m1 == r1 */
+ tcg_gen_and_i32(tmp32_1, tmp32_1, tmp32_2);
+ l1 = gen_new_label();
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp32_1, 0, l1);
+ gen_goto_tb(s, 0, target);
+ gen_set_label(l1);
+ gen_goto_tb(s, 1, s->pc + 6);
+ s->is_jmp = DISAS_TB_JUMP;
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+    case 0x5: /* BRASL R1,I2 [RIL] */
+ tmp = tcg_const_i64(pc_to_link_info(s, s->pc + 6));
+ store_reg(r1, tmp);
+ tcg_temp_free_i64(tmp);
+ gen_goto_tb(s, 0, target);
+ s->is_jmp = DISAS_TB_JUMP;
+ break;
+ case 0x7: /* XILF R1,I2 [RIL] */
+ case 0xb: /* NILF R1,I2 [RIL] */
+ case 0xd: /* OILF R1,I2 [RIL] */
+ tmp32_1 = load_reg32(r1);
+ switch (op) {
+ case 0x7:
+ tcg_gen_xori_i32(tmp32_1, tmp32_1, (uint32_t)i2);
+ break;
+ case 0xb:
+ tcg_gen_andi_i32(tmp32_1, tmp32_1, (uint32_t)i2);
+ break;
+ case 0xd:
+ tcg_gen_ori_i32(tmp32_1, tmp32_1, (uint32_t)i2);
+ break;
+ default:
+ tcg_abort();
+ }
+ store_reg32(r1, tmp32_1);
+ set_cc_nz_u32(s, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x9: /* IILF R1,I2 [RIL] */
+ tmp32_1 = tcg_const_i32((uint32_t)i2);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0xa: /* NIHF R1,I2 [RIL] */
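+        /* AND the high word only: ORing 0xffffffff into the mask keeps
+           the low word intact.  The CC is set from the high word. */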
+ tmp = load_reg(r1);
+ tmp32_1 = tcg_temp_new_i32();
+ tcg_gen_andi_i64(tmp, tmp, (((uint64_t)((uint32_t)i2)) << 32)
+ | 0xffffffffULL);
+ store_reg(r1, tmp);
+ tcg_gen_shri_i64(tmp, tmp, 32);
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp);
+ set_cc_nz_u32(s, tmp32_1);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0xe: /* LLIHF R1,I2 [RIL] */
+ tmp = tcg_const_i64(((uint64_t)(uint32_t)i2) << 32);
+ store_reg(r1, tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0xf: /* LLILF R1,I2 [RIL] */
+ tmp = tcg_const_i64((uint32_t)i2);
+ store_reg(r1, tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ default:
+ LOG_DISAS("illegal c0 operation 0x%x\n", op);
+ gen_illegal_opcode(s, 3);
+ break;
+ }
+}
+
+static void disas_c2(DisasContext *s, int op, int r1, int i2)
+{
+ TCGv_i64 tmp, tmp2, tmp3;
+ TCGv_i32 tmp32_1, tmp32_2, tmp32_3;
+
+ switch (op) {
+ case 0x4: /* SLGFI R1,I2 [RIL] */
+ case 0xa: /* ALGFI R1,I2 [RIL] */
+ tmp = load_reg(r1);
+ tmp2 = tcg_const_i64((uint64_t)(uint32_t)i2);
+ tmp3 = tcg_temp_new_i64();
+ switch (op) {
+ case 0x4:
+ tcg_gen_sub_i64(tmp3, tmp, tmp2);
+ set_cc_subu64(s, tmp, tmp2, tmp3);
+ break;
+ case 0xa:
+ tcg_gen_add_i64(tmp3, tmp, tmp2);
+ set_cc_addu64(s, tmp, tmp2, tmp3);
+ break;
+ default:
+ tcg_abort();
+ }
+ store_reg(r1, tmp3);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x5: /* SLFI R1,I2 [RIL] */
+ case 0xb: /* ALFI R1,I2 [RIL] */
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_const_i32(i2);
+ tmp32_3 = tcg_temp_new_i32();
+ switch (op) {
+ case 0x5:
+ tcg_gen_sub_i32(tmp32_3, tmp32_1, tmp32_2);
+ set_cc_subu32(s, tmp32_1, tmp32_2, tmp32_3);
+ break;
+ case 0xb:
+ tcg_gen_add_i32(tmp32_3, tmp32_1, tmp32_2);
+ set_cc_addu32(s, tmp32_1, tmp32_2, tmp32_3);
+ break;
+ default:
+ tcg_abort();
+ }
+ store_reg32(r1, tmp32_3);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ break;
+ case 0xc: /* CGFI R1,I2 [RIL] */
+ tmp = load_reg(r1);
+ cmp_s64c(s, tmp, (int64_t)i2);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0xe: /* CLGFI R1,I2 [RIL] */
+ tmp = load_reg(r1);
+ cmp_u64c(s, tmp, (uint64_t)(uint32_t)i2);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0xd: /* CFI R1,I2 [RIL] */
+ tmp32_1 = load_reg32(r1);
+ cmp_s32c(s, tmp32_1, i2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0xf: /* CLFI R1,I2 [RIL] */
+ tmp32_1 = load_reg32(r1);
+ cmp_u32c(s, tmp32_1, i2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ default:
+ LOG_DISAS("illegal c2 operation 0x%x\n", op);
+ gen_illegal_opcode(s, 3);
+ break;
+ }
+}
+
+static void gen_and_or_xor_i32(int opc, TCGv_i32 tmp, TCGv_i32 tmp2)
+{
+ switch (opc & 0xf) {
+ case 0x4:
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ break;
+ case 0x6:
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ break;
+ case 0x7:
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ break;
+ default:
+ tcg_abort();
+ }
+}
+
+static void disas_s390_insn(DisasContext *s)
+{
+ TCGv_i64 tmp, tmp2, tmp3, tmp4;
+ TCGv_i32 tmp32_1, tmp32_2, tmp32_3, tmp32_4;
+ unsigned char opc;
+ uint64_t insn;
+ int op, r1, r2, r3, d1, d2, x2, b1, b2, i, i2, r1b;
+ TCGv_i32 vl;
+ int ilc;
+ int l1;
+
+ opc = ldub_code(s->pc);
+ LOG_DISAS("opc 0x%x\n", opc);
+
+ ilc = get_ilc(opc);
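+    /* The ILC (instruction length in halfwords: 1, 2 or 3) follows from
+       the two most significant bits of the opcode. */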
+
+ switch (opc) {
+#ifndef CONFIG_USER_ONLY
+ case 0x01: /* SAM */
+ insn = ld_code2(s->pc);
+        /* set addressing mode; we only implement 64-bit mode anyway */
+ break;
+#endif
+ case 0x6: /* BCTR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = load_reg32(r1);
+ tcg_gen_subi_i32(tmp32_1, tmp32_1, 1);
+ store_reg32(r1, tmp32_1);
+
+        if (r2) {
+            gen_update_cc_op(s);
+            l1 = gen_new_label();
+            tcg_gen_brcondi_i32(TCG_COND_NE, tmp32_1, 0, l1);
+            /* tmp32_1 is dead after the brcond; it is reloaded below */
+            tcg_temp_free_i32(tmp32_1);
+
+            /* not taking the branch, jump to after the instruction */
+            gen_goto_tb(s, 0, s->pc + 2);
+            gen_set_label(l1);
+
+            /* take the branch, move R2 into psw.addr */
+            tmp32_1 = load_reg32(r2);
+            tmp = tcg_temp_new_i64();
+            tcg_gen_extu_i32_i64(tmp, tmp32_1);
+            tcg_gen_mov_i64(psw_addr, tmp);
+            s->is_jmp = DISAS_JUMP;
+            tcg_temp_free_i32(tmp32_1);
+            tcg_temp_free_i64(tmp);
+        } else {
+            tcg_temp_free_i32(tmp32_1);
+        }
+ break;
+ case 0x7: /* BCR M1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ if (r2) {
+ tmp = load_reg(r2);
+ gen_bcr(s, r1, tmp, s->pc);
+ tcg_temp_free_i64(tmp);
+ s->is_jmp = DISAS_TB_JUMP;
+ } else {
+ /* XXX: "serialization and checkpoint-synchronization function"? */
+ }
+ break;
+ case 0xa: /* SVC I [RR] */
+ insn = ld_code2(s->pc);
+ debug_insn(insn);
+ i = insn & 0xff;
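+        /* Commit the PSW address and CC before raising the exception so
+           the supervisor-call handler sees a consistent state. */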
+ update_psw_addr(s);
+ gen_op_calc_cc(s);
+ tmp32_1 = tcg_const_i32(i);
+ tmp32_2 = tcg_const_i32(ilc * 2);
+ tmp32_3 = tcg_const_i32(EXCP_SVC);
+ tcg_gen_st_i32(tmp32_1, cpu_env, offsetof(CPUState, int_svc_code));
+ tcg_gen_st_i32(tmp32_2, cpu_env, offsetof(CPUState, int_svc_ilc));
+ gen_helper_exception(tmp32_3);
+ s->is_jmp = DISAS_EXCP;
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ break;
+ case 0xd: /* BASR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp = tcg_const_i64(pc_to_link_info(s, s->pc + 2));
+ store_reg(r1, tmp);
+ if (r2) {
+ tmp2 = load_reg(r2);
+ tcg_gen_mov_i64(psw_addr, tmp2);
+ tcg_temp_free_i64(tmp2);
+ s->is_jmp = DISAS_JUMP;
+ }
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0xe: /* MVCL R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r2);
+ potential_page_fault(s);
+ gen_helper_mvcl(cc_op, tmp32_1, tmp32_2);
+ set_cc_static(s);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x10: /* LPR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = load_reg32(r2);
+ set_cc_abs32(s, tmp32_1);
+ gen_helper_abs_i32(tmp32_1, tmp32_1);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x11: /* LNR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = load_reg32(r2);
+ set_cc_nabs32(s, tmp32_1);
+ gen_helper_nabs_i32(tmp32_1, tmp32_1);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x12: /* LTR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = load_reg32(r2);
+ if (r1 != r2) {
+ store_reg32(r1, tmp32_1);
+ }
+ set_cc_s32(s, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x13: /* LCR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = load_reg32(r2);
+ tcg_gen_neg_i32(tmp32_1, tmp32_1);
+ store_reg32(r1, tmp32_1);
+ set_cc_comp32(s, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x14: /* NR R1,R2 [RR] */
+ case 0x16: /* OR R1,R2 [RR] */
+ case 0x17: /* XR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_2 = load_reg32(r2);
+ tmp32_1 = load_reg32(r1);
+ gen_and_or_xor_i32(opc, tmp32_1, tmp32_2);
+ store_reg32(r1, tmp32_1);
+ set_cc_nz_u32(s, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x18: /* LR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = load_reg32(r2);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x15: /* CLR R1,R2 [RR] */
+ case 0x19: /* CR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = load_reg32(r2);
+ if (opc == 0x15) {
+ cmp_u32(s, tmp32_1, tmp32_2);
+ } else {
+ cmp_s32(s, tmp32_1, tmp32_2);
+ }
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x1a: /* AR R1,R2 [RR] */
+ case 0x1e: /* ALR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = load_reg32(r2);
+ tmp32_3 = tcg_temp_new_i32();
+ tcg_gen_add_i32(tmp32_3, tmp32_1, tmp32_2);
+ store_reg32(r1, tmp32_3);
+ if (opc == 0x1a) {
+ set_cc_add32(s, tmp32_1, tmp32_2, tmp32_3);
+ } else {
+ set_cc_addu32(s, tmp32_1, tmp32_2, tmp32_3);
+ }
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ break;
+ case 0x1b: /* SR R1,R2 [RR] */
+ case 0x1f: /* SLR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = load_reg32(r2);
+ tmp32_3 = tcg_temp_new_i32();
+ tcg_gen_sub_i32(tmp32_3, tmp32_1, tmp32_2);
+ store_reg32(r1, tmp32_3);
+ if (opc == 0x1b) {
+ set_cc_sub32(s, tmp32_1, tmp32_2, tmp32_3);
+ } else {
+ set_cc_subu32(s, tmp32_1, tmp32_2, tmp32_3);
+ }
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ break;
+ case 0x1c: /* MR R1,R2 [RR] */
+ /* reg(r1, r1+1) = reg(r1+1) * reg(r2) */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp2 = load_reg(r2);
+ tmp3 = load_reg((r1 + 1) & 15);
+ tcg_gen_ext32s_i64(tmp2, tmp2);
+ tcg_gen_ext32s_i64(tmp3, tmp3);
+ tcg_gen_mul_i64(tmp2, tmp2, tmp3);
+ store_reg32_i64((r1 + 1) & 15, tmp2);
+ tcg_gen_shri_i64(tmp2, tmp2, 32);
+ store_reg32_i64(r1, tmp2);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x1d: /* DR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = load_reg32(r1 + 1);
+ tmp32_3 = load_reg32(r2);
+
+ tmp = tcg_temp_new_i64(); /* dividend */
+ tmp2 = tcg_temp_new_i64(); /* divisor */
+ tmp3 = tcg_temp_new_i64();
+
+        /* dividend is (r(r1) << 32) | r(r1 + 1) */
+ tcg_gen_extu_i32_i64(tmp, tmp32_1);
+ tcg_gen_extu_i32_i64(tmp2, tmp32_2);
+ tcg_gen_shli_i64(tmp, tmp, 32);
+ tcg_gen_or_i64(tmp, tmp, tmp2);
+
+ /* divisor is r(r2) */
+ tcg_gen_ext_i32_i64(tmp2, tmp32_3);
+
+ tcg_gen_div_i64(tmp3, tmp, tmp2);
+ tcg_gen_rem_i64(tmp, tmp, tmp2);
+
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp);
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp3);
+
+ store_reg32(r1, tmp32_1); /* remainder */
+ store_reg32(r1 + 1, tmp32_2); /* quotient */
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x28: /* LDR R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp = load_freg(r2);
+ store_freg(r1, tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x38: /* LER R1,R2 [RR] */
+ insn = ld_code2(s->pc);
+ decode_rr(s, insn, &r1, &r2);
+ tmp32_1 = load_freg32(r2);
+ store_freg32(r1, tmp32_1);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x40: /* STH R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = load_reg(r1);
+ tcg_gen_qemu_st16(tmp2, tmp, get_mem_index(s));
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+    case 0x41: /* LA R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ store_reg(r1, tmp); /* FIXME: 31/24-bit addressing */
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x42: /* STC R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = load_reg(r1);
+ tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x43: /* IC R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s));
+ store_reg8(r1, tmp2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x44: /* EX R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = load_reg(r1);
+ tmp3 = tcg_const_i64(s->pc + 4);
+ update_psw_addr(s);
+ gen_op_calc_cc(s);
+ gen_helper_ex(cc_op, cc_op, tmp2, tmp, tmp3);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x46: /* BCT R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tcg_temp_free_i64(tmp);
+
+ tmp32_1 = load_reg32(r1);
+ tcg_gen_subi_i32(tmp32_1, tmp32_1, 1);
+ store_reg32(r1, tmp32_1);
+
+ gen_update_cc_op(s);
+ l1 = gen_new_label();
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp32_1, 0, l1);
+
+ /* not taking the branch, jump to after the instruction */
+ gen_goto_tb(s, 0, s->pc + 4);
+ gen_set_label(l1);
+
+        /* take the branch, move the second-operand address into psw.addr */
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tcg_gen_mov_i64(psw_addr, tmp);
+ s->is_jmp = DISAS_JUMP;
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x47: /* BC M1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ gen_bcr(s, r1, tmp, s->pc + 4);
+ tcg_temp_free_i64(tmp);
+ s->is_jmp = DISAS_TB_JUMP;
+ break;
+ case 0x48: /* LH R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld16s(tmp2, tmp, get_mem_index(s));
+ store_reg32_i64(r1, tmp2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x49: /* CH R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_temp_new_i32();
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld16s(tmp2, tmp, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
+ cmp_s32(s, tmp32_1, tmp32_2);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x4a: /* AH R1,D2(X2,B2) [RX] */
+ case 0x4b: /* SH R1,D2(X2,B2) [RX] */
+ case 0x4c: /* MH R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_temp_new_i32();
+ tmp32_3 = tcg_temp_new_i32();
+
+ tcg_gen_qemu_ld16s(tmp2, tmp, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
+ switch (opc) {
+ case 0x4a:
+ tcg_gen_add_i32(tmp32_3, tmp32_1, tmp32_2);
+ set_cc_add32(s, tmp32_1, tmp32_2, tmp32_3);
+ break;
+ case 0x4b:
+ tcg_gen_sub_i32(tmp32_3, tmp32_1, tmp32_2);
+ set_cc_sub32(s, tmp32_1, tmp32_2, tmp32_3);
+ break;
+ case 0x4c:
+ tcg_gen_mul_i32(tmp32_3, tmp32_1, tmp32_2);
+ break;
+ default:
+ tcg_abort();
+ }
+ store_reg32(r1, tmp32_3);
+
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x4d: /* BAS R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_const_i64(pc_to_link_info(s, s->pc + 4));
+ store_reg(r1, tmp2);
+ tcg_gen_mov_i64(psw_addr, tmp);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ s->is_jmp = DISAS_JUMP;
+ break;
+ case 0x4e: /* CVD R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(tmp32_1, regs[r1]);
+ gen_helper_cvd(tmp2, tmp32_1);
+ tcg_gen_qemu_st64(tmp2, tmp, get_mem_index(s));
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+    case 0x50: /* ST R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = load_reg(r1);
+ tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x55: /* CL R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = tcg_temp_new_i32();
+ tmp32_2 = load_reg32(r1);
+ tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
+ cmp_u32(s, tmp32_2, tmp32_1);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x54: /* N R1,D2(X2,B2) [RX] */
+ case 0x56: /* O R1,D2(X2,B2) [RX] */
+ case 0x57: /* X R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
+ gen_and_or_xor_i32(opc, tmp32_1, tmp32_2);
+ store_reg32(r1, tmp32_1);
+ set_cc_nz_u32(s, tmp32_1);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+    case 0x58: /* L R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x59: /* C R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = tcg_temp_new_i32();
+ tmp32_2 = load_reg32(r1);
+ tcg_gen_qemu_ld32s(tmp2, tmp, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
+ cmp_s32(s, tmp32_2, tmp32_1);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x5a: /* A R1,D2(X2,B2) [RX] */
+ case 0x5b: /* S R1,D2(X2,B2) [RX] */
+ case 0x5e: /* AL R1,D2(X2,B2) [RX] */
+ case 0x5f: /* SL R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_temp_new_i32();
+ tmp32_3 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld32s(tmp, tmp, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp);
+ switch (opc) {
+ case 0x5a:
+ case 0x5e:
+ tcg_gen_add_i32(tmp32_3, tmp32_1, tmp32_2);
+ break;
+ case 0x5b:
+ case 0x5f:
+ tcg_gen_sub_i32(tmp32_3, tmp32_1, tmp32_2);
+ break;
+ default:
+ tcg_abort();
+ }
+ store_reg32(r1, tmp32_3);
+ switch (opc) {
+ case 0x5a:
+ set_cc_add32(s, tmp32_1, tmp32_2, tmp32_3);
+ break;
+ case 0x5e:
+ set_cc_addu32(s, tmp32_1, tmp32_2, tmp32_3);
+ break;
+ case 0x5b:
+ set_cc_sub32(s, tmp32_1, tmp32_2, tmp32_3);
+ break;
+ case 0x5f:
+ set_cc_subu32(s, tmp32_1, tmp32_2, tmp32_3);
+ break;
+ default:
+ tcg_abort();
+ }
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ break;
+ case 0x5c: /* M R1,D2(X2,B2) [RX] */
+ /* reg(r1, r1+1) = reg(r1+1) * *(s32*)addr */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32s(tmp2, tmp, get_mem_index(s));
+ tmp3 = load_reg((r1 + 1) & 15);
+ tcg_gen_ext32s_i64(tmp2, tmp2);
+ tcg_gen_ext32s_i64(tmp3, tmp3);
+ tcg_gen_mul_i64(tmp2, tmp2, tmp3);
+ store_reg32_i64((r1 + 1) & 15, tmp2);
+ tcg_gen_shri_i64(tmp2, tmp2, 32);
+ store_reg32_i64(r1, tmp2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x5d: /* D R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp3 = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = load_reg32(r1 + 1);
+
+ tmp = tcg_temp_new_i64();
+ tmp2 = tcg_temp_new_i64();
+
+        /* dividend is (r(r1) << 32) | r(r1 + 1) */
+ tcg_gen_extu_i32_i64(tmp, tmp32_1);
+ tcg_gen_extu_i32_i64(tmp2, tmp32_2);
+ tcg_gen_shli_i64(tmp, tmp, 32);
+ tcg_gen_or_i64(tmp, tmp, tmp2);
+
+ /* divisor is in memory */
+ tcg_gen_qemu_ld32s(tmp2, tmp3, get_mem_index(s));
+
+ /* XXX divisor == 0 -> FixP divide exception */
+
+ tcg_gen_div_i64(tmp3, tmp, tmp2);
+ tcg_gen_rem_i64(tmp, tmp, tmp2);
+
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp);
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp3);
+
+ store_reg32(r1, tmp32_1); /* remainder */
+ store_reg32(r1 + 1, tmp32_2); /* quotient */
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x60: /* STD R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = load_freg(r1);
+ tcg_gen_qemu_st64(tmp2, tmp, get_mem_index(s));
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x68: /* LD R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
+ store_freg(r1, tmp2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x70: /* STE R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = load_freg32(r1);
+ tcg_gen_extu_i32_i64(tmp2, tmp32_1);
+ tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0x71: /* MS R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld32s(tmp2, tmp, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
+ tcg_gen_mul_i32(tmp32_1, tmp32_1, tmp32_2);
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x78: /* LE R1,D2(X2,B2) [RX] */
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp2);
+ store_freg32(r1, tmp32_1);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+#ifndef CONFIG_USER_ONLY
+ case 0x80: /* SSM D2(B2) [S] */
+ /* Set System Mask */
+ check_privileged(s, ilc);
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
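+        /* Replace the system mask (the top byte of the PSW mask) with
+           the byte at the second-operand address. */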
+ tmp2 = tcg_temp_new_i64();
+ tmp3 = tcg_temp_new_i64();
+ tcg_gen_andi_i64(tmp3, psw_mask, ~0xff00000000000000ULL);
+ tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s));
+ tcg_gen_shli_i64(tmp2, tmp2, 56);
+ tcg_gen_or_i64(psw_mask, tmp3, tmp2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+ case 0x82: /* LPSW D2(B2) [S] */
+ /* Load PSW */
+ check_privileged(s, ilc);
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp3 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
+ tcg_gen_addi_i64(tmp, tmp, 4);
+ tcg_gen_qemu_ld32u(tmp3, tmp, get_mem_index(s));
+ gen_helper_load_psw(tmp2, tmp3);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ /* we need to keep cc_op intact */
+ s->is_jmp = DISAS_JUMP;
+ break;
+ case 0x83: /* DIAG R1,R3,D2 [RS] */
+ /* Diagnose call (KVM hypercall) */
+ check_privileged(s, ilc);
+ potential_page_fault(s);
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp32_1 = tcg_const_i32(insn & 0xfff);
+ tmp2 = load_reg(2);
+ tmp3 = load_reg(1);
+ gen_helper_diag(tmp2, tmp32_1, tmp2, tmp3);
+ store_reg(2, tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+#endif
+ case 0x88: /* SRL R1,D2(B2) [RS] */
+ case 0x89: /* SLL R1,D2(B2) [RS] */
+ case 0x8a: /* SRA R1,D2(B2) [RS] */
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp);
+ tcg_gen_andi_i32(tmp32_2, tmp32_2, 0x3f);
+ switch (opc) {
+ case 0x88:
+ tcg_gen_shr_i32(tmp32_1, tmp32_1, tmp32_2);
+ break;
+ case 0x89:
+ tcg_gen_shl_i32(tmp32_1, tmp32_1, tmp32_2);
+ break;
+ case 0x8a:
+ tcg_gen_sar_i32(tmp32_1, tmp32_1, tmp32_2);
+ set_cc_s32(s, tmp32_1);
+ break;
+ default:
+ tcg_abort();
+ }
+ store_reg32(r1, tmp32_1);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x8c: /* SRDL R1,D2(B2) [RS] */
+ case 0x8d: /* SLDL R1,D2(B2) [RS] */
+ case 0x8e: /* SRDA R1,D2(B2) [RS] */
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2); /* shift */
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = load_reg32(r1 + 1);
+ tcg_gen_concat_i32_i64(tmp2, tmp32_2, tmp32_1); /* operand */
+ switch (opc) {
+ case 0x8c:
+ tcg_gen_shr_i64(tmp2, tmp2, tmp);
+ break;
+ case 0x8d:
+ tcg_gen_shl_i64(tmp2, tmp2, tmp);
+ break;
+ case 0x8e:
+ tcg_gen_sar_i64(tmp2, tmp2, tmp);
+ set_cc_s64(s, tmp2);
+ break;
+ }
+        tcg_gen_shri_i64(tmp, tmp2, 32);
+        tcg_gen_trunc_i64_i32(tmp32_1, tmp);
+        store_reg32(r1, tmp32_1);
+        tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
+        store_reg32(r1 + 1, tmp32_2);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i64(tmp2);
+        tcg_temp_free_i32(tmp32_1);
+        tcg_temp_free_i32(tmp32_2);
+        break;
+ case 0x98: /* LM R1,R3,D2(B2) [RS] */
+ case 0x90: /* STM R1,R3,D2(B2) [RS] */
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+
+ tmp = get_address(s, 0, b2, d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp3 = tcg_const_i64(4);
+ tmp4 = tcg_const_i64(0xffffffff00000000ULL);
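+        /* Registers wrap from 15 back to 0.  For LM, tmp4 preserves the
+           high 32 bits of each register while the low word is reloaded. */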
+ for (i = r1;; i = (i + 1) % 16) {
+ if (opc == 0x98) {
+ tcg_gen_qemu_ld32u(tmp2, tmp, get_mem_index(s));
+ tcg_gen_and_i64(regs[i], regs[i], tmp4);
+ tcg_gen_or_i64(regs[i], regs[i], tmp2);
+ } else {
+ tcg_gen_qemu_st32(regs[i], tmp, get_mem_index(s));
+ }
+ if (i == r3) {
+ break;
+ }
+ tcg_gen_add_i64(tmp, tmp, tmp3);
+ }
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ tcg_temp_free_i64(tmp4);
+ break;
+ case 0x91: /* TM D1(B1),I2 [SI] */
+ insn = ld_code4(s->pc);
+ tmp = decode_si(s, insn, &i2, &b1, &d1);
+ tmp2 = tcg_const_i64(i2);
+ tcg_gen_qemu_ld8u(tmp, tmp, get_mem_index(s));
+ cmp_64(s, tmp, tmp2, CC_OP_TM_32);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x92: /* MVI D1(B1),I2 [SI] */
+ insn = ld_code4(s->pc);
+ tmp = decode_si(s, insn, &i2, &b1, &d1);
+ tmp2 = tcg_const_i64(i2);
+ tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x94: /* NI D1(B1),I2 [SI] */
+ case 0x96: /* OI D1(B1),I2 [SI] */
+ case 0x97: /* XI D1(B1),I2 [SI] */
+ insn = ld_code4(s->pc);
+ tmp = decode_si(s, insn, &i2, &b1, &d1);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s));
+ switch (opc) {
+ case 0x94:
+ tcg_gen_andi_i64(tmp2, tmp2, i2);
+ break;
+ case 0x96:
+ tcg_gen_ori_i64(tmp2, tmp2, i2);
+ break;
+ case 0x97:
+ tcg_gen_xori_i64(tmp2, tmp2, i2);
+ break;
+ default:
+ tcg_abort();
+ }
+ tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
+ set_cc_nz_u64(s, tmp2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x95: /* CLI D1(B1),I2 [SI] */
+ insn = ld_code4(s->pc);
+ tmp = decode_si(s, insn, &i2, &b1, &d1);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s));
+ cmp_u64c(s, tmp2, i2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0x9a: /* LAM R1,R3,D2(B2) [RS] */
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ gen_helper_lam(tmp32_1, tmp, tmp32_2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0x9b: /* STAM R1,R3,D2(B2) [RS] */
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ gen_helper_stam(tmp32_1, tmp, tmp32_2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0xa5:
+ insn = ld_code4(s->pc);
+ r1 = (insn >> 20) & 0xf;
+ op = (insn >> 16) & 0xf;
+ i2 = insn & 0xffff;
+ disas_a5(s, op, r1, i2);
+ break;
+ case 0xa7:
+ insn = ld_code4(s->pc);
+ r1 = (insn >> 20) & 0xf;
+ op = (insn >> 16) & 0xf;
+ i2 = (short)insn;
+ disas_a7(s, op, r1, i2);
+ break;
+ case 0xa8: /* MVCLE R1,R3,D2(B2) [RS] */
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ gen_helper_mvcle(cc_op, tmp32_1, tmp, tmp32_2);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0xa9: /* CLCLE R1,R3,D2(B2) [RS] */
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ gen_helper_clcle(cc_op, tmp32_1, tmp, tmp32_2);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+#ifndef CONFIG_USER_ONLY
+ case 0xac: /* STNSM D1(B1),I2 [SI] */
+ case 0xad: /* STOSM D1(B1),I2 [SI] */
+ check_privileged(s, ilc);
+ insn = ld_code4(s->pc);
+ tmp = decode_si(s, insn, &i2, &b1, &d1);
+ tmp2 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(tmp2, psw_mask, 56);
+ tcg_gen_qemu_st8(tmp2, tmp, get_mem_index(s));
+ if (opc == 0xac) {
+ tcg_gen_andi_i64(psw_mask, psw_mask,
+ ((uint64_t)i2 << 56) | 0x00ffffffffffffffULL);
+ } else {
+ tcg_gen_ori_i64(psw_mask, psw_mask, (uint64_t)i2 << 56);
+ }
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ case 0xae: /* SIGP R1,R3,D2(B2) [RS] */
+ check_privileged(s, ilc);
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp2 = load_reg(r3);
+ tmp32_1 = tcg_const_i32(r1);
+ potential_page_fault(s);
+ gen_helper_sigp(cc_op, tmp, tmp32_1, tmp2);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+ case 0xb1: /* LRA R1,D2(X2, B2) [RX] */
+ check_privileged(s, ilc);
+ insn = ld_code4(s->pc);
+ tmp = decode_rx(s, insn, &r1, &x2, &b2, &d2);
+ tmp32_1 = tcg_const_i32(r1);
+ potential_page_fault(s);
+ gen_helper_lra(cc_op, tmp, tmp32_1);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ break;
+#endif
+ case 0xb2:
+ insn = ld_code4(s->pc);
+ op = (insn >> 16) & 0xff;
+ switch (op) {
+ case 0x9c: /* STFPC D2(B2) [S] */
+ d2 = insn & 0xfff;
+ b2 = (insn >> 12) & 0xf;
+ tmp32_1 = tcg_temp_new_i32();
+ tmp = tcg_temp_new_i64();
+ tmp2 = get_address(s, 0, b2, d2);
+ tcg_gen_ld_i32(tmp32_1, cpu_env, offsetof(CPUState, fpc));
+ tcg_gen_extu_i32_i64(tmp, tmp32_1);
+ tcg_gen_qemu_st32(tmp, tmp2, get_mem_index(s));
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ break;
+ default:
+ disas_b2(s, op, insn);
+ break;
+ }
+ break;
+ case 0xb3:
+ insn = ld_code4(s->pc);
+ op = (insn >> 16) & 0xff;
+ r3 = (insn >> 12) & 0xf; /* aka m3 */
+ r1 = (insn >> 4) & 0xf;
+ r2 = insn & 0xf;
+ disas_b3(s, op, r3, r1, r2);
+ break;
+#ifndef CONFIG_USER_ONLY
+ case 0xb6: /* STCTL R1,R3,D2(B2) [RS] */
+ /* Store Control */
+ check_privileged(s, ilc);
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ gen_helper_stctl(tmp32_1, tmp, tmp32_2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0xb7: /* LCTL R1,R3,D2(B2) [RS] */
+ /* Load Control */
+ check_privileged(s, ilc);
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ gen_helper_lctl(tmp32_1, tmp, tmp32_2);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+#endif
+ case 0xb9:
+ insn = ld_code4(s->pc);
+ r1 = (insn >> 4) & 0xf;
+ r2 = insn & 0xf;
+ op = (insn >> 16) & 0xff;
+ disas_b9(s, op, r1, r2);
+ break;
+ case 0xba: /* CS R1,R3,D2(B2) [RS] */
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = tcg_const_i32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ gen_helper_cs(cc_op, tmp32_1, tmp, tmp32_2);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0xbd: /* CLM R1,M3,D2(B2) [RS] */
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ gen_helper_clm(cc_op, tmp32_1, tmp32_2, tmp);
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0xbe: /* STCM R1,M3,D2(B2) [RS] */
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_const_i32(r3);
+ potential_page_fault(s);
+ gen_helper_stcm(tmp32_1, tmp32_2, tmp);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ break;
+ case 0xbf: /* ICM R1,M3,D2(B2) [RS] */
+ insn = ld_code4(s->pc);
+ decode_rs(s, insn, &r1, &r3, &b2, &d2);
+ if (r3 == 15) {
+ /* effectively a 32-bit load */
+ tmp = get_address(s, 0, b2, d2);
+ tmp32_1 = tcg_temp_new_i32();
+ tmp32_2 = tcg_const_i32(r3);
+ tcg_gen_qemu_ld32u(tmp, tmp, get_mem_index(s));
+ store_reg32_i64(r1, tmp);
+ tcg_gen_trunc_i64_i32(tmp32_1, tmp);
+ set_cc_icm(s, tmp32_2, tmp32_1);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ } else if (r3) {
+ uint32_t mask = 0x00ffffffUL;
+ uint32_t shift = 24;
+ int m3 = r3;
+ tmp = get_address(s, 0, b2, d2);
+ tmp2 = tcg_temp_new_i64();
+ tmp32_1 = load_reg32(r1);
+ tmp32_2 = tcg_temp_new_i32();
+ tmp32_3 = tcg_const_i32(r3);
+ tmp32_4 = tcg_const_i32(0);
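+            /* Walk m3 from its most significant bit: every set bit takes
+               the next byte from memory and inserts it at the matching
+               byte of r1.  tmp32_4 ORs the inserted bits for the CC. */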
+ while (m3) {
+ if (m3 & 8) {
+ tcg_gen_qemu_ld8u(tmp2, tmp, get_mem_index(s));
+ tcg_gen_trunc_i64_i32(tmp32_2, tmp2);
+ if (shift) {
+ tcg_gen_shli_i32(tmp32_2, tmp32_2, shift);
+ }
+ tcg_gen_andi_i32(tmp32_1, tmp32_1, mask);
+ tcg_gen_or_i32(tmp32_1, tmp32_1, tmp32_2);
+ tcg_gen_or_i32(tmp32_4, tmp32_4, tmp32_2);
+ tcg_gen_addi_i64(tmp, tmp, 1);
+ }
+ m3 = (m3 << 1) & 0xf;
+ mask = (mask >> 8) | 0xff000000UL;
+ shift -= 8;
+ }
+ store_reg32(r1, tmp32_1);
+ set_cc_icm(s, tmp32_3, tmp32_4);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i32(tmp32_1);
+ tcg_temp_free_i32(tmp32_2);
+ tcg_temp_free_i32(tmp32_3);
+ tcg_temp_free_i32(tmp32_4);
+ } else {
+ /* i.e. env->cc = 0 */
+ gen_op_movi_cc(s, 0);
+ }
+ break;
+ case 0xc0:
+ case 0xc2:
+ insn = ld_code6(s->pc);
+ r1 = (insn >> 36) & 0xf;
+ op = (insn >> 32) & 0xf;
+ i2 = (int)insn;
+ switch (opc) {
+ case 0xc0:
+ disas_c0(s, op, r1, i2);
+ break;
+ case 0xc2:
+ disas_c2(s, op, r1, i2);
+ break;
+ default:
+ tcg_abort();
+ }
+ break;
+ case 0xd2: /* MVC D1(L,B1),D2(B2) [SS] */
+ case 0xd4: /* NC D1(L,B1),D2(B2) [SS] */
+ case 0xd5: /* CLC D1(L,B1),D2(B2) [SS] */
+ case 0xd6: /* OC D1(L,B1),D2(B2) [SS] */
+ case 0xd7: /* XC D1(L,B1),D2(B2) [SS] */
+ case 0xdc: /* TR D1(L,B1),D2(B2) [SS] */
+ case 0xf3: /* UNPK D1(L1,B1),D2(L2,B2) [SS] */
+ insn = ld_code6(s->pc);
+ vl = tcg_const_i32((insn >> 32) & 0xff);
+ b1 = (insn >> 28) & 0xf;
+ b2 = (insn >> 12) & 0xf;
+ d1 = (insn >> 16) & 0xfff;
+ d2 = insn & 0xfff;
+ tmp = get_address(s, 0, b1, d1);
+ tmp2 = get_address(s, 0, b2, d2);
+ switch (opc) {
+ case 0xd2:
+ gen_op_mvc(s, (insn >> 32) & 0xff, tmp, tmp2);
+ break;
+ case 0xd4:
+ potential_page_fault(s);
+ gen_helper_nc(cc_op, vl, tmp, tmp2);
+ set_cc_static(s);
+ break;
+ case 0xd5:
+ gen_op_clc(s, (insn >> 32) & 0xff, tmp, tmp2);
+ break;
+ case 0xd6:
+ potential_page_fault(s);
+ gen_helper_oc(cc_op, vl, tmp, tmp2);
+ set_cc_static(s);
+ break;
+ case 0xd7:
+ potential_page_fault(s);
+ gen_helper_xc(cc_op, vl, tmp, tmp2);
+ set_cc_static(s);
+ break;
+ case 0xdc:
+ potential_page_fault(s);
+ gen_helper_tr(vl, tmp, tmp2);
+ set_cc_static(s);
+ break;
+ case 0xf3:
+ potential_page_fault(s);
+ gen_helper_unpk(vl, tmp, tmp2);
+ break;
+ default:
+ tcg_abort();
+ }
+        tcg_temp_free_i32(vl);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i64(tmp2);
+ break;
+#ifndef CONFIG_USER_ONLY
+ case 0xda: /* MVCP D1(R1,B1),D2(B2),R3 [SS] */
+ case 0xdb: /* MVCS D1(R1,B1),D2(B2),R3 [SS] */
+ check_privileged(s, ilc);
+ potential_page_fault(s);
+ insn = ld_code6(s->pc);
+ r1 = (insn >> 36) & 0xf;
+ r3 = (insn >> 32) & 0xf;
+ b1 = (insn >> 28) & 0xf;
+ d1 = (insn >> 16) & 0xfff;
+ b2 = (insn >> 12) & 0xf;
+ d2 = insn & 0xfff;
+ tmp = load_reg(r1);
+ /* XXX key in r3 */
+ tmp2 = get_address(s, 0, b1, d1);
+ tmp3 = get_address(s, 0, b2, d2);
+ if (opc == 0xda) {
+ gen_helper_mvcp(cc_op, tmp, tmp2, tmp3);
+ } else {
+ gen_helper_mvcs(cc_op, tmp, tmp2, tmp3);
+ }
+ set_cc_static(s);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmp2);
+ tcg_temp_free_i64(tmp3);
+ break;
+#endif
+ case 0xe3:
+ insn = ld_code6(s->pc);
+ debug_insn(insn);
+ op = insn & 0xff;
+ r1 = (insn >> 36) & 0xf;
+ x2 = (insn >> 32) & 0xf;
+ b2 = (insn >> 28) & 0xf;
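+        /* RXY format: concatenate the 12-bit DL and 8-bit DH displacement
+           fields (DH on top) and sign-extend the 20-bit result. */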
+ d2 = ((int)((((insn >> 16) & 0xfff)
+ | ((insn << 4) & 0xff000)) << 12)) >> 12;
+        disas_e3(s, op, r1, x2, b2, d2);
+ break;
+#ifndef CONFIG_USER_ONLY
+ case 0xe5:
+ /* Test Protection */
+ check_privileged(s, ilc);
+ insn = ld_code6(s->pc);
+ debug_insn(insn);
+ disas_e5(s, insn);
+ break;
+#endif
+ case 0xeb:
+ insn = ld_code6(s->pc);
+ debug_insn(insn);
+ op = insn & 0xff;
+ r1 = (insn >> 36) & 0xf;
+ r3 = (insn >> 32) & 0xf;
+ b2 = (insn >> 28) & 0xf;
+ d2 = ((int)((((insn >> 16) & 0xfff)
+ | ((insn << 4) & 0xff000)) << 12)) >> 12;
+ disas_eb(s, op, r1, r3, b2, d2);
+ break;
+ case 0xed:
+ insn = ld_code6(s->pc);
+ debug_insn(insn);
+ op = insn & 0xff;
+ r1 = (insn >> 36) & 0xf;
+ x2 = (insn >> 32) & 0xf;
+ b2 = (insn >> 28) & 0xf;
+ d2 = (short)((insn >> 16) & 0xfff);
+ r1b = (insn >> 12) & 0xf;
+ disas_ed(s, op, r1, x2, b2, d2, r1b);
+ break;
+ default:
+ LOG_DISAS("unimplemented opcode 0x%x\n", opc);
+ gen_illegal_opcode(s, ilc);
+ break;
+ }
+
+ /* Instruction length is encoded in the opcode */
+ s->pc += (ilc * 2);
+}
+
+static inline void gen_intermediate_code_internal(CPUState *env,
+ TranslationBlock *tb,
+ int search_pc)
+{
+ DisasContext dc;
+ target_ulong pc_start;
+ uint64_t next_page_start;
+ uint16_t *gen_opc_end;
+ int j, lj = -1;
+ int num_insns, max_insns;
+ CPUBreakpoint *bp;
+
+ pc_start = tb->pc;
+
+ /* 31-bit mode */
+ if (!(tb->flags & FLAG_MASK_64)) {
+ pc_start &= 0x7fffffff;
+ }
+
+ dc.pc = pc_start;
+ dc.is_jmp = DISAS_NEXT;
+ dc.tb = tb;
+ dc.cc_op = CC_OP_DYNAMIC;
+
+ gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
+
+ next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+
+ gen_icount_start();
+
+ do {
+ if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
+ QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (bp->pc == dc.pc) {
+ gen_debug(&dc);
+ break;
+ }
+ }
+ }
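+        /* When search_pc is set, record pc/cc_op/icount for each insn so
+           a fault in the middle of a TB can be mapped back to guest state. */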
+ if (search_pc) {
+ j = gen_opc_ptr - gen_opc_buf;
+ if (lj < j) {
+ lj++;
+ while (lj < j) {
+ gen_opc_instr_start[lj++] = 0;
+ }
+ }
+ gen_opc_pc[lj] = dc.pc;
+ gen_opc_cc_op[lj] = dc.cc_op;
+ gen_opc_instr_start[lj] = 1;
+ gen_opc_icount[lj] = num_insns;
+ }
+ if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+#if defined(S390X_DEBUG_DISAS_VERBOSE)
+ LOG_DISAS("pc " TARGET_FMT_lx "\n",
+ dc.pc);
+#endif
+ disas_s390_insn(&dc);
+
+ num_insns++;
+ if (env->singlestep_enabled) {
+ gen_debug(&dc);
+ }
+ } while (!dc.is_jmp && gen_opc_ptr < gen_opc_end && dc.pc < next_page_start
+ && num_insns < max_insns && !env->singlestep_enabled
+ && !singlestep);
+
+ if (!dc.is_jmp) {
+ update_psw_addr(&dc);
+ }
+
+ if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
+ gen_op_calc_cc(&dc);
+ } else {
+ /* next TB starts off with CC_OP_DYNAMIC, so make sure the cc op type
+ is in env */
+ gen_op_set_cc_op(&dc);
+ }
+
+ if (tb->cflags & CF_LAST_IO) {
+ gen_io_end();
+ }
+ /* Generate the return instruction */
+ if (dc.is_jmp != DISAS_TB_JUMP) {
+ tcg_gen_exit_tb(0);
+ }
+ gen_icount_end(tb, num_insns);
+ *gen_opc_ptr = INDEX_op_end;
+ if (search_pc) {
+ j = gen_opc_ptr - gen_opc_buf;
+ lj++;
+ while (lj <= j) {
+ gen_opc_instr_start[lj++] = 0;
+ }
+ } else {
+ tb->size = dc.pc - pc_start;
+ tb->icount = num_insns;
+ }
+#if defined(S390X_DEBUG_DISAS)
+ log_cpu_state_mask(CPU_LOG_TB_CPU, env, 0);
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(pc_start, dc.pc - pc_start, 1);
+ qemu_log("\n");
+ }
+#endif
}
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
+ gen_intermediate_code_internal(env, tb, 0);
}
void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
+ gen_intermediate_code_internal(env, tb, 1);
}
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
{
+ int cc_op;
env->psw.addr = gen_opc_pc[pc_pos];
+ cc_op = gen_opc_cc_op[pc_pos];
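+    /* CC_OP_DYNAMIC and CC_OP_STATIC mean env->cc_op is already
+       valid at runtime, so leave it untouched. */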
+ if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
+ env->cc_op = cc_op;
+ }
}