Diffstat (limited to 'target-ppc')
 -rw-r--r--   target-ppc/fpu_helper.c   37
 1 files changed, 13 insertions, 24 deletions
diff --git a/target-ppc/fpu_helper.c b/target-ppc/fpu_helper.c
index abba703752..c6f484fc34 100644
--- a/target-ppc/fpu_helper.c
+++ b/target-ppc/fpu_helper.c
@@ -2487,12 +2487,6 @@ VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0)
 VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1)
 VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1)
 
-#if defined(HOST_WORDS_BIGENDIAN)
-#define JOFFSET 0
-#else
-#define JOFFSET 1
-#endif
-
 /* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
  *   op    - instruction mnemonic
  *   nels  - number of elements (1, 2 or 4)
@@ -2614,7 +2608,7 @@ VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
  *   jdef  - definition of the j index (i or 2*i)
  *   sfprf - set FPRF
  */
-#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, jdef, sfprf, r2sp) \
+#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)      \
 void helper_##op(CPUPPCState *env, uint32_t opcode)                         \
 {                                                                           \
     ppc_vsr_t xt, xb;                                                       \
@@ -2624,7 +2618,6 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                         \
     getVSR(xT(opcode), &xt, env);                                           \
                                                                             \
     for (i = 0; i < nels; i++) {                                            \
-        int j = jdef;                                                       \
         xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);                 \
         if (r2sp) {                                                         \
             xt.tfld = helper_frsp(env, xt.tfld);                            \
@@ -2638,22 +2631,18 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                         \
     helper_float_check_status(env);                                         \
 }
 
-VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, u64[j], f64[i], i, 1, 0)
-VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, u64[j], f64[i], i, 1, 0)
-VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, u64[j], f64[i], i, 1, 1)
-VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, u64[j], f64[i], i, 1, 1)
-VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, u64[j], f64[i], i, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, u64[j], f64[i], i, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, u32[j], f64[i], \
-                  2*i + JOFFSET, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, u32[j], f64[i], \
-                  2*i + JOFFSET, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, u64[i], f32[j], \
-                  2*i + JOFFSET, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, u64[i], f32[j], \
-                  2*i + JOFFSET, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, u32[j], f32[i], i, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, u32[j], f32[i], i, 0, 0)
+VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
+VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
+VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
+VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
+VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
 
 /* For "use current rounding mode", define a value that will not be one of
  * the existing rounding model enums.
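The removed JOFFSET block and the dropped jdef/j parameters are made redundant by the VsrD()/VsrW() accessors used on the added lines: the host-endianness correction is expressed once, where the accessor is defined, instead of being repeated at every macro instantiation. A minimal sketch of how accessors of this kind work, assuming a two-doubleword / four-word vector union in the style of ppc_vsr_t (illustrative only; the names vsr_t and vsr_high_dword are stand-ins, and the real definitions live in QEMU's target-ppc headers):

    #include <stdint.h>

    /* Stand-in for ppc_vsr_t: one 128-bit VSX register viewed as
     * doublewords or words. */
    typedef union {
        uint64_t u64[2];
        uint32_t u32[4];
    } vsr_t;

    /* Element 0 is always the most-significant doubleword/word of the
     * register, regardless of how the host lays the union out, so the
     * per-callsite JOFFSET arithmetic disappears. */
    #if defined(HOST_WORDS_BIGENDIAN)
    #define VsrD(i) u64[(i)]
    #define VsrW(i) u32[(i)]
    #else
    #define VsrD(i) u64[1 - (i)]
    #define VsrW(i) u32[3 - (i)]
    #endif

    /* Usage mirrors the converted helpers: xt.VsrD(0) names the high
     * doubleword on both big- and little-endian hosts. */
    static inline uint64_t vsr_high_dword(const vsr_t *v)
    {
        return v->VsrD(0);
    }

With the accessors carrying the endianness logic, VSX_CVT_INT_TO_FP can take plain element expressions such as VsrD(i) or VsrW(2*i) for its source and target fields, which is why the jdef parameter and the local j variable could be deleted outright.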