author     Richard Henderson <rth@twiddle.net>       2013-04-03 10:56:45 +0000
committer  Alexander Graf <agraf@suse.de>            2013-04-26 23:02:40 +0200
commit     752d634ecc74c76eb5e32db0e536d84c2d6aa3d8 (patch)
tree       7110476ce5e2a8e1ddf09ca0cbf04c8b46200139
parent     2bc173224adc0fc318f2bd6fcf65dfdbc7d51123 (diff)
target-ppc: Fix narrow-mode add/sub carry output
This was broken in b5a73f8d8a57e940f9bbeb399a9e47897522ee9a; the carry itself
was fixed in 79482e5ab38a05ca8869040b0d8b8f451f16ff62, but we still need to
produce the full 64-bit addition in the destination.
Simplify the conditions at the top of these functions that decide when a new
temporary is needed. Only plain addition is important enough to warrant
avoiding the temporary and the extra TCG move op that would come with it.
Signed-off-by: Richard Henderson <rth@twiddle.net>
Reviewed-by: Aurelien Jarno <aurelien@aurel32.net>
Tested-by: Aurelien Jarno <aurelien@aurel32.net>
Signed-off-by: Alexander Graf <agraf@suse.de>
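
For context on the subf hunk below: gen_op_arith_subf computes subtraction via
the two's-complement identity arg2 - arg1 = ~arg1 + arg2 + 1 (or + ca when a
carry-in participates), as its comment "dest = ~arg1 + arg2 [+ ca]" notes. A
quick standalone check of that identity, with a hypothetical helper name that
is not part of the patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical demonstration: two's-complement subtraction as used
       by gen_op_arith_subf, i.e. arg2 - arg1 == ~arg1 + arg2 + 1
       (mod 2^64). */
    static uint64_t subf_identity(uint64_t arg1, uint64_t arg2)
    {
        return ~arg1 + arg2 + 1;
    }

    int main(void)
    {
        uint64_t a1 = 3, a2 = 5;
        printf("%llu == %llu\n",
               (unsigned long long)(a2 - a1),
               (unsigned long long)subf_identity(a1, a2));   /* 2 == 2 */
        return 0;
    }

Rewriting subtraction as an addition is what lets both functions share the
same carry-recovery sequence.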
 target-ppc/translate.c | 35 ++++++++++++++++++++++-------------
 1 file changed, 22 insertions(+), 13 deletions(-)
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index 294ab5809c..362ca3af99 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -768,22 +768,25 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
 {
     TCGv t0 = ret;
 
-    if (((compute_ca && add_ca) || compute_ov)
-        && (TCGV_EQUAL(ret, arg1) || TCGV_EQUAL(ret, arg2))) {
+    if (compute_ca || compute_ov) {
         t0 = tcg_temp_new();
     }
 
     if (compute_ca) {
         if (NARROW_MODE(ctx)) {
+            /* Caution: a non-obvious corner case of the spec is that we
+               must produce the *entire* 64-bit addition, but produce the
+               carry into bit 32.  */
             TCGv t1 = tcg_temp_new();
-            tcg_gen_ext32u_tl(t1, arg2);
-            tcg_gen_ext32u_tl(t0, arg1);
-            tcg_gen_add_tl(t0, t0, t1);
-            tcg_temp_free(t1);
+            tcg_gen_xor_tl(t1, arg1, arg2);         /* add without carry */
+            tcg_gen_add_tl(t0, arg1, arg2);
             if (add_ca) {
                 tcg_gen_add_tl(t0, t0, cpu_ca);
             }
-            tcg_gen_shri_tl(cpu_ca, t0, 32);
+            tcg_gen_xor_tl(cpu_ca, t0, t1);         /* bits changed w/ carry */
+            tcg_temp_free(t1);
+            tcg_gen_shri_tl(cpu_ca, cpu_ca, 32);    /* extract bit 32 */
+            tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
         } else {
             TCGv zero = tcg_const_tl(0);
             if (add_ca) {
@@ -1122,24 +1125,30 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
 {
     TCGv t0 = ret;
 
-    if (compute_ov && (TCGV_EQUAL(ret, arg1) || TCGV_EQUAL(ret, arg2))) {
+    if (compute_ca || compute_ov) {
         t0 = tcg_temp_new();
     }
 
     if (compute_ca) {
         /* dest = ~arg1 + arg2 [+ ca].  */
         if (NARROW_MODE(ctx)) {
+            /* Caution: a non-obvious corner case of the spec is that we
+               must produce the *entire* 64-bit addition, but produce the
+               carry into bit 32.  */
             TCGv inv1 = tcg_temp_new();
+            TCGv t1 = tcg_temp_new();
             tcg_gen_not_tl(inv1, arg1);
-            tcg_gen_ext32u_tl(t0, arg2);
-            tcg_gen_ext32u_tl(inv1, inv1);
             if (add_ca) {
-                tcg_gen_add_tl(t0, t0, cpu_ca);
+                tcg_gen_add_tl(t0, arg2, cpu_ca);
             } else {
-                tcg_gen_addi_tl(t0, t0, 1);
+                tcg_gen_addi_tl(t0, arg2, 1);
             }
+            tcg_gen_xor_tl(t1, arg2, inv1);         /* add without carry */
             tcg_gen_add_tl(t0, t0, inv1);
-            tcg_gen_shri_tl(cpu_ca, t0, 32);
+            tcg_gen_xor_tl(cpu_ca, t0, t1);         /* bits changes w/ carry */
+            tcg_temp_free(t1);
+            tcg_gen_shri_tl(cpu_ca, cpu_ca, 32);    /* extract bit 32 */
+            tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
         } else if (add_ca) {
             TCGv zero, inv1 = tcg_temp_new();
             tcg_gen_not_tl(inv1, arg1);
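
The carry computation both hunks now share deserves a note: for s = x + y,
the value x ^ y is the sum with all carries suppressed, so s ^ (x ^ y) has a
1 in exactly the bit positions that received a carry-in, and bit 32 of that
mask is the carry out of the low 32-bit word. Meanwhile s itself still holds
the entire 64-bit result, which is the point of the fix. A minimal standalone
C sketch of the add path under that reading; the helper name is hypothetical
and merely mirrors the TCG ops the patch emits:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical mirror of the narrow-mode add path: keep the full
       64-bit sum and recover the carry into bit 32 with the xor trick. */
    static uint64_t add_narrow(uint64_t arg1, uint64_t arg2,
                               int add_ca, uint64_t ca_in, uint64_t *ca_out)
    {
        uint64_t t1 = arg1 ^ arg2;          /* add without carry */
        uint64_t t0 = arg1 + arg2;          /* the *entire* 64-bit addition */
        if (add_ca) {
            t0 += ca_in;
        }
        uint64_t diff = t0 ^ t1;            /* bits changed w/ carry */
        *ca_out = (diff >> 32) & 1;         /* extract bit 32 */
        return t0;
    }

    int main(void)
    {
        uint64_t ca;
        /* Low words 0xffffffff + 1 carry out of bit 31, and the result
           must still be the true 64-bit sum 0x0000000200000000. */
        uint64_t r = add_narrow(0x00000001ffffffffULL, 1, 0, 0, &ca);
        printf("sum=%016llx ca=%llu\n",
               (unsigned long long)r, (unsigned long long)ca);  /* ca=1 */
        return 0;
    }

The subf hunk records its carry the same way after forming ~arg1 + arg2
[+ ca]. In both cases the old ext32u-based sequence produced the right carry
but left the destination without the full 64-bit sum; doing the addition
full-width and isolating the carry with xor/shift/and repairs both at once.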