author     Aurelien Jarno <aurelien@aurel32.net>        2015-07-10 18:03:31 +0200
committer  Richard Henderson <rth@twiddle.net>          2015-08-24 11:10:08 -0700
commit     29f3ff8d6cbc28f79933aeaa25805408d0984a8f (patch)
tree       70c3174b701cace8d606a50d69c102bb44e0541a /tcg
parent     a30878e708c2149ce07d709a8b62edd944628449 (diff)
tcg/optimize: fix constant signedness
By convention, on a 64-bit host TCG internally stores 32-bit constants
as sign-extended. This is not the case in the optimizer when a 32-bit
constant is folded.
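
A minimal standalone sketch of the convention (plain C, not QEMU code; the variable names are only illustrative): masking a folded 32-bit result zero-extends it, while casting through int32_t keeps it sign-extended in the 64-bit slot.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    /* A folded 32-bit result, e.g. the constant -1. */
    uint64_t folded = 0xffffffffu;

    uint64_t zero_extended = folded & 0xffffffffu;      /* old optimizer behaviour */
    uint64_t sign_extended = (uint64_t)(int32_t)folded; /* TCG convention */

    printf("zero-extended: 0x%016" PRIx64 "\n", zero_extended); /* 0x00000000ffffffff */
    printf("sign-extended: 0x%016" PRIx64 "\n", sign_extended); /* 0xffffffffffffffff */
    return 0;
}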
This doesn't seem to have consequences beyond suboptimal code generation.
For instance, the x86 backend assumes sign-extended constants, and in some
rare cases uses a 32-bit unsigned immediate 0xffffffff instead of an 8-bit
signed immediate 0xff for the constant -1. The listings below are from a
ppc guest:
before
------
---- 0x9f29cc
movi_i32 tmp1,$0xffffffff
movi_i32 tmp2,$0x0
add2_i32 tmp0,CA,CA,tmp2,r6,tmp2
add2_i32 tmp0,CA,tmp0,CA,tmp1,tmp2
mov_i32 r10,tmp0
0x7fd8c7dfe90c: xor %ebp,%ebp
0x7fd8c7dfe90e: mov %ebp,%r11d
0x7fd8c7dfe911: mov 0x18(%r14),%r9d
0x7fd8c7dfe915: add %r9d,%r10d
0x7fd8c7dfe918: adc %ebp,%r11d
0x7fd8c7dfe91b: add $0xffffffff,%r10d
0x7fd8c7dfe922: adc %ebp,%r11d
0x7fd8c7dfe925: mov %r11d,0x134(%r14)
0x7fd8c7dfe92c: mov %r10d,0x28(%r14)
after
-----
---- 0x9f29cc
movi_i32 tmp1,$0xffffffffffffffff
movi_i32 tmp2,$0x0
add2_i32 tmp0,CA,CA,tmp2,r6,tmp2
add2_i32 tmp0,CA,tmp0,CA,tmp1,tmp2
mov_i32 r10,tmp0
0x7f37010d490c: xor %ebp,%ebp
0x7f37010d490e: mov %ebp,%r11d
0x7f37010d4911: mov 0x18(%r14),%r9d
0x7f37010d4915: add %r9d,%r10d
0x7f37010d4918: adc %ebp,%r11d
0x7f37010d491b: add $0xffffffffffffffff,%r10d
0x7f37010d491f: adc %ebp,%r11d
0x7f37010d4922: mov %r11d,0x134(%r14)
0x7f37010d4929: mov %r10d,0x28(%r14)
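
The shorter encoding in the "after" listing comes from the usual x86 immediate-range
check: -1 fits in a sign-extended 8-bit immediate only when the constant is stored
sign-extended. A simplified sketch of that check (the helper name fits_in_imm8 is
illustrative, not the actual tcg/i386 backend code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* The short "add r/m32, imm8" form is usable only when the constant
 * fits in a sign-extended 8-bit immediate. */
static bool fits_in_imm8(int64_t v)
{
    return v == (int8_t)v;
}

int main(void)
{
    int64_t sign_extended = (int32_t)0xffffffffu;  /* -1, after the fix */
    int64_t zero_extended = 0xffffffffu;           /* before the fix    */

    printf("sign-extended fits imm8: %d\n", fits_in_imm8(sign_extended)); /* 1 */
    printf("zero-extended fits imm8: %d\n", fits_in_imm8(zero_extended)); /* 0 */
    return 0;
}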
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
Message-Id: <1436544211-2769-2-git-send-email-aurelien@aurel32.net>
Signed-off-by: Richard Henderson <rth@twiddle.net>
Diffstat (limited to 'tcg')
-rw-r--r--  tcg/optimize.c | 10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 18283cfd7b..cd0e793a9d 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -395,7 +395,7 @@ static TCGArg do_constant_folding(TCGOpcode op, TCGArg x, TCGArg y)
 {
     TCGArg res = do_constant_folding_2(op, x, y);
     if (op_bits(op) == 32) {
-        res &= 0xffffffff;
+        res = (int32_t)res;
     }
     return res;
 }
@@ -1128,8 +1128,8 @@ void tcg_optimize(TCGContext *s)
 
                 rl = args[0];
                 rh = args[1];
-                tcg_opt_gen_movi(s, op, args, rl, (uint32_t)a);
-                tcg_opt_gen_movi(s, op2, args2, rh, (uint32_t)(a >> 32));
+                tcg_opt_gen_movi(s, op, args, rl, (int32_t)a);
+                tcg_opt_gen_movi(s, op2, args2, rh, (int32_t)(a >> 32));
 
                 /* We've done all we need to do with the movi. Skip it. */
                 oi_next = op2->next;
@@ -1149,8 +1149,8 @@ void tcg_optimize(TCGContext *s)
 
                 rl = args[0];
                 rh = args[1];
-                tcg_opt_gen_movi(s, op, args, rl, (uint32_t)r);
-                tcg_opt_gen_movi(s, op2, args2, rh, (uint32_t)(r >> 32));
+                tcg_opt_gen_movi(s, op, args, rl, (int32_t)r);
+                tcg_opt_gen_movi(s, op2, args2, rh, (int32_t)(r >> 32));
 
                 /* We've done all we need to do with the movi. Skip it. */
                 oi_next = op2->next;
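
For reference, a self-contained sketch of the value split done in the patched
add2/sub2 and mul2 folding paths (plain C; the helper name split_i64 is an
assumption standing in for the pair of tcg_opt_gen_movi calls):

#include <inttypes.h>
#include <stdio.h>

/* Split a 64-bit folded result into two 32-bit halves, each kept
 * sign-extended in its 64-bit slot, mirroring the (int32_t) casts
 * used for the movi arguments in the patch. */
static void split_i64(uint64_t a, uint64_t *lo, uint64_t *hi)
{
    *lo = (uint64_t)(int32_t)a;
    *hi = (uint64_t)(int32_t)(a >> 32);
}

int main(void)
{
    uint64_t lo, hi;

    /* 64-bit result whose low half is the 32-bit constant -1. */
    split_i64(0x00000000ffffffffull, &lo, &hi);
    printf("lo=0x%016" PRIx64 " hi=0x%016" PRIx64 "\n", lo, hi);
    /* lo=0xffffffffffffffff hi=0x0000000000000000 */
    return 0;
}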