author     Richard Henderson <rth@twiddle.net>   2016-09-02 12:23:57 -0700
committer  Richard Henderson <rth@twiddle.net>   2016-10-26 08:29:01 -0700
commit     df79b996a7b21c6ea7847f7927a2e1a294b86c72 (patch)
tree       878779e7214c310b0b0929ca115e286a9f0ba12f /tcg
parent     7ebee43ee3e2fcd7b5063058b7ef74bc43216733 (diff)
tcg: Add CONFIG_ATOMIC64
Allow qemu to build on 32-bit hosts without 64-bit atomic ops.

Even if we only allow 32-bit hosts to multi-thread emulate 32-bit
guests, we still need some way to handle the 32-bit guest using a
64-bit atomic operation.  Do so by dropping back to single-step.

Reviewed-by: Emilio G. Cota <cota@braap.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>
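As a standalone illustration of the pattern this patch introduces (a minimal sketch, not QEMU code; the helper names and indices below are hypothetical stand-ins): WITH_ATOMIC64() compiles the 64-bit dispatch-table entries only when CONFIG_ATOMIC64 is defined, and the generator path for a 64-bit access otherwise emits a call to the exit-atomic helper, i.e. it drops back to single-step.

/*
 * Sketch of the CONFIG_ATOMIC64 gating used in tcg/tcg-op.c.
 * Hypothetical names; build with or without CONFIG_ATOMIC64 defined.
 */
#include <stdio.h>

#define CONFIG_ATOMIC64 1            /* comment out to exercise the fallback */

#ifdef CONFIG_ATOMIC64
# define WITH_ATOMIC64(X) X,
#else
# define WITH_ATOMIC64(X)
#endif

static void helper_cmpxchg_l(void)   { puts("inline 32-bit cmpxchg helper"); }
static void helper_cmpxchg_q(void)   { puts("inline 64-bit cmpxchg helper"); }
static void helper_exit_atomic(void) { puts("no 64-bit atomics: single-step"); }

enum { MO_32_IDX, MO_64_IDX, MO_IDX_MAX };

/* The 64-bit slot exists only when the host has 64-bit atomic ops. */
static void (* const table_cmpxchg_sketch[MO_IDX_MAX])(void) = {
    [MO_32_IDX] = helper_cmpxchg_l,
    WITH_ATOMIC64([MO_64_IDX] = helper_cmpxchg_q)
};

static void gen_cmpxchg_64(void)
{
#ifdef CONFIG_ATOMIC64
    table_cmpxchg_sketch[MO_64_IDX]();   /* host can do it atomically */
#else
    helper_exit_atomic();                /* drop back to single-step */
#endif
}

int main(void)
{
    gen_cmpxchg_64();
    return 0;
}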
Diffstat (limited to 'tcg')
-rw-r--r--  tcg/tcg-op.c       22
-rw-r--r--  tcg/tcg-runtime.h  46
-rw-r--r--  tcg/tcg.h          15
3 files changed, 70 insertions(+), 13 deletions(-)
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index 65e36637d0..cdd61d678b 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -2040,14 +2040,20 @@ typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32);
typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64);
#endif
+#ifdef CONFIG_ATOMIC64
+# define WITH_ATOMIC64(X) X,
+#else
+# define WITH_ATOMIC64(X)
+#endif
+
static void * const table_cmpxchg[16] = {
[MO_8] = gen_helper_atomic_cmpxchgb,
[MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le,
[MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be,
[MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le,
[MO_32 | MO_BE] = gen_helper_atomic_cmpxchgl_be,
- [MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le,
- [MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be,
+ WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le)
+ WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be)
};
void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
@@ -2117,6 +2123,7 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
}
tcg_temp_free_i64(t1);
} else if ((memop & MO_SIZE) == MO_64) {
+#ifdef CONFIG_ATOMIC64
gen_atomic_cx_i64 gen;
gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
@@ -2131,6 +2138,9 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
#else
gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv);
#endif
+#else
+ gen_helper_exit_atomic(tcg_ctx.tcg_env);
+#endif /* CONFIG_ATOMIC64 */
} else {
TCGv_i32 c32 = tcg_temp_new_i32();
TCGv_i32 n32 = tcg_temp_new_i32();
@@ -2218,6 +2228,7 @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
memop = tcg_canonicalize_memop(memop, 1, 0);
if ((memop & MO_SIZE) == MO_64) {
+#ifdef CONFIG_ATOMIC64
gen_atomic_op_i64 gen;
gen = table[memop & (MO_SIZE | MO_BSWAP)];
@@ -2232,6 +2243,9 @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
#else
gen(ret, tcg_ctx.tcg_env, addr, val);
#endif
+#else
+ gen_helper_exit_atomic(tcg_ctx.tcg_env);
+#endif /* CONFIG_ATOMIC64 */
} else {
TCGv_i32 v32 = tcg_temp_new_i32();
TCGv_i32 r32 = tcg_temp_new_i32();
@@ -2256,8 +2270,8 @@ static void * const table_##NAME[16] = { \
[MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be, \
[MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le, \
[MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be, \
- [MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le, \
- [MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be, \
+ WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le) \
+ WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be) \
}; \
void tcg_gen_atomic_##NAME##_i32 \
(TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, TCGMemOp memop) \
diff --git a/tcg/tcg-runtime.h b/tcg/tcg-runtime.h
index 22367aaf97..1deb86a099 100644
--- a/tcg/tcg-runtime.h
+++ b/tcg/tcg-runtime.h
@@ -15,23 +15,28 @@ DEF_HELPER_FLAGS_2(sar_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64)
DEF_HELPER_FLAGS_2(mulsh_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64)
DEF_HELPER_FLAGS_2(muluh_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env)
+
#ifdef CONFIG_SOFTMMU
DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG,
i32, env, tl, i32, i32, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG,
i32, env, tl, i32, i32, i32)
-DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG,
- i32, env, tl, i32, i32, i32)
-DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG,
- i64, env, tl, i64, i64, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgw_le, TCG_CALL_NO_WG,
i32, env, tl, i32, i32, i32)
+DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG,
+ i32, env, tl, i32, i32, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, TCG_CALL_NO_WG,
i32, env, tl, i32, i32, i32)
+#ifdef CONFIG_ATOMIC64
+DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG,
+ i64, env, tl, i64, i64, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG,
i64, env, tl, i64, i64, i32)
+#endif
+#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPERS(NAME) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \
TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
@@ -47,17 +52,33 @@ DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG,
TCG_CALL_NO_WG, i64, env, tl, i64, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_be), \
TCG_CALL_NO_WG, i64, env, tl, i64, i32)
+#else
+#define GEN_ATOMIC_HELPERS(NAME) \
+ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \
+ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
+ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \
+ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
+ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \
+ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
+ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \
+ TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
+ DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \
+ TCG_CALL_NO_WG, i32, env, tl, i32, i32)
+#endif /* CONFIG_ATOMIC64 */
#else
DEF_HELPER_FLAGS_4(atomic_cmpxchgb, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
DEF_HELPER_FLAGS_4(atomic_cmpxchgw_be, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
-DEF_HELPER_FLAGS_4(atomic_cmpxchgl_be, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
-DEF_HELPER_FLAGS_4(atomic_cmpxchgq_be, TCG_CALL_NO_WG, i64, env, tl, i64, i64)
DEF_HELPER_FLAGS_4(atomic_cmpxchgw_le, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
+DEF_HELPER_FLAGS_4(atomic_cmpxchgl_be, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
DEF_HELPER_FLAGS_4(atomic_cmpxchgl_le, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
+#ifdef CONFIG_ATOMIC64
+DEF_HELPER_FLAGS_4(atomic_cmpxchgq_be, TCG_CALL_NO_WG, i64, env, tl, i64, i64)
DEF_HELPER_FLAGS_4(atomic_cmpxchgq_le, TCG_CALL_NO_WG, i64, env, tl, i64, i64)
+#endif
+#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPERS(NAME) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), b), \
TCG_CALL_NO_WG, i32, env, tl, i32) \
@@ -73,6 +94,19 @@ DEF_HELPER_FLAGS_4(atomic_cmpxchgq_le, TCG_CALL_NO_WG, i64, env, tl, i64, i64)
TCG_CALL_NO_WG, i64, env, tl, i64) \
DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), q_be), \
TCG_CALL_NO_WG, i64, env, tl, i64)
+#else
+#define GEN_ATOMIC_HELPERS(NAME) \
+ DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), b), \
+ TCG_CALL_NO_WG, i32, env, tl, i32) \
+ DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_le), \
+ TCG_CALL_NO_WG, i32, env, tl, i32) \
+ DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_be), \
+ TCG_CALL_NO_WG, i32, env, tl, i32) \
+ DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_le), \
+ TCG_CALL_NO_WG, i32, env, tl, i32) \
+ DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_be), \
+ TCG_CALL_NO_WG, i32, env, tl, i32)
+#endif /* CONFIG_ATOMIC64 */
#endif /* CONFIG_SOFTMMU */
diff --git a/tcg/tcg.h b/tcg/tcg.h
index bc3ea7af19..b34b5fbc2f 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -1204,14 +1204,23 @@ TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu \
(CPUArchState *env, target_ulong addr, TYPE val, \
TCGMemOpIdx oi, uintptr_t retaddr);
+#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPER_ALL(NAME) \
- GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
- GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
- GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \
GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \
+ GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \
GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
+#else
+#define GEN_ATOMIC_HELPER_ALL(NAME) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
+#endif
GEN_ATOMIC_HELPER_ALL(fetch_add)
GEN_ATOMIC_HELPER_ALL(fetch_sub)