author     Pranith Kumar <bobby.prani@gmail.com>       2016-08-23 09:48:25 -0400
committer  Richard Henderson <rth@twiddle.net>         2016-09-16 08:12:12 -0700
commit     34f939218ce78163171addd63750e1e0300376ab (patch)
tree       b753d7c12df7c6c955bc93e3947c97cd7428125b /tcg
parent     cc19e497a047193db5083425957d7292c8dd3226 (diff)
tcg: Optimize fence instructions

This commit optimizes fence instructions.  Two optimizations are
currently implemented: (1) elimination of unnecessary duplicate fence
instructions, and (2) merging of weaker fences into a single stronger
fence.

[rth: Merge tcg_optimize_mb back into tcg_optimize, so that we only
loop over the opcode stream once.  Merge "unrelated" weaker barriers
into one stronger barrier.]

Signed-off-by: Pranith Kumar <bobby.prani@gmail.com>
Message-Id: <20160823134825.32578-1-bobby.prani@gmail.com>
Signed-off-by: Richard Henderson <rth@twiddle.net>
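
The merge itself is just a bitwise OR of the fence's flag word, so the
surviving barrier is at least as strong as both originals.  Below is a
minimal standalone sketch of that rule; it is not QEMU code, and the flag
values are assumed to mirror the TCG_MO_*/TCG_BAR_* encoding in tcg/tcg.h
at the time of this commit.

    /* Illustrative only: local stand-ins for TCG's barrier flag bits. */
    #include <stdint.h>
    #include <stdio.h>

    enum {
        MO_LD_LD = 0x01, MO_ST_LD = 0x02, MO_LD_ST = 0x04, MO_ST_ST = 0x08,
        BAR_LDAQ = 0x10, BAR_STRL = 0x20, BAR_SC   = 0x30,
    };

    /* Mirrors "prev_mb_args[0] |= args[0]" followed by removal of the
       second mb op. */
    static uint32_t merge_fences(uint32_t prev, uint32_t next)
    {
        return prev | next;
    }

    int main(void)
    {
        uint32_t ldaq = BAR_LDAQ | MO_LD_LD | MO_LD_ST;   /* acquire-style */
        uint32_t strl = BAR_STRL | MO_LD_ST | MO_ST_ST;   /* release-style */

        /* Two adjacent weaker barriers collapse into one sequentially
           consistent barrier (0x3d here). */
        printf("merged fence flags: 0x%02x\n", merge_fences(ldaq, strl));
        return 0;
    }
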
Diffstat (limited to 'tcg')
-rw-r--r--   tcg/optimize.c | 39
 1 file changed, 39 insertions(+), 0 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index cffe89b525..9998ac7413 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -542,6 +542,7 @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
 void tcg_optimize(TCGContext *s)
 {
     int oi, oi_next, nb_temps, nb_globals;
+    TCGArg *prev_mb_args = NULL;
 
     /* Array VALS has an element for each temp.
        If this temp holds a constant then its value is kept in VALS' element.
@@ -1295,5 +1296,43 @@ void tcg_optimize(TCGContext *s)
             }
             break;
         }
+
+        /* Eliminate duplicate and redundant fence instructions.  */
+        if (prev_mb_args) {
+            switch (opc) {
+            case INDEX_op_mb:
+                /* Merge two barriers of the same type into one,
+                 * or a weaker barrier into a stronger one,
+                 * or two weaker barriers into a stronger one.
+                 *   mb X; mb Y => mb X|Y
+                 *   mb; strl => mb; st
+                 *   ldaq; mb => ld; mb
+                 *   ldaq; strl => ld; mb; st
+                 * Other combinations are also merged into a strong
+                 * barrier.  This is stricter than specified but for
+                 * the purposes of TCG is better than not optimizing.
+                 */
+                prev_mb_args[0] |= args[0];
+                tcg_op_remove(s, op);
+                break;
+
+            default:
+                /* Opcodes that end the block stop the optimization. */
+                if ((def->flags & TCG_OPF_BB_END) == 0) {
+                    break;
+                }
+                /* fallthru */
+            case INDEX_op_qemu_ld_i32:
+            case INDEX_op_qemu_ld_i64:
+            case INDEX_op_qemu_st_i32:
+            case INDEX_op_qemu_st_i64:
+            case INDEX_op_call:
+                /* Opcodes that touch guest memory stop the optimization. */
+                prev_mb_args = NULL;
+                break;
+            }
+        } else if (opc == INDEX_op_mb) {
+            prev_mb_args = args;
+        }
     }
 }
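
For readers outside the TCG code base, the hunk above amounts to a
single-pass scan: remember the most recent mb op, OR any later mb into it,
and forget it as soon as an opcode that touches guest memory or ends the
basic block is reached.  The standalone sketch below restates that scan
with invented names (ToyOp, OP_MB, OP_QEMU_LD, ...); it is an illustration
under those assumptions, not QEMU's implementation.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    enum ToyOpcode { OP_MB, OP_QEMU_LD, OP_QEMU_ST, OP_CALL, OP_BB_END, OP_ADD };

    struct ToyOp {
        enum ToyOpcode opc;
        uint32_t arg;     /* barrier flags when opc == OP_MB */
        int dead;         /* stands in for tcg_op_remove() */
    };

    void optimize_fences(struct ToyOp *ops, size_t n)
    {
        struct ToyOp *prev_mb = NULL;

        for (size_t i = 0; i < n; i++) {
            struct ToyOp *op = &ops[i];

            if (prev_mb) {
                switch (op->opc) {
                case OP_MB:
                    /* Fold this fence into the earlier one and drop it. */
                    prev_mb->arg |= op->arg;
                    op->dead = 1;
                    break;
                case OP_QEMU_LD:
                case OP_QEMU_ST:
                case OP_CALL:
                case OP_BB_END:
                    /* Guest memory accesses and block ends stop tracking. */
                    prev_mb = NULL;
                    break;
                default:
                    /* Other opcodes leave the tracked fence alone. */
                    break;
                }
            } else if (op->opc == OP_MB) {
                prev_mb = op;
            }
        }
    }

    int main(void)
    {
        /* mb; mb; add; mb; ld; mb -- the first three fences merge into the
           first op, the last one survives because the load resets tracking. */
        struct ToyOp ops[] = {
            { OP_MB, 0x10, 0 }, { OP_MB, 0x20, 0 }, { OP_ADD, 0, 0 },
            { OP_MB, 0x0f, 0 }, { OP_QEMU_LD, 0, 0 }, { OP_MB, 0x01, 0 },
        };

        optimize_fences(ops, sizeof(ops) / sizeof(ops[0]));
        for (size_t i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
            printf("op %zu: %s (flags 0x%02x)\n", i,
                   ops[i].dead ? "removed" : "kept", ops[i].arg);
        }
        return 0;
    }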