authorSergey Sorokin <afarallax@yandex.ru>2016-06-23 21:16:46 +0300
committerRichard Henderson <rth@twiddle.net>2016-07-05 20:50:13 -0700
commit1f00b27f17518a1bcb4cedca49eaec96a4d560bd (patch)
tree90abe3fc60c60ce9ff0aa5c46a87c5589ff55229 /tcg/tcg.h
parent59d7c14eeff8d2ad7f61aed86ce5a176113bc153 (diff)
tcg: Improve the alignment check infrastructure
Some architectures (e.g. ARMv8) need addresses to be aligned to a size larger than the size of the memory access itself. The existing low-cost alignment check in QEMU is sufficient for such checks, but we need a way to specify the alignment size.

Signed-off-by: Sergey Sorokin <afarallax@yandex.ru>
Message-Id: <1466705806-679898-1-git-send-email-afarallax@yandex.ru>
Signed-off-by: Richard Henderson <rth@twiddle.net>
[rth: Assert in tcg_canonicalize_memop. Leave get_alignment_bits available for, though unused by, user-mode. Retain logging difference based on ALIGNED_ONLY.]
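As an illustration (not part of the commit), a target front end could request an over-aligned access by combining an access size with one of the new MO_ALIGN_x flags introduced by this patch; the memop names come from the patch, while the variables below are placeholders:

    /* Hypothetical translator snippet: an 8-byte target-endian load whose
     * guest address must be 16-byte aligned, as ARMv8 requires for some
     * paired accesses.  'val', 'addr' and 'mem_idx' are placeholders for
     * whatever the translator has at hand. */
    tcg_gen_qemu_ld_i64(val, addr, mem_idx, MO_TEQ | MO_ALIGN_16);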
Diffstat (limited to 'tcg/tcg.h')
-rw-r--r-- tcg/tcg.h | 87
1 file changed, 74 insertions, 13 deletions
diff --git a/tcg/tcg.h b/tcg/tcg.h
index 66d7fc01c5..66ae0c76e6 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -191,6 +191,15 @@ typedef uint64_t tcg_insn_unit;
#endif
+#ifdef CONFIG_DEBUG_TCG
+# define tcg_debug_assert(X) do { assert(X); } while (0)
+#elif QEMU_GNUC_PREREQ(4, 5)
+# define tcg_debug_assert(X) \
+ do { if (!(X)) { __builtin_unreachable(); } } while (0)
+#else
+# define tcg_debug_assert(X) do { (void)(X); } while (0)
+#endif
+
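A quick usage sketch (illustrative, not part of the patch): with CONFIG_DEBUG_TCG the condition is checked at run time, while on GCC >= 4.5 a release build turns it into a pure optimizer hint via __builtin_unreachable(). The helper below is hypothetical:

    /* Hypothetical caller: the assert documents (and, in debug builds,
     * enforces) that the shift count stays within the operand width. */
    static inline int64_t sar64(int64_t value, unsigned shift)
    {
        tcg_debug_assert(shift < 64);
        return value >> shift;
    }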
typedef struct TCGRelocation {
struct TCGRelocation *next;
int type;
@@ -275,10 +284,26 @@ typedef enum TCGMemOp {
#endif
/* MO_UNALN accesses are never checked for alignment.
- MO_ALIGN accesses will result in a call to the CPU's
- do_unaligned_access hook if the guest address is not aligned.
- The default depends on whether the target CPU defines ALIGNED_ONLY. */
- MO_AMASK = 16,
+ * MO_ALIGN accesses will result in a call to the CPU's
+ * do_unaligned_access hook if the guest address is not aligned.
+ * The default depends on whether the target CPU defines ALIGNED_ONLY.
+ * Some architectures (e.g. ARMv8) need addresses to be aligned to a
+ * size larger than the size of the memory access itself.
+ * The existing low-cost alignment check in QEMU is sufficient for
+ * such checks, but the alignment size must be specifiable.
+ * MO_ALIGN requests natural alignment
+ * (i.e. the alignment size equals the access size).
+ * Note that the alignment size must be equal to or greater
+ * than the access size.
+ * There are three options:
+ * - alignment to the access size (MO_ALIGN);
+ * - alignment to a specified size that is equal to or greater than
+ * the access size (MO_ALIGN_x, where 'x' is a size in bytes);
+ * - unaligned access permitted (MO_UNALN).
+ */
+ MO_ASHIFT = 4,
+ MO_AMASK = 7 << MO_ASHIFT,
#ifdef ALIGNED_ONLY
MO_ALIGN = 0,
MO_UNALN = MO_AMASK,
@@ -286,6 +311,12 @@ typedef enum TCGMemOp {
MO_ALIGN = MO_AMASK,
MO_UNALN = 0,
#endif
+ MO_ALIGN_2 = 1 << MO_ASHIFT,
+ MO_ALIGN_4 = 2 << MO_ASHIFT,
+ MO_ALIGN_8 = 3 << MO_ASHIFT,
+ MO_ALIGN_16 = 4 << MO_ASHIFT,
+ MO_ALIGN_32 = 5 << MO_ASHIFT,
+ MO_ALIGN_64 = 6 << MO_ASHIFT,
/* Combinations of the above, for ease of use. */
MO_UB = MO_8,
@@ -317,6 +348,45 @@ typedef enum TCGMemOp {
MO_SSIZE = MO_SIZE | MO_SIGN,
} TCGMemOp;
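To make the new encoding concrete (illustrative sketch, not patch content): the alignment request occupies bits [6:4] of the TCGMemOp value, above MO_SIZE, MO_SIGN and MO_BSWAP, so it composes freely with the existing flags:

    /* MO_ALIGN_16 is 4 << MO_ASHIFT == 0x40; MO_TEQ only uses the size and
     * byte-swap bits, so the two combine without overlap. */
    TCGMemOp op = MO_TEQ | MO_ALIGN_16;             /* 8-byte access, 16-byte aligned */
    unsigned abits = (op & MO_AMASK) >> MO_ASHIFT;  /* == 4 */
    unsigned amask = (1u << abits) - 1;             /* == 0xf: address bits that must be clear */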
+/**
+ * get_alignment_bits
+ * @memop: TCGMemOp value
+ *
+ * Extract the alignment size from the memop.
+ *
+ * Returns: 0 for a byte access (which is always aligned);
+ * a positive value giving the number of alignment bits;
+ * a negative value if unaligned access is permitted
+ * and the access is wider than a byte.
+ */
+static inline int get_alignment_bits(TCGMemOp memop)
+{
+ int a = memop & MO_AMASK;
+ int s = memop & MO_SIZE;
+ int r;
+
+ if (a == MO_UNALN) {
+ /* Return a negative value if unaligned access is permitted,
+ * or zero for a byte access.
+ */
+ return -s;
+ } else if (a == MO_ALIGN) {
+ /* Natural alignment: return the number of access size bits. */
+ r = s;
+ } else {
+ /* A specific alignment size: it must be equal to or greater
+ * than the access size.
+ */
+ r = a >> MO_ASHIFT;
+ tcg_debug_assert(r >= s);
+ }
+#if defined(CONFIG_SOFTMMU)
+ /* The requested alignment cannot overlap the TLB flags. */
+ tcg_debug_assert((TLB_FLAGS_MASK & ((1 << r) - 1)) == 0);
+#endif
+ return r;
+}
+
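A few illustrative results (again, not part of the patch); since each memop below names its alignment flag explicitly, the values do not depend on ALIGNED_ONLY:

    /* Sketch of the three cases handled by get_alignment_bits(). */
    tcg_debug_assert(get_alignment_bits(MO_UB | MO_UNALN) == 0);     /* byte access: always aligned */
    tcg_debug_assert(get_alignment_bits(MO_TEUL | MO_UNALN) == -2);  /* unaligned 4-byte access */
    tcg_debug_assert(get_alignment_bits(MO_TEUL | MO_ALIGN) == 2);   /* natural 4-byte alignment */
    tcg_debug_assert(get_alignment_bits(MO_TEQ | MO_ALIGN_16) == 4); /* 8-byte access, 16-byte aligned */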
typedef tcg_target_ulong TCGArg;
/* Define a type and accessor macros for variables. Using pointer types
@@ -790,15 +860,6 @@ do {\
abort();\
} while (0)
-#ifdef CONFIG_DEBUG_TCG
-# define tcg_debug_assert(X) do { assert(X); } while (0)
-#elif QEMU_GNUC_PREREQ(4, 5)
-# define tcg_debug_assert(X) \
- do { if (!(X)) { __builtin_unreachable(); } } while (0)
-#else
-# define tcg_debug_assert(X) do { (void)(X); } while (0)
-#endif
-
void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs);
#if UINTPTR_MAX == UINT32_MAX