author     Jin Kyu Song <jin.kyu.song@intel.com>          2013-08-15 19:01:25 -0700
committer  Cyrill Gorcunov <gorcunov@gmail.com>           2013-08-16 09:06:15 +0400
commit     cc1dc9de53137e864bde06573556723149239f29 (patch)
tree       ba84247ccd27964e56e29384b05a4ab1da169d9c
parent     72018a2b4326d5a647b8879ba8124300b68ca212 (diff)
AVX-512: Add EVEX encoding and new instructions
EVEX encoding support includes 32 vector registers (XMM/YMM/ZMM), opmask, broadcasting, embedded rounding mode, suppress-all-exceptions, and compressed displacement.

Signed-off-by: Jin Kyu Song <jin.kyu.song@intel.com>
Signed-off-by: Cyrill Gorcunov <gorcunov@gmail.com>
-rw-r--r--   assemble.c   326
-rw-r--r--   disasm.c       6
-rw-r--r--   insns.dat    448
-rw-r--r--   insns.h       10
-rwxr-xr-x   insns.pl     140
-rw-r--r--   nasm.h        54
-rw-r--r--   opflags.h      6
-rw-r--r--   parser.c       6
-rw-r--r--   regs.dat       4
9 files changed, 925 insertions, 75 deletions
diff --git a/assemble.c b/assemble.c
index b119f86..6054d4a 100644
--- a/assemble.c
+++ b/assemble.c
@@ -1,6 +1,6 @@
/* ----------------------------------------------------------------------- *
*
- * Copyright 1996-2012 The NASM Authors - All Rights Reserved
+ * Copyright 1996-2013 The NASM Authors - All Rights Reserved
* See the file AUTHORS included with the NASM distribution for
* the specific copyright holders.
*
@@ -67,6 +67,35 @@
* an arbitrary value in bits 3..0 (assembled as zero.)
* \2ab - a ModRM, calculated on EA in operand a, with the spare
* field equal to digit b.
+ *
+ * \240..\243 - this instruction uses EVEX rather than REX or VEX/XOP, with the
+ * V field taken from operand 0..3.
+ * \250 - this instruction uses EVEX rather than REX or VEX/XOP, with the
+ * V field set to 1111b.
+ * EVEX prefixes are followed by the sequence:
+ * \cm\wlp\tup where cm is:
+ * cc 000 0mm
+ * c = 2 for EVEX and mm is the legacy escape (0f, 0f38, 0f3a)
+ * and wlp is:
+ * 00 wwl lpp
+ * [l0] ll = 0 (.128, .lz)
+ * [l1] ll = 1 (.256)
+ * [l2] ll = 2 (.512)
+ * [lig] ll = 3 for EVEX.L'L don't care (always assembled as 0)
+ *
+ * [w0] ww = 0 for W = 0
+ * [w1] ww = 1 for W = 1
+ * [wig] ww = 2 for W don't care (always assembled as 0)
+ * [ww] ww = 3 for W used as REX.W
+ *
+ * [p0] pp = 0 for no prefix
+ * [66] pp = 1 for legacy prefix 66
+ * [f3] pp = 2
+ * [f2] pp = 3
+ *
+ * tup is tuple type for Disp8*N from %tuple_codes in insns.pl
+ * (compressed displacement encoding)
+ *
* \254..\257 - a signed 32-bit operand to be extended to 64 bits.
* \260..\263 - this instruction uses VEX/XOP rather than REX, with the
* V field taken from operand 0..3.
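
A minimal stand-alone sketch of how the \wlp payload byte described above is laid out (00 wwl lpp); the helper macros and the sample value are illustrative only, but the masks mirror the vex_wlp accesses (& 0x10, & 0x0c, & 3) used later in this patch:

    #include <stdio.h>
    #include <stdint.h>

    /* wlp byte layout: 00 ww ll pp (bits 7..0) */
    #define WLP_PP(wlp)  ((wlp) & 0x03)         /* 0 = none, 1 = 66, 2 = f3, 3 = f2  */
    #define WLP_LL(wlp)  (((wlp) >> 2) & 0x03)  /* 0 = 128, 1 = 256, 2 = 512, 3 = LIG */
    #define WLP_WW(wlp)  (((wlp) >> 4) & 0x03)  /* 0 = W0, 1 = W1, 2 = WIG, 3 = REX.W */

    int main(void)
    {
        uint8_t wlp = 0x19;   /* hypothetical value: W1, 512-bit, 66 prefix */
        printf("ww=%u ll=%u pp=%u\n", WLP_WW(wlp), WLP_LL(wlp), WLP_PP(wlp));
        return 0;             /* prints ww=1 ll=2 pp=1 */
    }
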
@@ -76,9 +105,9 @@
* VEX/XOP prefixes are followed by the sequence:
* \tmm\wlp where mm is the M field; and wlp is:
* 00 wwl lpp
- * [l0] ll = 0 for L = 0 (.128, .lz)
- * [l1] ll = 1 for L = 1 (.256)
- * [lig] ll = 2 for L don't care (always assembled as 0)
+ * [l0] ll = 0 for L = 0 (.128, .lz)
+ * [l1] ll = 1 for L = 1 (.256)
+ * [lig] ll = 2 for L don't care (always assembled as 0)
*
* [w0] ww = 0 for W = 0
* [w1] ww = 1 for W = 1
@@ -136,6 +165,7 @@
* used for conditional jump over longer jump
* \374 - this instruction takes an XMM VSIB memory EA
* \375 - this instruction takes an YMM VSIB memory EA
+ * \376 - this instruction takes a ZMM VSIB memory EA
*/
#include "compiler.h"
@@ -174,6 +204,7 @@ typedef struct {
int bytes; /* # of bytes of offset needed */
int size; /* lazy - this is sib+bytes+1 */
uint8_t modrm, sib, rex, rip; /* the bytes themselves */
+ int8_t disp8; /* compressed displacement for EVEX */
} ea;
#define GEN_SIB(scale, index, base) \
@@ -200,9 +231,10 @@ static opflags_t regflag(const operand *);
static int32_t regval(const operand *);
static int rexflags(int, opflags_t, int);
static int op_rexflags(const operand *, int);
+static int op_evexflags(const operand *, int, uint8_t);
static void add_asp(insn *, int);
-static enum ea_type process_ea(operand *, ea *, int, int, int, opflags_t);
+static enum ea_type process_ea(operand *, ea *, int, int, opflags_t, insn *);
static int has_prefix(insn * ins, enum prefix_pos pos, int prefix)
{
@@ -820,6 +852,7 @@ static int64_t calcsize(int32_t segment, int64_t offset, int bits,
ins->rex = 0; /* Ensure REX is reset */
eat = EA_SCALAR; /* Expect a scalar EA */
+ memset(ins->evex_p, 0, 3); /* Ensure EVEX is reset */
if (ins->prefixes[PPS_OSIZE] == P_O64)
ins->rex |= REX_W;
@@ -910,6 +943,23 @@ static int64_t calcsize(int32_t segment, int64_t offset, int bits,
length++;
break;
+ case4(0240):
+ ins->rex |= REX_EV;
+ ins->vexreg = regval(opx);
+ ins->evex_p[2] |= op_evexflags(opx, EVEX_P2VP, 2); /* High-16 NDS */
+ ins->vex_cm = *codes++;
+ ins->vex_wlp = *codes++;
+ ins->evex_tuple = (*codes++ - 0300);
+ break;
+
+ case 0250:
+ ins->rex |= REX_EV;
+ ins->vexreg = 0;
+ ins->vex_cm = *codes++;
+ ins->vex_wlp = *codes++;
+ ins->evex_tuple = (*codes++ - 0300);
+ break;
+
case4(0254):
length += 4;
break;
@@ -1076,6 +1126,10 @@ static int64_t calcsize(int32_t segment, int64_t offset, int bits,
eat = EA_YMMVSIB;
break;
+ case 0376:
+ eat = EA_ZMMVSIB;
+ break;
+
case4(0100):
case4(0110):
case4(0120):
@@ -1093,6 +1147,7 @@ static int64_t calcsize(int32_t segment, int64_t offset, int bits,
int rfield;
opflags_t rflags;
struct operand *opy = &ins->oprs[op2];
+ struct operand *oplast;
ea_data.rex = 0; /* Ensure ea.REX is initially 0 */
@@ -1100,12 +1155,30 @@ static int64_t calcsize(int32_t segment, int64_t offset, int bits,
/* pick rfield from operand b (opx) */
rflags = regflag(opx);
rfield = nasm_regvals[opx->basereg];
+ /* find the last SIMD operand, where the ER decorator resides */
+ oplast = &ins->oprs[op1 > op2 ? op1 : op2];
} else {
rflags = 0;
rfield = c & 7;
+ oplast = opy;
}
- if (process_ea(opy, &ea_data, bits,ins->addr_size,
- rfield, rflags) != eat) {
+
+ if (oplast->decoflags & ER) {
+ /* set EVEX.RC (rounding control) and b */
+ ins->evex_p[2] |= (((ins->evex_rm - BRC_RN) << 5) & EVEX_P2LL) |
+ EVEX_P2B;
+ } else {
+ /* set EVEX.L'L (vector length) */
+ ins->evex_p[2] |= ((ins->vex_wlp << (5 - 2)) & EVEX_P2LL);
+ if ((oplast->decoflags & SAE) ||
+ (opy->decoflags & BRDCAST_MASK)) {
+ /* set EVEX.b */
+ ins->evex_p[2] |= EVEX_P2B;
+ }
+ }
+
+ if (process_ea(opy, &ea_data, bits,
+ rfield, rflags, ins) != eat) {
errfunc(ERR_NONFATAL, "invalid effective address");
return -1;
} else {
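
In the block above, an {er} decorator makes EVEX.L'L carry the rounding control and sets EVEX.b; otherwise L'L is the vector length taken from the wlp byte. A stand-alone sketch, assuming BRC_RN..BRC_RZ are consecutive values for the {rn,rd,ru,rz}-sae modes (as the evex_rm - BRC_RN arithmetic implies) and the architectural EVEX byte-3 bit positions for the nasm.h constants:

    #include <stdio.h>
    #include <stdint.h>

    #define EVEX_P2LL  0x60   /* EVEX.L'L (reused as RC when b is set) */
    #define EVEX_P2B   0x10   /* EVEX.b: broadcast / rounding / SAE    */

    enum brc { BRC_RN, BRC_RD, BRC_RU, BRC_RZ };   /* assumed ordering */

    static uint8_t evex_p2_len_or_rc(int has_er, enum brc rm, uint8_t vex_wlp)
    {
        if (has_er)   /* rounding override: RC goes into L'L, b is set */
            return (uint8_t)((((rm - BRC_RN) << 5) & EVEX_P2LL) | EVEX_P2B);
        /* no override: ll bits of wlp (bits 3:2) become L'L (bits 6:5) */
        return (uint8_t)((vex_wlp << (5 - 2)) & EVEX_P2LL);
    }

    int main(void)
    {
        printf("{rz-sae}: 0x%02x\n", evex_p2_len_or_rc(1, BRC_RZ, 0));     /* 0x70 */
        printf(".512:     0x%02x\n", evex_p2_len_or_rc(0, BRC_RN, 0x19));  /* 0x40 */
        return 0;
    }
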
@@ -1132,11 +1205,11 @@ static int64_t calcsize(int32_t segment, int64_t offset, int bits,
ins->rex &= ~REX_P; /* Don't force REX prefix due to high reg */
}
- if (ins->rex & REX_V) {
+ if (ins->rex & (REX_V | REX_EV)) {
int bad32 = REX_R|REX_W|REX_X|REX_B;
if (ins->rex & REX_H) {
- errfunc(ERR_NONFATAL, "cannot use high register in vex instruction");
+ errfunc(ERR_NONFATAL, "cannot use high register in AVX instruction");
return -1;
}
switch (ins->vex_wlp & 060) {
@@ -1157,7 +1230,9 @@ static int64_t calcsize(int32_t segment, int64_t offset, int bits,
errfunc(ERR_NONFATAL, "invalid operands in non-64-bit mode");
return -1;
}
- if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B)))
+ if (ins->rex & REX_EV)
+ length += 4;
+ else if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B)))
length += 3;
else
length += 2;
@@ -1194,7 +1269,7 @@ static int64_t calcsize(int32_t segment, int64_t offset, int bits,
static inline unsigned int emit_rex(insn *ins, int32_t segment, int64_t offset, int bits)
{
if (bits == 64) {
- if ((ins->rex & REX_REAL) && !(ins->rex & REX_V)) {
+ if ((ins->rex & REX_REAL) && !(ins->rex & (REX_V | REX_EV))) {
ins->rex = (ins->rex & REX_REAL) | REX_P;
out(offset, segment, &ins->rex, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
ins->rex = 0;
@@ -1431,6 +1506,25 @@ static void gencode(int32_t segment, int64_t offset, int bits,
offset += 4;
break;
+ case4(0240):
+ case 0250:
+ codes += 3;
+ ins->evex_p[2] |= op_evexflags(&ins->oprs[0],
+ EVEX_P2Z | EVEX_P2AAA, 2);
+ ins->evex_p[2] ^= EVEX_P2VP; /* 1's complement */
+ bytes[0] = 0x62;
+ /* EVEX.X can be set by either REX or EVEX for different reasons */
+ bytes[1] = (~(((ins->rex & 7) << 5) |
+ (ins->evex_p[0] & (EVEX_P0X | EVEX_P0RP))) & 0xf0) |
+ (ins->vex_cm & 3);
+ bytes[2] = ((ins->rex & REX_W) << (7 - 3)) |
+ ((~ins->vexreg & 15) << 3) |
+ (1 << 2) | (ins->vex_wlp & 3);
+ bytes[3] = ins->evex_p[2];
+ out(offset, segment, &bytes, OUT_RAWDATA, 4, NO_SEG, NO_SEG);
+ offset += 4;
+ break;
+
case4(0260):
case 0270:
codes += 2;
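
For reference, the four prefix bytes emitted by the EVEX case above follow the architectural layout: 62h, then P0 = R X B R' 0 0 m m (stored inverted), P1 = W v v v v 1 p p (vvvv inverted), P2 = z L'L b V' a a a. A self-contained sketch with illustrative parameter names; the sample values should reproduce the prefix of the 512-bit 66 0F W1 map with vvvv selecting register 2, i.e. the VADDPD zmm,zmm,zmm form added to insns.dat below:

    #include <stdio.h>
    #include <stdint.h>

    /* Compose the 4-byte EVEX prefix from already-encoded fields.
     * rxb/rp are "extension bit needed" flags and get inverted here,
     * mirroring the ~(...) & 0xf0 in the case 0240/0250 code above. */
    static void evex_prefix(uint8_t out[4], unsigned rxb, unsigned rp,
                            unsigned mm, unsigned w, unsigned vvvv,
                            unsigned pp, uint8_t p2)
    {
        out[0] = 0x62;                                                        /* escape byte    */
        out[1] = (uint8_t)((~((rxb << 5) | (rp << 4)) & 0xf0) | (mm & 3));    /* P0             */
        out[2] = (uint8_t)(((w & 1) << 7) | ((~vvvv & 15) << 3) |
                           (1 << 2) | (pp & 3));                              /* P1             */
        out[3] = p2;                                                          /* z L'L b V' aaa */
    }

    int main(void)
    {
        uint8_t p[4];
        /* no extension bits, map 0f (mm=1), W1, vvvv=2, prefix 66 (pp=1), P2 for .512 */
        evex_prefix(p, 0, 0, 1, 1, 2, 1, 0x48);
        printf("%02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);   /* 62 f1 ed 48 */
        return 0;
    }
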
@@ -1631,6 +1725,10 @@ static void gencode(int32_t segment, int64_t offset, int bits,
eat = EA_YMMVSIB;
break;
+ case 0376:
+ eat = EA_ZMMVSIB;
+ break;
+
case4(0100):
case4(0110):
case4(0120):
@@ -1661,8 +1759,8 @@ static void gencode(int32_t segment, int64_t offset, int bits,
rfield = c & 7;
}
- if (process_ea(opy, &ea_data, bits, ins->addr_size,
- rfield, rflags) != eat)
+ if (process_ea(opy, &ea_data, bits,
+ rfield, rflags, ins) != eat)
errfunc(ERR_NONFATAL, "invalid effective address");
p = bytes;
@@ -1687,7 +1785,8 @@ static void gencode(int32_t segment, int64_t offset, int bits,
case 2:
case 4:
case 8:
- data = opy->offset;
+ /* use compressed displacement, if available */
+ data = ea_data.disp8 ? ea_data.disp8 : opy->offset;
s += ea_data.bytes;
if (ea_data.rip) {
if (opy->segment == segment) {
@@ -1702,9 +1801,9 @@ static void gencode(int32_t segment, int64_t offset, int bits,
insn_end - offset, opy->segment, opy->wrt);
}
} else {
- if (overflow_general(opy->offset, ins->addr_size >> 3) ||
- signed_bits(opy->offset, ins->addr_size) !=
- signed_bits(opy->offset, ea_data.bytes * 8))
+ if (overflow_general(data, ins->addr_size >> 3) ||
+ signed_bits(data, ins->addr_size) !=
+ signed_bits(data, ea_data.bytes * 8))
warn_overflow(ERR_PASS2, ea_data.bytes);
out(offset, segment, &data, OUT_ADDRESS,
@@ -1774,6 +1873,40 @@ static int rexflags(int val, opflags_t flags, int mask)
return rex & mask;
}
+static int evexflags(int val, decoflags_t deco,
+ int mask, uint8_t byte)
+{
+ int evex = 0;
+
+ switch(byte) {
+ case 0:
+ if (val >= 16)
+ evex |= (EVEX_P0RP | EVEX_P0X);
+ break;
+ case 2:
+ if (val >= 16)
+ evex |= EVEX_P2VP;
+ if (deco & Z)
+ evex |= EVEX_P2Z;
+ if (deco & OPMASK_MASK)
+ evex |= deco & EVEX_P2AAA;
+ break;
+ }
+ return evex & mask;
+}
+
+static int op_evexflags(const operand * o, int mask, uint8_t byte)
+{
+ int val;
+
+ if (!is_register(o->basereg))
+ errfunc(ERR_PANIC, "invalid operand passed to op_evexflags()");
+
+ val = nasm_regvals[o->basereg];
+
+ return evexflags(val, o->decoflags, mask, byte);
+}
+
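
A small worked illustration of what the two helpers above contribute for a masked, zeroed high register operand (something like zmm20{k5}{z}); the decorator encoding is illustrative, but the output assumes the architectural EVEX bit positions (P0: X = bit 6, R' = bit 4; P2: z = bit 7, V' = bit 3, aaa = bits 2:0) that the nasm.h constants are expected to follow:

    #include <stdio.h>

    #define EVEX_P0X   0x40
    #define EVEX_P0RP  0x10
    #define EVEX_P2Z   0x80
    #define EVEX_P2VP  0x08
    #define EVEX_P2AAA 0x07

    int main(void)
    {
        int regval = 20;                 /* zmm20: registers 16..31 need the extra bits */
        unsigned deco_z = 1, opmask = 5; /* {z}{k5}, illustrative encoding              */

        int p0 = (regval >= 16) ? (EVEX_P0X | EVEX_P0RP) : 0;
        int p2 = ((regval >= 16) ? EVEX_P2VP : 0) |
                 (deco_z ? EVEX_P2Z : 0) |
                 (opmask & EVEX_P2AAA);

        printf("P0 bits 0x%02x, P2 bits 0x%02x\n", p0, p2);  /* 0x50, 0x8d */
        return 0;
    }
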
static enum match_result find_match(const struct itemplate **tempp,
insn *instruction,
int32_t segment, int64_t offset, int bits)
@@ -1908,6 +2041,9 @@ static enum match_result matches(const struct itemplate *itemp,
asize = BITS256;
break;
case IF_SZ:
+ asize = BITS512;
+ break;
+ case IF_SIZE:
switch (bits) {
case 16:
asize = BITS16;
@@ -1961,10 +2097,12 @@ static enum match_result matches(const struct itemplate *itemp,
*/
for (i = 0; i < itemp->operands; i++) {
opflags_t type = instruction->oprs[i].type;
+ decoflags_t deco = instruction->oprs[i].decoflags;
if (!(type & SIZE_MASK))
type |= size[i];
- if (itemp->opd[i] & ~type & ~SIZE_MASK) {
+ if ((itemp->opd[i] & ~type & ~SIZE_MASK) ||
+ (itemp->deco[i] & deco) != deco) {
return MERR_INVALOP;
} else if ((itemp->opd[i] & SIZE_MASK) &&
(itemp->opd[i] & SIZE_MASK) != (type & SIZE_MASK)) {
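
The added deco check above is a subset test: every decorator actually written on an operand must be permitted by the template's deco mask for that operand. A tiny stand-alone restatement with illustrative flag values:

    #include <stdio.h>

    /* Illustrative decorator bits (the real ones live in nasm.h) */
    #define D_MASK 0x01   /* {k1}..{k7} */
    #define D_Z    0x02   /* {z}        */
    #define D_B32  0x04   /* {1to16}    */

    static int deco_ok(unsigned tmpl_deco, unsigned used_deco)
    {
        return (tmpl_deco & used_deco) == used_deco;   /* subset test */
    }

    int main(void)
    {
        unsigned tmpl = D_MASK | D_Z;   /* e.g. a zmmreg|mask|z template operand */
        printf("%d %d\n", deco_ok(tmpl, D_MASK | D_Z),  /* 1: allowed  */
                          deco_ok(tmpl, D_B32));        /* 0: rejected */
        return 0;
    }
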
@@ -2036,16 +2174,116 @@ static enum match_result matches(const struct itemplate *itemp,
return MOK_GOOD;
}
+/*
+ * Check whether the offset is a multiple of N for the corresponding tuple type.
+ * If Disp8*N applies, the compressed displacement is stored in *compdisp.
+ */
+static bool is_disp8n(operand *input, insn *ins, int8_t *compdisp)
+{
+ const uint8_t fv_n[2][2][VLMAX] = {{{16, 32, 64}, {4, 4, 4}},
+ {{16, 32, 64}, {8, 8, 8}}};
+ const uint8_t hv_n[2][VLMAX] = {{8, 16, 32}, {4, 4, 4}};
+ const uint8_t dup_n[VLMAX] = {8, 32, 64};
+
+ bool evex_b = input->decoflags & BRDCAST_MASK;
+ enum ttypes tuple = ins->evex_tuple;
+ /* vex_wlp composed as [wwllpp] */
+ enum vectlens vectlen = (ins->vex_wlp & 0x0c) >> 2;
+ /* wig(=2) is treated as w0(=0) */
+ bool evex_w = (ins->vex_wlp & 0x10) >> 4;
+ int32_t off = input->offset;
+ uint8_t n = 0;
+ int32_t disp8;
+
+ switch(tuple) {
+ case FV:
+ n = fv_n[evex_w][evex_b][vectlen];
+ break;
+ case HV:
+ n = hv_n[evex_b][vectlen];
+ break;
+
+ case FVM:
+ /* 16, 32, 64 for VL 128, 256, 512 respectively */
+ n = 1 << (vectlen + 4);
+ break;
+ case T1S8: /* N = 1 */
+ case T1S16: /* N = 2 */
+ n = tuple - T1S8 + 1;
+ break;
+ case T1S:
+ /* N = 4 for 32bit, 8 for 64bit */
+ n = evex_w ? 8 : 4;
+ break;
+ case T1F32:
+ case T1F64:
+ /* N = 4 for 32bit, 8 for 64bit */
+ n = (tuple == T1F32 ? 4 : 8);
+ break;
+ case T2:
+ case T4:
+ case T8:
+ if (vectlen + 7 <= (evex_w + 5) + (tuple - T2 + 1))
+ n = 0;
+ else
+ n = 1 << (tuple - T2 + evex_w + 4);
+ break;
+ case HVM:
+ case QVM:
+ case OVM:
+ n = 1 << (OVM - tuple + vectlen + 1);
+ break;
+ case M128:
+ n = 16;
+ break;
+ case DUP:
+ n = dup_n[vectlen];
+ break;
+
+ default:
+ break;
+ }
+
+ if (n && !(off & (n - 1))) {
+ disp8 = off / n;
+ /* if it fits in Disp8 */
+ if (disp8 >= -128 && disp8 <= 127) {
+ *compdisp = disp8;
+ return true;
+ }
+ }
+
+ *compdisp = 0;
+ return false;
+}
+
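
A worked example of the compression implemented above: with a full-vector (FV) tuple at 512-bit length, W = 1 and no broadcast, N is 64 (fv_n[1][0][2]), so a displacement is stored as one byte only when it is a multiple of 64 whose quotient fits in a signed byte. The snippet below restates just the divisibility/range test at the tail of is_disp8n(); like the original, it relies on N being a power of two, which all the table values above are:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    static bool compress_disp8(int32_t off, int32_t n, int8_t *disp8)
    {
        if (n && !(off & (n - 1)) && off / n >= -128 && off / n <= 127) {
            *disp8 = (int8_t)(off / n);   /* e.g. 0x200 with N=64 -> 8 */
            return true;
        }
        *disp8 = 0;                        /* fall back to a full disp32 */
        return false;
    }

    int main(void)
    {
        int8_t d;
        printf("0x200: %s %d\n", compress_disp8(0x200, 64, &d) ? "disp8" : "disp32", d);
        printf("0x202: %s\n",    compress_disp8(0x202, 64, &d) ? "disp8" : "disp32");
        return 0;
    }
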
+/*
+ * Check if ModR/M.mod should/can be 01.
+ * - EAF_BYTEOFFS is set
+ * - offset can fit in a byte when EVEX is not used
+ * - offset can be compressed when EVEX is used
+ */
+#define IS_MOD_01() (input->eaflags & EAF_BYTEOFFS || \
+ (o >= -128 && o <= 127 && \
+ seg == NO_SEG && !forw_ref && \
+ !(input->eaflags & EAF_WORDOFFS) && \
+ !(ins->rex & REX_EV)) || \
+ (ins->rex & REX_EV && \
+ is_disp8n(input, ins, &output->disp8)))
+
static enum ea_type process_ea(operand *input, ea *output, int bits,
- int addrbits, int rfield, opflags_t rflags)
+ int rfield, opflags_t rflags, insn *ins)
{
bool forw_ref = !!(input->opflags & OPFLAG_UNKNOWN);
+ int addrbits = ins->addr_size;
output->type = EA_SCALAR;
output->rip = false;
/* REX flags for the rfield operand */
output->rex |= rexflags(rfield, rflags, REX_R | REX_P | REX_W | REX_H);
+ /* EVEX.R' flag for the REG operand */
+ ins->evex_p[0] |= evexflags(rfield, 0, EVEX_P0RP, 0);
if (is_class(REGISTER, input->type)) {
/*
@@ -2054,10 +2292,17 @@ static enum ea_type process_ea(operand *input, ea *output, int bits,
if (!is_register(input->basereg))
goto err;
- if (!is_class(REG_EA, regflag(input)))
+ if (!is_reg_class(REG_EA, input->basereg))
goto err;
+ /* broadcasting is not available with a direct register operand. */
+ if (input->decoflags & BRDCAST_MASK) {
+ nasm_error(ERR_NONFATAL, "Broadcasting not allowed from a register");
+ goto err;
+ }
+
output->rex |= op_rexflags(input, REX_B | REX_P | REX_W | REX_H);
+ ins->evex_p[0] |= op_evexflags(input, EVEX_P0X, 0);
output->sib_present = false; /* no SIB necessary */
output->bytes = 0; /* no offset necessary either */
output->modrm = GEN_MODRM(3, rfield, nasm_regvals[input->basereg]);
@@ -2065,6 +2310,14 @@ static enum ea_type process_ea(operand *input, ea *output, int bits,
/*
* It's a memory reference.
*/
+
+ /* Embedded rounding or SAE is not available with a mem ref operand. */
+ if (input->decoflags & (ER | SAE)) {
+ nasm_error(ERR_NONFATAL,
+ "Embedded rounding is available only with reg-reg op.");
+ return -1;
+ }
+
if (input->basereg == -1 &&
(input->indexreg == -1 || input->scale == 0)) {
/*
@@ -2125,7 +2378,7 @@ static enum ea_type process_ea(operand *input, ea *output, int bits,
}
/* if either one are a vector register... */
- if ((ix|bx) & (XMMREG|YMMREG) & ~REG_EA) {
+ if ((ix|bx) & (XMMREG|YMMREG|ZMMREG) & ~REG_EA) {
opflags_t sok = BITS32 | BITS64;
int32_t o = input->offset;
int mod, scale, index, base;
@@ -2134,7 +2387,7 @@ static enum ea_type process_ea(operand *input, ea *output, int bits,
* For a vector SIB, one has to be a vector and the other,
* if present, a GPR. The vector must be the index operand.
*/
- if (it == -1 || (bx & (XMMREG|YMMREG) & ~REG_EA)) {
+ if (it == -1 || (bx & (XMMREG|YMMREG|ZMMREG) & ~REG_EA)) {
if (s == 0)
s = 1;
else if (s != 1)
@@ -2165,11 +2418,13 @@ static enum ea_type process_ea(operand *input, ea *output, int bits,
(addrbits == 64 && !(sok & BITS64)))
goto err;
- output->type = (ix & YMMREG & ~REG_EA)
- ? EA_YMMVSIB : EA_XMMVSIB;
+ output->type = ((ix & ZMMREG & ~REG_EA) ? EA_ZMMVSIB
+ : ((ix & YMMREG & ~REG_EA)
+ ? EA_YMMVSIB : EA_XMMVSIB));
- output->rex |= rexflags(it, ix, REX_X);
- output->rex |= rexflags(bt, bx, REX_B);
+ output->rex |= rexflags(it, ix, REX_X);
+ output->rex |= rexflags(bt, bx, REX_B);
+ ins->evex_p[2] |= evexflags(it, 0, EVEX_P2VP, 2);
index = it & 7; /* it is known to be != -1 */
@@ -2199,10 +2454,7 @@ static enum ea_type process_ea(operand *input, ea *output, int bits,
seg == NO_SEG && !forw_ref &&
!(input->eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
mod = 0;
- else if (input->eaflags & EAF_BYTEOFFS ||
- (o >= -128 && o <= 127 &&
- seg == NO_SEG && !forw_ref &&
- !(input->eaflags & EAF_WORDOFFS)))
+ else if (IS_MOD_01())
mod = 1;
else
mod = 2;
@@ -2293,10 +2545,7 @@ static enum ea_type process_ea(operand *input, ea *output, int bits,
seg == NO_SEG && !forw_ref &&
!(input->eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
mod = 0;
- else if (input->eaflags & EAF_BYTEOFFS ||
- (o >= -128 && o <= 127 &&
- seg == NO_SEG && !forw_ref &&
- !(input->eaflags & EAF_WORDOFFS)))
+ else if (IS_MOD_01())
mod = 1;
else
mod = 2;
@@ -2340,10 +2589,7 @@ static enum ea_type process_ea(operand *input, ea *output, int bits,
seg == NO_SEG && !forw_ref &&
!(input->eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
mod = 0;
- else if (input->eaflags & EAF_BYTEOFFS ||
- (o >= -128 && o <= 127 &&
- seg == NO_SEG && !forw_ref &&
- !(input->eaflags & EAF_WORDOFFS)))
+ else if (IS_MOD_01())
mod = 1;
else
mod = 2;
@@ -2428,9 +2674,7 @@ static enum ea_type process_ea(operand *input, ea *output, int bits,
if (o == 0 && seg == NO_SEG && !forw_ref && rm != 6 &&
!(input->eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
mod = 0;
- else if (input->eaflags & EAF_BYTEOFFS ||
- (o >= -128 && o <= 127 && seg == NO_SEG &&
- !forw_ref && !(input->eaflags & EAF_WORDOFFS)))
+ else if (IS_MOD_01())
mod = 1;
else
mod = 2;
diff --git a/disasm.c b/disasm.c
index 97bf27e..9d2e1b1 100644
--- a/disasm.c
+++ b/disasm.c
@@ -328,6 +328,8 @@ static uint8_t *do_ea(uint8_t *data, int modrm, int asize,
op->indexreg = nasm_rd_xmmreg[index | ((rex & REX_X) ? 8 : 0)];
else if (type == EA_YMMVSIB)
op->indexreg = nasm_rd_ymmreg[index | ((rex & REX_X) ? 8 : 0)];
+ else if (type == EA_ZMMVSIB)
+ op->indexreg = nasm_rd_zmmreg[index | ((rex & REX_X) ? 8 : 0)];
else if (index == 4 && !(rex & REX_X))
op->indexreg = -1; /* ESP/RSP cannot be an index */
else if (a64)
@@ -868,6 +870,10 @@ static int matches(const struct itemplate *t, uint8_t *data,
eat = EA_YMMVSIB;
break;
+ case 0376:
+ eat = EA_ZMMVSIB;
+ break;
+
default:
return false; /* Unknown code */
}
diff --git a/insns.dat b/insns.dat
index 0b55b68..320280a 100644
--- a/insns.dat
+++ b/insns.dat
@@ -1064,16 +1064,16 @@ PUSH reg_ds [-: 1e] 8086,NOLONG
PUSH reg_fs [-: 0f a0] 386
PUSH reg_gs [-: 0f a8] 386
PUSH imm8 [i: 6a ib,s] 186
-PUSH sbyteword16 [i: o16 6a ib,s] 186,AR0,SZ,ND
-PUSH imm16 [i: o16 68 iw] 186,AR0,SZ
-PUSH sbytedword32 [i: o32 6a ib,s] 386,NOLONG,AR0,SZ,ND
-PUSH imm32 [i: o32 68 id] 386,NOLONG,AR0,SZ
+PUSH sbyteword16 [i: o16 6a ib,s] 186,AR0,SIZE,ND
+PUSH imm16 [i: o16 68 iw] 186,AR0,SIZE
+PUSH sbytedword32 [i: o32 6a ib,s] 386,NOLONG,AR0,SIZE,ND
+PUSH imm32 [i: o32 68 id] 386,NOLONG,AR0,SIZE
PUSH sbytedword32 [i: o32 6a ib,s] 386,NOLONG,SD,ND
PUSH imm32 [i: o32 68 id] 386,NOLONG,SD
-PUSH sbytedword64 [i: o64nw 6a ib,s] X64,AR0,SZ,ND
-PUSH imm64 [i: o64nw 68 id,s] X64,AR0,SZ
-PUSH sbytedword32 [i: o64nw 6a ib,s] X64,AR0,SZ,ND
-PUSH imm32 [i: o64nw 68 id,s] X64,AR0,SZ
+PUSH sbytedword64 [i: o64nw 6a ib,s] X64,AR0,SIZE,ND
+PUSH imm64 [i: o64nw 68 id,s] X64,AR0,SIZE
+PUSH sbytedword32 [i: o64nw 6a ib,s] X64,AR0,SIZE,ND
+PUSH imm32 [i: o64nw 68 id,s] X64,AR0,SIZE
PUSHA void [ odf 60] 186,NOLONG
PUSHAD void [ o32 60] 386,NOLONG
PUSHAW void [ o16 60] 186,NOLONG
@@ -3457,7 +3457,437 @@ TZMSK reg32,rm32 [vm: xop.ndd.lz.m9.w0 01 /4] FUTURE,TBM
TZMSK reg64,rm64 [vm: xop.ndd.lz.m9.w1 01 /4] LONG,FUTURE,TBM
T1MSKC reg32,rm32 [vm: xop.ndd.lz.m9.w0 01 /7] FUTURE,TBM
T1MSKC reg64,rm64 [vm: xop.ndd.lz.m9.w1 01 /7] LONG,FUTURE,TBM
-+
+
+;# Intel AVX512 instructions
+;
+; based on pub number 319433-015 dated July 2013
+;
+VADDPD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f.w1 58 /r ] AVX512,FUTURE
+VADDPS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.0f.w0 58 /r ] AVX512,FUTURE
+VADDSD xmmreg|mask|z,xmmreg,xmmrm64|er [rvm:t1s: evex.nds.lig.f2.0f.w1 58 /r ] AVX512,FUTURE
+VADDSS xmmreg|mask|z,xmmreg,xmmrm32|er [rvm:t1s: evex.nds.lig.f3.0f.w0 58 /r ] AVX512,FUTURE
+VALIGND zmmreg|mask|z,zmmreg,zmmrm512|b32,imm8 [rvmi:fv: evex.nds.512.66.0f3a.w0 03 /r ib ] AVX512,FUTURE
+VALIGNQ zmmreg|mask|z,zmmreg,zmmrm512|b64,imm8 [rvmi:fv: evex.nds.512.66.0f3a.w1 03 /r ib ] AVX512,FUTURE
+VBLENDMPD zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f38.w1 65 /r ] AVX512,FUTURE
+VBLENDMPS zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f38.w0 65 /r ] AVX512,FUTURE
+VBROADCASTF32X4 zmmreg|mask|z,mem128 [rm:t4: evex.512.66.0f38.w0 1a /r ] AVX512,FUTURE
+VBROADCASTF64X4 zmmreg|mask|z,mem256 [rm:t4: evex.512.66.0f38.w1 1b /r ] AVX512,FUTURE
+VBROADCASTI32X4 zmmreg|mask|z,mem128 [rm:t4: evex.512.66.0f38.w0 5a /r ] AVX512,FUTURE
+VBROADCASTI64X4 zmmreg|mask|z,mem256 [rm:t4: evex.512.66.0f38.w1 5b /r ] AVX512,FUTURE
+VBROADCASTSD zmmreg|mask|z,mem64 [rm:t1s: evex.512.66.0f38.w1 19 /r ] AVX512,FUTURE
+VBROADCASTSD zmmreg|mask|z,xmmreg [rm: evex.512.66.0f38.w1 19 /r ] AVX512,FUTURE
+VBROADCASTSS zmmreg|mask|z,mem32 [rm:t1s: evex.512.66.0f38.w0 18 /r ] AVX512,FUTURE
+VBROADCASTSS zmmreg|mask|z,xmmreg [rm: evex.512.66.0f38.w0 18 /r ] AVX512,FUTURE
+VCMPPD opmaskreg|mask,zmmreg,zmmrm512|b64|sae,imm8 [rvmi:fv: evex.nds.512.66.0f.w1 c2 /r ib ] AVX512,FUTURE
+VCMPPS opmaskreg|mask,zmmreg,zmmrm512|b32|sae,imm8 [rvmi:fv: evex.nds.512.0f.w0 c2 /r ib ] AVX512,FUTURE
+VCMPSD opmaskreg|mask,xmmreg,xmmrm64|sae,imm8 [rvmi:t1s: evex.nds.lig.f2.0f.w1 c2 /r ib ] AVX512,FUTURE
+VCMPSS opmaskreg|mask,xmmreg,xmmrm32|sae,imm8 [rvmi:t1s: evex.nds.lig.f3.0f.w0 c2 /r ib ] AVX512,FUTURE
+VCOMISD xmmreg,xmmrm64|sae [rm:t1s: evex.lig.66.0f.w1 2f /r ] AVX512,FUTURE
+VCOMISS xmmreg,xmmrm32|sae [rm:t1s: evex.lig.0f.w0 2f /r ] AVX512,FUTURE
+VCOMPRESSPD mem512|mask,zmmreg [mr:t1s: evex.512.66.0f38.w1 8a /r ] AVX512,FUTURE
+VCOMPRESSPD zmmreg|mask|z,zmmreg [mr: evex.512.66.0f38.w1 8a /r ] AVX512,FUTURE
+VCOMPRESSPS mem512|mask,zmmreg [mr:t1s: evex.512.66.0f38.w0 8a /r ] AVX512,FUTURE
+VCOMPRESSPS zmmreg|mask|z,zmmreg [mr: evex.512.66.0f38.w0 8a /r ] AVX512,FUTURE
+VCVTDQ2PD zmmreg|mask|z,ymmrm256|b32|er [rm:hv: evex.512.f3.0f.w0 e6 /r ] AVX512,FUTURE
+VCVTDQ2PS zmmreg|mask|z,zmmrm512|b32|er [rm:fv: evex.512.0f.w0 5b /r ] AVX512,FUTURE
+VCVTPD2DQ ymmreg|mask|z,zmmrm512|b64|er [rm:fv: evex.512.f2.0f.w1 e6 /r ] AVX512,FUTURE
+VCVTPD2PS ymmreg|mask|z,zmmrm512|b64|er [rm:fv: evex.512.66.0f.w1 5a /r ] AVX512,FUTURE
+VCVTPD2UDQ ymmreg|mask|z,zmmrm512|b64|er [rm:fv: evex.512.0f.w1 79 /r ] AVX512,FUTURE
+VCVTPH2PS zmmreg|mask|z,ymmrm256|sae [rm:hvm: evex.512.66.0f38.w0 13 /r ] AVX512,FUTURE
+VCVTPS2DQ zmmreg|mask|z,zmmrm512|b32|er [rm:fv: evex.512.66.0f.w0 5b /r ] AVX512,FUTURE
+VCVTPS2PD zmmreg|mask|z,ymmrm256|b32|sae [rm:hv: evex.512.0f.w0 5a /r ] AVX512,FUTURE
+VCVTPS2PH mem256|mask,zmmreg|sae,imm8 [mri:hvm: evex.512.66.0f3a.w0 1d /r ib ] AVX512,FUTURE
+VCVTPS2PH ymmreg|mask|z,zmmreg|sae,imm8 [mri:hvm: evex.512.66.0f3a.w0 1d /r ib ] AVX512,FUTURE
+VCVTPS2UDQ zmmreg|mask|z,zmmrm512|b32|er [rm:fv: evex.512.0f.w0 79 /r ] AVX512,FUTURE
+VCVTSD2SI reg32,xmmrm64|er [rm:t1f64: evex.lig.f2.0f.w0 2d /r ] AVX512,FUTURE
+VCVTSD2SI reg64,xmmrm64|er [rm:t1f64: evex.lig.f2.0f.w1 2d /r ] AVX512,FUTURE
+VCVTSD2SS xmmreg|mask|z,xmmreg,xmmrm64|er [rvm:t1s: evex.nds.lig.f2.0f.w1 5a /r ] AVX512,FUTURE
+VCVTSD2USI reg32,xmmrm64|er [rm:t1f64: evex.lig.f2.0f.w0 79 /r ] AVX512,FUTURE
+VCVTSD2USI reg64,xmmrm64|er [rm:t1f64: evex.lig.f2.0f.w1 79 /r ] AVX512,FUTURE
+VCVTSI2SD xmmreg,xmmreg,rm32|er [rvm:t1s: evex.nds.lig.f2.0f.w0 2a /r ] AVX512,FUTURE
+VCVTSI2SD xmmreg,xmmreg,rm64|er [rvm:t1s: evex.nds.lig.f2.0f.w1 2a /r ] AVX512,FUTURE
+VCVTSI2SS xmmreg,xmmreg,rm32|er [rvm:t1s: evex.nds.lig.f3.0f.w0 2a /r ] AVX512,FUTURE
+VCVTSI2SS xmmreg,xmmreg,rm64|er [rvm:t1s: evex.nds.lig.f3.0f.w1 2a /r ] AVX512,FUTURE
+VCVTSS2SD xmmreg|mask|z,xmmreg,xmmrm32|sae [rvm:t1s: evex.nds.lig.f3.0f.w0 5a /r ] AVX512,FUTURE
+VCVTSS2SI reg32,xmmrm32|er [rm:t1f32: evex.lig.f3.0f.w0 2d /r ] AVX512,FUTURE
+VCVTSS2SI reg64,xmmrm32|er [rm:t1f32: evex.lig.f3.0f.w1 2d /r ] AVX512,FUTURE
+VCVTSS2USI reg32,xmmrm32|er [rm:t1f32: evex.lig.f3.0f.w0 79 /r ] AVX512,FUTURE
+VCVTSS2USI reg64,xmmrm32|er [rm:t1f32: evex.lig.f3.0f.w1 79 /r ] AVX512,FUTURE
+VCVTTPD2DQ ymmreg|mask|z,zmmrm512|b64|sae [rm:fv: evex.512.66.0f.w1 e6 /r ] AVX512,FUTURE
+VCVTTPD2UDQ ymmreg|mask|z,zmmrm512|b64|sae [rm:fv: evex.512.0f.w1 78 /r ] AVX512,FUTURE
+VCVTTPS2DQ zmmreg|mask|z,zmmrm512|b32|sae [rm:fv: evex.512.f3.0f.w0 5b /r ] AVX512,FUTURE
+VCVTTPS2UDQ zmmreg|mask|z,zmmrm512|b32|sae [rm:fv: evex.512.0f.w0 78 /r ] AVX512,FUTURE
+VCVTTSD2SI reg32,xmmrm64|sae [rm:t1f64: evex.lig.f2.0f.w0 2c /r ] AVX512,FUTURE
+VCVTTSD2SI reg64,xmmrm64|sae [rm:t1f64: evex.lig.f2.0f.w1 2c /r ] AVX512,FUTURE
+VCVTTSD2USI reg32,xmmrm64|sae [rm:t1f64: evex.lig.f2.0f.w0 78 /r ] AVX512,FUTURE
+VCVTTSD2USI reg64,xmmrm64|sae [rm:t1f64: evex.lig.f2.0f.w1 78 /r ] AVX512,FUTURE
+VCVTTSS2SI reg32,xmmrm32|sae [rm:t1f32: evex.lig.f3.0f.w0 2c /r ] AVX512,FUTURE
+VCVTTSS2SI reg64,xmmrm32|sae [rm:t1f32: evex.lig.f3.0f.w1 2c /r ] AVX512,FUTURE
+VCVTTSS2USI reg32,xmmrm32|sae [rm:t1f32: evex.lig.f3.0f.w0 78 /r ] AVX512,FUTURE
+VCVTTSS2USI reg64,xmmrm32|sae [rm:t1f32: evex.lig.f3.0f.w1 78 /r ] AVX512,FUTURE
+VCVTUDQ2PD zmmreg|mask|z,ymmrm256|b32|er [rm:hv: evex.512.f3.0f.w0 7a /r ] AVX512,FUTURE
+VCVTUDQ2PS zmmreg|mask|z,zmmrm512|b32|er [rm:fv: evex.512.f2.0f.w0 7a /r ] AVX512,FUTURE
+VCVTUSI2SD xmmreg,xmmreg,rm32|er [rvm:t1s: evex.nds.lig.f2.0f.w0 7b /r ] AVX512,FUTURE
+VCVTUSI2SD xmmreg,xmmreg,rm64|er [rvm:t1s: evex.nds.lig.f2.0f.w1 7b /r ] AVX512,FUTURE
+VCVTUSI2SS xmmreg,xmmreg,rm32|er [rvm:t1s: evex.nds.lig.f3.0f.w0 7b /r ] AVX512,FUTURE
+VCVTUSI2SS xmmreg,xmmreg,rm64|er [rvm:t1s: evex.nds.lig.f3.0f.w1 7b /r ] AVX512,FUTURE
+VDIVPD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f.w1 5e /r ] AVX512,FUTURE
+VDIVPS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.0f.w0 5e /r ] AVX512,FUTURE
+VDIVSD xmmreg|mask|z,xmmreg,xmmrm64|er [rvm:t1s: evex.nds.lig.f2.0f.w1 5e /r ] AVX512,FUTURE
+VDIVSS xmmreg|mask|z,xmmreg,xmmrm32|er [rvm:t1s: evex.nds.lig.f3.0f.w0 5e /r ] AVX512,FUTURE
+VEXPANDPD zmmreg|mask|z,mem512 [rm:t1s: evex.512.66.0f38.w1 88 /r ] AVX512,FUTURE
+VEXPANDPD zmmreg|mask|z,zmmreg [rm:t1s: evex.512.66.0f38.w1 88 /r ] AVX512,FUTURE
+VEXPANDPS zmmreg|mask|z,mem512 [rm:t1s: evex.512.66.0f38.w0 88 /r ] AVX512,FUTURE
+VEXPANDPS zmmreg|mask|z,zmmreg [rm:t1s: evex.512.66.0f38.w0 88 /r ] AVX512,FUTURE
+VEXTRACTF32X4 mem128|mask,zmmreg,imm8 [mri:t4: evex.512.66.0f3a.w0 19 /r ib ] AVX512,FUTURE
+VEXTRACTF32X4 xmmreg|mask|z,zmmreg,imm8 [mri:t4: evex.512.66.0f3a.w0 19 /r ib ] AVX512,FUTURE
+VEXTRACTF64X4 mem256|mask,zmmreg,imm8 [mri:t4: evex.512.66.0f3a.w1 1b /r ib ] AVX512,FUTURE
+VEXTRACTF64X4 ymmreg|mask|z,zmmreg,imm8 [mri: evex.512.66.0f3a.w1 1b /r ib ] AVX512,FUTURE
+VEXTRACTI32X4 mem128|mask,zmmreg,imm8 [mri:t4: evex.512.66.0f3a.w0 39 /r ib ] AVX512,FUTURE
+VEXTRACTI32X4 xmmreg|mask|z,zmmreg,imm8 [mri: evex.512.66.0f3a.w0 39 /r ib ] AVX512,FUTURE
+VEXTRACTI64X4 mem256|mask,zmmreg,imm8 [mri:t4: evex.512.66.0f3a.w1 3b /r ib ] AVX512,FUTURE
+VEXTRACTI64X4 ymmreg|mask|z,zmmreg,imm8 [mri: evex.512.66.0f3a.w1 3b /r ib ] AVX512,FUTURE
+VEXTRACTPS rm32,xmmreg,imm8 [mri:t1s: evex.128.66.0f3a.wig 17 /r ib ] AVX512,FUTURE
+VFIXUPIMMPD zmmreg|mask|z,zmmreg,zmmrm512|b64|sae,imm8 [rvmi:fv: evex.nds.512.66.0f3a.w1 54 /r ib ] AVX512,FUTURE
+VFIXUPIMMPS zmmreg|mask|z,zmmreg,zmmrm512|b32|sae,imm8 [rvmi:fv: evex.nds.512.66.0f3a.w0 54 /r ib ] AVX512,FUTURE
+VFIXUPIMMSD xmmreg|mask|z,xmmreg,xmmrm64|sae,imm8 [rvmi:t1s: evex.nds.lig.66.0f3a.w1 55 /r ib ] AVX512,FUTURE
+VFIXUPIMMSS xmmreg|mask|z,xmmreg,xmmrm32|sae,imm8 [rvmi:t1s: evex.nds.lig.66.0f3a.w0 55 /r ib ] AVX512,FUTURE
+VFMADD132PD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f38.w1 98 /r ] AVX512,FUTURE
+VFMADD132PS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.66.0f38.w0 98 /r ] AVX512,FUTURE
+VFMADD132SD xmmreg|mask|z,xmmreg,xmmrm64|er [rvm:t1s: evex.nds.lig.66.0f38.w1 99 /r ] AVX512,FUTURE
+VFMADD132SS xmmreg|mask|z,xmmreg,xmmrm32|er [rvm:t1s: evex.nds.lig.66.0f38.w0 99 /r ] AVX512,FUTURE
+VFMADD213PD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f38.w1 a8 /r ] AVX512,FUTURE
+VFMADD213PS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.66.0f38.w0 a8 /r ] AVX512,FUTURE
+VFMADD213SD xmmreg|mask|z,xmmreg,xmmrm64|er [rvm:t1s: evex.nds.lig.66.0f38.w1 a9 /r ] AVX512,FUTURE
+VFMADD213SS xmmreg|mask|z,xmmreg,xmmrm32|er [rvm:t1s: evex.nds.lig.66.0f38.w0 a9 /r ] AVX512,FUTURE
+VFMADD231PD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f38.w1 b8 /r ] AVX512,FUTURE
+VFMADD231PS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.66.0f38.w0 b8 /r ] AVX512,FUTURE
+VFMADD231SD xmmreg|mask|z,xmmreg,xmmrm64|er [rvm:t1s: evex.nds.lig.66.0f38.w1 b9 /r ] AVX512,FUTURE
+VFMADD231SS xmmreg|mask|z,xmmreg,xmmrm32|er [rvm:t1s: evex.nds.lig.66.0f38.w0 b9 /r ] AVX512,FUTURE
+VFMADDSUB132PD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f38.w1 96 /r ] AVX512,FUTURE
+VFMADDSUB132PS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.66.0f38.w0 96 /r ] AVX512,FUTURE
+VFMADDSUB213PD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f38.w1 a6 /r ] AVX512,FUTURE
+VFMADDSUB213PS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.66.0f38.w0 a6 /r ] AVX512,FUTURE
+VFMADDSUB231PD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f38.w1 b6 /r ] AVX512,FUTURE
+VFMADDSUB231PS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.66.0f38.w0 b6 /r ] AVX512,FUTURE
+VFMSUB132PD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f38.w1 9a /r ] AVX512,FUTURE
+VFMSUB132PS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.66.0f38.w0 9a /r ] AVX512,FUTURE
+VFMSUB132SD xmmreg|mask|z,xmmreg,xmmrm64|er [rvm:t1s: evex.nds.lig.66.0f38.w1 9b /r ] AVX512,FUTURE
+VFMSUB132SS xmmreg|mask|z,xmmreg,xmmrm32|er [rvm:t1s: evex.nds.lig.66.0f38.w0 9b /r ] AVX512,FUTURE
+VFMSUB213PD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f38.w1 aa /r ] AVX512,FUTURE
+VFMSUB213PS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.66.0f38.w0 aa /r ] AVX512,FUTURE
+VFMSUB213SD xmmreg|mask|z,xmmreg,xmmrm64|er [rvm:t1s: evex.nds.lig.66.0f38.w1 ab /r ] AVX512,FUTURE
+VFMSUB213SS xmmreg|mask|z,xmmreg,xmmrm32|er [rvm:t1s: evex.nds.lig.66.0f38.w0 ab /r ] AVX512,FUTURE
+VFMSUB231PD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f38.w1 ba /r ] AVX512,FUTURE
+VFMSUB231PS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.66.0f38.w0 ba /r ] AVX512,FUTURE
+VFMSUB231SD xmmreg|mask|z,xmmreg,xmmrm64|er [rvm:t1s: evex.nds.lig.66.0f38.w1 bb /r ] AVX512,FUTURE
+VFMSUB231SS xmmreg|mask|z,xmmreg,xmmrm32|er [rvm:t1s: evex.nds.lig.66.0f38.w0 bb /r ] AVX512,FUTURE
+VFMSUBADD132PD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f38.w1 97 /r ] AVX512,FUTURE
+VFMSUBADD132PS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.66.0f38.w0 97 /r ] AVX512,FUTURE
+VFMSUBADD213PD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f38.w1 a7 /r ] AVX512,FUTURE
+VFMSUBADD213PS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.66.0f38.w0 a7 /r ] AVX512,FUTURE
+VFMSUBADD231PD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f38.w1 b7 /r ] AVX512,FUTURE
+VFMSUBADD231PS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.66.0f38.w0 b7 /r ] AVX512,FUTURE
+VFNMADD132PD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f38.w1 9c /r ] AVX512,FUTURE
+VFNMADD132PS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.66.0f38.w0 9c /r ] AVX512,FUTURE
+VFNMADD132SD xmmreg|mask|z,xmmreg,xmmrm64|er [rvm:t1s: evex.nds.lig.66.0f38.w1 9d /r ] AVX512,FUTURE
+VFNMADD132SS xmmreg|mask|z,xmmreg,xmmrm32|er [rvm:t1s: evex.nds.lig.66.0f38.w0 9d /r ] AVX512,FUTURE
+VFNMADD213PD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f38.w1 ac /r ] AVX512,FUTURE
+VFNMADD213PS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.66.0f38.w0 ac /r ] AVX512,FUTURE
+VFNMADD213SD xmmreg|mask|z,xmmreg,xmmrm64|er [rvm:t1s: evex.nds.lig.66.0f38.w1 ad /r ] AVX512,FUTURE
+VFNMADD213SS xmmreg|mask|z,xmmreg,xmmrm32|er [rvm:t1s: evex.nds.lig.66.0f38.w0 ad /r ] AVX512,FUTURE
+VFNMADD231PD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f38.w1 bc /r ] AVX512,FUTURE
+VFNMADD231PS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.66.0f38.w0 bc /r ] AVX512,FUTURE
+VFNMADD231SD xmmreg|mask|z,xmmreg,xmmrm64|er [rvm:t1s: evex.nds.lig.66.0f38.w1 bd /r ] AVX512,FUTURE
+VFNMADD231SS xmmreg|mask|z,xmmreg,xmmrm32|er [rvm:t1s: evex.nds.lig.66.0f38.w0 bd /r ] AVX512,FUTURE
+VFNMSUB132PD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f38.w1 9e /r ] AVX512,FUTURE
+VFNMSUB132PS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.66.0f38.w0 9e /r ] AVX512,FUTURE
+VFNMSUB132SD xmmreg|mask|z,xmmreg,xmmrm64|er [rvm:t1s: evex.nds.lig.66.0f38.w1 9f /r ] AVX512,FUTURE
+VFNMSUB132SS xmmreg|mask|z,xmmreg,xmmrm32|er [rvm:t1s: evex.nds.lig.66.0f38.w0 9f /r ] AVX512,FUTURE
+VFNMSUB213PD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f38.w1 ae /r ] AVX512,FUTURE
+VFNMSUB213PS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.66.0f38.w0 ae /r ] AVX512,FUTURE
+VFNMSUB213SD xmmreg|mask|z,xmmreg,xmmrm64|er [rvm:t1s: evex.nds.lig.66.0f38.w1 af /r ] AVX512,FUTURE
+VFNMSUB213SS xmmreg|mask|z,xmmreg,xmmrm32|er [rvm:t1s: evex.nds.lig.66.0f38.w0 af /r ] AVX512,FUTURE
+VFNMSUB231PD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f38.w1 be /r ] AVX512,FUTURE
+VFNMSUB231PS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.66.0f38.w0 be /r ] AVX512,FUTURE
+VFNMSUB231SD xmmreg|mask|z,xmmreg,xmmrm64|er [rvm:t1s: evex.nds.lig.66.0f38.w1 bf /r ] AVX512,FUTURE
+VFNMSUB231SS xmmreg|mask|z,xmmreg,xmmrm32|er [rvm:t1s: evex.nds.lig.66.0f38.w0 bf /r ] AVX512,FUTURE
+VGATHERDPD zmmreg|mask,ymem64 [rm:t1s: vsiby evex.512.66.0f38.w1 92 /r ] AVX512,FUTURE
+VGATHERDPS zmmreg|mask,zmem32 [rm:t1s: vsibz evex.512.66.0f38.w0 92 /r ] AVX512,FUTURE
+VGATHERQPD zmmreg|mask,zmem64 [rm:t1s: vsibz evex.512.66.0f38.w1 93 /r ] AVX512,FUTURE
+VGATHERQPS ymmreg|mask,zmem32 [rm:t1s: vsibz evex.512.66.0f38.w0 93 /r ] AVX512,FUTURE
+VGETEXPPD zmmreg|mask|z,zmmrm512|b64|sae [rm:fv: evex.512.66.0f38.w1 42 /r ] AVX512,FUTURE
+VGETEXPPS zmmreg|mask|z,zmmrm512|b32|sae [rm:fv: evex.512.66.0f38.w0 42 /r ] AVX512,FUTURE
+VGETEXPSD xmmreg|mask|z,xmmreg,xmmrm64|sae [rvm:t1s: evex.nds.lig.66.0f38.w1 43 /r ] AVX512,FUTURE
+VGETEXPSS xmmreg|mask|z,xmmreg,xmmrm32|sae [rvm:t1s: evex.nds.lig.66.0f38.w0 43 /r ] AVX512,FUTURE
+VGETMANTPD zmmreg|mask|z,zmmrm512|b64|sae,imm8 [rmi:fv: evex.512.66.0f3a.w1 26 /r ib ] AVX512,FUTURE
+VGETMANTPS zmmreg|mask|z,zmmrm512|b32|sae,imm8 [rmi:fv: evex.512.66.0f3a.w0 26 /r ib ] AVX512,FUTURE
+VGETMANTSD xmmreg|mask|z,xmmreg,xmmrm64|sae,imm8 [rvmi:t1s: evex.nds.lig.66.0f3a.w1 27 /r ib ] AVX512,FUTURE
+VGETMANTSS xmmreg|mask|z,xmmreg,xmmrm32|sae,imm8 [rvmi:t1s: evex.nds.lig.66.0f3a.w0 27 /r ib ] AVX512,FUTURE
+VINSERTF32X4 zmmreg|mask|z,zmmreg,xmmrm128,imm8 [rvmi:t4: evex.nds.512.66.0f3a.w0 18 /r ib ] AVX512,FUTURE
+VINSERTF64X4 zmmreg|mask|z,zmmreg,ymmrm256,imm8 [rvmi:t4: evex.nds.512.66.0f3a.w1 1a /r ib ] AVX512,FUTURE
+VINSERTI32X4 zmmreg|mask|z,zmmreg,xmmrm128,imm8 [rvmi:t4: evex.nds.512.66.0f3a.w0 38 /r ib ] AVX512,FUTURE
+VINSERTI64X4 zmmreg|mask|z,zmmreg,ymmrm256,imm8 [rvmi:t4: evex.nds.512.66.0f3a.w1 3a /r ib ] AVX512,FUTURE
+VINSERTPS xmmreg,xmmreg,xmmrm32,imm8 [rvmi:t1s: evex.nds.128.66.0f3a.w0 21 /r ib ] AVX512,FUTURE
+VMAXPD zmmreg|mask|z,zmmreg,zmmrm512|b64|sae [rvm:fv: evex.nds.512.66.0f.w1 5f /r ] AVX512,FUTURE
+VMAXPS zmmreg|mask|z,zmmreg,zmmrm512|b32|sae [rvm:fv: evex.nds.512.0f.w0 5f /r ] AVX512,FUTURE
+VMAXSD xmmreg|mask|z,xmmreg,xmmrm64|sae [rvm:t1s: evex.nds.lig.f2.0f.w1 5f /r ] AVX512,FUTURE
+VMAXSS xmmreg|mask|z,xmmreg,xmmrm32|sae [rvm:t1s: evex.nds.lig.f3.0f.w0 5f /r ] AVX512,FUTURE
+VMINPD zmmreg|mask|z,zmmreg,zmmrm512|b64|sae [rvm:fv: evex.nds.512.66.0f.w1 5d /r ] AVX512,FUTURE
+VMINPS zmmreg|mask|z,zmmreg,zmmrm512|b32|sae [rvm:fv: evex.nds.512.0f.w0 5d /r ] AVX512,FUTURE
+VMINSD xmmreg|mask|z,xmmreg,xmmrm64|sae [rvm:t1s: evex.nds.lig.f2.0f.w1 5d /r ] AVX512,FUTURE
+VMINSS xmmreg|mask|z,xmmreg,xmmrm32|sae [rvm:t1s: evex.nds.lig.f3.0f.w0 5d /r ] AVX512,FUTURE
+VMOVAPD mem512|mask,zmmreg [mr:fvm: evex.512.66.0f.w1 29 /r ] AVX512,FUTURE
+VMOVAPD zmmreg|mask|z,zmmreg [mr: evex.512.66.0f.w1 29 /r ] AVX512,FUTURE
+VMOVAPD zmmreg|mask|z,zmmrm512 [rm:fvm: evex.512.66.0f.w1 28 /r ] AVX512,FUTURE
+VMOVAPS mem512|mask,zmmreg [mr:fvm: evex.512.0f.w0 29 /r ] AVX512,FUTURE
+VMOVAPS zmmreg|mask|z,zmmreg [mr: evex.512.0f.w0 29 /r ] AVX512,FUTURE
+VMOVAPS zmmreg|mask|z,zmmrm512 [rm:fvm: evex.512.0f.w0 28 /r ] AVX512,FUTURE
+VMOVD rm32,xmmreg [mr:t1s: evex.128.66.0f.w0 7e /r ] AVX512,FUTURE
+VMOVD xmmreg,rm32 [rm:t1s: evex.128.66.0f.w0 6e /r ] AVX512,FUTURE
+VMOVDDUP zmmreg|mask|z,zmmrm512 [rm:dup: evex.512.f2.0f.w1 12 /r ] AVX512,FUTURE
+VMOVDQA32 mem512|mask,zmmreg [mr:fvm: evex.512.66.0f.w0 7f /r ] AVX512,FUTURE
+VMOVDQA32 zmmreg|mask|z,zmmreg [mr: evex.512.66.0f.w0 7f /r ] AVX512,FUTURE
+VMOVDQA32 zmmreg|mask|z,zmmrm512 [rm:fvm: evex.512.66.0f.w0 6f /r ] AVX512,FUTURE
+VMOVDQA64 mem512|mask,zmmreg [mr:fvm: evex.512.66.0f.w1 7f /r ] AVX512,FUTURE
+VMOVDQA64 zmmreg|mask|z,zmmreg [mr: evex.512.66.0f.w1 7f /r ] AVX512,FUTURE
+VMOVDQA64 zmmreg|mask|z,zmmrm512 [rm:fvm: evex.512.66.0f.w1 6f /r ] AVX512,FUTURE
+VMOVDQU32 mem512|mask,zmmreg [mr:fvm: evex.512.f3.0f.w0 7f /r ] AVX512,FUTURE
+VMOVDQU32 zmmreg|mask|z,zmmreg [mr: evex.512.f3.0f.w0 7f /r ] AVX512,FUTURE
+VMOVDQU32 zmmreg|mask|z,zmmrm512 [rm:fvm: evex.512.f3.0f.w0 6f /r ] AVX512,FUTURE
+VMOVDQU64 mem512|mask,zmmreg [mr:fvm: evex.512.f3.0f.w1 7f /r ] AVX512,FUTURE
+VMOVDQU64 zmmreg|mask|z,zmmreg [mr: evex.512.f3.0f.w1 7f /r ] AVX512,FUTURE
+VMOVDQU64 zmmreg|mask|z,zmmrm512 [rm:fvm: evex.512.f3.0f.w1 6f /r ] AVX512,FUTURE
+VMOVHLPS xmmreg,xmmreg,xmmreg [rvm: evex.nds.128.0f.w0 12 /r ] AVX512,FUTURE
+VMOVHPD mem64,xmmreg [mr:t1s: evex.128.66.0f.w1 17 /r ] AVX512,FUTURE
+VMOVHPD xmmreg,xmmreg,mem64 [rvm:t1s: evex.nds.128.66.0f.w1 16 /r ] AVX512,FUTURE
+VMOVHPS mem64,xmmreg [mr:t2: evex.128.0f.w0 17 /r ] AVX512,FUTURE
+VMOVHPS xmmreg,xmmreg,mem64 [rvm:t2: evex.nds.128.0f.w0 16 /r ] AVX512,FUTURE
+VMOVLHPS xmmreg,xmmreg,xmmreg [rvm: evex.nds.128.0f.w0 16 /r ] AVX512,FUTURE
+VMOVLPD mem64,xmmreg [mr:t1s: evex.128.66.0f.w1 13 /r ] AVX512,FUTURE
+VMOVLPD xmmreg,xmmreg,mem64 [rvm:t1s: evex.nds.128.66.0f.w1 12 /r ] AVX512,FUTURE
+VMOVLPS mem64,xmmreg [mr:t2: evex.128.0f.w0 13 /r ] AVX512,FUTURE
+VMOVLPS xmmreg,xmmreg,mem64 [rvm:t2: evex.nds.128.0f.w0 12 /r ] AVX512,FUTURE
+VMOVNTDQ mem512,zmmreg [mr:fvm: evex.512.66.0f.w0 e7 /r ] AVX512,FUTURE
+VMOVNTDQA zmmreg,mem512 [rm:fvm: evex.512.66.0f38.w0 2a /r ] AVX512,FUTURE
+VMOVNTPD mem512,zmmreg [mr:fvm: evex.512.66.0f.w1 2b /r ] AVX512,FUTURE
+VMOVNTPS mem512,zmmreg [mr:fvm: evex.512.0f.w0 2b /r ] AVX512,FUTURE
+VMOVQ rm64,xmmreg [mr:t1s: evex.128.66.0f.w1 7e /r ] AVX512,FUTURE
+VMOVQ xmmreg,rm64 [rm:t1s: evex.128.66.0f.w1 6e /r ] AVX512,FUTURE
+VMOVQ xmmreg,xmmrm64 [rm:t1s: evex.128.f3.0f.w1 7e /r ] AVX512,FUTURE
+VMOVQ xmmrm64,xmmreg [mr:t1s: evex.128.66.0f.w1 d6 /r ] AVX512,FUTURE
+VMOVSD mem64|mask,xmmreg [mr:t1s: evex.lig.f2.0f.w1 11 /r ] AVX512,FUTURE
+VMOVSD xmmreg|mask|z,mem64 [rm:t1s: evex.lig.f2.0f.w1 10 /r ] AVX512,FUTURE
+VMOVSD xmmreg|mask|z,xmmreg,xmmreg [mvr: evex.nds.lig.f2.0f.w1 11 /r ] AVX512,FUTURE
+VMOVSD xmmreg|mask|z,xmmreg,xmmreg [rvm: evex.nds.lig.f2.0f.w1 10 /r ] AVX512,FUTURE
+VMOVSHDUP zmmreg|mask|z,zmmrm512 [rm:fvm: evex.512.f3.0f.w0 16 /r ] AVX512,FUTURE
+VMOVSLDUP zmmreg|mask|z,zmmrm512 [rm:fvm: evex.512.f3.0f.w0 12 /r ] AVX512,FUTURE
+VMOVSS mem32|mask,xmmreg [mr:t1s: evex.lig.f3.0f.w0 11 /r ] AVX512,FUTURE
+VMOVSS xmmreg|mask|z,mem32 [rm:t1s: evex.lig.f3.0f.w0 10 /r ] AVX512,FUTURE
+VMOVSS xmmreg|mask|z,xmmreg,xmmreg [mvr: evex.nds.lig.f3.0f.w0 11 /r ] AVX512,FUTURE
+VMOVSS xmmreg|mask|z,xmmreg,xmmreg [rvm: evex.nds.lig.f3.0f.w0 10 /r ] AVX512,FUTURE
+VMOVUPD mem512|mask,zmmreg [mr:fvm: evex.512.66.0f.w1 11 /r ] AVX512,FUTURE
+VMOVUPD zmmreg|mask|z,zmmreg [mr: evex.512.66.0f.w1 11 /r ] AVX512,FUTURE
+VMOVUPD zmmreg|mask|z,zmmrm512 [rm:fvm: evex.512.66.0f.w1 10 /r ] AVX512,FUTURE
+VMOVUPS mem512|mask,zmmreg [mr:fvm: evex.512.0f.w0 11 /r ] AVX512,FUTURE
+VMOVUPS zmmreg|mask|z,zmmreg [mr: evex.512.0f.w0 11 /r ] AVX512,FUTURE
+VMOVUPS zmmreg|mask|z,zmmrm512 [rm:fvm: evex.512.0f.w0 10 /r ] AVX512,FUTURE
+VMULPD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f.w1 59 /r ] AVX512,FUTURE
+VMULPS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.0f.w0 59 /r ] AVX512,FUTURE
+VMULSD xmmreg|mask|z,xmmreg,xmmrm64|er [rvm:t1s: evex.nds.lig.f2.0f.w1 59 /r ] AVX512,FUTURE
+VMULSS xmmreg|mask|z,xmmreg,xmmrm32|er [rvm:t1s: evex.nds.lig.f3.0f.w0 59 /r ] AVX512,FUTURE
+VPABSD zmmreg|mask|z,zmmrm512|b32 [rm:fv: evex.512.66.0f38.w0 1e /r ] AVX512,FUTURE
+VPABSQ zmmreg|mask|z,zmmrm512|b64 [rm:fv: evex.512.66.0f38.w1 1f /r ] AVX512,FUTURE
+VPADDD zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f.w0 fe /r ] AVX512,FUTURE
+VPADDQ zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f.w1 d4 /r ] AVX512,FUTURE
+VPANDD zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f.w0 db /r ] AVX512,FUTURE
+VPANDND zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f.w0 df /r ] AVX512,FUTURE
+VPANDNQ zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f.w1 df /r ] AVX512,FUTURE
+VPANDQ zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f.w1 db /r ] AVX512,FUTURE
+VPBLENDMD zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f38.w0 64 /r ] AVX512,FUTURE
+VPBLENDMQ zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f38.w1 64 /r ] AVX512,FUTURE
+VPBROADCASTD zmmreg|mask|z,mem32 [rm:t1s: evex.512.66.0f38.w0 58 /r ] AVX512,FUTURE
+VPBROADCASTD zmmreg|mask|z,reg32 [rm: evex.512.66.0f38.w0 7c /r ] AVX512,FUTURE
+VPBROADCASTD zmmreg|mask|z,xmmreg [rm: evex.512.66.0f38.w0 58 /r ] AVX512,FUTURE
+VPBROADCASTQ zmmreg|mask|z,mem64 [rm:t1s: evex.512.66.0f38.w1 59 /r ] AVX512,FUTURE
+VPBROADCASTQ zmmreg|mask|z,reg64 [rm: evex.512.66.0f38.w1 7c /r ] AVX512,FUTURE
+VPBROADCASTQ zmmreg|mask|z,xmmreg [rm: evex.512.66.0f38.w1 59 /r ] AVX512,FUTURE
+VPCMPD opmaskreg|mask,zmmreg,zmmrm512|b32,imm8 [rvmi:fv: evex.nds.512.66.0f3a.w0 1f /r ib ] AVX512,FUTURE
+VPCMPEQD opmaskreg|mask,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f.w0 76 /r ] AVX512,FUTURE
+VPCMPEQQ opmaskreg|mask,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f38.w1 29 /r ] AVX512,FUTURE
+VPCMPGTD opmaskreg|mask,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f.w0 66 /r ] AVX512,FUTURE
+VPCMPGTQ opmaskreg|mask,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f38.w1 37 /r ] AVX512,FUTURE
+VPCMPQ opmaskreg|mask,zmmreg,zmmrm512|b64,imm8 [rvmi:fv: evex.nds.512.66.0f3a.w1 1f /r ib ] AVX512,FUTURE
+VPCMPUD opmaskreg|mask,zmmreg,zmmrm512|b32,imm8 [rvmi:fv: evex.nds.512.66.0f3a.w0 1e /r ib ] AVX512,FUTURE
+VPCMPUQ opmaskreg|mask,zmmreg,zmmrm512|b64,imm8 [rvmi:fv: evex.nds.512.66.0f3a.w1 1e /r ib ] AVX512,FUTURE
+VPCOMPRESSD mem512|mask,zmmreg [mr:t1s: evex.512.66.0f38.w0 8b /r ] AVX512,FUTURE
+VPCOMPRESSD zmmreg|mask|z,zmmreg [mr: evex.512.66.0f38.w0 8b /r ] AVX512,FUTURE
+VPCOMPRESSQ mem512|mask,zmmreg [mr:t1s: evex.512.66.0f38.w1 8b /r ] AVX512,FUTURE
+VPCOMPRESSQ zmmreg|mask|z,zmmreg [mr: evex.512.66.0f38.w1 8b /r ] AVX512,FUTURE
+VPERMD zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f38.w0 36 /r ] AVX512,FUTURE
+VPERMI2D zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f38.w0 76 /r ] AVX512,FUTURE
+VPERMI2PD zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f38.w1 77 /r ] AVX512,FUTURE
+VPERMI2PS zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f38.w0 77 /r ] AVX512,FUTURE
+VPERMI2Q zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f38.w1 76 /r ] AVX512,FUTURE
+VPERMILPD zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f38.w1 0d /r ] AVX512,FUTURE
+VPERMILPD zmmreg|mask|z,zmmrm512|b64,imm8 [rmi:fv: evex.512.66.0f3a.w1 05 /r ib ] AVX512,FUTURE
+VPERMILPS zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f38.w0 0c /r ] AVX512,FUTURE
+VPERMILPS zmmreg|mask|z,zmmrm512|b32,imm8 [rmi:fv: evex.512.66.0f3a.w0 04 /r ib ] AVX512,FUTURE
+VPERMPD zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f38.w1 16 /r ] AVX512,FUTURE
+VPERMPD zmmreg|mask|z,zmmrm512|b64,imm8 [rmi:fv: evex.512.66.0f3a.w1 01 /r ib ] AVX512,FUTURE
+VPERMPS zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f38.w0 16 /r ] AVX512,FUTURE
+VPERMQ zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f38.w1 36 /r ] AVX512,FUTURE
+VPERMQ zmmreg|mask|z,zmmrm512|b64,imm8 [rmi:fv: evex.512.66.0f3a.w1 00 /r ib ] AVX512,FUTURE
+VPERMT2D zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f38.w0 7e /r ] AVX512,FUTURE
+VPERMT2PD zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f38.w1 7f /r ] AVX512,FUTURE
+VPERMT2PS zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f38.w0 7f /r ] AVX512,FUTURE
+VPERMT2Q zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f38.w1 7e /r ] AVX512,FUTURE
+VPEXPANDD zmmreg|mask|z,mem512 [rm:t1s: evex.512.66.0f38.w0 89 /r ] AVX512,FUTURE
+VPEXPANDD zmmreg|mask|z,zmmreg [rm:t1s: evex.512.66.0f38.w0 89 /r ] AVX512,FUTURE
+VPEXPANDQ zmmreg|mask|z,mem512 [rm:t1s: evex.512.66.0f38.w1 89 /r ] AVX512,FUTURE
+VPEXPANDQ zmmreg|mask|z,zmmreg [rm:t1s: evex.512.66.0f38.w1 89 /r ] AVX512,FUTURE
+VPGATHERDD zmmreg|mask,zmem32 [rm:t1s: vsibz evex.512.66.0f38.w0 90 /r ] AVX512,FUTURE
+VPGATHERDQ zmmreg|mask,ymem64 [rm:t1s: vsiby evex.512.66.0f38.w1 90 /r ] AVX512,FUTURE
+VPGATHERQD ymmreg|mask,zmem32 [rm:t1s: vsibz evex.512.66.0f38.w0 91 /r ] AVX512,FUTURE
+VPGATHERQQ zmmreg|mask,zmem64 [rm:t1s: vsibz evex.512.66.0f38.w1 91 /r ] AVX512,FUTURE
+VPMAXSD zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f38.w0 3d /r ] AVX512,FUTURE
+VPMAXSQ zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f38.w1 3d /r ] AVX512,FUTURE
+VPMAXUD zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f38.w0 3f /r ] AVX512,FUTURE
+VPMAXUQ zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f38.w1 3f /r ] AVX512,FUTURE
+VPMINSD zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f38.w0 39 /r ] AVX512,FUTURE
+VPMINSQ zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f38.w1 39 /r ] AVX512,FUTURE
+VPMINUD zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f38.w0 3b /r ] AVX512,FUTURE
+VPMINUQ zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f38.w1 3b /r ] AVX512,FUTURE
+VPMOVDB mem128|mask,zmmreg [mr:qvm: evex.512.f3.0f38.w0 31 /r ] AVX512,FUTURE
+VPMOVDB xmmreg|mask|z,zmmreg [mr: evex.512.f3.0f38.w0 31 /r ] AVX512,FUTURE
+VPMOVDW mem256|mask,zmmreg [mr:hvm: evex.512.f3.0f38.w0 33 /r ] AVX512,FUTURE
+VPMOVDW ymmreg|mask|z,zmmreg [mr: evex.512.f3.0f38.w0 33 /r ] AVX512,FUTURE
+VPMOVQB mem64|mask,zmmreg [mr:ovm: evex.512.f3.0f38.w0 32 /r ] AVX512,FUTURE
+VPMOVQB xmmreg|mask|z,zmmreg [mr: evex.512.f3.0f38.w0 32 /r ] AVX512,FUTURE
+VPMOVQD mem256|mask,zmmreg [mr:hvm: evex.512.f3.0f38.w0 35 /r ] AVX512,FUTURE
+VPMOVQD ymmreg|mask|z,zmmreg [mr: evex.512.f3.0f38.w0 35 /r ] AVX512,FUTURE
+VPMOVQW mem128|mask,zmmreg [mr:qvm: evex.512.f3.0f38.w0 34 /r ] AVX512,FUTURE
+VPMOVQW xmmreg|mask|z,zmmreg [mr: evex.512.f3.0f38.w0 34 /r ] AVX512,FUTURE
+VPMOVSDB mem128|mask,zmmreg [mr:qvm: evex.512.f3.0f38.w0 21 /r ] AVX512,FUTURE
+VPMOVSDB xmmreg|mask|z,zmmreg [mr: evex.512.f3.0f38.w0 21 /r ] AVX512,FUTURE
+VPMOVSDW mem256|mask,zmmreg [mr:hvm: evex.512.f3.0f38.w0 23 /r ] AVX512,FUTURE
+VPMOVSDW ymmreg|mask|z,zmmreg [mr: evex.512.f3.0f38.w0 23 /r ] AVX512,FUTURE
+VPMOVSQB mem64|mask,zmmreg [mr:ovm: evex.512.f3.0f38.w0 22 /r ] AVX512,FUTURE
+VPMOVSQB xmmreg|mask|z,zmmreg [mr: evex.512.f3.0f38.w0 22 /r ] AVX512,FUTURE
+VPMOVSQD mem256|mask,zmmreg [mr:hvm: evex.512.f3.0f38.w0 25 /r ] AVX512,FUTURE
+VPMOVSQD ymmreg|mask|z,zmmreg [mr: evex.512.f3.0f38.w0 25 /r ] AVX512,FUTURE
+VPMOVSQW mem128|mask,zmmreg [mr:qvm: evex.512.f3.0f38.w0 24 /r ] AVX512,FUTURE
+VPMOVSQW xmmreg|mask|z,zmmreg [mr: evex.512.f3.0f38.w0 24 /r ] AVX512,FUTURE
+VPMOVSXBD zmmreg|mask|z,xmmrm128 [rm:qvm: evex.512.66.0f38.wig 21 /r ] AVX512,FUTURE
+VPMOVSXBQ zmmreg|mask|z,xmmrm64 [rm:ovm: evex.512.66.0f38.wig 22 /r ] AVX512,FUTURE
+VPMOVSXDQ zmmreg|mask|z,ymmrm256 [rm:hvm: evex.512.66.0f38.w0 25 /r ] AVX512,FUTURE
+VPMOVSXWD zmmreg|mask|z,ymmrm256 [rm:hvm: evex.512.66.0f38.wig 23 /r ] AVX512,FUTURE
+VPMOVSXWQ zmmreg|mask|z,xmmrm128 [rm:qvm: evex.512.66.0f38.wig 24 /r ] AVX512,FUTURE
+VPMOVUSDB mem128|mask,zmmreg [mr:qvm: evex.512.f3.0f38.w0 11 /r ] AVX512,FUTURE
+VPMOVUSDB xmmreg|mask|z,zmmreg [mr: evex.512.f3.0f38.w0 11 /r ] AVX512,FUTURE
+VPMOVUSDW mem256|mask,zmmreg [mr:hvm: evex.512.f3.0f38.w0 13 /r ] AVX512,FUTURE
+VPMOVUSDW ymmreg|mask|z,zmmreg [mr: evex.512.f3.0f38.w0 13 /r ] AVX512,FUTURE
+VPMOVUSQB mem64|mask,zmmreg [mr:ovm: evex.512.f3.0f38.w0 12 /r ] AVX512,FUTURE
+VPMOVUSQB xmmreg|mask|z,zmmreg [mr: evex.512.f3.0f38.w0 12 /r ] AVX512,FUTURE
+VPMOVUSQD mem256|mask,zmmreg [mr:hvm: evex.512.f3.0f38.w0 15 /r ] AVX512,FUTURE
+VPMOVUSQD ymmreg|mask|z,zmmreg [mr: evex.512.f3.0f38.w0 15 /r ] AVX512,FUTURE
+VPMOVUSQW mem128|mask,zmmreg [mr:qvm: evex.512.f3.0f38.w0 14 /r ] AVX512,FUTURE
+VPMOVUSQW xmmreg|mask|z,zmmreg [mr: evex.512.f3.0f38.w0 14 /r ] AVX512,FUTURE
+VPMOVZXBD zmmreg|mask|z,xmmrm128 [rm:qvm: evex.512.66.0f38.wig 31 /r ] AVX512,FUTURE
+VPMOVZXBQ zmmreg|mask|z,xmmrm64 [rm:ovm: evex.512.66.0f38.wig 32 /r ] AVX512,FUTURE
+VPMOVZXDQ zmmreg|mask|z,ymmrm256 [rm:hvm: evex.512.66.0f38.w0 35 /r ] AVX512,FUTURE
+VPMOVZXWD zmmreg|mask|z,ymmrm256 [rm:hvm: evex.512.66.0f38.wig 33 /r ] AVX512,FUTURE
+VPMOVZXWQ zmmreg|mask|z,xmmrm128 [rm:qvm: evex.512.66.0f38.wig 34 /r ] AVX512,FUTURE
+VPMULDQ zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f38.w1 28 /r ] AVX512,FUTURE
+VPMULLD zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f38.w0 40 /r ] AVX512,FUTURE
+VPMULUDQ zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f.w1 f4 /r ] AVX512,FUTURE
+VPORD zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f.w0 eb /r ] AVX512,FUTURE
+VPORQ zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f.w1 eb /r ] AVX512,FUTURE
+VPROLD zmmreg|mask|z,zmmrm512|b32,imm8 [vmi:fv: evex.ndd.512.66.0f.w0 72 /1 ib ] AVX512,FUTURE
+VPROLQ zmmreg|mask|z,zmmrm512|b64,imm8 [vmi:fv: evex.ndd.512.66.0f.w1 72 /1 ib ] AVX512,FUTURE
+VPROLVD zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f38.w0 15 /r ] AVX512,FUTURE
+VPROLVQ zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f38.w1 15 /r ] AVX512,FUTURE
+VPRORD zmmreg|mask|z,zmmrm512|b32,imm8 [vmi:fv: evex.ndd.512.66.0f.w0 72 /0 ib ] AVX512,FUTURE
+VPRORQ zmmreg|mask|z,zmmrm512|b64,imm8 [vmi:fv: evex.ndd.512.66.0f.w1 72 /0 ib ] AVX512,FUTURE
+VPRORVD zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f38.w0 14 /r ] AVX512,FUTURE
+VPRORVQ zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f38.w1 14 /r ] AVX512,FUTURE
+VPSCATTERDD zmem32|mask,zmmreg [mr:t1s: vsibz evex.512.66.0f38.w0 a0 /r ] AVX512,FUTURE
+VPSCATTERDQ ymem64|mask,zmmreg [mr:t1s: vsiby evex.512.66.0f38.w1 a0 /r ] AVX512,FUTURE
+VPSCATTERQD zmem32|mask,ymmreg [mr:t1s: vsibz evex.512.66.0f38.w0 a1 /r ] AVX512,FUTURE
+VPSCATTERQQ zmem64|mask,zmmreg [mr:t1s: vsibz evex.512.66.0f38.w1 a1 /r ] AVX512,FUTURE
+VPSHUFD zmmreg|mask|z,zmmrm512|b32,imm8 [rmi:fv: evex.512.66.0f.w0 70 /r ib ] AVX512,FUTURE
+VPSLLD zmmreg|mask|z,zmmreg,xmmrm128 [rvm:m128: evex.nds.512.66.0f.w0 f2 /r ] AVX512,FUTURE
+VPSLLD zmmreg|mask|z,zmmrm512|b32,imm8 [vmi:fv: evex.ndd.512.66.0f.w0 72 /6 ib ] AVX512,FUTURE
+VPSLLQ zmmreg|mask|z,zmmreg,xmmrm128 [rvm:m128: evex.nds.512.66.0f.w1 f3 /r ] AVX512,FUTURE
+VPSLLQ zmmreg|mask|z,zmmrm512|b64,imm8 [vmi:fv: evex.ndd.512.66.0f.w1 73 /6 ib ] AVX512,FUTURE
+VPSLLVD zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f38.w0 47 /r ] AVX512,FUTURE
+VPSLLVQ zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f38.w1 47 /r ] AVX512,FUTURE
+VPSRAD zmmreg|mask|z,zmmreg,xmmrm128 [rvm:m128: evex.nds.512.66.0f.w0 e2 /r ] AVX512,FUTURE
+VPSRAD zmmreg|mask|z,zmmrm512|b32,imm8 [vmi:fv: evex.ndd.512.66.0f.w0 72 /4 ib ] AVX512,FUTURE
+VPSRAQ zmmreg|mask|z,zmmreg,xmmrm128 [rvm:m128: evex.nds.512.66.0f.w1 e2 /r ] AVX512,FUTURE
+VPSRAQ zmmreg|mask|z,zmmrm512|b64,imm8 [vmi:fv: evex.ndd.512.66.0f.w1 72 /4 ib ] AVX512,FUTURE
+VPSRAVD zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f38.w0 46 /r ] AVX512,FUTURE
+VPSRAVQ zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f38.w1 46 /r ] AVX512,FUTURE
+VPSRLD zmmreg|mask|z,zmmreg,xmmrm128 [rvm:m128: evex.nds.512.66.0f.w0 d2 /r ] AVX512,FUTURE
+VPSRLD zmmreg|mask|z,zmmrm512|b32,imm8 [vmi:fv: evex.ndd.512.66.0f.w0 72 /2 ib ] AVX512,FUTURE
+VPSRLQ zmmreg|mask|z,zmmreg,xmmrm128 [rvm:m128: evex.nds.512.66.0f.w1 d3 /r ] AVX512,FUTURE
+VPSRLQ zmmreg|mask|z,zmmrm512|b64,imm8 [vmi:fv: evex.ndd.512.66.0f.w1 73 /2 ib ] AVX512,FUTURE
+VPSRLVD zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f38.w0 45 /r ] AVX512,FUTURE
+VPSRLVQ zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f38.w1 45 /r ] AVX512,FUTURE
+VPSUBD zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f.w0 fa /r ] AVX512,FUTURE
+VPSUBQ zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f.w1 fb /r ] AVX512,FUTURE
+VPTERNLOGD zmmreg|mask|z,zmmreg,zmmrm512|b32,imm8 [rvmi:fv: evex.nds.512.66.0f3a.w0 25 /r ib ] AVX512,FUTURE
+VPTERNLOGQ zmmreg|mask|z,zmmreg,zmmrm512|b64,imm8 [rvmi:fv: evex.nds.512.66.0f3a.w1 25 /r ib ] AVX512,FUTURE
+VPTESTMD opmaskreg|mask,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f38.w0 27 /r ] AVX512,FUTURE
+VPTESTMQ opmaskreg|mask,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f38.w1 27 /r ] AVX512,FUTURE
+VPUNPCKHDQ zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f.w0 6a /r ] AVX512,FUTURE
+VPUNPCKHQDQ zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f.w1 6d /r ] AVX512,FUTURE
+VPUNPCKLDQ zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f.w0 62 /r ] AVX512,FUTURE
+VPUNPCKLQDQ zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f.w1 6c /r ] AVX512,FUTURE
+VPXORD zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.66.0f.w0 ef /r ] AVX512,FUTURE
+VPXORQ zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f.w1 ef /r ] AVX512,FUTURE
+VRCP14PD zmmreg|mask|z,zmmrm512|b64 [rm:fv: evex.512.66.0f38.w1 4c /r ] AVX512,FUTURE
+VRCP14PS zmmreg|mask|z,zmmrm512|b32 [rm:fv: evex.512.66.0f38.w0 4c /r ] AVX512,FUTURE
+VRCP14SD xmmreg|mask|z,xmmreg,xmmrm64 [rvm:t1s: evex.nds.lig.66.0f38.w1 4d /r ] AVX512,FUTURE
+VRCP14SS xmmreg|mask|z,xmmreg,xmmrm32 [rvm:t1s: evex.nds.lig.66.0f38.w0 4d /r ] AVX512,FUTURE
+VRNDSCALEPD zmmreg|mask|z,zmmrm512|b64|sae,imm8 [rmi:fv: evex.512.66.0f3a.w1 09 /r ib ] AVX512,FUTURE
+VRNDSCALEPS zmmreg|mask|z,zmmrm512|b32|sae,imm8 [rmi:fv: evex.512.66.0f3a.w0 08 /r ib ] AVX512,FUTURE
+VRNDSCALESD xmmreg|mask|z,xmmreg,xmmrm64|sae,imm8 [rvmi:t1s: evex.nds.lig.66.0f3a.w1 0b /r ib ] AVX512,FUTURE
+VRNDSCALESS xmmreg|mask|z,xmmreg,xmmrm32|sae,imm8 [rvmi:t1s: evex.nds.lig.66.0f3a.w0 0a /r ib ] AVX512,FUTURE
+VRSQRT14PD zmmreg|mask|z,zmmrm512|b64 [rm:fv: evex.512.66.0f38.w1 4e /r ] AVX512,FUTURE
+VRSQRT14PS zmmreg|mask|z,zmmrm512|b32 [rm:fv: evex.512.66.0f38.w0 4e /r ] AVX512,FUTURE
+VRSQRT14SD xmmreg|mask|z,xmmreg,xmmrm64 [rvm:t1s: evex.nds.lig.66.0f38.w1 4f /r ] AVX512,FUTURE
+VRSQRT14SS xmmreg|mask|z,xmmreg,xmmrm32 [rvm:t1s: evex.nds.lig.66.0f38.w0 4f /r ] AVX512,FUTURE
+VSCALEFPD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f38.w1 2c /r ] AVX512,FUTURE
+VSCALEFPS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.66.0f38.w0 2c /r ] AVX512,FUTURE
+VSCALEFSD xmmreg|mask|z,xmmreg,xmmrm64|er [rvm:t1s: evex.nds.lig.66.0f38.w1 2d /r ] AVX512,FUTURE
+VSCALEFSS xmmreg|mask|z,xmmreg,xmmrm32|er [rvm:t1s: evex.nds.lig.66.0f38.w0 2d /r ] AVX512,FUTURE
+VSCATTERDPD ymem64|mask,zmmreg [mr:t1s: vsiby evex.512.66.0f38.w1 a2 /r ] AVX512,FUTURE
+VSCATTERDPS zmem32|mask,zmmreg [mr:t1s: vsibz evex.512.66.0f38.w0 a2 /r ] AVX512,FUTURE
+VSCATTERQPD zmem64|mask,zmmreg [mr:t1s: vsibz evex.512.66.0f38.w1 a3 /r ] AVX512,FUTURE
+VSCATTERQPS zmem32|mask,ymmreg [mr:t1s: vsibz evex.512.66.0f38.w0 a3 /r ] AVX512,FUTURE
+VSHUFF32X4 zmmreg|mask|z,zmmreg,zmmrm512|b32,imm8 [rvmi:fv: evex.nds.512.66.0f3a.w0 23 /r ib ] AVX512,FUTURE
+VSHUFF64X2 zmmreg|mask|z,zmmreg,zmmrm512|b64,imm8 [rvmi:fv: evex.nds.512.66.0f3a.w1 23 /r ib ] AVX512,FUTURE
+VSHUFI32X4 zmmreg|mask|z,zmmreg,zmmrm512|b32,imm8 [rvmi:fv: evex.nds.512.66.0f3a.w0 43 /r ib ] AVX512,FUTURE
+VSHUFI64X2 zmmreg|mask|z,zmmreg,zmmrm512|b64,imm8 [rvmi:fv: evex.nds.512.66.0f3a.w1 43 /r ib ] AVX512,FUTURE
+VSHUFPD zmmreg|mask|z,zmmreg,zmmrm512|b64,imm8 [rvmi:fv: evex.nds.512.66.0f.w1 c6 /r ib ] AVX512,FUTURE
+VSHUFPS zmmreg|mask|z,zmmreg,zmmrm512|b32,imm8 [rvmi:fv: evex.nds.512.0f.w0 c6 /r ib ] AVX512,FUTURE
+VSQRTPD zmmreg|mask|z,zmmrm512|b64|er [rm:fv: evex.512.66.0f.w1 51 /r ] AVX512,FUTURE
+VSQRTPS zmmreg|mask|z,zmmrm512|b32|er [rm:fv: evex.512.0f.w0 51 /r ] AVX512,FUTURE
+VSQRTSD xmmreg|mask|z,xmmreg,xmmrm64|er [rvm:t1s: evex.nds.lig.f2.0f.w1 51 /r ] AVX512,FUTURE
+VSQRTSS xmmreg|mask|z,xmmreg,xmmrm32|er [rvm:t1s: evex.nds.lig.f3.0f.w0 51 /r ] AVX512,FUTURE
+VSUBPD zmmreg|mask|z,zmmreg,zmmrm512|b64|er [rvm:fv: evex.nds.512.66.0f.w1 5c /r ] AVX512,FUTURE
+VSUBPS zmmreg|mask|z,zmmreg,zmmrm512|b32|er [rvm:fv: evex.nds.512.0f.w0 5c /r ] AVX512,FUTURE
+VSUBSD xmmreg|mask|z,xmmreg,xmmrm64|er [rvm:t1s: evex.nds.lig.f2.0f.w1 5c /r ] AVX512,FUTURE
+VSUBSS xmmreg|mask|z,xmmreg,xmmrm32|er [rvm:t1s: evex.nds.lig.f3.0f.w0 5c /r ] AVX512,FUTURE
+VUCOMISD xmmreg,xmmrm64|sae [rm:t1s: evex.lig.66.0f.w1 2e /r ] AVX512,FUTURE
+VUCOMISS xmmreg,xmmrm32|sae [rm:t1s: evex.lig.0f.w0 2e /r ] AVX512,FUTURE
+VUNPCKHPD zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f.w1 15 /r ] AVX512,FUTURE
+VUNPCKHPS zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.0f.w0 15 /r ] AVX512,FUTURE
+VUNPCKLPD zmmreg|mask|z,zmmreg,zmmrm512|b64 [rvm:fv: evex.nds.512.66.0f.w1 14 /r ] AVX512,FUTURE
+VUNPCKLPS zmmreg|mask|z,zmmreg,zmmrm512|b32 [rvm:fv: evex.nds.512.0f.w0 14 /r ] AVX512,FUTURE
+
+
;# Systematic names for the hinting nop instructions
; These should be last in the file
HINT_NOP0 rm16 [m: o16 0f 18 /0] P6,UNDOC
diff --git a/insns.h b/insns.h
index d258910..58a4cd7 100644
--- a/insns.h
+++ b/insns.h
@@ -12,10 +12,12 @@
#include "nasm.h"
#include "tokens.h"
+/* if changed, ITEMPLATE_END should also be changed accordingly */
struct itemplate {
enum opcode opcode; /* the token, passed from "parser.c" */
int operands; /* number of operands */
opflags_t opd[MAX_OPERANDS]; /* bit flags for operand types */
+ decoflags_t deco[MAX_OPERANDS]; /* bit flags for operand decorators */
const uint8_t *code; /* the code it assembles to */
uint32_t flags; /* some flags */
};
@@ -35,7 +37,7 @@ struct disasm_index {
/* Tables for the assembler and disassembler, respectively */
extern const struct itemplate * const nasm_instructions[];
extern const struct disasm_index itable[256];
-extern const struct disasm_index * const itable_vex[2][32][4];
+extern const struct disasm_index * const itable_vex[NASM_VEX_CLASSES][32][4];
/* Common table for the byte codes */
extern const uint8_t nasm_bytecodes[];
@@ -43,7 +45,7 @@ extern const uint8_t nasm_bytecodes[];
/*
* this define is used to signify the end of an itemplate
*/
-#define ITEMPLATE_END {-1,-1,{-1,-1,-1},NULL,0}
+#define ITEMPLATE_END {-1,-1,{-1,-1,-1,-1,-1},{-1,-1,-1,-1,-1},NULL,0}
/*
* Instruction template flags. These specify which processor
@@ -80,7 +82,8 @@ extern const uint8_t nasm_bytecodes[];
#define IF_SQ 0x00000010UL /* unsized operands can't be non-qword */
#define IF_SO 0x00000014UL /* unsized operands can't be non-oword */
#define IF_SY 0x00000018UL /* unsized operands can't be non-yword */
-#define IF_SZ 0x00000038UL /* unsized operands must match the bitsize */
+#define IF_SZ 0x0000001CUL /* unsized operands can't be non-zword */
+#define IF_SIZE 0x00000038UL /* unsized operands must match the bitsize */
#define IF_SX 0x0000003CUL /* unsized operands not allowed */
#define IF_SMASK 0x0000003CUL /* mask for unsized argument size */
#define IF_AR0 0x00000040UL /* SB, SW, SD applies to argument 0 */
@@ -115,6 +118,7 @@ extern const uint8_t nasm_bytecodes[];
#define IF_SSE5 0x00000000UL /* HACK NEED TO REORGANIZE THESE BITS */
#define IF_AVX 0x00000000UL /* HACK NEED TO REORGANIZE THESE BITS */
#define IF_AVX2 0x00000000UL /* HACK NEED TO REORGANIZE THESE BITS */
+#define IF_AVX512 0x00000000UL /* HACK NEED TO REORGANIZE THESE BITS */
#define IF_FMA 0x00000000UL /* HACK NEED TO REORGANIZE THESE BITS */
#define IF_BMI1 0x00000000UL /* HACK NEED TO REORGANIZE THESE BITS */
#define IF_BMI2 0x00000000UL /* HACK NEED TO REORGANIZE THESE BITS */
diff --git a/insns.pl b/insns.pl
index a63ee71..eb99f6b 100755
--- a/insns.pl
+++ b/insns.pl
@@ -1,7 +1,7 @@
#!/usr/bin/perl
## --------------------------------------------------------------------------
##
-## Copyright 1996-2012 The NASM Authors - All Rights Reserved
+## Copyright 1996-2013 The NASM Authors - All Rights Reserved
## See the file AUTHORS included with the NASM distribution for
## the specific copyright holders.
##
@@ -45,7 +45,7 @@
$MAX_OPERANDS = 5;
# Add VEX/XOP prefixes
-@vex_class = ( 'vex', 'xop' );
+@vex_class = ( 'vex', 'xop', 'evex' );
$vex_classes = scalar(@vex_class);
@vexlist = ();
%vexmap = ();
@@ -319,8 +319,7 @@ if ( !defined($output) || $output eq 'd' ) {
print D "};\n";
}
- printf D "\nconst struct disasm_index * const itable_vex[%d][32][4] =\n",
- $vex_classes;
+ printf D "\nconst struct disasm_index * const itable_vex[NASM_VEX_CLASSES][32][4] =\n";
print D "{\n";
for ($c = 0; $c < $vex_classes; $c++) {
print D " {\n";
@@ -363,6 +362,8 @@ if ( !defined($output) || $output eq 'i' ) {
print I "\tI_none = -1\n";
print I "};\n\n";
print I "#define MAX_INSLEN ", $maxlen, "\n";
+ print I "#define NASM_VEX_CLASSES ", $vex_classes, "\n";
+ print I "#define NO_DECORATOR\t{", join(',',(0) x $MAX_OPERANDS), "}\n";
print I "#define FIRST_COND_OPCODE I_", $opcodes_cc[0], "\n\n";
print I "#endif /* NASM_INSNSI_H */\n";
@@ -411,8 +412,10 @@ sub count_bytecodes(@) {
$skip = 1;
} elsif ($bc == 0172 || $bc == 0173) {
$skip = 1;
- } elsif ($bc >= 0260 && $bc <= 0270) {
+ } elsif (($bc & ~3) == 0260 || $bc == 0270) { # VEX
$skip = 2;
+ } elsif (($bc & ~3) == 0240 || $bc == 0250) { # EVEX
+ $skip = 3;
} elsif ($bc == 0330) {
$skip = 1;
}
@@ -423,7 +426,7 @@ sub format_insn($$$$$) {
my ($opcode, $operands, $codes, $flags, $relax) = @_;
my $num, $nd = 0;
my @bytecode;
- my $op, @ops, $opp, @opx, @oppx;
+ my $op, @ops, $opp, @opx, @oppx, @decos, @opevex;
return (undef, undef) if $operands eq "ignore";
@@ -431,12 +434,18 @@ sub format_insn($$$$$) {
$operands =~ s/\*//g;
$operands =~ s/:/|colon,/g;
@ops = ();
+ @decos = ();
if ($operands ne 'void') {
foreach $op (split(/,/, $operands)) {
@opx = ();
+ @opevex = ();
foreach $opp (split(/\|/, $op)) {
@oppx = ();
- if ($opp =~ s/(?<=\D)(8|16|32|64|80|128|256)$//) {
+ if ($opp =~ s/^(b(32|64)|mask|z|er|sae)$//) {
+ push(@opevex, $1);
+ }
+
+ if ($opp =~ s/(?<!\d)(8|16|32|64|80|128|256|512)$//) {
push(@oppx, "bits$1");
}
$opp =~ s/^mem$/memory/;
@@ -445,20 +454,28 @@ sub format_insn($$$$$) {
$opp =~ s/^([a-z]+)rm$/rm_$1/;
$opp =~ s/^rm$/rm_gpr/;
$opp =~ s/^reg$/reg_gpr/;
- push(@opx, $opp, @oppx);
+ push(@opx, $opp, @oppx) if $opp;
}
$op = join('|', @opx);
push(@ops, $op);
+ push(@decos, (@opevex ? join('|', @opevex) : '0'));
}
}
$num = scalar(@ops);
while (scalar(@ops) < $MAX_OPERANDS) {
push(@ops, '0');
+ push(@decos, '0');
}
$operands = join(',', @ops);
$operands =~ tr/a-z/A-Z/;
+ $decorators = "{" . join(',', @decos) . "}";
+ if ($decorators =~ /^{(0,)+0}$/) {
+ $decorators = "NO_DECORATOR";
+ }
+ $decorators =~ tr/a-z/A-Z/;
+
# format the flags
$flags =~ s/,/|IF_/g;
$flags =~ s/(\|IF_ND|IF_ND\|)//, $nd = 1 if $flags =~ /IF_ND/;
@@ -469,7 +486,7 @@ sub format_insn($$$$$) {
$codes = hexstr(@bytecode);
count_bytecodes(@bytecode);
- ("{I_$opcode, $num, {$operands}, \@\@CODES-$codes\@\@, $flags},", $nd);
+ ("{I_$opcode, $num, {$operands}, $decorators, \@\@CODES-$codes\@\@, $flags},", $nd);
}
#
@@ -557,6 +574,7 @@ sub hexstr(@) {
# \34[4567] mean PUSH/POP of segment registers: special case
# \17[234] skip is4 control byte
# \26x \270 skip VEX control bytes
+# \24x \250 skip EVEX control bytes
sub startseq($$) {
my ($codestr, $relax) = @_;
my $word, @range;
@@ -604,13 +622,17 @@ sub startseq($$) {
return addprefix($prefix, $c1..($c1+15));
} elsif ($c0 == 0 || $c0 == 0340) {
return $prefix;
- } elsif (($c0 & ~3) == 0260 || $c0 == 0270) {
+ } elsif (($c0 & ~3) == 0260 || $c0 == 0270 ||
+ ($c0 & ~3) == 0240 || $c0 == 0250) {
my $c,$m,$wlp;
$m = shift(@codes);
$wlp = shift(@codes);
$c = ($m >> 6);
$m = $m & 31;
$prefix .= sprintf('%s%02X%01X', $vex_class[$c], $m, $wlp & 3);
+ if ($c0 < 0260) {
+ my $tuple = shift(@codes);
+ }
} elsif ($c0 >= 0172 && $c0 <= 173) {
shift(@codes); # Skip is4 control byte
} else {
@@ -621,6 +643,36 @@ sub startseq($$) {
return $prefix;
}
+# EVEX tuple type codes are offset by 0300, e.g. 0301 is full vector (fv).
+sub tupletype($) {
+ my ($tuplestr) = @_;
+ my %tuple_codes = (
+ '' => 000,
+ 'fv' => 001,
+ 'hv' => 002,
+ 'fvm' => 003,
+ 't1s8' => 004,
+ 't1s16' => 005,
+ 't1s' => 006,
+ 't1f32' => 007,
+ 't1f64' => 010,
+ 't2' => 011,
+ 't4' => 012,
+ 't8' => 013,
+ 'hvm' => 014,
+ 'qvm' => 015,
+ 'ovm' => 016,
+ 'm128' => 017,
+ 'dup' => 020,
+ );
+
+ if (defined $tuple_codes{$tuplestr}) {
+ return 0300 + $tuple_codes{$tuplestr};
+ } else {
+ die "Undefined tuple type : $tuplestr\n";
+ }
+}
+
#
# This function takes a series of byte codes in a format which is more
# typical of the Intel documentation, and encodes it.
@@ -710,14 +762,20 @@ sub byte_code_compile($$) {
# This instruction takes YMM VSIB
'vsiby' => 0375,
'vm32y' => 0375,
- 'vm64y' => 0375
+ 'vm64y' => 0375,
+
+ # This instruction takes ZMM VSIB
+ 'vsibz' => 0376,
+ 'vm32z' => 0376,
+ 'vm64z' => 0376,
);
- unless ($str =~ /^(([^\s:]*)\:|)\s*(.*\S)\s*$/) {
+ unless ($str =~ /^(([^\s:]*)\:*([^\s:]*)\:|)\s*(.*\S)\s*$/) {
die "$fname: $line: cannot parse: [$str]\n";
}
$opr = "\L$2";
- $opc = "\L$3";
+ $tuple = "\L$3"; # Tuple type for AVX512
+ $opc = "\L$4";
my $op = 0;
for ($i = 0; $i < length($opr); $i++) {
@@ -732,6 +790,7 @@ sub byte_code_compile($$) {
$oppos{$c} = $op++;
}
}
+ $tup = tupletype($tuple);
my $last_imm = 'h';
my $prefix_ok = 1;
@@ -833,6 +892,61 @@ sub byte_code_compile($$) {
push(@codes, defined($oppos{'v'}) ? 0260+($oppos{'v'} & 3) : 0270,
($c << 6)+$m, ($w << 4)+($l << 2)+$p);
$prefix_ok = 0;
+ } elsif ($op =~ /^(evex)(|\..*)$/) {
+ my $c = $vexmap{$1};
+ my ($m,$w,$l,$p) = (undef,2,undef,0);
+ my $has_nds = 0;
+ my @subops = split(/\./, $op);
+ shift @subops; # Drop prefix
+ foreach $oq (@subops) {
+ if ($oq eq '128' || $oq eq 'l0' || $oq eq 'lz' || $oq eq 'lig') {
+ $l = 0;
+ } elsif ($oq eq '256' || $oq eq 'l1') {
+ $l = 1;
+ } elsif ($oq eq '512' || $oq eq 'l2') {
+ $l = 2;
+ } elsif ($oq eq 'w0') {
+ $w = 0;
+ } elsif ($oq eq 'w1') {
+ $w = 1;
+ } elsif ($oq eq 'wig') {
+ $w = 2;
+ } elsif ($oq eq 'ww') {
+ $w = 3;
+ } elsif ($oq eq 'p0') {
+ $p = 0;
+ } elsif ($oq eq '66' || $oq eq 'p1') {
+ $p = 1;
+ } elsif ($oq eq 'f3' || $oq eq 'p2') {
+ $p = 2;
+ } elsif ($oq eq 'f2' || $oq eq 'p3') {
+ $p = 3;
+ } elsif ($oq eq '0f') {
+ $m = 1;
+ } elsif ($oq eq '0f38') {
+ $m = 2;
+ } elsif ($oq eq '0f3a') {
+ $m = 3;
+ } elsif ($oq =~ /^m([0-9]+)$/) {
+ $m = $1+0;
+ } elsif ($oq eq 'nds' || $oq eq 'ndd' || $oq eq 'dds') {
+ if (!defined($oppos{'v'})) {
+ die "$fname: $line: evex.$oq without 'v' operand\n";
+ }
+ $has_nds = 1;
+ } else {
+ die "$fname: $line: undefined EVEX subcode: $oq\n";
+ }
+ }
+ if (!defined($m) || !defined($w) || !defined($l) || !defined($p)) {
+ die "$fname: $line: missing fields in EVEX specification\n";
+ }
+ if (defined($oppos{'v'}) && !$has_nds) {
+ die "$fname: $line: 'v' operand without evex.nds or evex.ndd\n";
+ }
+ push(@codes, defined($oppos{'v'}) ? 0240+($oppos{'v'} & 3) : 0250,
+ ($c << 6)+$m, ($w << 4)+($l << 2)+$p, $tup);
+ $prefix_ok = 0;
} elsif (defined $imm_codes{$op}) {
if ($op eq 'seg') {
if ($last_imm lt 'i') {
diff --git a/nasm.h b/nasm.h
index fb6c6e9..628ec43 100644
--- a/nasm.h
+++ b/nasm.h
@@ -1,6 +1,6 @@
/* ----------------------------------------------------------------------- *
*
- * Copyright 1996-2012 The NASM Authors - All Rights Reserved
+ * Copyright 1996-2013 The NASM Authors - All Rights Reserved
* See the file AUTHORS included with the NASM distribution for
* the specific copyright holders.
*
@@ -504,6 +504,18 @@ static inline uint8_t get_cond_opcode(enum ccode c)
#define REX_H 0x80 /* High register present, REX forbidden */
#define REX_V 0x0100 /* Instruction uses VEX/XOP instead of REX */
#define REX_NH 0x0200 /* Instruction which doesn't use high regs */
+#define REX_EV 0x0400 /* Instruction uses EVEX instead of REX */
+
+/*
+ * EVEX bit field
+ */
+#define EVEX_P0RP 0x10 /* EVEX P[4] : High-16 reg */
+#define EVEX_P0X 0x40 /* EVEX P[6] : High-16 rm */
+#define EVEX_P2AAA 0x07 /* EVEX P[18:16] : Embedded opmask */
+#define EVEX_P2VP 0x08 /* EVEX P[19] : High-16 NDS reg */
+#define EVEX_P2B 0x10 /* EVEX P[20] : Broadcast / RC / SAE */
+#define EVEX_P2LL 0x60 /* EVEX P[22:21] : Vector length / RC */
+#define EVEX_P2Z 0x80 /* EVEX P[23] : Zeroing/Merging */
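For reference, here is a minimal, illustrative sketch (not code from this commit) of how these masks compose the EVEX P2 byte; the helper name and the plain 0..7 opmask numbering are assumptions made for the example.

    #include <stdint.h>
    #include <stdbool.h>

    /* Illustration only: build EVEX P2 from an embedded opmask number,
     * a zeroing flag and an EVEX.L'L vector-length code, using the
     * EVEX_P2* masks defined above. */
    static uint8_t evex_p2_compose(unsigned aaa, bool zeroing, unsigned ll)
    {
        uint8_t p2 = 0;

        p2 |= aaa & EVEX_P2AAA;        /* P[18:16] : opmask k0..k7         */
        p2 |= (ll << 5) & EVEX_P2LL;   /* P[22:21] : vector length (or RC) */
        if (zeroing)
            p2 |= EVEX_P2Z;            /* P[23]    : zeroing, not merging  */
        return p2;
    }
    /* e.g. evex_p2_compose(1, true, 2) == 0xC1, i.e. {k1}{z} at 512 bits */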
/*
* REX_V "classes" (prefixes which behave like VEX)
@@ -601,6 +613,7 @@ enum ea_type {
EA_SCALAR, /* Scalar EA */
EA_XMMVSIB, /* XMM vector EA */
EA_YMMVSIB, /* YMM vector EA */
+ EA_ZMMVSIB, /* ZMM vector EA */
};
/*
@@ -623,6 +636,37 @@ enum prefix_pos {
MAXPREFIX /* Total number of prefix slots */
};
+/*
+ * Tuple types that are used when determining Disp8*N eligibility
+ * The order must match the %tuple_codes hash in insns.pl
+ */
+enum ttypes {
+ FV = 001,
+ HV = 002,
+ FVM = 003,
+ T1S8 = 004,
+ T1S16 = 005,
+ T1S = 006,
+ T1F32 = 007,
+ T1F64 = 010,
+ T2 = 011,
+ T4 = 012,
+ T8 = 013,
+ HVM = 014,
+ QVM = 015,
+ OVM = 016,
+ M128 = 017,
+ DUP = 020,
+};
+
+/* EVEX.L'L : Vector length on vector insns */
+enum vectlens {
+ VL128 = 0,
+ VL256 = 1,
+ VL512 = 2,
+ VLMAX = 3,
+};
+
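The tuple type together with EVEX.L'L determines the Disp8*N compression factor; as a worked illustration (assuming the standard AVX-512 rules, not code from this commit), the FV case scales by the full vector width unless broadcasting, in which case it scales by the element size:

    #include <stdbool.h>

    /* Illustration only: Disp8*N scale for the FV (full vector) tuple. */
    static int fv_disp8_scale(enum vectlens vl, bool broadcast, int elem_bytes)
    {
        int vlen_bytes = 16 << vl;   /* VL128 -> 16, VL256 -> 32, VL512 -> 64 */
        return broadcast ? elem_bytes : vlen_bytes;
    }
    /* e.g. a 512-bit FV operand with displacement 128 stores disp8 = 128/64 = 2;
     * with a {1to8} broadcast of 64-bit elements N is 8, so displacement 24
     * stores disp8 = 3; anything not a multiple of N falls back to disp32. */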
/* If you need to change this, also change it in insns.pl */
#define MAX_OPERANDS 5
@@ -642,6 +686,9 @@ typedef struct insn { /* an instruction itself */
int vexreg; /* Register encoded in VEX prefix */
int vex_cm; /* Class and M field for VEX prefix */
int vex_wlp; /* W, P and L information for VEX prefix */
+ uint8_t evex_p[3]; /* EVEX.P0: [RXB,R',00,mm], P1: [W,vvvv,1,pp] */
+ /* EVEX.P2: [z,L'L,b,V',aaa] */
+ enum ttypes evex_tuple; /* Tuple type for compressed Disp8*N */
int evex_rm; /* static rounding mode for AVX3 (EVEX) */
} insn;
@@ -972,8 +1019,8 @@ enum decorator_tokens {
BRC_1TO8 = DECORATOR_ENUM_START,
BRC_1TO16,
BRC_RN,
- BRC_RU,
BRC_RD,
+ BRC_RU,
BRC_RZ,
BRC_SAE,
BRC_Z,
@@ -992,6 +1039,7 @@ enum decorator_tokens {
* .........................1...... static rounding
* ........................1....... SAE
*/
+#define OP_GENVAL(val, bits, shift) (((val) & ((UINT64_C(1) << (bits)) - 1)) << (shift))
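OP_GENVAL complements OP_GENBIT/GEN_Z by packing a multi-bit value into a flags word; a hedged illustration follows (the EXAMPLE_* field names are assumptions standing in for whichever decorator field a caller targets, such as the opmask register number):

    #include <stdint.h>

    #define EXAMPLE_OPMASK_SHIFT 0     /* assumed field position   */
    #define EXAMPLE_OPMASK_BITS  3     /* k0..k7 fit in three bits */

    /* Illustration only: encode opmask register k5 into a decoflags_t. */
    static const decoflags_t example_deco =
        OP_GENVAL(5, EXAMPLE_OPMASK_BITS, EXAMPLE_OPMASK_SHIFT);
    /* expands to (5 & 7) << 0 == 5 */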
/*
* Opmask register number
@@ -1015,7 +1063,6 @@ enum decorator_tokens {
#define Z_BITS (1)
#define Z_MASK OP_GENMASK(Z_BITS, Z_SHIFT)
#define GEN_Z(bit) OP_GENBIT(bit, Z_SHIFT)
-#define VAL_Z(val) OP_GENVAL(val, Z_BITS, Z_SHIFT)
/*
* broadcast - Whether this operand can be broadcasted
@@ -1026,7 +1073,6 @@ enum decorator_tokens {
#define BRDCAST_BITS (1)
#define BRDCAST_MASK OP_GENMASK(BRDCAST_BITS, BRDCAST_SHIFT)
#define GEN_BRDCAST(bit) OP_GENBIT(bit, BRDCAST_SHIFT)
-#define VAL_BRDCAST(val) OP_GENVAL(val, BRDCAST_BITS, BRDCAST_SHIFT)
/*
* Whether this instruction can have a static rounding mode.
diff --git a/opflags.h b/opflags.h
index ed7f8ee..ee387d7 100644
--- a/opflags.h
+++ b/opflags.h
@@ -56,7 +56,6 @@
#define OP_GENMASK(bits, shift) (((UINT64_C(1) << (bits)) - 1) << (shift))
#define OP_GENBIT(bit, shift) (UINT64_C(1) << ((shift) + (bit)))
-#define OP_GENVAL(val, bits, shift) (((val) & ((UINT64_C(1) << (bits)) - 1)) << (shift))
/*
* Type of operand: memory reference, register, etc.
@@ -168,8 +167,8 @@
#define is_class(class, op) (!((opflags_t)(class) & ~(opflags_t)(op)))
#define is_reg_class(class, reg) is_class((class), nasm_reg_flags[(reg)])
-#define IS_SREG(op) is_reg_class(REG_SREG, (op))
-#define IS_FSGS(op) is_reg_class(REG_FSGS, (op))
+#define IS_SREG(reg) is_reg_class(REG_SREG, (reg))
+#define IS_FSGS(reg) is_reg_class(REG_FSGS, (reg))
/* Register classes */
#define REG_EA ( REGMEM | REGISTER) /* 'normal' reg, qualifies as EA */
@@ -239,6 +238,7 @@
#define IP_REL (GEN_SUBCLASS(2) | MEMORY) /* IP-relative offset */
#define XMEM (GEN_SUBCLASS(3) | MEMORY) /* 128-bit vector SIB */
#define YMEM (GEN_SUBCLASS(4) | MEMORY) /* 256-bit vector SIB */
+#define ZMEM (GEN_SUBCLASS(5) | MEMORY) /* 512-bit vector SIB */
/* memory which matches any type of r/m operand */
#define MEMORY_ANY (MEMORY | RM_GPR | RM_MMX | RM_XMM | RM_YMM | RM_ZMM)
diff --git a/parser.c b/parser.c
index f7139f3..5571c6f 100644
--- a/parser.c
+++ b/parser.c
@@ -223,6 +223,10 @@ static bool parse_braces(decoflags_t *decoflags)
*/
*decoflags |= GEN_Z(0);
break;
+ default:
+ nasm_error(ERR_NONFATAL, "{%s} is not an expected decorator",
+ tokval.t_charptr);
+ break;
}
} else if (i == ',' || i == TOKEN_EOS){
break;
@@ -917,6 +921,8 @@ is_expression:
result->oprs[operand].type |= XMEM;
else if (is_class(YMMREG,iclass))
result->oprs[operand].type |= YMEM;
+ else if (is_class(ZMMREG,iclass))
+ result->oprs[operand].type |= ZMEM;
}
result->oprs[operand].basereg = b;
diff --git a/regs.dat b/regs.dat
index 742b69d..1e083d0 100644
--- a/regs.dat
+++ b/regs.dat
@@ -117,11 +117,11 @@ mm0-7 MMXREG mmxreg 0
# SSE registers
xmm0 XMM0 xmmreg 0
-xmm1-15 XMMREG xmmreg 1
+xmm1-31 XMMREG xmmreg 1
# AVX registers
ymm0 YMM0 ymmreg 0
-ymm1-15 YMMREG ymmreg 1
+ymm1-31 YMMREG ymmreg 1
# AVX3 registers
zmm0 ZMM0 zmmreg 0