-rw-r--r--   Makefile.in |   4
-rw-r--r--   assemble.c  |  85
-rw-r--r--   disasm.c    |  71
-rw-r--r--   disp8.c     | 131
-rw-r--r--   disp8.h     |  45
5 files changed, 181 insertions, 155 deletions
diff --git a/Makefile.in b/Makefile.in
index d692dd2..bd807f2 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -89,10 +89,10 @@ NASM = nasm.$(O) nasmlib.$(O) ver.$(O) \
 	strfunc.$(O) tokhash.$(O) regvals.$(O) regflags.$(O) \
 	ilog2.$(O) \
 	lib/strlcpy.$(O) \
-	preproc-nop.$(O)
+	preproc-nop.$(O) disp8.$(O)
 
 NDISASM = ndisasm.$(O) disasm.$(O) sync.$(O) nasmlib.$(O) ver.$(O) \
-	insnsd.$(O) insnsb.$(O) insnsn.$(O) regs.$(O) regdis.$(O)
+	insnsd.$(O) insnsb.$(O) insnsn.$(O) regs.$(O) regdis.$(O) disp8.$(O)
 
 #-- End File Lists --#
 all: nasm$(X) ndisasm$(X) manpages rdf
diff --git a/assemble.c b/assemble.c
--- a/assemble.c
+++ b/assemble.c
@@ -180,6 +180,7 @@
 #include "assemble.h"
 #include "insns.h"
 #include "tables.h"
+#include "disp8.h"
 
 enum match_result {
     /*
@@ -1197,6 +1198,7 @@ static int64_t calcsize(int32_t segment, int64_t offset, int bits,
             } else {
                 /* set EVEX.L'L (vector length) */
                 ins->evex_p[2] |= ((ins->vex_wlp << (5 - 2)) & EVEX_P2LL);
+                ins->evex_p[1] |= ((ins->vex_wlp << (7 - 4)) & EVEX_P1W);
                 if (opy->decoflags & BRDCAST_MASK) {
                     /* set EVEX.b */
                     ins->evex_p[2] |= EVEX_P2B;
@@ -2293,89 +2295,6 @@ static enum match_result matches(const struct itemplate *itemp,
 }
 
 /*
- * Check if offset is a multiple of N with corresponding tuple type
- * if Disp8*N is available, compressed displacement is stored in compdisp
- */
-static bool is_disp8n(operand *input, insn *ins, int8_t *compdisp)
-{
-    const uint8_t fv_n[2][2][VLMAX] = {{{16, 32, 64}, {4, 4, 4}},
-                                       {{16, 32, 64}, {8, 8, 8}}};
-    const uint8_t hv_n[2][VLMAX] = {{8, 16, 32}, {4, 4, 4}};
-    const uint8_t dup_n[VLMAX] = {8, 32, 64};
-
-    bool evex_b = input->decoflags & BRDCAST_MASK;
-    enum ttypes tuple = ins->evex_tuple;
-    /* vex_wlp composed as [wwllpp] */
-    enum vectlens vectlen = (ins->vex_wlp & 0x0c) >> 2;
-    /* wig(=2) is treated as w0(=0) */
-    bool evex_w = (ins->vex_wlp & 0x10) >> 4;
-    int32_t off = input->offset;
-    uint8_t n = 0;
-    int32_t disp8;
-
-    switch(tuple) {
-    case FV:
-        n = fv_n[evex_w][evex_b][vectlen];
-        break;
-    case HV:
-        n = hv_n[evex_b][vectlen];
-        break;
-
-    case FVM:
-        /* 16, 32, 64 for VL 128, 256, 512 respectively*/
-        n = 1 << (vectlen + 4);
-        break;
-    case T1S8:  /* N = 1 */
-    case T1S16: /* N = 2 */
-        n = tuple - T1S8 + 1;
-        break;
-    case T1S:
-        /* N = 4 for 32bit, 8 for 64bit */
-        n = evex_w ? 8 : 4;
-        break;
-    case T1F32:
-    case T1F64:
-        /* N = 4 for 32bit, 8 for 64bit */
-        n = (tuple == T1F32 ? 4 : 8);
-        break;
-    case T2:
-    case T4:
-    case T8:
-        if (vectlen + 7 <= (evex_w + 5) + (tuple - T2 + 1))
-            n = 0;
-        else
-            n = 1 << (tuple - T2 + evex_w + 3);
-        break;
-    case HVM:
-    case QVM:
-    case OVM:
-        n = 1 << (OVM - tuple + vectlen + 1);
-        break;
-    case M128:
-        n = 16;
-        break;
-    case DUP:
-        n = dup_n[vectlen];
-        break;
-
-    default:
-        break;
-    }
-
-    if (n && !(off & (n - 1))) {
-        disp8 = off / n;
-        /* if it fits in Disp8 */
-        if (disp8 >= -128 && disp8 <= 127) {
-            *compdisp = disp8;
-            return true;
-        }
-    }
-
-    *compdisp = 0;
-    return false;
-}
-
-/*
  * Check if ModR/M.mod should/can be 01.
  * - EAF_BYTEOFFS is set
  * - offset can fit in a byte when EVEX is not used
diff --git a/disasm.c b/disasm.c
--- a/disasm.c
+++ b/disasm.c
@@ -48,6 +48,7 @@
 #include "insns.h"
 #include "tables.h"
 #include "regdis.h"
+#include "disp8.h"
 /*
  * Flags that go into the `segment' field of `insn' structures
  */
@@ -198,76 +199,6 @@ static enum reg_enum whichreg(opflags_t regflags, int regval, int rex)
     return 0;
 }
 
-/*
- * Find N value for compressed displacement (disp8 * N)
- */
-static uint8_t get_disp8N(insn *ins)
-{
-    const uint8_t fv_n[2][2][VLMAX] = {{{16, 32, 64}, {4, 4, 4}},
-                                       {{16, 32, 64}, {8, 8, 8}}};
-    const uint8_t hv_n[2][VLMAX] = {{8, 16, 32}, {4, 4, 4}};
-    const uint8_t dup_n[VLMAX] = {8, 32, 64};
-
-    bool evex_b = (ins->evex_p[2] & EVEX_P2B) >> 4;
-    enum ttypes tuple = ins->evex_tuple;
-    /* vex_wlp composed as [wwllpp] */
-    enum vectlens vectlen = (ins->evex_p[2] & EVEX_P2LL) >> 5;
-    /* wig(=2) is treated as w0(=0) */
-    bool evex_w = (ins->evex_p[1] & EVEX_P1W) >> 7;
-    uint8_t n = 0;
-
-    switch(tuple) {
-    case FV:
-        n = fv_n[evex_w][evex_b][vectlen];
-        break;
-    case HV:
-        n = hv_n[evex_b][vectlen];
-        break;
-
-    case FVM:
-        /* 16, 32, 64 for VL 128, 256, 512 respectively*/
-        n = 1 << (vectlen + 4);
-        break;
-    case T1S8:  /* N = 1 */
-    case T1S16: /* N = 2 */
-        n = tuple - T1S8 + 1;
-        break;
-    case T1S:
-        /* N = 4 for 32bit, 8 for 64bit */
-        n = evex_w ? 8 : 4;
-        break;
-    case T1F32:
-    case T1F64:
-        /* N = 4 for 32bit, 8 for 64bit */
-        n = (tuple == T1F32 ? 4 : 8);
-        break;
-    case T2:
-    case T4:
-    case T8:
-        if (vectlen + 7 <= (evex_w + 5) + (tuple - T2 + 1))
-            n = 0;
-        else
-            n = 1 << (tuple - T2 + evex_w + 3);
-        break;
-    case HVM:
-    case QVM:
-    case OVM:
-        n = 1 << (OVM - tuple + vectlen + 1);
-        break;
-    case M128:
-        n = 16;
-        break;
-    case DUP:
-        n = dup_n[vectlen];
-        break;
-
-    default:
-        break;
-    }
-
-    return n;
-}
-
 static uint32_t append_evex_reg_deco(char *buf, uint32_t num,
                                      decoflags_t deco, uint8_t *evex)
 {
diff --git a/disp8.c b/disp8.c
new file mode 100644
--- /dev/null
+++ b/disp8.c
@@ -0,0 +1,131 @@
+/* ----------------------------------------------------------------------- *
+ *
+ *   Copyright 1996-2013 The NASM Authors - All Rights Reserved
+ *   See the file AUTHORS included with the NASM distribution for
+ *   the specific copyright holders.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following
+ *   conditions are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ *     copyright notice, this list of conditions and the following
+ *     disclaimer in the documentation and/or other materials provided
+ *     with the distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * disp8.c : Contains a common logic for EVEX compressed displacement
+ */
+
+#include "disp8.h"
+
+/*
+ * Find N value for compressed displacement (disp8 * N)
+ */
+uint8_t get_disp8N(insn *ins)
+{
+    const uint8_t fv_n[2][2][VLMAX] = {{{16, 32, 64}, {4, 4, 4}},
+                                       {{16, 32, 64}, {8, 8, 8}}};
+    const uint8_t hv_n[2][VLMAX] = {{8, 16, 32}, {4, 4, 4}};
+    const uint8_t dup_n[VLMAX] = {8, 32, 64};
+
+    bool evex_b = (ins->evex_p[2] & EVEX_P2B) >> 4;
+    enum ttypes tuple = ins->evex_tuple;
+    enum vectlens vectlen = (ins->evex_p[2] & EVEX_P2LL) >> 5;
+    bool evex_w = (ins->evex_p[1] & EVEX_P1W) >> 7;
+    uint8_t n = 0;
+
+    switch(tuple) {
+    case FV:
+        n = fv_n[evex_w][evex_b][vectlen];
+        break;
+    case HV:
+        n = hv_n[evex_b][vectlen];
+        break;
+
+    case FVM:
+        /* 16, 32, 64 for VL 128, 256, 512 respectively*/
+        n = 1 << (vectlen + 4);
+        break;
+    case T1S8:  /* N = 1 */
+    case T1S16: /* N = 2 */
+        n = tuple - T1S8 + 1;
+        break;
+    case T1S:
+        /* N = 4 for 32bit, 8 for 64bit */
+        n = evex_w ? 8 : 4;
+        break;
+    case T1F32:
+    case T1F64:
+        /* N = 4 for 32bit, 8 for 64bit */
+        n = (tuple == T1F32 ? 4 : 8);
+        break;
+    case T2:
+    case T4:
+    case T8:
+        if (vectlen + 7 <= (evex_w + 5) + (tuple - T2 + 1))
+            n = 0;
+        else
+            n = 1 << (tuple - T2 + evex_w + 3);
+        break;
+    case HVM:
+    case QVM:
+    case OVM:
+        n = 1 << (OVM - tuple + vectlen + 1);
+        break;
+    case M128:
+        n = 16;
+        break;
+    case DUP:
+        n = dup_n[vectlen];
+        break;
+
+    default:
+        break;
+    }
+
+    return n;
+}
+
+/*
+ * Check if offset is a multiple of N with corresponding tuple type
+ * if Disp8*N is available, compressed displacement is stored in compdisp
+ */
+bool is_disp8n(operand *input, insn *ins, int8_t *compdisp)
+{
+    int32_t off = input->offset;
+    uint8_t n;
+    int32_t disp8;
+
+    n = get_disp8N(ins);
+
+    if (n && !(off & (n - 1))) {
+        disp8 = off / n;
+        /* if it fits in Disp8 */
+        if (disp8 >= -128 && disp8 <= 127) {
+            *compdisp = disp8;
+            return true;
+        }
+    }
+
+    *compdisp = 0;
+    return false;
+}
diff --git a/disp8.h b/disp8.h
new file mode 100644
--- /dev/null
+++ b/disp8.h
@@ -0,0 +1,45 @@
+/* ----------------------------------------------------------------------- *
+ *
+ *   Copyright 1996-2013 The NASM Authors - All Rights Reserved
+ *   See the file AUTHORS included with the NASM distribution for
+ *   the specific copyright holders.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following
+ *   conditions are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ *     copyright notice, this list of conditions and the following
+ *     disclaimer in the documentation and/or other materials provided
+ *     with the distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * disp8.h   header file for disp8.c
+ */
+
+#ifndef NASM_DISP8_H
+#define NASM_DISP8_H
+
+#include "nasm.h"
+
+uint8_t get_disp8N(insn *ins);
+bool is_disp8n(operand *input, insn *ins, int8_t *compdisp);
+#endif /* NASM_DISP8_H */
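The tables in get_disp8N map each EVEX tuple type to the scaling factor N used for compressed displacements: when a memory offset is an exact multiple of N and the quotient fits in a signed byte, the encoder can emit a one-byte disp8 instead of a four-byte disp32. The standalone sketch below is not part of the patch and does not use the NASM types; it simply mirrors the full-vector (FV), no-broadcast row of the fv_n table and the range check from is_disp8n to show the compression on concrete offsets.

/*
 * Illustrative sketch of EVEX disp8*N compression (assumed example, not
 * NASM code): for an FV tuple with no broadcast, N is the full vector
 * width in bytes, so a 512-bit access at offset 0x200 encodes as
 * disp8 = 0x200 / 64 = 8.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* N for the FV tuple, indexed by [EVEX.W][broadcast][VL 128/256/512] */
static const uint8_t fv_n[2][2][3] = {{{16, 32, 64}, {4, 4, 4}},
                                      {{16, 32, 64}, {8, 8, 8}}};

static bool compress_disp8(int32_t off, uint8_t n, int8_t *compdisp)
{
    if (n && !(off & (n - 1))) {        /* offset is a multiple of N */
        int32_t disp8 = off / n;
        if (disp8 >= -128 && disp8 <= 127) {
            *compdisp = (int8_t)disp8;  /* fits in a single byte */
            return true;
        }
    }
    *compdisp = 0;
    return false;                       /* fall back to disp32 */
}

int main(void)
{
    uint8_t n = fv_n[0][0][2];          /* W0, no broadcast, VL=512 -> N = 64 */
    int8_t d;

    if (compress_disp8(0x200, n, &d))
        printf("offset 0x200 -> disp8 %d (N = %u)\n", d, (unsigned)n);
    if (!compress_disp8(0x210, n, &d))
        printf("offset 0x210 is not a multiple of %u, needs disp32\n",
               (unsigned)n);
    return 0;
}

Built with any C99 compiler, this prints that 0x200 compresses to disp8 = 8 for a 512-bit access, while 0x210 is not a multiple of 64 and would need a full disp32, which is the decision both the assembler and the disassembler now delegate to the shared disp8.c.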