author    Jin Kyu Song <jin.kyu.song@intel.com>    2013-11-20 15:32:52 -0800
committer Jin Kyu Song <jin.kyu.song@intel.com>    2013-11-20 15:40:55 -0800
commit    5f3bfee708deba146302df4bb33d081f496399c0 (patch)
tree      948e7a4c7a4db0dd6b120e672ae9cc7552e7e686 /disp8.c
parent    28d5bf811b02b4ffa61b4100df11bc0872ea84bf (diff)
disp8: Consolidate the logic to get the compressed displacement
Consolidated two separate but similar functions in nasm and ndisasm into a commonly linked source file. To encode and decode the compressed displacement (disp8*N) for EVEX, the N value must be derived from various conditions.

Signed-off-by: Jin Kyu Song <jin.kyu.song@intel.com>
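For context, a minimal encode-side sketch of the disp8*N idea (the helper name compress_example and the numbers are illustrative, not part of this commit): an offset can be stored as a single signed byte only when it is an exact multiple of the scale N and the quotient fits in the range -128..127.

    /* Hypothetical illustration of disp8*N compression (not part of the patch). */
    #include <stdbool.h>
    #include <stdint.h>

    static bool compress_example(int32_t offset, uint8_t n, int8_t *out)
    {
        int32_t q;

        if (n == 0 || (offset % n) != 0)
            return false;       /* not a multiple of N: a full disp32 is needed */
        q = offset / n;
        if (q < -128 || q > 127)
            return false;       /* quotient does not fit in a signed byte */
        *out = (int8_t)q;       /* e.g. offset 128 with N = 64 stores disp8 = 2 */
        return true;
    }

With a 512-bit full-vector operand, for example, N is 64, so an offset of 0x80 compresses to disp8 = 2, while an offset of 0x44 is not a multiple of 64 and must fall back to a full 32-bit displacement.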
Diffstat (limited to 'disp8.c')
-rw-r--r--    disp8.c    131
1 file changed, 131 insertions, 0 deletions
diff --git a/disp8.c b/disp8.c
new file mode 100644
index 0000000..1621022
--- /dev/null
+++ b/disp8.c
@@ -0,0 +1,131 @@
+/* ----------------------------------------------------------------------- *
+ *
+ * Copyright 1996-2013 The NASM Authors - All Rights Reserved
+ * See the file AUTHORS included with the NASM distribution for
+ * the specific copyright holders.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * disp8.c : Contains common logic for the EVEX compressed displacement
+ */
+
+#include "disp8.h"
+
+/*
+ * Find the N value for the compressed displacement (disp8 * N)
+ */
+uint8_t get_disp8N(insn *ins)
+{
+ const uint8_t fv_n[2][2][VLMAX] = {{{16, 32, 64}, {4, 4, 4}},
+ {{16, 32, 64}, {8, 8, 8}}};
+ const uint8_t hv_n[2][VLMAX] = {{8, 16, 32}, {4, 4, 4}};
+ const uint8_t dup_n[VLMAX] = {8, 32, 64};
+
+ bool evex_b = (ins->evex_p[2] & EVEX_P2B) >> 4;
+ enum ttypes tuple = ins->evex_tuple;
+ enum vectlens vectlen = (ins->evex_p[2] & EVEX_P2LL) >> 5;
+ bool evex_w = (ins->evex_p[1] & EVEX_P1W) >> 7;
+ uint8_t n = 0;
+
+ switch(tuple) {
+ case FV:
+ n = fv_n[evex_w][evex_b][vectlen];
+ break;
+ case HV:
+ n = hv_n[evex_b][vectlen];
+ break;
+
+ case FVM:
+ /* 16, 32, 64 for VL 128, 256, 512, respectively */
+ n = 1 << (vectlen + 4);
+ break;
+ case T1S8: /* N = 1 */
+ case T1S16: /* N = 2 */
+ n = tuple - T1S8 + 1;
+ break;
+ case T1S:
+ /* N = 4 for 32bit, 8 for 64bit */
+ n = evex_w ? 8 : 4;
+ break;
+ case T1F32:
+ case T1F64:
+ /* N = 4 for 32bit, 8 for 64bit */
+ n = (tuple == T1F32 ? 4 : 8);
+ break;
+ case T2:
+ case T4:
+ case T8:
+ if (vectlen + 7 <= (evex_w + 5) + (tuple - T2 + 1))
+ n = 0;
+ else
+ n = 1 << (tuple - T2 + evex_w + 3);
+ break;
+ case HVM:
+ case QVM:
+ case OVM:
+ n = 1 << (OVM - tuple + vectlen + 1);
+ break;
+ case M128:
+ n = 16;
+ break;
+ case DUP:
+ n = dup_n[vectlen];
+ break;
+
+ default:
+ break;
+ }
+
+ return n;
+}
+
+/*
+ * Check if the offset is a multiple of N for the corresponding tuple type.
+ * If Disp8*N is available, the compressed displacement is stored in *compdisp.
+ */
+bool is_disp8n(operand *input, insn *ins, int8_t *compdisp)
+{
+ int32_t off = input->offset;
+ uint8_t n;
+ int32_t disp8;
+
+ n = get_disp8N(ins);
+
+ if (n && !(off & (n - 1))) {
+ disp8 = off / n;
+ /* if it fits in Disp8 */
+ if (disp8 >= -128 && disp8 <= 127) {
+ *compdisp = disp8;
+ return true;
+ }
+ }
+
+ *compdisp = 0;
+ return false;
+}
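On the decode side the same N is used in reverse: ndisasm recovers the byte offset by scaling the stored disp8 by N. A minimal sketch of that arithmetic (the function name is hypothetical and does not reflect ndisasm's actual call site):

    #include <stdint.h>

    /* Hypothetical decode-side illustration (not part of the patch):
     * recover the byte offset by multiplying the stored disp8 by N. */
    static int32_t decompress_example(int8_t disp8, uint8_t n)
    {
        return (int32_t)disp8 * (int32_t)n;   /* e.g. disp8 = -2, N = 32 -> offset -64 */
    }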