author    Jiyoung Yun <jy910.yun@samsung.com>  2016-12-27 16:46:08 +0900
committer Jiyoung Yun <jy910.yun@samsung.com>  2016-12-27 16:46:08 +0900
commit    db20f3f1bb8595633a7e16c8900fd401a453a6b5 (patch)
tree      e5435159cd1bf0519276363a6fe1663d1721bed3 /src/jit/instrsxarch.h
parent    4b4aad7217d3292650e77eec2cf4c198ea9c3b4b (diff)
Imported Upstream version 1.0.0.9127 (tag: upstream/1.0.0.9127)
Diffstat (limited to 'src/jit/instrsxarch.h')
-rw-r--r--  src/jit/instrsxarch.h | 19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/src/jit/instrsxarch.h b/src/jit/instrsxarch.h
index 436563babf..4317334bf2 100644
--- a/src/jit/instrsxarch.h
+++ b/src/jit/instrsxarch.h
@@ -178,6 +178,7 @@ INST3(FIRST_SSE2_INSTRUCTION, "FIRST_SSE2_INSTRUCTION", 0, IUM_WR, 0, 0, BAD_CO
// These are the SSE instructions used on x86
INST3( mov_i2xmm, "movd" , 0, IUM_WR, 0, 0, BAD_CODE, BAD_CODE, PCKDBL(0x6E)) // Move int reg to an xmm reg. reg1=xmm reg, reg2=int reg
INST3( mov_xmm2i, "movd" , 0, IUM_WR, 0, 0, BAD_CODE, BAD_CODE, PCKDBL(0x7E)) // Move xmm reg to an int reg. reg1=xmm reg, reg2=int reg
+INST3( pmovmskb, "pmovmskb" , 0, IUM_WR, 0, 0, BAD_CODE, BAD_CODE, PCKDBL(0xD7)) // Move the MSBs of all bytes in an xmm reg to an int reg
INST3( movq, "movq" , 0, IUM_WR, 0, 0, PCKDBL(0xD6), BAD_CODE, SSEFLT(0x7E))
INST3( movsdsse2, "movsd" , 0, IUM_WR, 0, 0, SSEDBL(0x11), BAD_CODE, SSEDBL(0x10))
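
The new pmovmskb entry encodes as 66 0F D7 (the PCKDBL(0xD7) column). As a
minimal scalar model of its semantics (a sketch for illustration only, not
the JIT's emitter code; the helper name is invented here):

    #include <cstdint>
    #include <cstdio>

    // Gather the most significant bit of each of the 16 bytes of an XMM
    // value into the low 16 bits of an integer result.
    static uint32_t pmovmskb_model(const uint8_t xmm[16])
    {
        uint32_t mask = 0;
        for (int i = 0; i < 16; i++)
            mask |= (uint32_t)(xmm[i] >> 7) << i; // MSB of byte i -> bit i
        return mask;
    }

    int main()
    {
        uint8_t v[16] = {0x80, 0x01, 0xFF}; // remaining bytes are zero
        printf("0x%04x\n", pmovmskb_model(v)); // prints 0x0005 (bits 0 and 2)
        return 0;
    }
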
@@ -317,6 +318,8 @@ INST3( insertps, "insertps" , 0, IUM_WR, 0, 0, BAD_CODE, BAD_CODE, SS
INST3( pcmpeqq, "pcmpeqq" , 0, IUM_WR, 0, 0, BAD_CODE, BAD_CODE, SSE38(0x29)) // Packed compare 64-bit integers for equality
INST3( pcmpgtq, "pcmpgtq" , 0, IUM_WR, 0, 0, BAD_CODE, BAD_CODE, SSE38(0x37)) // Packed compare 64-bit integers for greater than
INST3( pmulld, "pmulld" , 0, IUM_WR, 0, 0, BAD_CODE, BAD_CODE, SSE38(0x40)) // Packed multiply 32 bit unsigned integers and store lower 32 bits of each result
+INST3( ptest, "ptest" , 0, IUM_WR, 0, 0, BAD_CODE, BAD_CODE, SSE38(0x17)) // Packed logical compare
+INST3( phaddd, "phaddd" , 0, IUM_WR, 0, 0, BAD_CODE, BAD_CODE, SSE38(0x02)) // Packed horizontal add
INST3(LAST_SSE4_INSTRUCTION, "LAST_SSE4_INSTRUCTION", 0, IUM_WR, 0, 0, BAD_CODE, BAD_CODE, BAD_CODE)
INST3(FIRST_AVX_INSTRUCTION, "FIRST_AVX_INSTRUCTION", 0, IUM_WR, 0, 0, BAD_CODE, BAD_CODE, BAD_CODE)
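
Of the two new SSE38 entries, ptest (66 0F 38 17) only sets flags, while
phaddd (66 0F 38 02) adds adjacent 32-bit pairs; phaddd is formally an
SSSE3 instruction, but the table groups it with the SSE4 block. A scalar
sketch of both, following the Intel SDM semantics (the model functions are
invented here for illustration):

    #include <cstdint>

    struct PtestFlags { bool zf, cf; };

    // ptest dst, src: ZF = ((src & dst) == 0), CF = ((src & ~dst) == 0);
    // the destination register itself is not written.
    static PtestFlags ptest_model(const uint64_t dst[2], const uint64_t src[2])
    {
        PtestFlags f;
        f.zf = ((src[0] & dst[0]) | (src[1] & dst[1])) == 0;
        f.cf = ((src[0] & ~dst[0]) | (src[1] & ~dst[1])) == 0;
        return f;
    }

    // phaddd dst, src: horizontally add adjacent 32-bit pairs; the low two
    // results come from dst, the high two from src.
    static void phaddd_model(const int32_t dst_in[4], const int32_t src[4],
                             int32_t dst_out[4])
    {
        dst_out[0] = dst_in[0] + dst_in[1];
        dst_out[1] = dst_in[2] + dst_in[3];
        dst_out[2] = src[0] + src[1];
        dst_out[3] = src[2] + src[3];
    }
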
@@ -367,25 +370,25 @@ INST2(sar_N , "sar" , 0, IUM_RW, 0, 1, 0x0038C0, 0x0038C0)
INST1(r_movsb, "rep movsb" , 0, IUM_RD, 0, 0, 0x00A4F3)
INST1(r_movsd, "rep movsd" , 0, IUM_RD, 0, 0, 0x00A5F3)
-#ifndef LEGACY_BACKEND
+#if !defined(LEGACY_BACKEND) && defined(_TARGET_AMD64_)
INST1(r_movsq, "rep movsq" , 0, IUM_RD, 0, 0, 0xF3A548)
-#endif // !LEGACY_BACKEND
+#endif // !defined(LEGACY_BACKEND) && defined(_TARGET_AMD64_)
INST1(movsb , "movsb" , 0, IUM_RD, 0, 0, 0x0000A4)
INST1(movsd , "movsd" , 0, IUM_RD, 0, 0, 0x0000A5)
-#ifndef LEGACY_BACKEND
+#if !defined(LEGACY_BACKEND) && defined(_TARGET_AMD64_)
INST1(movsq, "movsq" , 0, IUM_RD, 0, 0, 0x00A548)
-#endif // !LEGACY_BACKEND
+#endif // !defined(LEGACY_BACKEND) && defined(_TARGET_AMD64_)
INST1(r_stosb, "rep stosb" , 0, IUM_RD, 0, 0, 0x00AAF3)
INST1(r_stosd, "rep stosd" , 0, IUM_RD, 0, 0, 0x00ABF3)
-#ifndef LEGACY_BACKEND
+#if !defined(LEGACY_BACKEND) && defined(_TARGET_AMD64_)
INST1(r_stosq, "rep stosq" , 0, IUM_RD, 0, 0, 0xF3AB48)
-#endif // !LEGACY_BACKEND
+#endif // !defined(LEGACY_BACKEND) && defined(_TARGET_AMD64_)
INST1(stosb, "stosb" , 0, IUM_RD, 0, 0, 0x0000AA)
INST1(stosd, "stosd" , 0, IUM_RD, 0, 0, 0x0000AB)
-#ifndef LEGACY_BACKEND
+#if !defined(LEGACY_BACKEND) && defined(_TARGET_AMD64_)
INST1(stosq, "stosq" , 0, IUM_RD, 0, 0, 0x00AB48)
-#endif // !LEGACY_BACKEND
+#endif // !defined(LEGACY_BACKEND) && defined(_TARGET_AMD64_)
INST1(int3 , "int3" , 0, IUM_RD, 0, 0, 0x0000CC)
INST1(nop , "nop" , 0, IUM_RD, 0, 0, 0x000090)
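
The guards above tighten from !LEGACY_BACKEND alone to also requiring
_TARGET_AMD64_, which matches the encodings: the 0x48 byte in 0xF3A548 and
the other quadword forms is a REX.W prefix, and REX prefixes exist only in
64-bit mode, so movsq/stosq cannot be emitted by the 32-bit JIT. As a
scalar model of what rep movsq does architecturally (a sketch; the helper
name is invented here, and the direction flag is assumed clear):

    #include <cstdint>
    #include <cstddef>

    // rep movsq: copy RCX quadwords from [RSI] to [RDI], advancing both
    // pointers by 8 after each element (forward copy, DF = 0).
    static void rep_movsq_model(uint64_t* rdi, const uint64_t* rsi, size_t rcx)
    {
        while (rcx-- != 0)
        {
            *rdi++ = *rsi++;
        }
    }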