summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorMike Danes <onemihaid@hotmail.com>2018-01-20 14:08:37 +0200
committerMike Danes <onemihaid@hotmail.com>2018-01-20 14:08:37 +0200
commitca397e5f57a649ad3bcc621cbb02354670f87a08 (patch)
tree4a494f97f3fdf53603483950aa61dd103dc62a8d /src
parentc7c2869ca0def15c25b8043ac78a378e0145bac8 (diff)
downloadcoreclr-ca397e5f57a649ad3bcc621cbb02354670f87a08.tar.gz
coreclr-ca397e5f57a649ad3bcc621cbb02354670f87a08.tar.bz2
coreclr-ca397e5f57a649ad3bcc621cbb02354670f87a08.zip
Fix 64 bit shift inconsistencies (on 32 bit targets)
Recent shift changes made the JIT_LLsh helper mask the shift count to 6 bits. The other 2 helpers (JIT_LRsh and JIT_LRsz) do not, so now we get inconsistencies such as `(x >> 64) != (x << 64)`. The ECMA spec says that "the return value is unspecified if shiftAmount is greater than or equal to the width of value" so the JIT has no obligation to implement a particular behavior. But it seems preferable to have all shift instructions behave similarly, it avoids complications and reduces risks. This also changes `ValueNumStore::EvalOpIntegral` to mask the shift count for 64 bit shifts so it matches `gtFoldExprConst`. Otherwise the produced value depends on the C/C++ compiler's behavior.
Diffstat (limited to 'src')
-rw-r--r--src/jit/decomposelongs.cpp119
-rw-r--r--src/jit/valuenum.cpp20
-rw-r--r--src/vm/i386/jithelp.asm4
-rw-r--r--src/vm/jithelpers.cpp4
4 files changed, 53 insertions, 94 deletions
diff --git a/src/jit/decomposelongs.cpp b/src/jit/decomposelongs.cpp
index 89092b08b9..7caae4743c 100644
--- a/src/jit/decomposelongs.cpp
+++ b/src/jit/decomposelongs.cpp
@@ -1087,7 +1087,9 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
// If we are shifting by a constant int, we do not want to use a helper, instead, we decompose.
if (shiftByOper == GT_CNS_INT)
{
- unsigned int count = shiftByOp->gtIntCon.gtIconVal;
+ // Reduce count modulo 64 to match behavior found in the shift helpers,
+ // Compiler::gtFoldExpr and ValueNumStore::EvalOpIntegral.
+ unsigned int count = shiftByOp->gtIntCon.gtIconVal & 0x3F;
Range().Remove(shiftByOp);
if (count == 0)
@@ -1112,24 +1114,6 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
{
case GT_LSH:
{
- // Reduce count modulo 64 to match behavior found in
- // the LLSH helper and in gtFoldExpr.
- count &= 0x3F;
-
- // Retest for zero shift
- if (count == 0)
- {
- GenTree* next = shift->gtNext;
- // Remove shift and don't do anything else.
- if (shift->IsUnusedValue())
- {
- gtLong->SetUnusedValue();
- }
- Range().Remove(shift);
- use.ReplaceWith(m_compiler, gtLong);
- return next;
- }
-
if (count < 32)
{
// For shifts of < 32 bits, we transform the code to:
@@ -1258,6 +1242,8 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
}
else
{
+ assert(count >= 32 && count < 64);
+
// Since we're right shifting at least 32 bits, we can remove the lo part of the shifted value iff
// it has no side effects.
//
@@ -1272,45 +1258,19 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
loOp1->SetUnusedValue();
}
- assert(count >= 32);
- if (count < 64)
+ if (count == 32)
{
- if (count == 32)
- {
- // Move hiOp1 into loResult.
- loResult = hiOp1;
- }
- else
- {
- assert(count > 32 && count < 64);
-
- // Move hiOp1 into loResult, do a GT_RSZ with count - 32.
- GenTree* shiftBy = m_compiler->gtNewIconNode(count - 32, TYP_INT);
- loResult = m_compiler->gtNewOperNode(oper, TYP_INT, hiOp1, shiftBy);
- Range().InsertBefore(shift, shiftBy, loResult);
- }
+ // Move hiOp1 into loResult.
+ loResult = hiOp1;
}
else
{
- assert(count >= 64);
-
- // Since we're right shifting at least 64 bits, we can remove the hi part of the shifted value
- // iff it has no side effects.
- //
- // TODO-CQ: we could go perform this removal transitively (i.e. iteratively remove everything
- // that feeds the hi operand while there are no side effects)
- if ((hiOp1->gtFlags & GTF_ALL_EFFECT) == 0)
- {
- Range().Remove(hiOp1, true);
- }
- else
- {
- hiOp1->SetUnusedValue();
- }
-
- // Zero out lo
- loResult = m_compiler->gtNewZeroConNode(TYP_INT);
- Range().InsertBefore(shift, loResult);
+ assert(count > 32 && count < 64);
+
+ // Move hiOp1 into loResult, do a GT_RSZ with count - 32.
+ GenTree* shiftBy = m_compiler->gtNewIconNode(count - 32, TYP_INT);
+ loResult = m_compiler->gtNewOperNode(oper, TYP_INT, hiOp1, shiftBy);
+ Range().InsertBefore(shift, shiftBy, loResult);
}
// Zero out hi
@@ -1354,7 +1314,7 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
}
else
{
- assert(count >= 32);
+ assert(count >= 32 && count < 64);
// Since we're right shifting at least 32 bits, we can remove the lo part of the shifted value iff
// it has no side effects.
@@ -1370,47 +1330,28 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
loOp1->SetUnusedValue();
}
- if (count < 64)
+ if (count == 32)
{
- if (count == 32)
- {
- // Move hiOp1 into loResult.
- loResult = hiOp1;
- Range().InsertBefore(shift, loResult);
- }
- else
- {
- assert(count > 32 && count < 64);
-
- // Move hiOp1 into loResult, do a GT_RSH with count - 32.
- GenTree* shiftBy = m_compiler->gtNewIconNode(count - 32, TYP_INT);
- loResult = m_compiler->gtNewOperNode(oper, TYP_INT, hiOp1, shiftBy);
- Range().InsertBefore(shift, hiOp1, shiftBy, loResult);
- }
-
- // Propagate sign bit in hiResult
- GenTree* shiftBy = m_compiler->gtNewIconNode(31, TYP_INT);
- hiResult = m_compiler->gtNewOperNode(GT_RSH, TYP_INT, hiCopy, shiftBy);
- Range().InsertBefore(shift, shiftBy, hiCopy, hiResult);
-
- m_compiler->lvaIncRefCnts(hiCopy);
+ // Move hiOp1 into loResult.
+ loResult = hiOp1;
+ Range().InsertBefore(shift, loResult);
}
else
{
- assert(count >= 64);
+ assert(count > 32 && count < 64);
- // Propagate sign bit in loResult
- GenTree* loShiftBy = m_compiler->gtNewIconNode(31, TYP_INT);
- loResult = m_compiler->gtNewOperNode(GT_RSH, TYP_INT, hiCopy, loShiftBy);
- Range().InsertBefore(shift, hiCopy, loShiftBy, loResult);
+ // Move hiOp1 into loResult, do a GT_RSH with count - 32.
+ GenTree* shiftBy = m_compiler->gtNewIconNode(count - 32, TYP_INT);
+ loResult = m_compiler->gtNewOperNode(oper, TYP_INT, hiOp1, shiftBy);
+ Range().InsertBefore(shift, hiOp1, shiftBy, loResult);
+ }
- // Propagate sign bit in hiResult
- GenTree* shiftBy = m_compiler->gtNewIconNode(31, TYP_INT);
- hiResult = m_compiler->gtNewOperNode(GT_RSH, TYP_INT, hiOp1, shiftBy);
- Range().InsertBefore(shift, shiftBy, hiOp1, hiResult);
+ // Propagate sign bit in hiResult
+ GenTree* shiftBy = m_compiler->gtNewIconNode(31, TYP_INT);
+ hiResult = m_compiler->gtNewOperNode(GT_RSH, TYP_INT, hiCopy, shiftBy);
+ Range().InsertBefore(shift, shiftBy, hiCopy, hiResult);
- m_compiler->lvaIncRefCnts(hiCopy);
- }
+ m_compiler->lvaIncRefCnts(hiCopy);
}
insertAfter = hiResult;
diff --git a/src/jit/valuenum.cpp b/src/jit/valuenum.cpp
index 9ca852e5a2..7cdbfdcfb0 100644
--- a/src/jit/valuenum.cpp
+++ b/src/jit/valuenum.cpp
@@ -408,13 +408,27 @@ T ValueNumStore::EvalOpIntegral(VNFunc vnf, T v0, T v1, ValueNum* pExcSet)
case GT_AND:
return v0 & v1;
case GT_LSH:
- return v0 << v1;
+ if (sizeof(T) == 8)
+ {
+ return v0 << (v1 & 0x3F);
+ }
+ else
+ {
+ return v0 << v1;
+ }
case GT_RSH:
- return v0 >> v1;
+ if (sizeof(T) == 8)
+ {
+ return v0 >> (v1 & 0x3F);
+ }
+ else
+ {
+ return v0 >> v1;
+ }
case GT_RSZ:
if (sizeof(T) == 8)
{
- return UINT64(v0) >> v1;
+ return UINT64(v0) >> (v1 & 0x3F);
}
else
{
diff --git a/src/vm/i386/jithelp.asm b/src/vm/i386/jithelp.asm
index a4bbe1ccf7..27b866881a 100644
--- a/src/vm/i386/jithelp.asm
+++ b/src/vm/i386/jithelp.asm
@@ -520,6 +520,8 @@ JIT_LLsh ENDP
ALIGN 16
PUBLIC JIT_LRsh
JIT_LRsh PROC
+; Reduce shift amount mod 64
+ and ecx, 63
; Handle shifts of between bits 0 and 31
cmp ecx, 32
jae short LRshMORE32
@@ -554,6 +556,8 @@ JIT_LRsh ENDP
ALIGN 16
PUBLIC JIT_LRsz
JIT_LRsz PROC
+; Reduce shift amount mod 64
+ and ecx, 63
; Handle shifts of between bits 0 and 31
cmp ecx, 32
jae short LRszMORE32
diff --git a/src/vm/jithelpers.cpp b/src/vm/jithelpers.cpp
index 9f887e6616..e3d93d21df 100644
--- a/src/vm/jithelpers.cpp
+++ b/src/vm/jithelpers.cpp
@@ -464,7 +464,7 @@ HCIMPLEND
HCIMPL2_VV(INT64, JIT_LRsh, INT64 num, int shift)
{
FCALL_CONTRACT;
- return num >> shift;
+ return num >> (shift & 0x3F);
}
HCIMPLEND
@@ -472,7 +472,7 @@ HCIMPLEND
HCIMPL2_VV(UINT64, JIT_LRsz, UINT64 num, int shift)
{
FCALL_CONTRACT;
- return num >> shift;
+ return num >> (shift & 0x3F);
}
HCIMPLEND
#endif // !BIT64 && !_TARGET_X86_