summaryrefslogtreecommitdiff
path: root/src/jit/hashbv.cpp
diff options
context:
space:
mode:
authorPat Gavlin <pagavlin@microsoft.com>2016-09-23 15:48:16 -0700
committerPat Gavlin <pagavlin@microsoft.com>2016-09-28 16:43:50 -0700
commitd857d37425eca004a5231fba5809c79d8566a008 (patch)
tree7094e5090e48e83f448dcdd98b67af8b8b8f9df2 /src/jit/hashbv.cpp
parent47cb433a8ff57d061e5a00d00ff15788531f41e5 (diff)
downloadcoreclr-d857d37425eca004a5231fba5809c79d8566a008.tar.gz
coreclr-d857d37425eca004a5231fba5809c79d8566a008.tar.bz2
coreclr-d857d37425eca004a5231fba5809c79d8566a008.zip
Fix lowering's containment analysis.
This fixes a silent bad code generation issue that arose during internal testing. The original repro is a test failure under COMPlus_JitStress=2. Due to explicit null check insertion, we (eventually) end up with the following LIR: t6096 = lclVar ref V86 cse10 <l:$4ad, c:$1b5> /--* t6096 ref * st.lclVar ref V41 tmp29 d:26 t2733 = lclVar ref V41 tmp29 u:26 <l:$4ad, c:$1b5> /--* t2733 ref * nullcheck byte <l:$4b8, c:$58a> t2736 = lclVar ref V41 tmp29 u:26 (last use) <l:$4ad, c:$1b5> t2737 = const long 20 field offset Fseq[y] $107 /--* t2736 ref +--* t2737 long t2735 = * + byref <l:$2ad, c:$2ac> t6081 = lclVar ref V83 cse7 <l:$4bd, c:$1b7> /--* t6081 ref * st.lclVar ref V41 tmp29 d:27 t2762 = lclVar ref V41 tmp29 u:27 <l:$4bd, c:$1b7> /--* t2762 ref * nullcheck byte <l:$583, c:$58f> t2765 = lclVar ref V41 tmp29 u:27 (last use) <l:$4bd, c:$1b7> t2766 = const long 20 field offset Fseq[y] $107 /--* t2765 ref +--* t2766 long t2764 = * + byref <l:$2af, c:$2ae> /--* t2764 byref t2763 = * indir int <l:$54e, c:$1ed> t2767 = lclVar int (AX) V07 loc4 $1ee /--* t2763 int +--* t2767 int t2738 = * + int <l:$554, c:$553> /--* t2735 byref +--* t2738 int * storeIndir int During lowering, we attempt to form an RMW add rooted at the final storeIndir. The pattern matching that attempts to form RMW operations, however, does not consider whether or not it is safe to perform the code motion involved in making the destination and source addresses for the operator contained. In this case, lowering moves the evaluation of the address (i.e. the dataflow tree rooted at the add that produces t2735) into the storeIndir. This moves a use of tmp29 across a def of the same and causes the program to store a value to an incorrect address. There are many variations on this pattern. For example, given the following C#: static int T(C[] a, C c) { return a.Length != c.M() ? 100 : 0; } The evaluation of a.Length (including the necessary null check) should occur before the call to c.M(). 
The lack of correct checks for safe code motion that caused the original repro, however, causes the JIT to generate bad code in this case as well: the null check for a is folded into the load of a.Length, which is then made contained by the compare. This results in the call to c.M() executing before the null check, which causes the program to behave incorrectly in the case that a is null. In order to fix the code motion analysis, this change introduces a new type, `SideEffectSet`, that can be used to summarize the side effects of a set of nodes and check whether or not they interfere with another set of side effects. This change then uses the new type to ensure that it is safe to perform the code motion necessary to make an operand contained before doing so.
Diffstat (limited to 'src/jit/hashbv.cpp')
-rw-r--r--src/jit/hashbv.cpp58
1 files changed, 58 insertions, 0 deletions
diff --git a/src/jit/hashbv.cpp b/src/jit/hashbv.cpp
index 32b286345b..fa06ec7b1e 100644
--- a/src/jit/hashbv.cpp
+++ b/src/jit/hashbv.cpp
@@ -289,6 +289,19 @@ elemType hashBvNode::SubtractWithChange(hashBvNode* other)
return result;
}
+// hashBvNode::Intersects: returns true if this node and `other` have any set bit in common.
+//
+// Arguments:
+//    other - the node to test against. Assumes `other` has the same number of
+//            elements as this node; presumably callers pair nodes with matching
+//            indices during traversal — confirm at call sites.
+//
+// Return Value:
+//    true if any word has a nonzero bitwise AND across the two nodes; false otherwise.
+bool hashBvNode::Intersects(hashBvNode* other)
+{
+ for (int i = 0; i < this->numElements(); i++)
+ {
+ // Any common bit in the corresponding words means the nodes intersect.
+ if ((this->elements[i] & other->elements[i]) != 0)
+ {
+ return true;
+ }
+ }
+
+ return false;
+}
+
void hashBvNode::AndWith(hashBvNode* other)
{
for (int i = 0; i < this->numElements(); i++)
@@ -1234,6 +1247,46 @@ public:
}
};
+// IntersectsAction: traversal policy for hashBv::MultiTraverse that computes
+// whether two bitvectors share any set bit. Terminates the traversal early
+// (via `terminate`) as soon as an intersecting node pair is found.
+class IntersectsAction
+{
+public:
+ // No setup required before the traversal.
+ static inline void PreAction(hashBv* lhs, hashBv* rhs)
+ {
+ }
+ // No cleanup required after the traversal.
+ static inline void PostAction(hashBv* lhs, hashBv* rhs)
+ {
+ }
+ // If no node pair intersects, the vectors are disjoint.
+ static inline bool DefaultResult()
+ {
+ return false;
+ }
+
+ // Node present only in rhs: contributes nothing to the intersection.
+ static inline void LeftGap(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
+ {
+ // in rhs, not lhs
+ // so skip rhs
+ r = r->next;
+ }
+ // Node present only in lhs: contributes nothing to the intersection.
+ static inline void RightGap(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
+ {
+ // in lhs, not rhs
+ // so skip lhs
+ l = &((*l)->next);
+ }
+ // Matching node indices on both sides: test the words for a common bit
+ // and stop the whole traversal on the first hit.
+ static inline void BothPresent(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
+ {
+ if ((*l)->Intersects(r))
+ {
+ terminate = true;
+ result = true;
+ }
+ }
+ // lhs exhausted: remaining rhs nodes cannot intersect; just advance rhs.
+ static inline void LeftEmpty(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
+ {
+ r = r->next;
+ }
+};
+
template <typename Action>
bool hashBv::MultiTraverseLHSBigger(hashBv* other)
{
@@ -1507,6 +1560,11 @@ bool hashBv::MultiTraverse(hashBv* other)
}
}
+// hashBv::Intersects: returns true if this bitvector and `other` have a
+// non-empty intersection (at least one bit set in both).
+//
+// Arguments:
+//    other - the bitvector to test against.
+//
+// Return Value:
+//    true if any bit is common to both vectors; false otherwise.
+bool hashBv::Intersects(hashBv* other)
+{
+ return MultiTraverse<IntersectsAction>(other);
+}
+
bool hashBv::AndWithChange(hashBv* other)
{
return MultiTraverse<AndAction>(other);