summaryrefslogtreecommitdiff
path: root/src/jit/compiler.h
diff options
context:
space:
mode:
Diffstat (limited to 'src/jit/compiler.h')
-rw-r--r--src/jit/compiler.h322
1 files changed, 231 insertions, 91 deletions
diff --git a/src/jit/compiler.h b/src/jit/compiler.h
index d8cd491063..4239cf613b 100644
--- a/src/jit/compiler.h
+++ b/src/jit/compiler.h
@@ -268,10 +268,6 @@ public:
unsigned char lvDisqualify : 1; // variable is no longer OK for add copy optimization
unsigned char lvVolatileHint : 1; // hint for AssertionProp
#endif
-#if FANCY_ARRAY_OPT
- unsigned char lvAssignOne : 1; // assigned at least once?
- unsigned char lvAssignTwo : 1; // assigned at least twice?
-#endif
unsigned char lvSpilled : 1; // enregistered variable was spilled
#ifndef _TARGET_64BIT_
@@ -322,6 +318,7 @@ public:
// type of an arg node is TYP_BYREF and a local node is TYP_SIMD*.
unsigned char lvSIMDType : 1; // This is a SIMD struct
unsigned char lvUsedInSIMDIntrinsic : 1; // This tells lclvar is used for simd intrinsic
+ var_types lvBaseType : 5; // Note: this only packs because var_types is a typedef of unsigned char
#endif // FEATURE_SIMD
unsigned char lvRegStruct : 1; // This is a reg-sized non-field-addressed struct.
@@ -330,9 +327,6 @@ public:
// local.
unsigned lvParentLcl; // The index of the local var representing the parent (i.e. the promoted struct local).
// Valid on promoted struct local fields.
-#ifdef FEATURE_SIMD
- var_types lvBaseType; // The base type of a SIMD local var. Valid on TYP_SIMD locals.
-#endif // FEATURE_SIMD
};
unsigned char lvFieldCnt; // Number of fields in the promoted VarDsc.
@@ -676,7 +670,7 @@ public:
#endif // defined(_TARGET_64BIT_)
}
- unsigned lvSize() // Size needed for storage representation. Only used for structs or TYP_BLK.
+ unsigned lvSize() const // Size needed for storage representation. Only used for structs or TYP_BLK.
{
// TODO-Review: Sometimes we get called on ARM with HFA struct variables that have been promoted,
// where the struct itself is no longer used because all access is via its member fields.
@@ -694,7 +688,8 @@ public:
#if defined(FEATURE_SIMD) && !defined(_TARGET_64BIT_)
// For 32-bit architectures, we make local variable SIMD12 types 16 bytes instead of just 12. We can't do
- // this for arguments, which must be passed according the defined ABI.
+ // this for arguments, which must be passed according to the defined ABI. We don't want to do this for
+ // dependently promoted struct fields, but we don't know that here. See lvaMapSimd12ToSimd16().
if ((lvType == TYP_SIMD12) && !lvIsParam)
{
assert(lvExactSize == 12);
@@ -711,10 +706,6 @@ public:
BYTE* lvGcLayout; // GC layout info for structs
-#if FANCY_ARRAY_OPT
- GenTreePtr lvKnownDim; // array size if known
-#endif
-
#if ASSERTION_PROP
BlockSet lvRefBlks; // Set of blocks that contain refs
GenTreePtr lvDefStmt; // Pointer to the statement with the single definition
@@ -1195,6 +1186,11 @@ struct fgArgTabEntry
unsigned alignment; // 1 or 2 (slots/registers)
unsigned lateArgInx; // index into gtCallLateArgs list
unsigned tmpNum; // the LclVar number if we had to force evaluation of this arg
+#if defined(UNIX_X86_ABI)
+ unsigned padStkAlign; // Number of padding slots for stack alignment. For each Call, only the first
+ // argument may have a value to emit "sub esp, n" to adjust the stack before pushing
+ // the argument.
+#endif
bool isSplit : 1; // True when this argument is split between the registers and OutArg area
bool needTmp : 1; // True when we force this argument's evaluation into a temp LclVar
@@ -1272,6 +1268,10 @@ class fgArgInfo
unsigned argCount; // Updatable arg count value
unsigned nextSlotNum; // Updatable slot count value
unsigned stkLevel; // Stack depth when we make this call (for x86)
+#if defined(UNIX_X86_ABI)
+ unsigned padStkAlign; // Number of padding slots for stack alignment. This value is used to restore the
+ // stack pointer after the call, undoing the alignment adjustment made before it
+#endif
unsigned argTableSize; // size of argTable array (equal to the argCount when done with fgMorphArgs)
bool hasRegArgs; // true if we have one or more register arguments
@@ -1321,6 +1321,10 @@ public:
void ArgsComplete();
+#if defined(UNIX_X86_ABI)
+ void ArgsAlignPadding();
+#endif
+
void SortArgs();
void EvalArgsToTemps();
@@ -1340,6 +1344,12 @@ public:
{
return nextSlotNum;
}
+#if defined(UNIX_X86_ABI)
+ unsigned GetPadStackAlign()
+ {
+ return padStkAlign;
+ }
+#endif
bool HasRegArgs()
{
return hasRegArgs;
@@ -1352,6 +1362,9 @@ public:
{
return argsComplete;
}
+
+ // Get the late arg for arg at position argIndex. Caller must ensure this position has a late arg.
+ GenTreePtr GetLateArg(unsigned argIndex);
};
#ifdef DEBUG
@@ -1771,7 +1784,11 @@ public:
// a PSPSym for functions with any EH.
bool ehNeedsPSPSym() const
{
+#ifdef _TARGET_X86_
+ return false;
+#else // _TARGET_X86_
return compHndBBtabCount > 0;
+#endif // _TARGET_X86_
}
bool ehAnyFunclets(); // Are there any funclets in this function?
@@ -1936,6 +1953,11 @@ public:
GenTreePtr gtNewOneConNode(var_types type);
+#ifdef FEATURE_SIMD
+ GenTreePtr gtNewSIMDVectorZero(var_types simdType, var_types baseType, unsigned size);
+ GenTreePtr gtNewSIMDVectorOne(var_types simdType, var_types baseType, unsigned size);
+#endif
+
GenTreeBlk* gtNewBlkOpNode(
genTreeOps oper, GenTreePtr dst, GenTreePtr srcOrFillVal, GenTreePtr sizeOrClsTok, bool isVolatile);
@@ -1981,6 +2003,7 @@ public:
SIMDIntrinsicID simdIntrinsicID,
var_types baseType,
unsigned size);
+ void SetOpLclRelatedToSIMDIntrinsic(GenTreePtr op);
#endif
GenTreePtr gtNewLclLNode(unsigned lnum, var_types type, IL_OFFSETX ILoffs = BAD_IL_OFFSET);
@@ -2063,13 +2086,13 @@ public:
bool gtHasLocalsWithAddrOp(GenTreePtr tree);
- unsigned gtHashValue(GenTree* tree);
-
unsigned gtSetListOrder(GenTree* list, bool regs, bool isListCallArgs);
void gtWalkOp(GenTree** op1, GenTree** op2, GenTree* adr, bool constOnly);
#ifdef DEBUG
+ unsigned gtHashValue(GenTree* tree);
+
GenTreePtr gtWalkOpEffectiveVal(GenTreePtr op);
#endif
@@ -2653,6 +2676,35 @@ public:
bool lvaIsFieldOfDependentlyPromotedStruct(const LclVarDsc* varDsc);
bool lvaIsGCTracked(const LclVarDsc* varDsc);
+#if defined(FEATURE_SIMD)
+ bool lvaMapSimd12ToSimd16(const LclVarDsc* varDsc)
+ {
+ assert(varDsc->lvType == TYP_SIMD12);
+ assert(varDsc->lvExactSize == 12);
+
+#if defined(_TARGET_64BIT_)
+ assert(varDsc->lvSize() == 16);
+ return true;
+#else // !defined(_TARGET_64BIT_)
+
+ // For 32-bit architectures, we make local variable SIMD12 types 16 bytes instead of just 12. lvSize()
+ // already does this calculation. However, we also need to prevent mapping types if the var is a
+ // dependently promoted struct field, which must remain its exact size within its parent struct.
+ // However, we don't know this until late, so we may have already pretended the field is bigger
+ // before that.
+ if ((varDsc->lvSize() == 16) && !lvaIsFieldOfDependentlyPromotedStruct(varDsc))
+ {
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+
+#endif // !defined(_TARGET_64BIT_)
+ }
+#endif // defined(FEATURE_SIMD)
+
BYTE* lvaGetGcLayout(unsigned varNum);
bool lvaTypeIsGC(unsigned varNum);
unsigned lvaGSSecurityCookie; // LclVar number
@@ -2697,21 +2749,21 @@ protected:
static fgWalkPreFn lvaMarkLclRefsCallback;
void lvaMarkLclRefs(GenTreePtr tree);
- // Keeps the mapping from SSA #'s to VN's for the implicit "Heap" variable.
- PerSsaArray lvHeapPerSsaData;
- unsigned lvHeapNumSsaNames;
+ // Keeps the mapping from SSA #'s to VN's for the implicit memory variables.
+ PerSsaArray lvMemoryPerSsaData;
+ unsigned lvMemoryNumSsaNames;
public:
- // Returns the address of the per-Ssa data for "Heap" at the given ssaNum (which is required
+ // Returns the address of the per-Ssa data for memory at the given ssaNum (which is required
// not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is
// not an SSA variable).
- LclSsaVarDsc* GetHeapPerSsaData(unsigned ssaNum)
+ LclSsaVarDsc* GetMemoryPerSsaData(unsigned ssaNum)
{
assert(ssaNum != SsaConfig::RESERVED_SSA_NUM);
assert(SsaConfig::RESERVED_SSA_NUM == 0);
ssaNum--;
- assert(ssaNum < lvHeapNumSsaNames);
- return &lvHeapPerSsaData.GetRef(ssaNum);
+ assert(ssaNum < lvMemoryNumSsaNames);
+ return &lvMemoryPerSsaData.GetRef(ssaNum);
}
/*
@@ -2780,7 +2832,7 @@ protected:
void impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo);
- bool impCanPInvokeInline(BasicBlock* block);
+ bool impCanPInvokeInline();
bool impCanPInvokeInlineCallSite(BasicBlock* block);
void impCheckForPInvokeCall(
GenTreePtr call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block);
@@ -2831,7 +2883,8 @@ protected:
void impImportLeave(BasicBlock* block);
void impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr);
- GenTreePtr impIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
+ GenTreePtr impIntrinsic(GenTreePtr newobjThis,
+ CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
int memberRef,
@@ -3425,6 +3478,7 @@ public:
bool fgComputePredsDone; // Have we computed the bbPreds list
bool fgCheapPredsValid; // Is the bbCheapPreds list valid?
bool fgDomsComputed; // Have we computed the dominator sets?
+ bool fgOptimizedFinally; // Did we optimize any try-finallys?
bool fgHasSwitch; // any BBJ_SWITCH jumps?
bool fgHasPostfix; // any postfix ++/-- found?
@@ -3493,8 +3547,20 @@ public:
void fgImport();
+ void fgTransformFatCalli();
+
void fgInline();
+ void fgRemoveEmptyTry();
+
+ void fgRemoveEmptyFinally();
+
+ void fgCloneFinally();
+
+ void fgCleanupContinuation(BasicBlock* continuation);
+
+ void fgUpdateFinallyTargetFlags();
+
GenTreePtr fgGetCritSectOfStaticMethod();
#if !defined(_TARGET_X86_)
@@ -3570,10 +3636,9 @@ public:
void fgLocalVarLivenessInit();
#ifdef LEGACY_BACKEND
- GenTreePtr fgLegacyPerStatementLocalVarLiveness(GenTreePtr startNode, GenTreePtr relopNode, GenTreePtr asgdLclVar);
+ GenTreePtr fgLegacyPerStatementLocalVarLiveness(GenTreePtr startNode, GenTreePtr relopNode);
#else
- void fgPerNodeLocalVarLiveness(GenTree* node, GenTree* asgdLclVar);
- void fgPerStatementLocalVarLiveness(GenTree* node, GenTree* asgdLclVar);
+ void fgPerNodeLocalVarLiveness(GenTree* node);
#endif
void fgPerBlockLocalVarLiveness();
@@ -3741,18 +3806,18 @@ public:
// tree node).
void fgValueNumber();
- // Updates "fgCurHeap" via the assignment H[elemTypeEq][arrVN][inx][fldSeq] = rhsVN.
+ // Computes new GcHeap VN via the assignment H[elemTypeEq][arrVN][inx][fldSeq] = rhsVN.
// Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type.
// The 'indType' is the indirection type of the lhs of the assignment and will typically
// match the element type of the array or fldSeq. When this type doesn't match
// or if the fldSeq is 'NotAField' we invalidate the array contents H[elemTypeEq][arrVN]
//
- void fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq,
- ValueNum arrVN,
- ValueNum inxVN,
- FieldSeqNode* fldSeq,
- ValueNum rhsVN,
- var_types indType);
+ ValueNum fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq,
+ ValueNum arrVN,
+ ValueNum inxVN,
+ FieldSeqNode* fldSeq,
+ ValueNum rhsVN,
+ var_types indType);
// Requires that "tree" is a GT_IND marked as an array index, and that its address argument
// has been parsed to yield the other input arguments. If evaluation of the address
@@ -3772,33 +3837,43 @@ public:
// Requires "funcApp" to be a VNF_PtrToArrElem, and "addrXvn" to represent the exception set thrown
// by evaluating the array index expression "tree". Returns the value number resulting from
- // dereferencing the array in the current heap state. If "tree" is non-null, it must be the
+ // dereferencing the array in the current GcHeap state. If "tree" is non-null, it must be the
// "GT_IND" that does the dereference, and it is given the returned value number.
ValueNum fgValueNumberArrIndexVal(GenTreePtr tree, struct VNFuncApp* funcApp, ValueNum addrXvn);
+ // Compute the value number for a byref-exposed load of the given type via the given pointerVN.
+ ValueNum fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN);
+
unsigned fgVNPassesCompleted; // Number of times fgValueNumber has been run.
// Utility functions for fgValueNumber.
- // Perform value-numbering for the trees in "blk". When giving VN's to the SSA
- // names defined by phi definitions at the start of "blk", "newVNsForPhis" indicates
- // that these should be given new VN's, irrespective of the values of the LHS.
- // If "false", then we may assume that all inputs to phi RHS's of such definitions
- // have already been assigned value numbers; if they are all assigned the *same* value
- // number, then the LHS SSA name gets the same VN.
- void fgValueNumberBlock(BasicBlock* blk, bool newVNsForPhis);
+ // Perform value-numbering for the trees in "blk".
+ void fgValueNumberBlock(BasicBlock* blk);
// Requires that "entryBlock" is the entry block of loop "loopNum", and that "loopNum" is the
// innermost loop of which "entryBlock" is the entry. Returns the value number that should be
- // assumed for the heap at the start "entryBlk".
- ValueNum fgHeapVNForLoopSideEffects(BasicBlock* entryBlock, unsigned loopNum);
+ // assumed for the memoryKind at the start of "entryBlk".
+ ValueNum fgMemoryVNForLoopSideEffects(MemoryKind memoryKind, BasicBlock* entryBlock, unsigned loopNum);
- // Called when an operation (performed by "tree", described by "msg") may cause the global Heap to be mutated.
- void fgMutateHeap(GenTreePtr tree DEBUGARG(const char* msg));
+ // Called when an operation (performed by "tree", described by "msg") may cause the GcHeap to be mutated.
+ // As GcHeap is a subset of ByrefExposed, this will also annotate the ByrefExposed mutation.
+ void fgMutateGcHeap(GenTreePtr tree DEBUGARG(const char* msg));
- // Tree caused an update in the current heap VN. If "tree" has an associated heap SSA #, record that
+ // Called when an operation (performed by "tree", described by "msg") may cause an address-exposed local to be
+ // mutated.
+ void fgMutateAddressExposedLocal(GenTreePtr tree DEBUGARG(const char* msg));
+
+ // For a GC heap store at curTree, record the new curMemoryVN's and update curTree's MemorySsaMap.
+ // As GcHeap is a subset of ByrefExposed, this will also record the ByrefExposed store.
+ void recordGcHeapStore(GenTreePtr curTree, ValueNum gcHeapVN DEBUGARG(const char* msg));
+
+ // For a store to an address-exposed local at curTree, record the new curMemoryVN and update curTree's MemorySsaMap.
+ void recordAddressExposedLocalStore(GenTreePtr curTree, ValueNum memoryVN DEBUGARG(const char* msg));
+
+ // Tree caused an update in the current memory VN. If "tree" has an associated heap SSA #, record that
// value in that SSA #.
- void fgValueNumberRecordHeapSsa(GenTreePtr tree);
+ void fgValueNumberRecordMemorySsa(MemoryKind memoryKind, GenTreePtr tree);
// The input 'tree' is a leaf node that is a constant
// Assign the proper value number to the tree
@@ -3837,11 +3912,11 @@ public:
// Requires "helpFunc" to be pure. Returns the corresponding VNFunc.
VNFunc fgValueNumberHelperMethVNFunc(CorInfoHelpFunc helpFunc);
- // This is the current value number for the "Heap" implicit variable while
- // doing value numbering. This is the value number under the "liberal" interpretation
- // of heap values; the "conservative" interpretation needs no VN, since every access of
- // the heap yields an unknown value.
- ValueNum fgCurHeapVN;
+ // These are the current value numbers for the implicit memory variables while
+ // doing value numbering. These are the value numbers under the "liberal" interpretation
+ // of memory values; the "conservative" interpretation needs no VN, since every access of
+ // memory yields an unknown value.
+ ValueNum fgCurMemoryVN[MemoryKindCount];
// Return a "pseudo"-class handle for an array element type. If "elemType" is TYP_STRUCT,
// requires "elemStructType" to be non-null (and to have a low-order zero). Otherwise, low order bit
@@ -4272,6 +4347,7 @@ public:
void fgDebugCheckNodeLinks(BasicBlock* block, GenTreePtr stmt);
void fgDebugCheckFlags(GenTreePtr tree);
void fgDebugCheckFlagsHelper(GenTreePtr tree, unsigned treeFlags, unsigned chkFlags);
+ void fgDebugCheckTryFinallyExits();
#endif
#ifdef LEGACY_BACKEND
@@ -4524,7 +4600,6 @@ private:
static MorphAddrContext s_CopyBlockMAC;
#ifdef FEATURE_SIMD
- GenTreePtr fgCopySIMDNode(GenTreeSIMD* simdNode);
GenTreePtr getSIMDStructFromField(GenTreePtr tree,
var_types* baseTypeOut,
unsigned* indexOut,
@@ -4613,11 +4688,13 @@ private:
VARSET_TP fgCurUseSet; // vars used by block (before an assignment)
VARSET_TP fgCurDefSet; // vars assigned by block (before a use)
- bool fgCurHeapUse; // True iff the current basic block uses the heap before defining it.
- bool fgCurHeapDef; // True iff the current basic block defines the heap.
- bool fgCurHeapHavoc; // True if the current basic block is known to set the heap to a "havoc" value.
+ MemoryKindSet fgCurMemoryUse; // Set of memory kinds the current basic block uses before defining them.
+ MemoryKindSet fgCurMemoryDef; // Set of memory kinds the current basic block modifies.
+ MemoryKindSet fgCurMemoryHavoc; // Set of memory kinds the current block is known to set to a "havoc" value.
- void fgMarkUseDef(GenTreeLclVarCommon* tree, GenTree* asgdLclVar = nullptr);
+ bool byrefStatesMatchGcHeapStates; // True iff GcHeap and ByrefExposed memory have all the same def points.
+
+ void fgMarkUseDef(GenTreeLclVarCommon* tree);
void fgBeginScopeLife(VARSET_TP* inScope, VarScopeDsc* var);
void fgEndScopeLife(VARSET_TP* inScope, VarScopeDsc* var);
@@ -4686,6 +4763,9 @@ private:
#ifdef DEBUG
static fgWalkPreFn fgDebugCheckInlineCandidates;
+
+ void CheckNoFatPointerCandidatesLeft();
+ static fgWalkPreFn fgDebugCheckFatPointerCandidates;
#endif
void fgPromoteStructs();
@@ -4968,9 +5048,10 @@ public:
#define LPFLG_ASGVARS_INC 0x8000 // "lpAsgVars" is incomplete -- vars beyond those representable in an AllVarSet
// type are assigned to.
- bool lpLoopHasHeapHavoc; // The loop contains an operation that we assume has arbitrary heap side effects.
- // If this is set, the fields below may not be accurate (since they become irrelevant.)
- bool lpContainsCall; // True if executing the loop body *may* execute a call
+ bool lpLoopHasMemoryHavoc[MemoryKindCount]; // The loop contains an operation that we assume has arbitrary
+ // memory side effects. If this is set, the fields below
+ // may not be accurate (since they become irrelevant.)
+ bool lpContainsCall; // True if executing the loop body *may* execute a call
VARSET_TP lpVarInOut; // The set of variables that are IN or OUT during the execution of this loop
VARSET_TP lpVarUseDef; // The set of variables that are USE or DEF during the execution of this loop
@@ -5307,6 +5388,9 @@ protected:
treeStmtLstPtr csdTreeList; // list of matching tree nodes: head
treeStmtLstPtr csdTreeLast; // list of matching tree nodes: tail
+
+ ValueNum defConservativeVN; // if all def occurrences share the same conservative value
+ // number, this will reflect it; otherwise, NoVN.
};
static const size_t s_optCSEhashSize;
@@ -5462,11 +5546,27 @@ public:
}
};
-#define OMF_HAS_NEWARRAY 0x00000001 // Method contains 'new' of an array
-#define OMF_HAS_NEWOBJ 0x00000002 // Method contains 'new' of an object type.
-#define OMF_HAS_ARRAYREF 0x00000004 // Method contains array element loads or stores.
-#define OMF_HAS_VTABLEREF 0x00000008 // Method contains method table reference.
-#define OMF_HAS_NULLCHECK 0x00000010 // Method contains null check.
+#define OMF_HAS_NEWARRAY 0x00000001 // Method contains 'new' of an array
+#define OMF_HAS_NEWOBJ 0x00000002 // Method contains 'new' of an object type.
+#define OMF_HAS_ARRAYREF 0x00000004 // Method contains array element loads or stores.
+#define OMF_HAS_VTABLEREF 0x00000008 // Method contains method table reference.
+#define OMF_HAS_NULLCHECK 0x00000010 // Method contains null check.
+#define OMF_HAS_FATPOINTER 0x00000020 // Method contains call, that needs fat pointer transformation.
+
+ bool doesMethodHaveFatPointer()
+ {
+ return (optMethodFlags & OMF_HAS_FATPOINTER) != 0;
+ }
+
+ void setMethodHasFatPointer()
+ {
+ optMethodFlags |= OMF_HAS_FATPOINTER;
+ }
+
+ void clearMethodHasFatPointer()
+ {
+ optMethodFlags &= ~OMF_HAS_FATPOINTER;
+ }
unsigned optMethodFlags;
@@ -5931,10 +6031,6 @@ protected:
ssize_t optGetArrayRefScaleAndIndex(GenTreePtr mul, GenTreePtr* pIndex DEBUGARG(bool bRngChk));
GenTreePtr optFindLocalInit(BasicBlock* block, GenTreePtr local, VARSET_TP* pKilledInOut, bool* isKilledAfterInit);
-#if FANCY_ARRAY_OPT
- bool optIsNoMore(GenTreePtr op1, GenTreePtr op2, int add1 = 0, int add2 = 0);
-#endif
-
bool optReachWithoutCall(BasicBlock* srcBB, BasicBlock* dstBB);
protected:
@@ -6845,10 +6941,15 @@ private:
void unwindReserveFunc(FuncInfoDsc* func);
void unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode);
-#if defined(_TARGET_AMD64_)
+#if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && FEATURE_EH_FUNCLETS)
void unwindReserveFuncHelper(FuncInfoDsc* func, bool isHotCode);
void unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pColdCode, bool isHotCode);
+
+#endif // _TARGET_AMD64_ || (_TARGET_X86_ && FEATURE_EH_FUNCLETS)
+
+#if defined(_TARGET_AMD64_)
+
UNATIVE_OFFSET unwindGetCurrentOffset(FuncInfoDsc* func);
void unwindBegPrologWindows();
@@ -6932,6 +7033,20 @@ private:
// Should we support SIMD intrinsics?
bool featureSIMD;
+ // Have we identified any SIMD types?
+ // This is currently used by struct promotion to avoid getting type information for a struct
+ // field to see if it is a SIMD type, if we haven't seen any SIMD types or operations in
+ // the method.
+ bool _usesSIMDTypes;
+ bool usesSIMDTypes()
+ {
+ return _usesSIMDTypes;
+ }
+ void setUsesSIMDTypes(bool value)
+ {
+ _usesSIMDTypes = value;
+ }
+
// This is a temp lclVar allocated on the stack as TYP_SIMD. It is used to implement intrinsics
// that require indexed access to the individual fields of the vector, which is not well supported
// by the hardware. It is allocated when/if such situations are encountered during Lowering.
@@ -7121,6 +7236,9 @@ private:
GenTree** op1,
GenTree** op2);
+ // Creates a GT_SIMD tree for Abs intrinsic.
+ GenTreePtr impSIMDAbs(CORINFO_CLASS_HANDLE typeHnd, var_types baseType, unsigned simdVectorSize, GenTree* op1);
+
#if defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
// Transforms operands and returns the SIMD intrinsic to be applied on
// transformed operands to obtain == comparison result.
@@ -7623,8 +7741,6 @@ public:
static const bool compNoPInvokeInlineCB;
#endif
- bool compMustInlinePInvokeCalli; // Unmanaged CALLI in IL stubs must be inlined
-
#ifdef DEBUG
bool compGcChecks; // Check arguments and return values to ensure they are sane
bool compStackCheckOnRet; // Check ESP on return to ensure it is correct
@@ -7783,11 +7899,22 @@ public:
/* These should not be exhaustively used as they might */ \
/* hide/trivialize other areas */ \
\
- STRESS_MODE(REGS) STRESS_MODE(DBL_ALN) STRESS_MODE(LCL_FLDS) STRESS_MODE(UNROLL_LOOPS) \
- STRESS_MODE(MAKE_CSE) STRESS_MODE(LEGACY_INLINE) STRESS_MODE(CLONE_EXPR) \
- STRESS_MODE(USE_FCOMI) STRESS_MODE(USE_CMOV) STRESS_MODE(FOLD) \
- STRESS_MODE(BB_PROFILE) STRESS_MODE(OPT_BOOLS_GC) STRESS_MODE(REMORPH_TREES) \
- STRESS_MODE(64RSLT_MUL) STRESS_MODE(DO_WHILE_LOOPS) STRESS_MODE(MIN_OPTS) \
+ STRESS_MODE(REGS) \
+ STRESS_MODE(DBL_ALN) \
+ STRESS_MODE(LCL_FLDS) \
+ STRESS_MODE(UNROLL_LOOPS) \
+ STRESS_MODE(MAKE_CSE) \
+ STRESS_MODE(LEGACY_INLINE) \
+ STRESS_MODE(CLONE_EXPR) \
+ STRESS_MODE(USE_FCOMI) \
+ STRESS_MODE(USE_CMOV) \
+ STRESS_MODE(FOLD) \
+ STRESS_MODE(BB_PROFILE) \
+ STRESS_MODE(OPT_BOOLS_GC) \
+ STRESS_MODE(REMORPH_TREES) \
+ STRESS_MODE(64RSLT_MUL) \
+ STRESS_MODE(DO_WHILE_LOOPS) \
+ STRESS_MODE(MIN_OPTS) \
STRESS_MODE(REVERSE_FLAG) /* Will set GTF_REVERSE_OPS whenever we can */ \
STRESS_MODE(REVERSE_COMMA) /* Will reverse commas created with gtNewCommaNode */ \
STRESS_MODE(TAILCALL) /* Will make the call as a tailcall whenever legal */ \
@@ -7796,17 +7923,23 @@ public:
STRESS_MODE(NULL_OBJECT_CHECK) \
STRESS_MODE(PINVOKE_RESTORE_ESP) \
STRESS_MODE(RANDOM_INLINE) \
+ STRESS_MODE(SWITCH_CMP_BR_EXPANSION) \
+ STRESS_MODE(GENERIC_VARN) \
+ \
+ /* After COUNT_VARN, stress level 2 does all of these all the time */ \
\
- STRESS_MODE(GENERIC_VARN) STRESS_MODE(COUNT_VARN) \
+ STRESS_MODE(COUNT_VARN) \
\
/* "Check" stress areas that can be exhaustively used if we */ \
/* dont care about performance at all */ \
\
STRESS_MODE(FORCE_INLINE) /* Treat every method as AggressiveInlining */ \
STRESS_MODE(CHK_FLOW_UPDATE) \
- STRESS_MODE(EMITTER) STRESS_MODE(CHK_REIMPORT) STRESS_MODE(FLATFP) \
- \
- STRESS_MODE(GENERIC_CHECK) STRESS_MODE(COUNT) \
+ STRESS_MODE(EMITTER) \
+ STRESS_MODE(CHK_REIMPORT) \
+ STRESS_MODE(FLATFP) \
+ STRESS_MODE(GENERIC_CHECK) \
+ STRESS_MODE(COUNT)
enum compStressArea
{
@@ -8951,21 +9084,28 @@ public:
return compRoot->m_arrayInfoMap;
}
- NodeToUnsignedMap* m_heapSsaMap;
+ NodeToUnsignedMap* m_memorySsaMap[MemoryKindCount];
- // In some cases, we want to assign intermediate SSA #'s to heap states, and know what nodes create those heap
- // states. (We do this for try blocks, where, if the try block doesn't do a call that loses track of the heap state,
- // all the possible heap states are possible initial states of the corresponding catch block(s).)
- NodeToUnsignedMap* GetHeapSsaMap()
+ // In some cases, we want to assign intermediate SSA #'s to memory states, and know what nodes create those memory
+ // states. (We do this for try blocks, where, if the try block doesn't do a call that loses track of the memory
+ // state, all the possible memory states are possible initial states of the corresponding catch block(s).)
+ NodeToUnsignedMap* GetMemorySsaMap(MemoryKind memoryKind)
{
+ if (memoryKind == GcHeap && byrefStatesMatchGcHeapStates)
+ {
+ // Use the same map for GCHeap and ByrefExposed when their states match.
+ memoryKind = ByrefExposed;
+ }
+
+ assert(memoryKind < MemoryKindCount);
Compiler* compRoot = impInlineRoot();
- if (compRoot->m_heapSsaMap == nullptr)
+ if (compRoot->m_memorySsaMap[memoryKind] == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation.
- IAllocator* ialloc = new (this, CMK_ArrayInfoMap) CompAllocator(this, CMK_ArrayInfoMap);
- compRoot->m_heapSsaMap = new (ialloc) NodeToUnsignedMap(ialloc);
+ IAllocator* ialloc = new (this, CMK_ArrayInfoMap) CompAllocator(this, CMK_ArrayInfoMap);
+ compRoot->m_memorySsaMap[memoryKind] = new (ialloc) NodeToUnsignedMap(ialloc);
}
- return compRoot->m_heapSsaMap;
+ return compRoot->m_memorySsaMap[memoryKind];
}
// The Refany type is the only struct type whose structure is implicitly assumed by IL. We need its fields.