path: root/src/jit/importer.cpp
Diffstat (limited to 'src/jit/importer.cpp')
-rw-r--r--  src/jit/importer.cpp  233
1 file changed, 168 insertions, 65 deletions
diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp
index cb09ff8b8c..b1e0f487ef 100644
--- a/src/jit/importer.cpp
+++ b/src/jit/importer.cpp
@@ -1489,17 +1489,16 @@ var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
var_types structType = TYP_STRUCT;
-#ifdef FEATURE_CORECLR
- const bool hasGCPtrs = (structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0;
-#else
- // Desktop CLR won't report FLG_CONTAINS_GC_PTR for RefAnyClass - need to check explicitly.
- const bool isRefAny = (structHnd == impGetRefAnyClass());
- const bool hasGCPtrs = isRefAny || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0);
-#endif
+ // On CoreCLR the GC check is a "may contain" check to account for the special
+ // ByRef-like structs such as Span<T>. The extra check for "CONTAINS_STACK_PTR" is the
+ // relevant bit: when it is set, the struct contains a ByRef that could be either a GC
+ // pointer or a native pointer.
+ const bool mayContainGCPtrs =
+ ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
#ifdef FEATURE_SIMD
// Check to see if this is a SIMD type.
- if (featureSIMD && !hasGCPtrs)
+ if (featureSIMD && !mayContainGCPtrs)
{
unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
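For illustration, a minimal standalone sketch (not part of the patch) of the "may contain GC pointers" test above; MayContainGCPtrs and the flag values are invented stand-ins for the corinfo constants:

#include <cassert>
#include <cstdint>

constexpr uint32_t FLG_CONTAINS_GC_PTR    = 0x01000000; // hypothetical stand-in value
constexpr uint32_t FLG_CONTAINS_STACK_PTR = 0x08000000; // hypothetical stand-in value

// Either flag means the struct may hold a pointer the GC has to track:
// CONTAINS_STACK_PTR marks ByRef-like structs (e.g. Span<T>) whose byref field
// may point into the GC heap or into native memory.
static bool MayContainGCPtrs(uint32_t structFlags)
{
    return (structFlags & (FLG_CONTAINS_GC_PTR | FLG_CONTAINS_STACK_PTR)) != 0;
}

int main()
{
    assert(MayContainGCPtrs(FLG_CONTAINS_STACK_PTR)); // a Span<T>-like struct
    assert(!MayContainGCPtrs(0));                     // a struct with no pointers at all
}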
@@ -1515,10 +1514,8 @@ var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
{
*pSimdBaseType = simdBaseType;
}
-#ifdef _TARGET_AMD64_
- // Amd64: also indicate that we use floating point registers
+ // Also indicate that we use floating point registers.
compFloatingPointUsed = true;
-#endif
}
}
}
@@ -1532,9 +1529,10 @@ var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
// Verify that the quick test up above via the class attributes gave a
// safe view of the type's GCness.
//
- // Note there are cases where hasGCPtrs is true but getClassGClayout
+ // Note there are cases where mayContainGCPtrs is true but getClassGClayout
// does not report any gc fields.
- assert(hasGCPtrs || (numGCVars == 0));
+
+ assert(mayContainGCPtrs || (numGCVars == 0));
if (pNumGCVars != nullptr)
{
@@ -1638,21 +1636,52 @@ GenTreePtr Compiler::impNormStructVal(GenTreePtr structVal,
case GT_COMMA:
{
- // The second thing is the block node.
+ // The second thing could be a block node, a GT_SIMD node, or a GT_COMMA node.
GenTree* blockNode = structVal->gtOp.gtOp2;
assert(blockNode->gtType == structType);
- // It had better be a block node - any others should not occur here.
- assert(blockNode->OperIsBlk());
-
- // Sink the GT_COMMA below the blockNode addr.
- GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
- assert(blockNodeAddr->gtType == TYP_BYREF);
- GenTree* commaNode = structVal;
- commaNode->gtType = TYP_BYREF;
- commaNode->gtOp.gtOp2 = blockNodeAddr;
- blockNode->gtOp.gtOp1 = commaNode;
- structVal = blockNode;
- alreadyNormalized = true;
+
+ // Is this GT_COMMA(op1, GT_COMMA())?
+ GenTree* parent = structVal;
+ if (blockNode->OperGet() == GT_COMMA)
+ {
+ // Find the last node in the comma chain.
+ do
+ {
+ assert(blockNode->gtType == structType);
+ parent = blockNode;
+ blockNode = blockNode->gtOp.gtOp2;
+ } while (blockNode->OperGet() == GT_COMMA);
+ }
+
+#ifdef FEATURE_SIMD
+ if (blockNode->OperGet() == GT_SIMD)
+ {
+ parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
+ alreadyNormalized = true;
+ }
+ else
+#endif
+ {
+ assert(blockNode->OperIsBlk());
+
+ // Sink the GT_COMMA below the blockNode addr.
+ // That is, GT_COMMA(op1, op2=blockNode) is transformed into
+ // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
+ //
+ // In the case of a chained GT_COMMA, we sink the last
+ // GT_COMMA below the blockNode addr.
+ GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
+ assert(blockNodeAddr->gtType == TYP_BYREF);
+ GenTree* commaNode = parent;
+ commaNode->gtType = TYP_BYREF;
+ commaNode->gtOp.gtOp2 = blockNodeAddr;
+ blockNode->gtOp.gtOp1 = commaNode;
+ if (parent == structVal)
+ {
+ structVal = blockNode;
+ }
+ alreadyNormalized = true;
+ }
}
break;
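As a rough standalone sketch (not from the patch) of the rewrite above, using a toy Node type in place of GenTree (all names here are invented, and GenTree details such as types and flags are omitted): COMMA(s, OBJ(addr)) becomes OBJ(COMMA(s, addr)), and for a comma chain only the last comma is sunk below the block node:

#include <cassert>

enum Kind { Comma, Obj, Addr, SideEffect };

struct Node
{
    Kind  kind;
    Node* op1 = nullptr;
    Node* op2 = nullptr;
};

// Rewrites COMMA(s, OBJ(addr)) into OBJ(COMMA(s, addr)), first walking down a
// COMMA(s1, COMMA(s2, OBJ(addr))) chain to the last comma, as the importer does.
static Node* SinkCommaBelowBlock(Node* structVal)
{
    assert(structVal->kind == Comma);
    Node* parent = structVal;
    Node* block  = structVal->op2;
    while (block->kind == Comma) // find the last comma in the chain
    {
        parent = block;
        block  = block->op2;
    }
    assert(block->kind == Obj);
    parent->op2 = block->op1; // the comma now produces the address
    block->op1  = parent;     // the block node consumes the comma
    return (parent == structVal) ? block : structVal;
}

int main()
{
    Node addr{Addr};
    Node sideEffect{SideEffect};
    Node obj{Obj, &addr};
    Node comma{Comma, &sideEffect, &obj};

    Node* result = SinkCommaBelowBlock(&comma);
    assert(result == &obj && obj.op1 == &comma && comma.op2 == &addr);
}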
@@ -3240,7 +3269,8 @@ GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
// Returns the GenTree that should be used to do the intrinsic instead of the call.
// Returns NULL if an intrinsic cannot be used
-GenTreePtr Compiler::impIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
+GenTreePtr Compiler::impIntrinsic(GenTreePtr newobjThis,
+ CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
int memberRef,
@@ -3252,7 +3282,7 @@ GenTreePtr Compiler::impIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
#if COR_JIT_EE_VERSION > 460
CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
#else
- CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method);
+ CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method);
#endif
*pIntrinsicID = intrinsicID;
@@ -3576,7 +3606,33 @@ GenTreePtr Compiler::impIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
retNode = op1;
break;
#endif
-
+ // Implement the ByReference ctor. This stores the incoming byref into the byref-like field
+ // of a value type; the canonical example is Span<T>. In effect this is just a
+ // substitution: the byref parameter is assigned into the newly allocated object.
+ case CORINFO_INTRINSIC_ByReference_Ctor:
+ {
+ // Remove call to constructor and directly assign the byref passed
+ // to the call to the first slot of the ByReference struct.
+ op1 = impPopStack().val;
+ GenTreePtr thisptr = newobjThis;
+ CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
+ GenTreePtr field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0, false);
+ GenTreePtr assign = gtNewAssignNode(field, op1);
+ GenTreePtr byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
+ assert(byReferenceStruct != nullptr);
+ impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
+ retNode = assign;
+ break;
+ }
+ // Implement ptr value getter for ByReference struct.
+ case CORINFO_INTRINSIC_ByReference_Value:
+ {
+ op1 = impPopStack().val;
+ CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
+ GenTreePtr field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0, false);
+ retNode = field;
+ break;
+ }
default:
/* Unknown intrinsic */
break;
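A small standalone sketch (not from the patch) of what the two intrinsics reduce to, with a C++ struct standing in for ByReference<T> (ByReferenceOfInt, ByReferenceCtor and ByReferenceValue are invented names): the ctor becomes a plain store into the single byref-like field at offset 0, and the Value getter becomes a plain load of it:

#include <cassert>

struct ByReferenceOfInt // stand-in for ByReference<T> instantiated over int
{
    int* value;         // the single byref-like field at offset 0
};

// What CORINFO_INTRINSIC_ByReference_Ctor reduces to: field = incoming byref.
static void ByReferenceCtor(ByReferenceOfInt* self, int* byref) { self->value = byref; }

// What CORINFO_INTRINSIC_ByReference_Value reduces to: a load of that field.
static int* ByReferenceValue(const ByReferenceOfInt* self) { return self->value; }

int main()
{
    int x = 42;
    ByReferenceOfInt r;
    ByReferenceCtor(&r, &x);
    assert(ByReferenceValue(&r) == &x);
    *ByReferenceValue(&r) = 43;
    assert(x == 43);
}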
@@ -5359,29 +5415,23 @@ GenTreePtr Compiler::impTransformThis(GenTreePtr thisPtr,
}
//------------------------------------------------------------------------
-// impCanPInvokeInline: examine information from a call to see if the call
-// qualifies as an inline pinvoke.
-//
-// Arguments:
-// block - block contaning the call, or for inlinees, block
-// containing the call being inlined
+// impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
//
// Return Value:
-// true if this call qualifies as an inline pinvoke, false otherwise
+// true if PInvoke inlining should be enabled in the current method, false otherwise
//
// Notes:
-// Checks basic legality and then a number of ambient conditions
-// where we could pinvoke but choose not to
+// Checks a number of ambient conditions where we could pinvoke but choose not to
-bool Compiler::impCanPInvokeInline(BasicBlock* block)
+bool Compiler::impCanPInvokeInline()
{
- return impCanPInvokeInlineCallSite(block) && getInlinePInvokeEnabled() && (!opts.compDbgCode) &&
- (compCodeOpt() != SMALL_CODE) && (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
+ return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
+ (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
;
}
//------------------------------------------------------------------------
-// impCanPInvokeInlineSallSite: basic legality checks using information
+// impCanPInvokeInlineCallSite: basic legality checks using information
// from a call to see if the call qualifies as an inline pinvoke.
//
// Arguments:
@@ -5410,6 +5460,17 @@ bool Compiler::impCanPInvokeInline(BasicBlock* block)
bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
{
+ if (block->hasHndIndex())
+ {
+ return false;
+ }
+
+ // The remaining limitations do not apply to CoreRT
+ if (IsTargetAbi(CORINFO_CORERT_ABI))
+ {
+ return true;
+ }
+
#ifdef _TARGET_AMD64_
// On x64, we disable pinvoke inlining inside of try regions.
// Here is the comment from JIT64 explaining why:
@@ -5431,12 +5492,13 @@ bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
//
// A desktop test case where this seems to matter is
// jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
- const bool inX64Try = block->hasTryIndex();
-#else
- const bool inX64Try = false;
+ if (block->hasTryIndex())
+ {
+ return false;
+ }
#endif // _TARGET_AMD64_
- return !inX64Try && !block->hasHndIndex();
+ return true;
}
//------------------------------------------------------------------------
@@ -5502,27 +5564,38 @@ void Compiler::impCheckForPInvokeCall(
}
optNativeCallCount++;
- if (opts.compMustInlinePInvokeCalli && methHnd == nullptr)
+ if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && methHnd == nullptr)
{
- // Always inline pinvoke.
+ // PInvoke CALLI in IL stubs must be inlined
}
else
{
- // Check legality and profitability.
- if (!impCanPInvokeInline(block))
+ // Check legality
+ if (!impCanPInvokeInlineCallSite(block))
{
return;
}
- if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
+ // PInvoke CALL in IL stubs must be inlined on CoreRT. Skip the ambient condition and
+ // profitability checks.
+ if (!(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && IsTargetAbi(CORINFO_CORERT_ABI)))
{
- return;
+ if (!impCanPInvokeInline())
+ {
+ return;
+ }
+
+ // Size-speed tradeoff: don't use inline pinvoke at rarely
+ // executed call sites. The non-inline version is more
+ // compact.
+ if (block->isRunRarely())
+ {
+ return;
+ }
}
- // Size-speed tradeoff: don't use inline pinvoke at rarely
- // executed call sites. The non-inline version is more
- // compact.
- if (block->isRunRarely())
+ // The expensive check should be last
+ if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
{
return;
}
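A simplified standalone sketch (not from the patch) of the check ordering introduced in impCheckForPInvokeCall below: call-site legality first, then the ambient and profitability checks (skipped when inlining is mandatory, i.e. for IL stubs on CoreRT), and the expensive marshalling query last. All type and predicate names here are invented stand-ins; none exist in the JIT:

#include <cassert>

struct CallSite { bool inHandler; bool rarelyRun; };
struct Context  { bool isILStub; bool isCoreRT; bool inlinePInvokeEnabled; bool marshalingRequired; };

static bool ShouldInlinePInvoke(const CallSite& site, const Context& ctx)
{
    if (site.inHandler)                  // call-site legality check comes first
        return false;
    if (!(ctx.isILStub && ctx.isCoreRT)) // mandatory for IL stubs on CoreRT, so skip the
    {                                    // ambient and profitability checks in that case
        if (!ctx.inlinePInvokeEnabled)
            return false;
        if (site.rarelyRun)              // size/speed trade-off
            return false;
    }
    if (ctx.marshalingRequired)          // the expensive query goes last
        return false;
    return true;
}

int main()
{
    assert(ShouldInlinePInvoke({false, true}, {true, true, false, false}));   // CoreRT IL stub: inlined anyway
    assert(!ShouldInlinePInvoke({false, true}, {false, false, true, false})); // rarely-run ordinary call site
}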
@@ -6189,7 +6262,7 @@ bool Compiler::impIsTailCallILPattern(bool tailPrefixed,
((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
// one pop seen so far.
#else
- nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
+ nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
#endif
if (isCallPopAndRet)
@@ -6359,6 +6432,7 @@ var_types Compiler::impImportCall(OPCODE opcode,
eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
callRetTyp = JITtype2varType(calliSig.retType);
+ clsHnd = calliSig.retTypeClass;
call = impImportIndirectCall(&calliSig, ilOffset);
@@ -6387,6 +6461,16 @@ var_types Compiler::impImportCall(OPCODE opcode,
call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
*call->gtCall.callSig = calliSig;
#endif // DEBUG
+
+ if (IsTargetAbi(CORINFO_CORERT_ABI))
+ {
+ bool managedCall = (calliSig.callConv & GTF_CALL_UNMANAGED) == 0;
+ if (managedCall)
+ {
+ call->AsCall()->SetFatPointerCandidate();
+ setMethodHasFatPointer();
+ }
+ }
}
else // (opcode != CEE_CALLI)
{
@@ -6435,7 +6519,7 @@ var_types Compiler::impImportCall(OPCODE opcode,
if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
{
- compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
+ compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NOINLINE_CALLEE);
return callRetTyp;
}
@@ -6490,7 +6574,7 @@ var_types Compiler::impImportCall(OPCODE opcode,
// <NICE> Factor this into getCallInfo </NICE>
if ((mflags & CORINFO_FLG_INTRINSIC) && !pConstrainedResolvedToken)
{
- call = impIntrinsic(clsHnd, methHnd, sig, pResolvedToken->token, readonlyCall,
+ call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, pResolvedToken->token, readonlyCall,
(canTailCall && (tailCall != 0)), &intrinsicID);
if (call != nullptr)
@@ -6533,7 +6617,6 @@ var_types Compiler::impImportCall(OPCODE opcode,
if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
{
NO_WAY("Virtual call to a function added via EnC is not supported");
- goto DONE_CALL;
}
if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
@@ -7469,10 +7552,8 @@ DONE:
}
}
-// Note: we assume that small return types are already normalized by the managed callee
-// or by the pinvoke stub for calls to unmanaged code.
-
-DONE_CALL:
+ // Note: we assume that small return types are already normalized by the managed callee
+ // or by the pinvoke stub for calls to unmanaged code.
if (!bIntrinsicImported)
{
@@ -7517,6 +7598,7 @@ DONE_CALL:
impMarkInlineCandidate(call, exactContextHnd, callInfo);
}
+DONE_CALL:
// Push or append the result of the call
if (callRetTyp == TYP_VOID)
{
@@ -7569,9 +7651,11 @@ DONE_CALL:
}
}
- if (call->gtOper == GT_CALL)
+ if (call->IsCall())
{
// Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
+
+ bool fatPointerCandidate = call->AsCall()->IsFatPointerCandidate();
if (varTypeIsStruct(callRetTyp))
{
call = impFixupCallStructReturn(call, sig->retTypeClass);
@@ -7580,6 +7664,7 @@ DONE_CALL:
if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
{
assert(opts.OptEnabled(CLFLG_INLINING));
+ assert(!fatPointerCandidate); // We should not try to inline calli.
// Make the call its own tree (spill the stack if needed).
impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
@@ -7589,6 +7674,24 @@ DONE_CALL:
}
else
{
+ if (fatPointerCandidate)
+ {
+ // fatPointer candidates should be in statements of the form call() or var = call().
+ // This form lets us find statements with fat calls without walking through whole trees,
+ // and avoids problems with cutting trees apart.
+ assert(!bIntrinsicImported);
+ assert(IsTargetAbi(CORINFO_CORERT_ABI));
+ if (call->OperGet() != GT_LCL_VAR) // it may already have been converted by impFixupCallStructReturn.
+ {
+ unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli"));
+ LclVarDsc* varDsc = &lvaTable[calliSlot];
+ varDsc->lvVerTypeInfo = tiRetVal;
+ impAssignTempGen(calliSlot, call, clsHnd, (unsigned)CHECK_SPILL_NONE);
+ // impAssignTempGen can change the source arg list and the return type for a call that returns a struct.
+ var_types type = genActualType(lvaTable[calliSlot].TypeGet());
+ call = gtNewLclvNode(calliSlot, type);
+ }
+ }
// For non-candidates we must also spill, since we
// might have locals live on the eval stack that this
// call can modify.
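A toy standalone sketch (not from the patch) of the statement shape being enforced here: a fat-pointer candidate call that is not already a local variable use is spilled into a fresh temp, so every such call appears either as a top-level call or as "var = call()", which later phases can locate without scanning whole trees. Importer, SpillFatCall and the string-based IR are invented purely for illustration:

#include <cassert>
#include <string>
#include <vector>

// Toy IR: a node is just its printed form; each statement holds one node.
using Node = std::string;

struct Importer
{
    std::vector<Node> statements; // appended in import order
    unsigned          lvaCount = 0;

    unsigned GrabTemp() { return lvaCount++; }

    // Spill a fat-pointer candidate call into "tmp = call" and hand back a use
    // of the temp to take the call's place in the enclosing expression.
    Node SpillFatCall(const Node& call)
    {
        Node temp = "V" + std::to_string(GrabTemp());
        statements.push_back(temp + " = " + call); // the "var = call()" statement
        return temp;
    }
};

int main()
{
    Importer imp;
    Node use = imp.SpillFatCall("calli(ftn, args)");
    assert(use == "V0");
    assert(imp.statements.size() == 1 && imp.statements[0] == "V0 = calli(ftn, args)");
}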