diff options
Diffstat (limited to 'src/jit/lclvars.cpp')
-rw-r--r-- | src/jit/lclvars.cpp | 97 |
1 file changed, 52 insertions, 45 deletions
diff --git a/src/jit/lclvars.cpp b/src/jit/lclvars.cpp index 369c96322d..ea9c573a02 100644 --- a/src/jit/lclvars.cpp +++ b/src/jit/lclvars.cpp @@ -385,8 +385,9 @@ void Compiler::lvaInitThisPtr(InitVarDscInfo* varDscInfo) if (simdBaseType != TYP_UNKNOWN) { assert(varTypeIsSIMD(type)); - varDsc->lvSIMDType = true; - varDsc->lvBaseType = simdBaseType; + varDsc->lvSIMDType = true; + varDsc->lvBaseType = simdBaseType; + varDsc->lvExactSize = genTypeSize(type); } } #endif // FEATURE_SIMD @@ -1448,12 +1449,16 @@ void Compiler::lvaCanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd, #if 1 // TODO-Cleanup: Consider removing this entire #if block in the future - // This method has two callers. The one in Importer.cpp passes sortFields == false - // and the other passes sortFields == true. - // This is a workaround that leave the inlining behavior the same and before while still - // performing extra struct promotions when compiling the method. - // +// This method has two callers. The one in Importer.cpp passes sortFields == false +// and the other passes sortFields == true. +// This is a workaround that leaves the inlining behavior the same as before while still +// performing extra struct promotions when compiling the method. +// +// The x86 legacy back-end can't handle the more general RyuJIT struct promotion (notably structs +// with holes), in genPushArgList(), so in that case always check for custom layout. 
+#if FEATURE_FIXED_OUT_ARGS || !defined(LEGACY_BACKEND) if (!sortFields) // the condition "!sortFields" really means "we are inlining" +#endif { treatAsOverlapping = StructHasCustomLayout(typeFlags); } @@ -1736,7 +1741,7 @@ void Compiler::lvaPromoteStructVar(unsigned lclNum, lvaStructPromotionInfo* Stru } } -#if !defined(_TARGET_64BIT_) +#if !defined(LEGACY_BACKEND) && !defined(_TARGET_64BIT_) //------------------------------------------------------------------------ // lvaPromoteLongVars: "Struct promote" all register candidate longs as if they are structs of two ints. // @@ -1752,29 +1757,18 @@ void Compiler::lvaPromoteLongVars() { return; } + // The lvaTable might grow as we grab temps. Make a local copy here. unsigned startLvaCount = lvaCount; for (unsigned lclNum = 0; lclNum < startLvaCount; lclNum++) { LclVarDsc* varDsc = &lvaTable[lclNum]; if (!varTypeIsLong(varDsc) || varDsc->lvDoNotEnregister || varDsc->lvIsMultiRegArgOrRet() || - (varDsc->lvRefCnt == 0)) + (varDsc->lvRefCnt == 0) || varDsc->lvIsStructField || (fgNoStructPromotion && varDsc->lvIsParam)) { continue; } - // Will this work ??? - // We can't have nested promoted structs. 
- if (varDsc->lvIsStructField) - { - if (lvaGetPromotionType(varDsc->lvParentLcl) != PROMOTION_TYPE_INDEPENDENT) - { - continue; - } - varDsc->lvIsStructField = false; - varDsc->lvTracked = false; - } - varDsc->lvFieldCnt = 2; varDsc->lvFieldLclStart = lvaCount; varDsc->lvPromoted = true; @@ -1823,7 +1817,7 @@ void Compiler::lvaPromoteLongVars() } #endif // DEBUG } -#endif // !_TARGET_64BIT_ +#endif // !defined(LEGACY_BACKEND) && !defined(_TARGET_64BIT_) /***************************************************************************** * Given a fldOffset in a promoted struct var, return the index of the local @@ -1904,6 +1898,10 @@ void Compiler::lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregister JITDUMP("it is a struct\n"); assert(varTypeIsStruct(varDsc)); break; + case DNER_IsStructArg: + JITDUMP("it is a struct arg\n"); + assert(varTypeIsStruct(varDsc)); + break; case DNER_BlockOp: JITDUMP("written in a block op\n"); varDsc->lvLclBlockOpAddr = 1; @@ -2038,7 +2036,7 @@ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool } #ifndef _TARGET_64BIT_ - bool fDoubleAlignHint = FALSE; + BOOL fDoubleAlignHint = FALSE; #ifdef _TARGET_X86_ fDoubleAlignHint = TRUE; #endif @@ -2697,6 +2695,10 @@ void Compiler::lvaSortByRefCount() lvaTrackedCount = 0; lvaTrackedCountInSizeTUnits = 0; +#ifdef DEBUG + VarSetOps::AssignNoCopy(this, lvaTrackedVars, VarSetOps::MakeEmpty(this)); +#endif + if (lvaCount == 0) { return; @@ -3386,26 +3388,30 @@ void Compiler::lvaMarkLocalVars() #endif // !FEATURE_EH_FUNCLETS -#if FEATURE_EH_FUNCLETS - if (ehNeedsPSPSym()) + // PSPSym and LocAllocSPvar are not used by the CoreRT ABI + if (!IsTargetAbi(CORINFO_CORERT_ABI)) { - lvaPSPSym = lvaGrabTempWithImplicitUse(false DEBUGARG("PSPSym")); - LclVarDsc* lclPSPSym = &lvaTable[lvaPSPSym]; - lclPSPSym->lvType = TYP_I_IMPL; - } +#if FEATURE_EH_FUNCLETS + if (ehNeedsPSPSym()) + { + lvaPSPSym = lvaGrabTempWithImplicitUse(false DEBUGARG("PSPSym")); + LclVarDsc* lclPSPSym 
= &lvaTable[lvaPSPSym]; + lclPSPSym->lvType = TYP_I_IMPL; + } #endif // FEATURE_EH_FUNCLETS - if (compLocallocUsed) - { - lvaLocAllocSPvar = lvaGrabTempWithImplicitUse(false DEBUGARG("LocAllocSPvar")); - LclVarDsc* locAllocSPvar = &lvaTable[lvaLocAllocSPvar]; - locAllocSPvar->lvType = TYP_I_IMPL; + // TODO: LocAllocSPvar should be only required by the implicit frame layout expected by the VM on x86. + // It should be removed on other platforms once we check there are no other implicit dependencies. + if (compLocallocUsed) + { + lvaLocAllocSPvar = lvaGrabTempWithImplicitUse(false DEBUGARG("LocAllocSPvar")); + LclVarDsc* locAllocSPvar = &lvaTable[lvaLocAllocSPvar]; + locAllocSPvar->lvType = TYP_I_IMPL; + } } BasicBlock* block; -#if defined(DEBUGGING_SUPPORT) || defined(DEBUG) - #ifndef DEBUG // Assign slot numbers to all variables. // If compiler generated local variables, slot numbers will be @@ -3428,8 +3434,6 @@ void Compiler::lvaMarkLocalVars() } } -#endif // defined(DEBUGGING_SUPPORT) || defined(DEBUG) - /* Mark all local variable references */ lvaRefCountingStarted = true; @@ -4062,12 +4066,11 @@ void Compiler::lvaFixVirtualFrameOffsets() LclVarDsc* varDsc; #if FEATURE_EH_FUNCLETS && defined(_TARGET_AMD64_) - if (ehNeedsPSPSym()) + if (lvaPSPSym != BAD_VAR_NUM) { // We need to fix the offset of the PSPSym so there is no padding between it and the outgoing argument space. // Without this code, lvaAlignFrame might have put the padding lower than the PSPSym, which would be between // the PSPSym and the outgoing argument space. - assert(lvaPSPSym != BAD_VAR_NUM); varDsc = &lvaTable[lvaPSPSym]; assert(varDsc->lvFramePointerBased); // We always access it RBP-relative. assert(!varDsc->lvMustInit); // It is never "must init". 
@@ -4453,7 +4456,9 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, noway_assert(argSize); if (Target::g_tgtArgOrder == Target::ARG_ORDER_L2R) + { argOffs -= argSize; + } unsigned fieldVarNum = BAD_VAR_NUM; @@ -4543,7 +4548,9 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, } if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L && !varDsc->lvIsRegArg) + { argOffs += argSize; + } return argOffs; } @@ -4973,13 +4980,12 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() #endif //_TARGET_AMD64_ #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARMARCH_) - if (ehNeedsPSPSym()) + if (lvaPSPSym != BAD_VAR_NUM) { // On ARM/ARM64, if we need a PSPSym, allocate it first, before anything else, including // padding (so we can avoid computing the same padding in the funclet // frame). Note that there is no special padding requirement for the PSPSym. noway_assert(codeGen->isFramePointerUsed()); // We need an explicit frame pointer - assert(lvaPSPSym != BAD_VAR_NUM); // We should have created the PSPSym variable stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaPSPSym, TARGET_POINTER_SIZE, stkOffs); } #endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARMARCH_) @@ -5033,7 +5039,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaSecurityObject, TARGET_POINTER_SIZE, stkOffs); } - if (compLocallocUsed) + if (lvaLocAllocSPvar != BAD_VAR_NUM) { #ifdef JIT32_GCENCODER noway_assert(codeGen->isFramePointerUsed()); // else offsets of locals of frameless methods will be incorrect @@ -5278,7 +5284,9 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() // a local variable which will need stack frame space. 
// if (!varDsc->lvIsRegArg) + { continue; + } #ifdef _TARGET_ARM64_ if (info.compIsVarArgs) @@ -5477,13 +5485,12 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() } #if FEATURE_EH_FUNCLETS && defined(_TARGET_AMD64_) - if (ehNeedsPSPSym()) + if (lvaPSPSym != BAD_VAR_NUM) { // On AMD64, if we need a PSPSym, allocate it last, immediately above the outgoing argument // space. Any padding will be higher on the stack than this // (including the padding added by lvaAlignFrame()). noway_assert(codeGen->isFramePointerUsed()); // We need an explicit frame pointer - assert(lvaPSPSym != BAD_VAR_NUM); // We should have created the PSPSym variable stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaPSPSym, TARGET_POINTER_SIZE, stkOffs); } #endif // FEATURE_EH_FUNCLETS && defined(_TARGET_AMD64_) |