Diffstat (limited to 'src/jit/compiler.cpp')
-rw-r--r--  src/jit/compiler.cpp | 83
1 file changed, 34 insertions(+), 49 deletions(-)
diff --git a/src/jit/compiler.cpp b/src/jit/compiler.cpp
index 114847c0d0..01c7f8d6a7 100644
--- a/src/jit/compiler.cpp
+++ b/src/jit/compiler.cpp
@@ -1635,18 +1635,16 @@ void Compiler::compDisplayStaticSizes(FILE* fout)
sizeof(bbDummy->bbVarUse));
fprintf(fout, "Offset / size of bbVarDef = %3u / %3u\n", offsetof(BasicBlock, bbVarDef),
sizeof(bbDummy->bbVarDef));
- fprintf(fout, "Offset / size of bbVarTmp = %3u / %3u\n", offsetof(BasicBlock, bbVarTmp),
- sizeof(bbDummy->bbVarTmp));
fprintf(fout, "Offset / size of bbLiveIn = %3u / %3u\n", offsetof(BasicBlock, bbLiveIn),
sizeof(bbDummy->bbLiveIn));
fprintf(fout, "Offset / size of bbLiveOut = %3u / %3u\n", offsetof(BasicBlock, bbLiveOut),
sizeof(bbDummy->bbLiveOut));
- fprintf(fout, "Offset / size of bbHeapSsaPhiFunc = %3u / %3u\n", offsetof(BasicBlock, bbHeapSsaPhiFunc),
- sizeof(bbDummy->bbHeapSsaPhiFunc));
- fprintf(fout, "Offset / size of bbHeapSsaNumIn = %3u / %3u\n", offsetof(BasicBlock, bbHeapSsaNumIn),
- sizeof(bbDummy->bbHeapSsaNumIn));
- fprintf(fout, "Offset / size of bbHeapSsaNumOut = %3u / %3u\n", offsetof(BasicBlock, bbHeapSsaNumOut),
- sizeof(bbDummy->bbHeapSsaNumOut));
+ fprintf(fout, "Offset / size of bbMemorySsaPhiFunc = %3u / %3u\n", offsetof(BasicBlock, bbMemorySsaPhiFunc),
+ sizeof(bbDummy->bbMemorySsaPhiFunc));
+ fprintf(fout, "Offset / size of bbMemorySsaNumIn = %3u / %3u\n", offsetof(BasicBlock, bbMemorySsaNumIn),
+ sizeof(bbDummy->bbMemorySsaNumIn));
+ fprintf(fout, "Offset / size of bbMemorySsaNumOut = %3u / %3u\n", offsetof(BasicBlock, bbMemorySsaNumOut),
+ sizeof(bbDummy->bbMemorySsaNumOut));
fprintf(fout, "Offset / size of bbScope = %3u / %3u\n", offsetof(BasicBlock, bbScope),
sizeof(bbDummy->bbScope));
fprintf(fout, "Offset / size of bbCseGen = %3u / %3u\n", offsetof(BasicBlock, bbCseGen),
@@ -1788,9 +1786,9 @@ void Compiler::compInit(ArenaAllocator* pAlloc, InlineInfo* inlineInfo)
impSpillCliquePredMembers = ExpandArray<BYTE>(getAllocator());
impSpillCliqueSuccMembers = ExpandArray<BYTE>(getAllocator());
- memset(&lvHeapPerSsaData, 0, sizeof(PerSsaArray));
- lvHeapPerSsaData.Init(getAllocator());
- lvHeapNumSsaNames = 0;
+ memset(&lvMemoryPerSsaData, 0, sizeof(PerSsaArray));
+ lvMemoryPerSsaData.Init(getAllocator());
+ lvMemoryNumSsaNames = 0;
//
// Initialize all the per-method statistics gathering data structures.
@@ -1871,8 +1869,11 @@ void Compiler::compInit(ArenaAllocator* pAlloc, InlineInfo* inlineInfo)
m_fieldSeqStore = nullptr;
m_zeroOffsetFieldMap = nullptr;
m_arrayInfoMap = nullptr;
- m_heapSsaMap = nullptr;
m_refAnyClass = nullptr;
+ for (MemoryKind memoryKind : allMemoryKinds())
+ {
+ m_memorySsaMap[memoryKind] = nullptr;
+ }
#ifdef DEBUG
if (!compIsForInlining())
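For readers unfamiliar with the per-kind map introduced here: the diff replaces the single m_heapSsaMap with one map per MemoryKind, zeroed in a loop over allMemoryKinds(). A minimal standalone sketch of that shape follows; the enum values and type names are assumptions for illustration, not taken from the real JIT headers.

// Illustrative model only: the real MemoryKind enum and allMemoryKinds() live
// in the JIT headers; everything below is a stand-in for the sketch.
enum MemoryKind
{
    ByrefExposed = 0,
    GcHeap,
    MemoryKindCount // number of memory kinds, used as the array dimension
};

struct NodeToUnsignedMap; // opaque stand-in for the real map type

struct CompilerModel
{
    // One lazily-allocated map per memory kind, as m_memorySsaMap is indexed above.
    NodeToUnsignedMap* m_memorySsaMap[MemoryKindCount];

    void resetMemorySsaMaps()
    {
        for (int memoryKind = 0; memoryKind < MemoryKindCount; memoryKind++)
        {
            m_memorySsaMap[memoryKind] = nullptr;
        }
    }
};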
@@ -2312,6 +2313,9 @@ void Compiler::compSetProcessor()
if (opts.compCanUseAVX)
{
codeGen->getEmitter()->SetUseAVX(true);
+ // Assume each JITted method does not contain AVX instruction at first
+ codeGen->getEmitter()->SetContainsAVX(false);
+ codeGen->getEmitter()->SetContains256bitAVX(false);
}
else
#endif // FEATURE_AVX_SUPPORT
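The added lines initialize two "contains AVX" flags to false whenever AVX is usable; presumably the emitter flips them once it actually emits AVX (and 256-bit AVX) instructions, so later decisions can depend on them. A rough model of that assume-false, set-lazily pattern; only the two setter names come from the diff, the rest is illustrative.

// Hypothetical model: start out assuming no AVX, record usage as instructions
// are emitted. This is not the real emitter interface.
class EmitterModel
{
    bool m_containsAVX       = false;
    bool m_contains256bitAVX = false;

public:
    void SetContainsAVX(bool value)       { m_containsAVX = value; }
    void SetContains256bitAVX(bool value) { m_contains256bitAVX = value; }

    // Called from the (modeled) instruction emitter when an instruction goes out.
    void noteInstruction(bool isAvx, bool is256bit)
    {
        if (isAvx)
        {
            SetContainsAVX(true);
            if (is256bit)
            {
                SetContains256bitAVX(true);
            }
        }
    }
};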
@@ -3024,6 +3028,7 @@ void Compiler::compInitOptions(JitFlags* jitFlags)
#ifdef FEATURE_SIMD
// Minimum bar for availing SIMD benefits is SSE2 on AMD64/x86.
featureSIMD = jitFlags->IsSet(JitFlags::JIT_FLAG_FEATURE_SIMD);
+ setUsesSIMDTypes(false);
#endif // FEATURE_SIMD
if (compIsForInlining() || compIsForImportOnly())
@@ -3296,8 +3301,6 @@ void Compiler::compInitOptions(JitFlags* jitFlags)
}
#endif
- opts.compMustInlinePInvokeCalli = jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB);
-
opts.compScopeInfo = opts.compDbgInfo;
#ifdef LATE_DISASM
@@ -4194,11 +4197,17 @@ void Compiler::compCompile(void** methodCodePtr, ULONG* methodCodeSize, JitFlags
assert(!fgComputePredsDone);
if (fgCheapPredsValid)
{
- // Remove cheap predecessors before inlining; allowing the cheap predecessor lists to be inserted
- // with inlined blocks causes problems.
+ // Remove cheap predecessors before inlining and fat call transformation;
+ // allowing the cheap predecessor lists to be inserted causes problems
+ // with splitting existing blocks.
fgRemovePreds();
}
+ if (IsTargetAbi(CORINFO_CORERT_ABI) && doesMethodHaveFatPointer())
+ {
+ fgTransformFatCalli();
+ }
+
EndPhase(PHASE_IMPORTATION);
if (compIsForInlining())
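fgTransformFatCalli is only called here, not defined in this file. As a heavily hedged illustration of what a "fat" indirect call generally means (a guess at the idea, not the actual CoreRT implementation): the call target may carry a tag bit indicating it really points at a (code address, extra argument) pair, so an imported calli has to be split into a runtime check with two call shapes, which is also why the cheap predecessor lists must be removed before blocks get split.

#include <cstdint>

// Purely illustrative: one common shape for a tagged ("fat") function pointer.
// The tag bit value, struct layout, and helper below are assumptions.
using PlainFn = int (*)();
using FatFn   = int (*)(void*);

struct FatFunctionPointer
{
    FatFn target;    // real code address
    void* extraArg;  // e.g. an instantiation argument passed alongside
};

constexpr uintptr_t kFatTagBit = 0x2;

int callPossiblyFat(uintptr_t fptr)
{
    if ((fptr & kFatTagBit) != 0)
    {
        // Tagged: strip the bit, load the pair, call with the extra argument.
        auto* fat = reinterpret_cast<FatFunctionPointer*>(fptr & ~kFatTagBit);
        return fat->target(fat->extraArg);
    }
    // Untagged: an ordinary indirect call.
    return reinterpret_cast<PlainFn>(fptr)();
}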
@@ -4598,6 +4607,10 @@ void Compiler::compCompile(void** methodCodePtr, ULONG* methodCodeSize, JitFlags
codeGen->regSet.rsMaskResvd |= RBM_OPT_RSVD;
assert(REG_OPT_RSVD != REG_FP);
}
+ // compRsvdRegCheck() has read out the FramePointerUsed property, but doLinearScan()
+ // tries to overwrite it later. This violates the PhasedVar rule and triggers an assertion.
+ // TODO-ARM-Bug?: What is the proper way to handle this situation?
+ codeGen->resetFramePointerUsedWritePhase();
#ifdef DEBUG
//
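The new comment refers to the PhasedVar rule without restating it. Roughly (an approximation, not the real PhasedVar from the JIT headers): such a variable may be written freely until it is first read, after which further writes assert unless the write phase is explicitly reset, which is what codeGen->resetFramePointerUsedWritePhase() appears to do for FramePointerUsed. A standalone model:

#include <cassert>

// Approximate model of a "phased" variable; the real PhasedVar may differ in detail.
template <typename T>
class PhasedVarModel
{
    T            m_value{};
    mutable bool m_writePhase = true;

public:
    T get() const
    {
        m_writePhase = false; // the first read closes the write phase
        return m_value;
    }

    PhasedVarModel& operator=(const T& value)
    {
        // Writing after a read violates the rule the comment above refers to.
        assert(m_writePhase && "write after the value was already observed");
        m_value = value;
        return *this;
    }

    void resetWritePhase()
    {
        m_writePhase = true; // explicitly re-open writes, as the workaround does
    }
};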
@@ -4718,21 +4731,6 @@ void Compiler::ResetOptAnnotations()
tree->ClearVN();
tree->ClearAssertion();
tree->gtCSEnum = NO_CSE;
-
- // Clear any *_ASG_LHS flags -- these are set during SSA construction,
- // and the heap live-in calculation depends on them being unset coming
- // into SSA construction (without clearing them, a block that has a
- // heap def via one of these before any heap use is treated as not having
- // an upwards-exposed heap use, even though subsequent heap uses may not
- // be killed by the store; this seems to be a bug, worked around here).
- if (tree->OperIsIndir())
- {
- tree->gtFlags &= ~GTF_IND_ASG_LHS;
- }
- else if (tree->OperGet() == GT_CLS_VAR)
- {
- tree->gtFlags &= ~GTF_CLS_VAR_ASG_LHS;
- }
}
}
}
@@ -6708,16 +6706,7 @@ Compiler::NodeToIntMap* Compiler::FindReachableNodesInNodeTestData()
if (arg->gtFlags & GTF_LATE_ARG)
{
// Find the corresponding late arg.
- GenTreePtr lateArg = nullptr;
- for (unsigned j = 0; j < call->fgArgInfo->ArgCount(); j++)
- {
- if (call->fgArgInfo->ArgTable()[j]->argNum == i)
- {
- lateArg = call->fgArgInfo->ArgTable()[j]->node;
- break;
- }
- }
- assert(lateArg != nullptr);
+ GenTreePtr lateArg = call->fgArgInfo->GetLateArg(i);
if (GetNodeTestData()->Lookup(lateArg, &tlAndN))
{
reachable->Set(lateArg, 0);
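GetLateArg(i) replaces the removed open-coded search. Judging only from that removed loop, its behavior is presumably equivalent to the sketch below; the types are simplified stand-ins rather than the real fgArgInfo structures.

#include <cassert>

struct GenTree; // opaque stand-in for the real node type

struct ArgTabEntryModel
{
    unsigned argNum; // original argument number of this entry
    GenTree* node;   // the late argument node
};

struct ArgInfoModel
{
    ArgTabEntryModel* argTable;
    unsigned          argCount;

    // Presumed equivalent of fgArgInfo->GetLateArg(argNum): scan the table for
    // the entry whose original argument number matches, as the removed loop did.
    GenTree* GetLateArg(unsigned argNum) const
    {
        for (unsigned j = 0; j < argCount; j++)
        {
            if (argTable[j].argNum == argNum)
            {
                return argTable[j].node;
            }
        }
        assert(!"late arg not found");
        return nullptr;
    }
};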
@@ -6805,14 +6794,14 @@ void Compiler::CopyTestDataToCloneTree(GenTreePtr from, GenTreePtr to)
assert(to->gtOp.gtOp1 == nullptr);
}
- if (from->gtGetOp2() != nullptr)
+ if (from->gtGetOp2IfPresent() != nullptr)
{
- assert(to->gtGetOp2() != nullptr);
+ assert(to->gtGetOp2IfPresent() != nullptr);
CopyTestDataToCloneTree(from->gtGetOp2(), to->gtGetOp2());
}
else
{
- assert(to->gtGetOp2() == nullptr);
+ assert(to->gtGetOp2IfPresent() == nullptr);
}
return;
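Only the null checks switch to gtGetOp2IfPresent(); the recursive call keeps gtGetOp2(). That suggests gtGetOp2() now assumes a second operand exists while the IfPresent variant may return null. A minimal model of that split, under that assumption:

#include <cassert>

struct NodeModel
{
    NodeModel* op1 = nullptr;
    NodeModel* op2 = nullptr;

    // May legitimately return null, e.g. for unary operators.
    NodeModel* getOp2IfPresent() const
    {
        return op2;
    }

    // For callers that have already established a second operand exists.
    NodeModel* getOp2() const
    {
        assert(op2 != nullptr);
        return op2;
    }
};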
@@ -6863,8 +6852,8 @@ void Compiler::CopyTestDataToCloneTree(GenTreePtr from, GenTreePtr to)
#ifdef FEATURE_SIMD
case GT_SIMD_CHK:
#endif // FEATURE_SIMD
- CopyTestDataToCloneTree(from->gtBoundsChk.gtArrLen, to->gtBoundsChk.gtArrLen);
CopyTestDataToCloneTree(from->gtBoundsChk.gtIndex, to->gtBoundsChk.gtIndex);
+ CopyTestDataToCloneTree(from->gtBoundsChk.gtArrLen, to->gtBoundsChk.gtArrLen);
return;
default:
@@ -9175,10 +9164,6 @@ int cTreeFlagsIR(Compiler* comp, GenTree* tree)
{
chars += printf("[RELOP_QMARK]");
}
- if (tree->gtFlags & GTF_RELOP_SMALL)
- {
- chars += printf("[RELOP_SMALL]");
- }
break;
case GT_QMARK: