Diffstat (limited to 'src/jit/compiler.cpp')
-rw-r--r--  src/jit/compiler.cpp | 590
1 file changed, 92 insertions(+), 498 deletions(-)
diff --git a/src/jit/compiler.cpp b/src/jit/compiler.cpp
index 8cad182e2a..0a64394e1b 100644
--- a/src/jit/compiler.cpp
+++ b/src/jit/compiler.cpp
@@ -19,12 +19,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "ssabuilder.h"
#include "valuenum.h"
#include "rangecheck.h"
-
-#ifndef LEGACY_BACKEND
#include "lower.h"
#include "stacklevelsetter.h"
-#endif // !LEGACY_BACKEND
-
#include "jittelemetry.h"
#if defined(DEBUG)
@@ -1006,14 +1002,9 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
//
if (structSize <= sizeof(double))
{
-#if defined LEGACY_BACKEND
- if (!IsHfa(clsHnd))
-#endif
- {
- // We set the "primitive" useType based upon the structSize
- // and also examine the clsHnd to see if it is an HFA of count one
- useType = getPrimitiveTypeForStruct(structSize, clsHnd);
- }
+ // We set the "primitive" useType based upon the structSize
+ // and also examine the clsHnd to see if it is an HFA of count one
+ useType = getPrimitiveTypeForStruct(structSize, clsHnd);
}
#endif // UNIX_AMD64_ABI
@@ -1051,10 +1042,8 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
// Structs that are HFA's are returned in multiple registers
if (IsHfa(clsHnd))
{
-#if !defined(LEGACY_BACKEND)
// HFA's of count one should have been handled by getPrimitiveTypeForStruct
assert(GetHfaCount(clsHnd) >= 2);
-#endif // !defined(LEGACY_BACKEND)
// setup wbPassType and useType indicate that this is returned by value as an HFA
// using multiple registers
@@ -1852,11 +1841,6 @@ void Compiler::compDisplayStaticSizes(FILE* fout)
sizeof(bbDummy->bbTypesOut));
#endif // VERIFIER
-#if FEATURE_STACK_FP_X87
- fprintf(fout, "Offset / size of bbFPStateX87 = %3u / %3u\n", offsetof(BasicBlock, bbFPStateX87),
- sizeof(bbDummy->bbFPStateX87));
-#endif // FEATURE_STACK_FP_X87
-
#ifdef DEBUG
fprintf(fout, "Offset / size of bbLoopNum = %3u / %3u\n", offsetof(BasicBlock, bbLoopNum),
sizeof(bbDummy->bbLoopNum));
@@ -1907,10 +1891,6 @@ void Compiler::compInit(ArenaAllocator* pAlloc, InlineInfo* inlineInfo)
compAllocatorDebugOnly = nullptr;
#endif // DEBUG
#endif // MEASURE_MEM_ALLOC
-
-#ifdef LEGACY_BACKEND
- compQMarks = nullptr;
-#endif
}
else
{
@@ -1926,10 +1906,6 @@ void Compiler::compInit(ArenaAllocator* pAlloc, InlineInfo* inlineInfo)
compAllocatorDebugOnly = new (this, CMK_Unknown) CompAllocator(this, CMK_DebugOnly);
#endif // DEBUG
#endif // MEASURE_MEM_ALLOC
-
-#ifdef LEGACY_BACKEND
- compQMarks = new (this, CMK_Unknown) JitExpandArrayStack<GenTree*>(getAllocator());
-#endif
}
#ifdef FEATURE_TRACELOGGING
@@ -1956,13 +1932,8 @@ void Compiler::compInit(ArenaAllocator* pAlloc, InlineInfo* inlineInfo)
if (!compIsForInlining())
{
codeGen = getCodeGenerator(this);
-#ifdef LEGACY_BACKEND
- raInit();
-#endif // LEGACY_BACKEND
optInit();
-#ifndef LEGACY_BACKEND
hashBv::Init(this);
-#endif // !LEGACY_BACKEND
compVarScopeMap = nullptr;
@@ -2012,9 +1983,6 @@ void Compiler::compInit(ArenaAllocator* pAlloc, InlineInfo* inlineInfo)
#if CPU_USES_BLOCK_MOVE
compBlkOpUsed = false;
#endif
-#if FEATURE_STACK_FP_X87
- compMayHaveTransitionBlocks = false;
-#endif
compNeedsGSSecurityCookie = false;
compGSReorderStackLayout = false;
#if STACK_PROBES
@@ -2024,9 +1992,7 @@ void Compiler::compInit(ArenaAllocator* pAlloc, InlineInfo* inlineInfo)
compGeneratingProlog = false;
compGeneratingEpilog = false;
-#ifndef LEGACY_BACKEND
- compLSRADone = false;
-#endif // !LEGACY_BACKEND
+ compLSRADone = false;
compRationalIRForm = false;
#ifdef DEBUG
@@ -2285,11 +2251,7 @@ VarName Compiler::compVarName(regNumber reg, bool isFloatReg)
{
if (isFloatReg)
{
-#if FEATURE_STACK_FP_X87
- assert(reg < FP_STK_SIZE); // would like to have same assert as below but sometimes you get -1?
-#else
assert(genIsValidFloatReg(reg));
-#endif
}
else
{
@@ -2322,34 +2284,8 @@ VarName Compiler::compVarName(regNumber reg, bool isFloatReg)
}
}
}
-
-#ifdef LEGACY_BACKEND
- // maybe var is marked dead, but still used (last use)
- if (!isFloatReg && codeGen->regSet.rsUsedTree[reg] != NULL)
- {
- GenTree* nodePtr;
-
- if (GenTree::OperIsUnary(codeGen->regSet.rsUsedTree[reg]->OperGet()))
- {
- assert(codeGen->regSet.rsUsedTree[reg]->gtOp.gtOp1 != NULL);
- nodePtr = codeGen->regSet.rsUsedTree[reg]->gtOp.gtOp1;
- }
- else
- {
- nodePtr = codeGen->regSet.rsUsedTree[reg];
- }
-
- if ((nodePtr->gtOper == GT_REG_VAR) && (nodePtr->gtRegVar.gtRegNum == reg) &&
- (nodePtr->gtRegVar.gtLclNum < info.compVarScopesCount))
- {
- VarScopeDsc* varScope =
- compFindLocalVar(nodePtr->gtRegVar.gtLclNum, compCurBB->bbCodeOffs, compCurBB->bbCodeOffsEnd);
- if (varScope)
- return varScope->vsdName;
- }
- }
-#endif // LEGACY_BACKEND
}
+
return nullptr;
}
@@ -2385,25 +2321,6 @@ const char* Compiler::compRegVarName(regNumber reg, bool displayVar, bool isFloa
return getRegName(reg, isFloatReg);
}
-#define MAX_REG_PAIR_NAME_LENGTH 10
-
-const char* Compiler::compRegPairName(regPairNo regPair)
-{
- static char regNameLong[MAX_REG_PAIR_NAME_LENGTH];
-
- if (regPair == REG_PAIR_NONE)
- {
- return "NA|NA";
- }
-
- assert(regPair >= REG_PAIR_FIRST && regPair <= REG_PAIR_LAST);
-
- strcpy_s(regNameLong, sizeof(regNameLong), compRegVarName(genRegPairLo(regPair)));
- strcat_s(regNameLong, sizeof(regNameLong), "|");
- strcat_s(regNameLong, sizeof(regNameLong), compRegVarName(genRegPairHi(regPair)));
- return regNameLong;
-}
-
const char* Compiler::compRegNameForSize(regNumber reg, size_t size)
{
if (size == 0 || size >= 4)
@@ -2452,30 +2369,6 @@ const char* Compiler::compFPregVarName(unsigned fpReg, bool displayVar)
index = (index + 1) % 2; // circular reuse of index
-#if FEATURE_STACK_FP_X87
- /* 'fpReg' is the distance from the bottom of the stack, ie.
- * it is independant of the current FP stack level
- */
-
- if (displayVar && codeGen->genFPregCnt)
- {
- assert(fpReg < FP_STK_SIZE);
- assert(compCodeGenDone || (fpReg <= codeGen->compCurFPState.m_uStackSize));
-
- int pos = codeGen->genFPregCnt - (fpReg + 1 - codeGen->genGetFPstkLevel());
- if (pos >= 0)
- {
- VarName varName = compVarName((regNumber)pos, true);
-
- if (varName)
- {
- sprintf_s(nameVarReg[index], NAME_VAR_REG_BUFFER_LEN, "ST(%d)'%s'", fpReg, VarNameToStr(varName));
- return nameVarReg[index];
- }
- }
- }
-#endif // FEATURE_STACK_FP_X87
-
/* no debug info required or no variable in that register
-> return standard name */
@@ -2570,7 +2463,7 @@ void Compiler::compSetProcessor()
#if defined(_TARGET_ARM_)
info.genCPU = CPU_ARM;
#elif defined(_TARGET_AMD64_)
- info.genCPU = CPU_X64;
+ info.genCPU = CPU_X64;
#elif defined(_TARGET_X86_)
if (jitFlags.IsSet(JitFlags::JIT_FLAG_TARGET_P4))
info.genCPU = CPU_X86_PENTIUM_4;
@@ -2584,52 +2477,17 @@ void Compiler::compSetProcessor()
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef _TARGET_AMD64_
- opts.compUseFCOMI = false;
- opts.compUseCMOV = true;
- opts.compCanUseSSE2 = true;
+ opts.compUseFCOMI = false;
+ opts.compUseCMOV = true;
#elif defined(_TARGET_X86_)
- opts.compUseFCOMI = jitFlags.IsSet(JitFlags::JIT_FLAG_USE_FCOMI);
- opts.compUseCMOV = jitFlags.IsSet(JitFlags::JIT_FLAG_USE_CMOV);
-
-#ifdef LEGACY_BACKEND
- opts.compCanUseSSE2 = jitFlags.IsSet(JitFlags::JIT_FLAG_USE_SSE2);
-#else
- // RyuJIT/x86 requires SSE2 to be available: there is no support for generating floating-point
- // code with x87 instructions.
- opts.compCanUseSSE2 = true;
-#endif
+ opts.compUseFCOMI = jitFlags.IsSet(JitFlags::JIT_FLAG_USE_FCOMI);
+ opts.compUseCMOV = jitFlags.IsSet(JitFlags::JIT_FLAG_USE_CMOV);
#ifdef DEBUG
if (opts.compUseFCOMI)
opts.compUseFCOMI = !compStressCompile(STRESS_USE_FCOMI, 50);
if (opts.compUseCMOV)
opts.compUseCMOV = !compStressCompile(STRESS_USE_CMOV, 50);
-
-#ifdef LEGACY_BACKEND
-
- // Should we override the SSE2 setting?
- enum
- {
- SSE2_FORCE_DISABLE = 0,
- SSE2_FORCE_USE = 1,
- SSE2_FORCE_INVALID = -1
- };
-
- if (JitConfig.JitCanUseSSE2() == SSE2_FORCE_DISABLE)
- opts.compCanUseSSE2 = false;
- else if (JitConfig.JitCanUseSSE2() == SSE2_FORCE_USE)
- opts.compCanUseSSE2 = true;
- else if (opts.compCanUseSSE2)
- opts.compCanUseSSE2 = !compStressCompile(STRESS_GENERIC_VARN, 50);
-
-#else // !LEGACY_BACKEND
-
- // RyuJIT/x86 requires SSE2 to be available and hence
- // don't turn off compCanUseSSE2 under stress.
- assert(opts.compCanUseSSE2);
-
-#endif // !LEGACY_BACKEND
-
#endif // DEBUG
#endif // _TARGET_X86_
@@ -2640,114 +2498,111 @@ void Compiler::compSetProcessor()
if (!jitFlags.IsSet(JitFlags::JIT_FLAG_PREJIT))
{
- if (opts.compCanUseSSE2)
+ if (configEnableISA(InstructionSet_SSE))
+ {
+ opts.setSupportedISA(InstructionSet_SSE);
+ }
+ if (configEnableISA(InstructionSet_SSE2))
+ {
+ opts.setSupportedISA(InstructionSet_SSE2);
+ }
+ if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_AES))
{
- if (configEnableISA(InstructionSet_SSE))
+ if (configEnableISA(InstructionSet_AES))
{
- opts.setSupportedISA(InstructionSet_SSE);
+ opts.setSupportedISA(InstructionSet_AES);
}
- if (configEnableISA(InstructionSet_SSE2))
+ }
+ if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_AVX))
+ {
+ if (configEnableISA(InstructionSet_AVX))
{
- opts.setSupportedISA(InstructionSet_SSE2);
+ opts.setSupportedISA(InstructionSet_AVX);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_AES))
+ }
+ if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_AVX2))
+ {
+ // COMPlus_EnableAVX is also used to control the code generation of
+ // System.Numerics.Vectors and floating-point arithmetics
+ if (configEnableISA(InstructionSet_AVX) && configEnableISA(InstructionSet_AVX2))
{
- if (configEnableISA(InstructionSet_AES))
- {
- opts.setSupportedISA(InstructionSet_AES);
- }
+ opts.setSupportedISA(InstructionSet_AVX2);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_AVX))
+ }
+ if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_BMI1))
+ {
+ if (configEnableISA(InstructionSet_BMI1))
{
- if (configEnableISA(InstructionSet_AVX))
- {
- opts.setSupportedISA(InstructionSet_AVX);
- }
+ opts.setSupportedISA(InstructionSet_BMI1);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_AVX2))
+ }
+ if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_BMI2))
+ {
+ if (configEnableISA(InstructionSet_BMI2))
{
- // COMPlus_EnableAVX is also used to control the code generation of
- // System.Numerics.Vectors and floating-point arithmetics
- if (configEnableISA(InstructionSet_AVX) && configEnableISA(InstructionSet_AVX2))
- {
- opts.setSupportedISA(InstructionSet_AVX2);
- }
+ opts.setSupportedISA(InstructionSet_BMI2);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_BMI1))
+ }
+ if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_FMA))
+ {
+ if (configEnableISA(InstructionSet_FMA))
{
- if (configEnableISA(InstructionSet_BMI1))
- {
- opts.setSupportedISA(InstructionSet_BMI1);
- }
+ opts.setSupportedISA(InstructionSet_FMA);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_BMI2))
+ }
+ if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_LZCNT))
+ {
+ if (configEnableISA(InstructionSet_LZCNT))
{
- if (configEnableISA(InstructionSet_BMI2))
- {
- opts.setSupportedISA(InstructionSet_BMI2);
- }
+ opts.setSupportedISA(InstructionSet_LZCNT);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_FMA))
+ }
+ if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_PCLMULQDQ))
+ {
+ if (configEnableISA(InstructionSet_PCLMULQDQ))
{
- if (configEnableISA(InstructionSet_FMA))
- {
- opts.setSupportedISA(InstructionSet_FMA);
- }
+ opts.setSupportedISA(InstructionSet_PCLMULQDQ);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_LZCNT))
+ }
+ if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_POPCNT))
+ {
+ if (configEnableISA(InstructionSet_POPCNT))
{
- if (configEnableISA(InstructionSet_LZCNT))
- {
- opts.setSupportedISA(InstructionSet_LZCNT);
- }
+ opts.setSupportedISA(InstructionSet_POPCNT);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_PCLMULQDQ))
+ }
+
+ // There are currently two sets of flags that control SSE3 through SSE4.2 support
+ // This is the general EnableSSE3_4 flag and the individual ISA flags. We need to
+ // check both for any given ISA.
+ if (JitConfig.EnableSSE3_4())
+ {
+ if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_SSE3))
{
- if (configEnableISA(InstructionSet_PCLMULQDQ))
+ if (configEnableISA(InstructionSet_SSE3))
{
- opts.setSupportedISA(InstructionSet_PCLMULQDQ);
+ opts.setSupportedISA(InstructionSet_SSE3);
}
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_POPCNT))
+ if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_SSE41))
{
- if (configEnableISA(InstructionSet_POPCNT))
+ if (configEnableISA(InstructionSet_SSE41))
{
- opts.setSupportedISA(InstructionSet_POPCNT);
+ opts.setSupportedISA(InstructionSet_SSE41);
}
}
-
- // There are currently two sets of flags that control SSE3 through SSE4.2 support
- // This is the general EnableSSE3_4 flag and the individual ISA flags. We need to
- // check both for any given ISA.
- if (JitConfig.EnableSSE3_4())
+ if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_SSE42))
{
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_SSE3))
+ if (configEnableISA(InstructionSet_SSE42))
{
- if (configEnableISA(InstructionSet_SSE3))
- {
- opts.setSupportedISA(InstructionSet_SSE3);
- }
+ opts.setSupportedISA(InstructionSet_SSE42);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_SSE41))
- {
- if (configEnableISA(InstructionSet_SSE41))
- {
- opts.setSupportedISA(InstructionSet_SSE41);
- }
- }
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_SSE42))
+ }
+ if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_SSSE3))
+ {
+ if (configEnableISA(InstructionSet_SSSE3))
{
- if (configEnableISA(InstructionSet_SSE42))
- {
- opts.setSupportedISA(InstructionSet_SSE42);
- }
- }
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_SSSE3))
- {
- if (configEnableISA(InstructionSet_SSSE3))
- {
- opts.setSupportedISA(InstructionSet_SSSE3);
- }
+ opts.setSupportedISA(InstructionSet_SSSE3);
}
}
}
@@ -3727,8 +3582,8 @@ void Compiler::compInitOptions(JitFlags* jitFlags)
opts.compReloc = jitFlags->IsSet(JitFlags::JIT_FLAG_RELOC);
#ifdef DEBUG
-#if defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
- // Whether encoding of absolute addr as PC-rel offset is enabled in RyuJIT
+#if defined(_TARGET_XARCH_)
+ // Whether encoding of absolute addr as PC-rel offset is enabled
opts.compEnablePCRelAddr = (JitConfig.EnablePCRelAddr() != 0);
#endif
#endif // DEBUG
@@ -4995,11 +4850,9 @@ void Compiler::compCompile(void** methodCodePtr, ULONG* methodCodeSize, JitFlags
}
#endif
-#ifndef LEGACY_BACKEND
// rationalize trees
Rationalizer rat(this); // PHASE_RATIONALIZE
rat.Run();
-#endif // !LEGACY_BACKEND
// Here we do "simple lowering". When the RyuJIT backend works for all
// platforms, this will be part of the more general lowering phase. For now, though, we do a separate
@@ -5008,12 +4861,6 @@ void Compiler::compCompile(void** methodCodePtr, ULONG* methodCodeSize, JitFlags
fgSimpleLowering();
EndPhase(PHASE_SIMPLE_LOWERING);
-#ifdef LEGACY_BACKEND
- /* Local variable liveness */
- fgLocalVarLiveness();
- EndPhase(PHASE_LCLVARLIVENESS);
-#endif // !LEGACY_BACKEND
-
#ifdef DEBUG
fgDebugCheckBBlist();
fgDebugCheckLinks();
@@ -5031,39 +4878,9 @@ void Compiler::compCompile(void** methodCodePtr, ULONG* methodCodeSize, JitFlags
codeGen->regSet.rsMaskResvd |= RBM_SAVED_LOCALLOC_SP;
}
#endif // _TARGET_ARM_
-#if defined(_TARGET_ARMARCH_) && defined(LEGACY_BACKEND)
- // Determine whether we need to reserve a register for large lclVar offsets.
- // The determination depends heavily on the number of locals, which changes for RyuJIT backend
- // due to the introduction of new temps during Rationalizer and Lowering.
- // In LEGACY_BACKEND we do that here even though the decision to have a frame pointer or not may
- // change during register allocation, changing the computation somewhat.
- // In RyuJIT backend we do this after determining the frame type, and before beginning
- // register allocation.
- if (compRsvdRegCheck(PRE_REGALLOC_FRAME_LAYOUT))
- {
- // We reserve R10/IP1 in this case to hold the offsets in load/store instructions
- codeGen->regSet.rsMaskResvd |= RBM_OPT_RSVD;
- assert(REG_OPT_RSVD != REG_FP);
- JITDUMP(" Reserved REG_OPT_RSVD (%s) due to large frame\n", getRegName(REG_OPT_RSVD));
- }
- // compRsvdRegCheck() has read out the FramePointerUsed property, but doLinearScan()
- // tries to overwrite it later. This violates the PhasedVar rule and triggers an assertion.
- // TODO-ARM-Bug?: What is the proper way to handle this situation?
- codeGen->resetFramePointerUsedWritePhase();
-
-#ifdef DEBUG
- //
- // Display the pre-regalloc frame offsets that we have tentatively decided upon
- //
- if (verbose)
- lvaTableDump();
-#endif
-#endif // _TARGET_ARMARCH_
/* Assign registers to variables, etc. */
- CLANG_FORMAT_COMMENT_ANCHOR;
-#ifndef LEGACY_BACKEND
///////////////////////////////////////////////////////////////////////////////
// Dominator and reachability sets are no longer valid. They haven't been
// maintained up to here, and shouldn't be used (unless recomputed).
@@ -5089,15 +4906,6 @@ void Compiler::compCompile(void** methodCodePtr, ULONG* methodCodeSize, JitFlags
// Copied from rpPredictRegUse()
genFullPtrRegMap = (codeGen->genInterruptible || !codeGen->isFramePointerUsed());
-#else // LEGACY_BACKEND
-
- lvaTrackedFixed = true; // We cannot add any new tracked variables after this point.
- // For the classic JIT32 at this point lvaSortAgain can be set and raAssignVars() will call lvaSortOnly()
-
- // Now do "classic" register allocation.
- raAssignVars();
- EndPhase(PHASE_RA_ASSIGN_VARS);
-#endif // LEGACY_BACKEND
#ifdef DEBUG
fgDebugCheckLinks();
@@ -5698,12 +5506,10 @@ void Compiler::compCompileFinish()
!opts.optRepeat && // We need extra memory to repeat opts
!compAllocator->bypassHostAllocator() && // ArenaAllocator::getDefaultPageSize() is artificially low for
// DirectAlloc
+ // Factor of 2x is because data-structures are bigger under DEBUG
(compAllocator->getTotalBytesAllocated() > (2 * ArenaAllocator::getDefaultPageSize())) &&
-// Factor of 2x is because data-structures are bigger under DEBUG
-#ifndef LEGACY_BACKEND
// RyuJIT backend needs memory tuning! TODO-Cleanup: remove this case when memory tuning is complete.
(compAllocator->getTotalBytesAllocated() > (10 * ArenaAllocator::getDefaultPageSize())) &&
-#endif
!verbose) // We allocate lots of memory to convert sets to strings for JitDump
{
genSmallMethodsNeedingExtraMemoryCnt++;
@@ -5843,11 +5649,7 @@ void Compiler::compCompileFinish()
#endif // FEATURE_ANYCSE
}
-#ifndef LEGACY_BACKEND
printf(" LSRA |"); // TODO-Cleanup: dump some interesting LSRA stat into the order file?
-#else // LEGACY_BACKEND
- printf("%s%4d p%1d |", (tmpCount > 0) ? "T" : " ", rpStkPredict / BB_UNITY_WEIGHT, rpPasses);
-#endif // LEGACY_BACKEND
printf(" %4d |", info.compMethodInfo->ILCodeSize);
printf(" %5d |", info.compTotalHotCodeSize);
printf(" %5d |", info.compTotalColdCodeSize);
@@ -7496,106 +7298,6 @@ void Compiler::compCallArgStats()
argNonVirtualCalls++;
}
}
-
-#ifdef LEGACY_BACKEND
- // TODO-Cleaenup: We need to add support below for additional node types that RyuJIT backend has in the
- // IR.
- // Gather arguments information.
-
- for (args = call->gtCall.gtCallArgs; args; args = args->gtOp.gtOp2)
- {
- argx = args->gtOp.gtOp1;
-
- argNum++;
-
- switch (genActualType(argx->TypeGet()))
- {
- case TYP_INT:
- case TYP_REF:
- case TYP_BYREF:
- argDWordNum++;
- break;
-
- case TYP_LONG:
- argLngNum++;
- break;
-
- case TYP_FLOAT:
- argFltNum++;
- break;
-
- case TYP_DOUBLE:
- argDblNum++;
- break;
-
- case TYP_VOID:
- /* This is a deferred register argument */
- assert(argx->gtOper == GT_NOP);
- assert(argx->gtFlags & GTF_LATE_ARG);
- argDWordNum++;
- break;
- }
-
- /* Is this argument a register argument? */
-
- if (argx->gtFlags & GTF_LATE_ARG)
- {
- regArgNum++;
-
- /* We either have a deferred argument or a temp */
-
- if (argx->gtOper == GT_NOP)
- {
- regArgDeferred++;
- }
- else
- {
- assert(argx->gtOper == GT_ASG);
- regArgTemp++;
- }
- }
- }
-
- /* Look at the register arguments and count how many constants, local vars */
-
- for (args = call->gtCall.gtCallLateArgs; args; args = args->gtOp.gtOp2)
- {
- argx = args->gtOp.gtOp1;
-
- switch (argx->gtOper)
- {
- case GT_CNS_INT:
- regArgConst++;
- break;
-
- case GT_LCL_VAR:
- regArgLclVar++;
- break;
- }
- }
-
- assert(argNum == argDWordNum + argLngNum + argFltNum + argDblNum);
- assert(regArgNum == regArgDeferred + regArgTemp);
-
- argTotalArgs += argNum;
- argTotalRegArgs += regArgNum;
-
- argTotalDWordArgs += argDWordNum;
- argTotalLongArgs += argLngNum;
- argTotalFloatArgs += argFltNum;
- argTotalDoubleArgs += argDblNum;
-
- argTotalDeferred += regArgDeferred;
- argTotalTemps += regArgTemp;
- argTotalConst += regArgConst;
- argTotalLclVar += regArgLclVar;
-
- argTempsThisMethod += regArgTemp;
-
- argCntTable.record(argNum);
- argDWordCntTable.record(argDWordNum);
- argDWordLngCntTable.record(argDWordNum + (2 * argLngNum));
-#endif // LEGACY_BACKEND
}
}
}
@@ -9419,12 +9121,6 @@ int cTreeKindsIR(Compiler* comp, GenTree* tree)
{
chars += printf("[LOGOP]");
}
-#ifdef LEGACY_BACKEND
- if (kind & GTK_ASGOP)
- {
- chars += printf("[ASGOP]");
- }
-#endif
if (kind & GTK_COMMUTE)
{
chars += printf("[COMMUTE]");
@@ -9646,7 +9342,7 @@ int cTreeFlagsIR(Compiler* comp, GenTree* tree)
break;
case GT_MUL:
-#if !defined(_TARGET_64BIT_) && !defined(LEGACY_BACKEND)
+#if !defined(_TARGET_64BIT_)
case GT_MUL_LONG:
#endif
@@ -9678,14 +9374,6 @@ int cTreeFlagsIR(Compiler* comp, GenTree* tree)
case GT_MOD:
case GT_UMOD:
-
-#ifdef LEGACY_BACKEND
- if (tree->gtFlags & GTF_MOD_INT_RESULT)
- {
- chars += printf("[MOD_INT_RESULT]");
- }
-#endif // LEGACY_BACKEND
-
break;
case GT_EQ:
@@ -9873,12 +9561,6 @@ int cTreeFlagsIR(Compiler* comp, GenTree* tree)
{
chars += printf("[CALL_HOISTABLE]");
}
-#ifdef LEGACY_BACKEND
- if (tree->gtFlags & GTF_CALL_REG_SAVE)
- {
- chars += printf("[CALL_REG_SAVE]");
- }
-#endif // LEGACY_BACKEND
// More flags associated with calls.
@@ -9940,12 +9622,10 @@ int cTreeFlagsIR(Compiler* comp, GenTree* tree)
{
chars += printf("[CALL_M_FRAME_VAR_DEATH]");
}
-#ifndef LEGACY_BACKEND
if (call->gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_HELPER)
{
chars += printf("[CALL_M_TAILCALL_VIA_HELPER]");
}
-#endif
#if FEATURE_TAILCALL_OPT
if (call->gtCallMoreFlags & GTF_CALL_M_IMPLICIT_TAILCALL)
{
@@ -9999,10 +9679,6 @@ int cTreeFlagsIR(Compiler* comp, GenTree* tree)
case GT_CAST:
case GT_ADD:
case GT_SUB:
-#ifdef LEGACY_BACKEND
- case GT_ASG_ADD:
- case GT_ASG_SUB:
-#endif
if (tree->gtFlags & GTF_OVERFLOW)
{
chars += printf("[OVERFLOW]");
@@ -10034,20 +9710,6 @@ int cTreeFlagsIR(Compiler* comp, GenTree* tree)
{
chars += printf("[SPILLED_OPER]");
}
-#if defined(LEGACY_BACKEND)
- if (tree->InReg())
- {
- chars += printf("[REG_VAL]");
- }
- if (tree->gtFlags & GTF_SPILLED_OP2)
- {
- chars += printf("[SPILLED_OP2]");
- }
- if (tree->gtFlags & GTF_ZSF_SET)
- {
- chars += printf("[ZSF_SET]");
- }
-#endif
#if FEATURE_SET_FLAGS
if (tree->gtFlags & GTF_SET_FLAGS)
{
@@ -10076,12 +9738,6 @@ int cTreeFlagsIR(Compiler* comp, GenTree* tree)
{
chars += printf("[BOOLEAN]");
}
-#if CPU_HAS_BYTE_REGS && defined(LEGACY_BACKEND)
- if (tree->gtFlags & GTF_SMALL_OK)
- {
- chars += printf("[SMALL_OK]");
- }
-#endif
if (tree->gtFlags & GTF_UNSIGNED)
{
chars += printf("[SMALL_UNSIGNED]");
@@ -10260,16 +9916,7 @@ int cLeafIR(Compiler* comp, GenTree* tree)
{
if (varDsc->lvRegister)
{
- if (isRegPairType(varDsc->TypeGet()))
- {
- chars += printf(":%s:%s",
- getRegName(varDsc->lvOtherReg), // hi32
- getRegName(varDsc->lvRegNum)); // lo32
- }
- else
- {
- chars += printf(":%s", getRegName(varDsc->lvRegNum));
- }
+ chars += printf(":%s", getRegName(varDsc->lvRegNum));
}
else
{
@@ -10278,11 +9925,6 @@ int cLeafIR(Compiler* comp, GenTree* tree)
case GenTree::GT_REGTAG_REG:
chars += printf(":%s", comp->compRegVarName(tree->gtRegNum));
break;
-#if CPU_LONG_USES_REGPAIR
- case GenTree::GT_REGTAG_REGPAIR:
- chars += printf(":%s", comp->compRegPairName(tree->gtRegPair));
- break;
-#endif
default:
break;
}
@@ -10294,18 +9936,7 @@ int cLeafIR(Compiler* comp, GenTree* tree)
{
if (varDsc->lvRegister)
{
- chars += printf("(");
- if (isRegPairType(varDsc->TypeGet()))
- {
- chars += printf("%s:%s",
- getRegName(varDsc->lvOtherReg), // hi32
- getRegName(varDsc->lvRegNum)); // lo32
- }
- else
- {
- chars += printf("%s", getRegName(varDsc->lvRegNum));
- }
- chars += printf(")");
+ chars += printf("(%s)", getRegName(varDsc->lvRegNum));
}
else
{
@@ -10314,11 +9945,6 @@ int cLeafIR(Compiler* comp, GenTree* tree)
case GenTree::GT_REGTAG_REG:
chars += printf("(%s)", comp->compRegVarName(tree->gtRegNum));
break;
-#if CPU_LONG_USES_REGPAIR
- case GenTree::GT_REGTAG_REGPAIR:
- chars += printf("(%s)", comp->compRegPairName(tree->gtRegPair));
- break;
-#endif
default:
break;
}
@@ -10367,16 +9993,7 @@ int cLeafIR(Compiler* comp, GenTree* tree)
{
if (varDsc->lvRegister)
{
- if (isRegPairType(varDsc->TypeGet()))
- {
- chars += printf(":%s:%s",
- getRegName(varDsc->lvOtherReg), // hi32
- getRegName(varDsc->lvRegNum)); // lo32
- }
- else
- {
- chars += printf(":%s", getRegName(varDsc->lvRegNum));
- }
+ chars += printf(":%s", getRegName(varDsc->lvRegNum));
}
else
{
@@ -10385,11 +10002,6 @@ int cLeafIR(Compiler* comp, GenTree* tree)
case GenTree::GT_REGTAG_REG:
chars += printf(":%s", comp->compRegVarName(tree->gtRegNum));
break;
-#if CPU_LONG_USES_REGPAIR
- case GenTree::GT_REGTAG_REGPAIR:
- chars += printf(":%s", comp->compRegPairName(tree->gtRegPair));
- break;
-#endif
default:
break;
}
@@ -10401,18 +10013,7 @@ int cLeafIR(Compiler* comp, GenTree* tree)
{
if (varDsc->lvRegister)
{
- chars += printf("(");
- if (isRegPairType(varDsc->TypeGet()))
- {
- chars += printf("%s:%s",
- getRegName(varDsc->lvOtherReg), // hi32
- getRegName(varDsc->lvRegNum)); // lo32
- }
- else
- {
- chars += printf("%s", getRegName(varDsc->lvRegNum));
- }
- chars += printf(")");
+ chars += printf("(%s)", getRegName(varDsc->lvRegNum));
}
else
{
@@ -10421,11 +10022,6 @@ int cLeafIR(Compiler* comp, GenTree* tree)
case GenTree::GT_REGTAG_REG:
chars += printf("(%s)", comp->compRegVarName(tree->gtRegNum));
break;
-#if CPU_LONG_USES_REGPAIR
- case GenTree::GT_REGTAG_REGPAIR:
- chars += printf("(%s)", comp->compRegPairName(tree->gtRegPair));
- break;
-#endif
default:
break;
}
@@ -10610,9 +10206,7 @@ int cLeafIR(Compiler* comp, GenTree* tree)
case GT_MEMORYBARRIER:
case GT_ARGPLACE:
case GT_PINVOKE_PROLOG:
-#ifndef LEGACY_BACKEND
case GT_JMPTABLE:
-#endif
// Do nothing.
break;