author     Michelle McDaniel <adiaaida@gmail.com>    2016-09-21 09:25:28 -0700
committer  Michelle McDaniel <adiaaida@gmail.com>    2016-09-21 13:52:04 -0700
commit     49a13624affce9b5f7757c9208f902307385827c (patch)
tree       bafb0522ff684cb562a6817b9fd4bb6b128202de /src
parent     96f474c67e83b178c3a27afc1540953948610c73 (diff)
Reformat jit code for Windows x64
Diffstat (limited to 'src')
-rw-r--r--  src/jit/assertionprop.cpp     20
-rw-r--r--  src/jit/block.cpp              1
-rwxr-xr-x  src/jit/codegen.h             15
-rw-r--r--  src/jit/codegenarm.cpp         3
-rw-r--r--  src/jit/codegenarm64.cpp       4
-rwxr-xr-x  src/jit/codegencommon.cpp     12
-rw-r--r--  src/jit/codegenlinear.h        2
-rw-r--r--  src/jit/compiler.cpp          46
-rw-r--r--  src/jit/compiler.h             2
-rw-r--r--  src/jit/compiler.hpp           6
-rw-r--r--  src/jit/decomposelongs.cpp    29
-rw-r--r--  src/jit/decomposelongs.h       2
-rw-r--r--  src/jit/earlyprop.cpp          2
-rw-r--r--  src/jit/emit.h                 4
-rw-r--r--  src/jit/emitxarch.cpp          4
-rw-r--r--  src/jit/gcencode.cpp         138
-rw-r--r--  src/jit/gentree.cpp          211
-rw-r--r--  src/jit/gentree.h             23
-rw-r--r--  src/jit/gschecks.cpp          48
-rw-r--r--  src/jit/importer.cpp          50
-rw-r--r--  src/jit/jit.h                 14
-rw-r--r--  src/jit/jitconfigvalues.h      2
-rw-r--r--  src/jit/lclvars.cpp           14
-rw-r--r--  src/jit/lir.cpp               30
-rw-r--r--  src/jit/liveness.cpp           5
-rw-r--r--  src/jit/lower.cpp             98
-rw-r--r--  src/jit/lowerarm64.cpp         6
-rw-r--r--  src/jit/lowerxarch.cpp        49
-rw-r--r--  src/jit/lsra.cpp             120
-rw-r--r--  src/jit/lsra.h                47
-rw-r--r--  src/jit/morph.cpp             82
-rw-r--r--  src/jit/rationalize.cpp        5
-rw-r--r--  src/jit/valuenum.h            21
33 files changed, 559 insertions, 556 deletions
diff --git a/src/jit/assertionprop.cpp b/src/jit/assertionprop.cpp
index 8a53278daf..159aa293f4 100644
--- a/src/jit/assertionprop.cpp
+++ b/src/jit/assertionprop.cpp
@@ -3701,18 +3701,18 @@ GenTreePtr Compiler::optAssertionProp_BndsChk(ASSERT_VALARG_TP assertions, const
assert(tree->gtOper == GT_ARR_BOUNDS_CHECK);
#ifdef FEATURE_ENABLE_NO_RANGE_CHECKS
- if (JitConfig.JitNoRangeChks())
- {
+ if (JitConfig.JitNoRangeChks())
+ {
#ifdef DEBUG
- if (verbose)
- {
- printf("\nFlagging check redundant due to JitNoRangeChks in BB%02u:\n", compCurBB->bbNum);
- gtDispTree(tree, nullptr, nullptr, true);
- }
+ if (verbose)
+ {
+ printf("\nFlagging check redundant due to JitNoRangeChks in BB%02u:\n", compCurBB->bbNum);
+ gtDispTree(tree, nullptr, nullptr, true);
+ }
#endif // DEBUG
- tree->gtFlags |= GTF_ARR_BOUND_INBND;
- return nullptr;
- }
+ tree->gtFlags |= GTF_ARR_BOUND_INBND;
+ return nullptr;
+ }
#endif // FEATURE_ENABLE_NO_RANGE_CHECKS
BitVecOps::Iter iter(apTraits, assertions);
diff --git a/src/jit/block.cpp b/src/jit/block.cpp
index 2d37754ec5..cf2a107479 100644
--- a/src/jit/block.cpp
+++ b/src/jit/block.cpp
@@ -667,7 +667,6 @@ GenTreeStmt* BasicBlock::lastStmt()
return result->AsStmt();
}
-
//------------------------------------------------------------------------
// BasicBlock::firstNode: Returns the first node in the block.
//
diff --git a/src/jit/codegen.h b/src/jit/codegen.h
index 9a85f59c0c..e58701e716 100755
--- a/src/jit/codegen.h
+++ b/src/jit/codegen.h
@@ -48,7 +48,6 @@ public:
unsigned* cnsPtr,
bool nogen = false);
-
private:
#if defined(_TARGET_XARCH_) && !FEATURE_STACK_FP_X87
// Bit masks used in negating a float or double number.
@@ -488,14 +487,14 @@ protected:
void genAmd64EmitterUnitTests();
#endif
-//-------------------------------------------------------------------------
-//
-// End prolog/epilog generation
-//
-//-------------------------------------------------------------------------
+ //-------------------------------------------------------------------------
+ //
+ // End prolog/epilog generation
+ //
+ //-------------------------------------------------------------------------
- void genSinglePush();
- void genSinglePop();
+ void genSinglePush();
+ void genSinglePop();
regMaskTP genPushRegs(regMaskTP regs, regMaskTP* byrefRegs, regMaskTP* noRefRegs);
void genPopRegs(regMaskTP regs, regMaskTP byrefRegs, regMaskTP noRefRegs);
diff --git a/src/jit/codegenarm.cpp b/src/jit/codegenarm.cpp
index 4e7a149068..79c19fa0bf 100644
--- a/src/jit/codegenarm.cpp
+++ b/src/jit/codegenarm.cpp
@@ -849,7 +849,8 @@ void CodeGen::genCodeForBBlist()
if (compiler->verbose)
{
printf("\n# ");
- printf("compCycleEstimate = %6d, compSizeEstimate = %5d ", compiler->compCycleEstimate, compiler->compSizeEstimate);
+ printf("compCycleEstimate = %6d, compSizeEstimate = %5d ", compiler->compCycleEstimate,
+ compiler->compSizeEstimate);
printf("%s\n", compiler->info.compFullName);
}
#endif
diff --git a/src/jit/codegenarm64.cpp b/src/jit/codegenarm64.cpp
index 88657996e3..0e013bfd76 100644
--- a/src/jit/codegenarm64.cpp
+++ b/src/jit/codegenarm64.cpp
@@ -6802,8 +6802,8 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
var_types type = nextArgNode->TypeGet();
emitAttr attr = emitTypeSize(type);
- // Emit store instructions to store the registers produced by the GT_FIELD_LIST into the outgoing argument
- // area
+ // Emit store instructions to store the registers produced by the GT_FIELD_LIST into the outgoing
+ // argument area
emit->emitIns_S_R(ins_Store(type), attr, reg, varNumOut, argOffsetOut);
argOffsetOut += EA_SIZE_IN_BYTES(attr);
assert(argOffsetOut <= argOffsetMax); // We can't write beyound the outgoing area area
diff --git a/src/jit/codegencommon.cpp b/src/jit/codegencommon.cpp
index 799498a8d0..a9e43e8f0e 100755
--- a/src/jit/codegencommon.cpp
+++ b/src/jit/codegencommon.cpp
@@ -104,7 +104,7 @@ CodeGen::CodeGen(Compiler* theCompiler) : CodeGenInterface(theCompiler)
#endif // defined(_TARGET_XARCH_) && !FEATURE_STACK_FP_X87
#if defined(FEATURE_PUT_STRUCT_ARG_STK) && !defined(_TARGET_X86_)
- m_stkArgVarNum = BAD_VAR_NUM;
+ m_stkArgVarNum = BAD_VAR_NUM;
#endif
regTracker.rsTrackInit(compiler, &regSet);
@@ -7577,9 +7577,9 @@ void CodeGen::genProfilingLeaveCallback(unsigned helper /*= CORINFO_HELP_PROF_FC
bool r0Trashed;
emitAttr attr = EA_UNKNOWN;
- if (compiler->info.compRetType == TYP_VOID ||
- (!compiler->info.compIsVarArgs && !compiler->opts.compUseSoftFP && (varTypeIsFloating(compiler->info.compRetType) ||
- compiler->IsHfa(compiler->info.compMethodInfo->args.retTypeClass))))
+ if (compiler->info.compRetType == TYP_VOID || (!compiler->info.compIsVarArgs && !compiler->opts.compUseSoftFP &&
+ (varTypeIsFloating(compiler->info.compRetType) ||
+ compiler->IsHfa(compiler->info.compMethodInfo->args.retTypeClass))))
{
r0Trashed = false;
}
@@ -10893,7 +10893,9 @@ regMaskTP CodeGen::genPushRegs(regMaskTP regs, regMaskTP* byrefRegs, regMaskTP*
*noRefRegs = RBM_NONE;
if (regs == RBM_NONE)
+ {
return RBM_NONE;
+ }
#if FEATURE_FIXED_OUT_ARGS
@@ -10961,7 +10963,9 @@ regMaskTP CodeGen::genPushRegs(regMaskTP regs, regMaskTP* byrefRegs, regMaskTP*
void CodeGen::genPopRegs(regMaskTP regs, regMaskTP byrefRegs, regMaskTP noRefRegs)
{
if (regs == RBM_NONE)
+ {
return;
+ }
#if FEATURE_FIXED_OUT_ARGS
diff --git a/src/jit/codegenlinear.h b/src/jit/codegenlinear.h
index 1c14f660a6..273685baff 100644
--- a/src/jit/codegenlinear.h
+++ b/src/jit/codegenlinear.h
@@ -226,7 +226,7 @@ bool genIsRegCandidateLocal(GenTreePtr tree)
#ifdef FEATURE_PUT_STRUCT_ARG_STK
#ifdef _TARGET_X86_
bool m_pushStkArg;
-#else // !_TARGET_X86_
+#else // !_TARGET_X86_
unsigned m_stkArgVarNum;
#endif // !_TARGET_X86_
unsigned m_stkArgOffset;
diff --git a/src/jit/compiler.cpp b/src/jit/compiler.cpp
index 3117d9eafd..c94ce3d30e 100644
--- a/src/jit/compiler.cpp
+++ b/src/jit/compiler.cpp
@@ -1252,8 +1252,8 @@ void Compiler::compShutdown()
// Let's not show anything below a threshold
if (pct >= 0.5)
{
- fprintf(fout, " GT_%-17s %7u (%4.1lf%%) %3u bytes each\n",
- GenTree::OpName((genTreeOps)op), cnt, pct, siz);
+ fprintf(fout, " GT_%-17s %7u (%4.1lf%%) %3u bytes each\n", GenTree::OpName((genTreeOps)op), cnt,
+ pct, siz);
rem_total -= cnt;
}
else
@@ -1266,16 +1266,16 @@ void Compiler::compShutdown()
}
if (rem_total > 0)
{
- fprintf(fout, " All other GT_xxx ... %7u (%4.1lf%%) ... %4.1lf%% small + %4.1lf%% large\n",
- rem_total, 100.0 * rem_total / gtc, 100.0 * rem_small / gtc, 100.0 * rem_large / gtc);
+ fprintf(fout, " All other GT_xxx ... %7u (%4.1lf%%) ... %4.1lf%% small + %4.1lf%% large\n", rem_total,
+ 100.0 * rem_total / gtc, 100.0 * rem_small / gtc, 100.0 * rem_large / gtc);
}
fprintf(fout, " -----------------------------------------------------\n");
- fprintf(fout, " Total ....... %11u --ALL-- ... %4.1lf%% small + %4.1lf%% large\n",
- gtc, 100.0 * tot_small / gtc, 100.0 * tot_large / gtc);
+ fprintf(fout, " Total ....... %11u --ALL-- ... %4.1lf%% small + %4.1lf%% large\n", gtc,
+ 100.0 * tot_small / gtc, 100.0 * tot_large / gtc);
fprintf(fout, "\n");
}
-#endif//COUNT_AST_OPERS
+#endif // COUNT_AST_OPERS
#if DISPLAY_SIZES
@@ -4503,7 +4503,9 @@ void Compiler::compCompile(void** methodCodePtr, ULONG* methodCodeSize, CORJIT_F
#ifdef FEATURE_JIT_METHOD_PERF
if (pCompJitTimer)
+ {
pCompJitTimer->Terminate(this, CompTimeSummaryInfo::s_compTimeSummary);
+ }
#endif
RecordStateAtEndOfCompilation();
@@ -4676,13 +4678,13 @@ int Compiler::compCompile(CORINFO_METHOD_HANDLE methodHnd,
checkedForJitTimeLog = true;
}
- if ((Compiler::compJitTimeLogFilename != NULL) || (JitTimeLogCsv() != NULL))
+ if ((Compiler::compJitTimeLogFilename != nullptr) || (JitTimeLogCsv() != nullptr))
{
pCompJitTimer = JitTimer::Create(this, methodInfo->ILCodeSize);
}
else
{
- pCompJitTimer = NULL;
+ pCompJitTimer = nullptr;
}
#endif // FEATURE_JIT_METHOD_PERF
@@ -6963,7 +6965,9 @@ bool CompTimeSummaryInfo::IncludedInFilteredData(CompTimeInfo& info)
void CompTimeSummaryInfo::AddInfo(CompTimeInfo& info)
{
if (info.m_timerFailure)
+ {
return; // Don't update if there was a failure.
+ }
CritSecHolder timeLock(s_compTimeSummaryLock);
m_numMethods++;
@@ -7000,12 +7004,14 @@ void CompTimeSummaryInfo::AddInfo(CompTimeInfo& info)
}
// Static
-LPCWSTR Compiler::compJitTimeLogFilename = NULL;
+LPCWSTR Compiler::compJitTimeLogFilename = nullptr;
void CompTimeSummaryInfo::Print(FILE* f)
{
- if (f == NULL)
+ if (f == nullptr)
+ {
return;
+ }
// Otherwise...
double countsPerSec = CycleTimer::CyclesPerSecond();
if (countsPerSec == 0.0)
@@ -7054,7 +7060,8 @@ void CompTimeSummaryInfo::Print(FILE* f)
double pslop_pct = 100.0 * m_total.m_parentPhaseEndSlop * 1000.0 / countsPerSec / totTime_ms;
if (pslop_pct >= 1.0)
{
- fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = %3.1f%% of total.\n\n",
+ fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = "
+ "%3.1f%% of total.\n\n",
m_total.m_parentPhaseEndSlop / 1000000.0, pslop_pct);
}
}
@@ -7094,8 +7101,9 @@ void CompTimeSummaryInfo::Print(FILE* f)
double fslop_ms = m_filtered.m_parentPhaseEndSlop * 1000.0 / countsPerSec;
if (fslop_ms > 1.0)
{
- fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles.\n",
- m_filtered.m_parentPhaseEndSlop);
+ fprintf(f,
+ "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles.\n",
+ m_filtered.m_parentPhaseEndSlop);
}
}
}
@@ -7169,7 +7177,7 @@ LPCWSTR Compiler::JitTimeLogCsv()
void JitTimer::PrintCsvHeader()
{
LPCWSTR jitTimeLogCsv = Compiler::JitTimeLogCsv();
- if (jitTimeLogCsv == NULL)
+ if (jitTimeLogCsv == nullptr)
{
return;
}
@@ -7212,7 +7220,7 @@ extern ICorJitHost* g_jitHost;
void JitTimer::PrintCsvMethodStats(Compiler* comp)
{
LPCWSTR jitTimeLogCsv = Compiler::JitTimeLogCsv();
- if (jitTimeLogCsv == NULL)
+ if (jitTimeLogCsv == nullptr)
{
return;
}
@@ -7242,7 +7250,9 @@ void JitTimer::PrintCsvMethodStats(Compiler* comp)
for (int i = 0; i < PHASE_NUMBER_OF; i++)
{
if (!PhaseHasChildren[i])
+ {
totCycles += m_info.m_cyclesByPhase[i];
+ }
fprintf(fp, "%I64u,", m_info.m_cyclesByPhase[i]);
}
@@ -7261,7 +7271,9 @@ void JitTimer::Terminate(Compiler* comp, CompTimeSummaryInfo& sum)
for (int i = 0; i < PHASE_NUMBER_OF; i++)
{
if (!PhaseHasChildren[i])
+ {
totCycles2 += m_info.m_cyclesByPhase[i];
+ }
}
// We include m_parentPhaseEndSlop in the next phase's time also (we probably shouldn't)
// totCycles2 += m_info.m_parentPhaseEndSlop;
@@ -7309,7 +7321,9 @@ void Compiler::AggregateMemStats::Print(FILE* f)
{
fprintf(f, "For %9u methods:\n", nMethods);
if (nMethods == 0)
+ {
return;
+ }
fprintf(f, " count: %12u (avg %7u per method)\n", allocCnt, allocCnt / nMethods);
fprintf(f, " alloc size : %12llu (avg %7llu per method)\n", allocSz, allocSz / nMethods);
fprintf(f, " max alloc : %12llu\n", allocSzMax);
diff --git a/src/jit/compiler.h b/src/jit/compiler.h
index 2f062af463..7b5fcf6f39 100644
--- a/src/jit/compiler.h
+++ b/src/jit/compiler.h
@@ -3853,7 +3853,7 @@ public:
//
var_types getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
structPassingKind* wbPassStruct = nullptr,
- unsigned structSize = 0);
+ unsigned structSize = 0);
#ifdef DEBUG
// Print a representation of "vnp" or "vn" on standard output.
diff --git a/src/jit/compiler.hpp b/src/jit/compiler.hpp
index 39fb48efea..904da76cd9 100644
--- a/src/jit/compiler.hpp
+++ b/src/jit/compiler.hpp
@@ -804,7 +804,7 @@ void* GenTree::operator new(size_t sz, Compiler* comp, genTreeOps oper)
#if SMALL_TREE_NODES
size_t size = GenTree::s_gtNodeSizes[oper];
#else
- size_t size = TREE_NODE_SZ_LARGE;
+ size_t size = TREE_NODE_SZ_LARGE;
#endif
#if MEASURE_NODE_SIZE
@@ -4385,8 +4385,10 @@ inline bool Compiler::lvaIsGCTracked(const LclVarDsc* varDsc)
inline void Compiler::EndPhase(Phases phase)
{
#if defined(FEATURE_JIT_METHOD_PERF)
- if (pCompJitTimer != NULL)
+ if (pCompJitTimer != nullptr)
+ {
pCompJitTimer->EndPhase(phase);
+ }
#endif
#if DUMP_FLOWGRAPHS
fgDumpFlowGraph(phase);
diff --git a/src/jit/decomposelongs.cpp b/src/jit/decomposelongs.cpp
index c3be74c74a..017fa62c3e 100644
--- a/src/jit/decomposelongs.cpp
+++ b/src/jit/decomposelongs.cpp
@@ -65,7 +65,7 @@ void DecomposeLongs::DecomposeBlock(BasicBlock* block)
assert(block->isEmpty() || block->IsLIR());
m_blockWeight = block->getBBWeight(m_compiler);
- m_range = &LIR::AsRange(block);
+ m_range = &LIR::AsRange(block);
DecomposeRangeHelper();
}
@@ -90,7 +90,7 @@ void DecomposeLongs::DecomposeRange(Compiler* compiler, unsigned blockWeight, LI
DecomposeLongs decomposer(compiler);
decomposer.m_blockWeight = blockWeight;
- decomposer.m_range = &range;
+ decomposer.m_range = &range;
decomposer.DecomposeRangeHelper();
}
@@ -286,7 +286,10 @@ GenTree* DecomposeLongs::DecomposeNode(GenTree* tree)
// Return Value:
// The next node to process.
//
-GenTree* DecomposeLongs::FinalizeDecomposition(LIR::Use& use, GenTree* loResult, GenTree* hiResult, GenTree* insertResultAfter)
+GenTree* DecomposeLongs::FinalizeDecomposition(LIR::Use& use,
+ GenTree* loResult,
+ GenTree* hiResult,
+ GenTree* insertResultAfter)
{
assert(use.IsInitialized());
assert(loResult != nullptr);
@@ -395,7 +398,7 @@ GenTree* DecomposeLongs::DecomposeStoreLclVar(LIR::Use& use)
GenTree* tree = use.Def();
GenTree* rhs = tree->gtGetOp1();
if ((rhs->OperGet() == GT_PHI) || (rhs->OperGet() == GT_CALL) ||
- ((rhs->OperGet() == GT_MUL_LONG) && (rhs->gtFlags & GTF_MUL_64RSLT) != 0))
+ ((rhs->OperGet() == GT_MUL_LONG) && (rhs->gtFlags & GTF_MUL_64RSLT) != 0))
{
// GT_CALLs are not decomposed, so will not be converted to GT_LONG
// GT_STORE_LCL_VAR = GT_CALL are handled in genMultiRegCallStoreToLocal
@@ -640,7 +643,7 @@ GenTree* DecomposeLongs::DecomposeStoreInd(LIR::Use& use)
// + --* t155 long
// * storeIndir long
- GenTree* gtLong = tree->gtOp.gtOp2;
+ GenTree* gtLong = tree->gtOp.gtOp2;
// Save address to a temp. It is used in storeIndLow and storeIndHigh trees.
LIR::Use address(Range(), &tree->gtOp.gtOp1, tree);
@@ -901,11 +904,11 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
{
assert(use.IsInitialized());
- GenTree* tree = use.Def();
- GenTree* gtLong = tree->gtGetOp1();
+ GenTree* tree = use.Def();
+ GenTree* gtLong = tree->gtGetOp1();
GenTree* oldShiftByOp = tree->gtGetOp2();
- genTreeOps oper = tree->OperGet();
+ genTreeOps oper = tree->OperGet();
genTreeOps shiftByOper = oldShiftByOp->OperGet();
assert((oper == GT_LSH) || (oper == GT_RSH) || (oper == GT_RSZ));
@@ -979,9 +982,9 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
// Create a GT_LONG that contains loCopy and hiOp1. This will be used in codegen to
// generate the shld instruction
- GenTree* loCopy = m_compiler->gtNewLclvNode(loOp1LclNum, TYP_INT);
- GenTree* hiOp = new (m_compiler, GT_LONG) GenTreeOp(GT_LONG, TYP_LONG, loCopy, hiOp1);
- hiResult = m_compiler->gtNewOperNode(GT_LSH_HI, TYP_INT, hiOp, shiftByHi);
+ GenTree* loCopy = m_compiler->gtNewLclvNode(loOp1LclNum, TYP_INT);
+ GenTree* hiOp = new (m_compiler, GT_LONG) GenTreeOp(GT_LONG, TYP_LONG, loCopy, hiOp1);
+ hiResult = m_compiler->gtNewOperNode(GT_LSH_HI, TYP_INT, hiOp, shiftByHi);
m_compiler->lvaIncRefCnts(loCopy);
@@ -1339,9 +1342,9 @@ GenTree* DecomposeLongs::DecomposeUMod(LIR::Use& use)
Range().Remove(op2);
// Lo part is the GT_UMOD
- GenTree* loResult = tree;
+ GenTree* loResult = tree;
loResult->gtOp.gtOp2 = loOp2;
- loResult->gtType = TYP_INT;
+ loResult->gtType = TYP_INT;
// Set the high part to 0
GenTree* hiResult = m_compiler->gtNewZeroConNode(TYP_INT);
diff --git a/src/jit/decomposelongs.h b/src/jit/decomposelongs.h
index f087c3ec6d..7955fc7a2e 100644
--- a/src/jit/decomposelongs.h
+++ b/src/jit/decomposelongs.h
@@ -35,7 +35,7 @@ private:
}
// Driver functions
- void DecomposeRangeHelper();
+ void DecomposeRangeHelper();
GenTree* DecomposeNode(GenTree* tree);
// Per-node type decompose cases
diff --git a/src/jit/earlyprop.cpp b/src/jit/earlyprop.cpp
index 5b985df2bb..966063ce32 100644
--- a/src/jit/earlyprop.cpp
+++ b/src/jit/earlyprop.cpp
@@ -189,7 +189,7 @@ void Compiler::optEarlyProp()
// Walk the stmt tree in linear order to rewrite any array length reference with a
// constant array length.
- bool isRewritten = false;
+ bool isRewritten = false;
for (GenTreePtr tree = stmt->gtStmt.gtStmtList; tree != nullptr; tree = tree->gtNext)
{
if (optEarlyPropRewriteTree(tree))
diff --git a/src/jit/emit.h b/src/jit/emit.h
index 8fb24bcd60..add3403380 100644
--- a/src/jit/emit.h
+++ b/src/jit/emit.h
@@ -1742,8 +1742,8 @@ private:
BYTE* emitCurIGfreeEndp; // one byte past the last available byte in buffer
BYTE* emitCurIGfreeBase; // first byte address
- unsigned emitCurIGinsCnt; // # of collected instr's in buffer
- unsigned emitCurIGsize; // estimated code size of current group in bytes
+ unsigned emitCurIGinsCnt; // # of collected instr's in buffer
+ unsigned emitCurIGsize; // estimated code size of current group in bytes
UNATIVE_OFFSET emitCurCodeOffset; // current code offset within group
UNATIVE_OFFSET emitTotalCodeSize; // bytes of code in entire method
diff --git a/src/jit/emitxarch.cpp b/src/jit/emitxarch.cpp
index 6b7777a853..4f18ca8033 100644
--- a/src/jit/emitxarch.cpp
+++ b/src/jit/emitxarch.cpp
@@ -75,8 +75,8 @@ bool emitter::IsThreeOperandBinaryAVXInstruction(instruction ins)
ins == INS_maxss || ins == INS_maxsd || ins == INS_andnps || ins == INS_andnpd || ins == INS_paddb ||
ins == INS_paddw || ins == INS_paddd || ins == INS_paddq || ins == INS_psubb || ins == INS_psubw ||
ins == INS_psubd || ins == INS_psubq || ins == INS_pmuludq || ins == INS_pxor || ins == INS_pmaxub ||
- ins == INS_pminub || ins == INS_pmaxsw || ins == INS_pminsw || ins == INS_insertps || ins == INS_vinsertf128 ||
- ins == INS_punpckldq
+ ins == INS_pminub || ins == INS_pmaxsw || ins == INS_pminsw || ins == INS_insertps ||
+ ins == INS_vinsertf128 || ins == INS_punpckldq
);
}
diff --git a/src/jit/gcencode.cpp b/src/jit/gcencode.cpp
index d243396353..91aa084f2d 100644
--- a/src/jit/gcencode.cpp
+++ b/src/jit/gcencode.cpp
@@ -27,15 +27,15 @@ ReturnKind GCTypeToReturnKind(CorInfoGCType gcType)
{
switch (gcType)
{
- case TYPE_GC_NONE:
- return RT_Scalar;
- case TYPE_GC_REF:
- return RT_Object;
- case TYPE_GC_BYREF:
- return RT_ByRef;
- default:
- _ASSERTE(!"TYP_GC_OTHER is unexpected");
- return RT_Illegal;
+ case TYPE_GC_NONE:
+ return RT_Scalar;
+ case TYPE_GC_REF:
+ return RT_Object;
+ case TYPE_GC_BYREF:
+ return RT_ByRef;
+ default:
+ _ASSERTE(!"TYP_GC_OTHER is unexpected");
+ return RT_Illegal;
}
}
@@ -43,66 +43,66 @@ ReturnKind GCInfo::getReturnKind()
{
switch (compiler->info.compRetType)
{
- case TYP_REF:
- case TYP_ARRAY:
- return RT_Object;
- case TYP_BYREF:
- return RT_ByRef;
- case TYP_STRUCT:
- {
- CORINFO_CLASS_HANDLE structType = compiler->info.compMethodInfo->args.retTypeClass;
- var_types retType = compiler->getReturnTypeForStruct(structType);
-
- switch (retType)
- {
- case TYP_ARRAY:
- _ASSERTE(false && "TYP_ARRAY unexpected from getReturnTypeForStruct()");
- // fall through
case TYP_REF:
+ case TYP_ARRAY:
return RT_Object;
-
case TYP_BYREF:
return RT_ByRef;
-
case TYP_STRUCT:
- if (compiler->IsHfa(structType))
+ {
+ CORINFO_CLASS_HANDLE structType = compiler->info.compMethodInfo->args.retTypeClass;
+ var_types retType = compiler->getReturnTypeForStruct(structType);
+
+ switch (retType)
{
+ case TYP_ARRAY:
+ _ASSERTE(false && "TYP_ARRAY unexpected from getReturnTypeForStruct()");
+ // fall through
+ case TYP_REF:
+ return RT_Object;
+
+ case TYP_BYREF:
+ return RT_ByRef;
+
+ case TYP_STRUCT:
+ if (compiler->IsHfa(structType))
+ {
#ifdef _TARGET_X86_
- _ASSERTE(false && "HFAs not expected for X86");
+ _ASSERTE(false && "HFAs not expected for X86");
#endif // _TARGET_X86_
- return RT_Scalar;
- }
- else
- {
- // Multi-reg return
- BYTE gcPtrs[2] = { TYPE_GC_NONE, TYPE_GC_NONE };
- compiler->info.compCompHnd->getClassGClayout(structType, gcPtrs);
+ return RT_Scalar;
+ }
+ else
+ {
+ // Multi-reg return
+ BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
+ compiler->info.compCompHnd->getClassGClayout(structType, gcPtrs);
- ReturnKind first = GCTypeToReturnKind((CorInfoGCType)gcPtrs[0]);
- ReturnKind second = GCTypeToReturnKind((CorInfoGCType)gcPtrs[1]);
+ ReturnKind first = GCTypeToReturnKind((CorInfoGCType)gcPtrs[0]);
+ ReturnKind second = GCTypeToReturnKind((CorInfoGCType)gcPtrs[1]);
- return GetStructReturnKind(first, second);
+ return GetStructReturnKind(first, second);
+ }
+
+#ifdef _TARGET_X86_
+ case TYP_FLOAT:
+ case TYP_DOUBLE:
+ return RT_Float;
+#endif // _TARGET_X86_
+ default:
+ return RT_Scalar;
}
+ }
#ifdef _TARGET_X86_
case TYP_FLOAT:
case TYP_DOUBLE:
return RT_Float;
#endif // _TARGET_X86_
+
default:
return RT_Scalar;
- }
- }
-
-#ifdef _TARGET_X86_
- case TYP_FLOAT:
- case TYP_DOUBLE:
- return RT_Float;
-#endif // _TARGET_X86_
-
- default:
- return RT_Scalar;
}
}
@@ -194,8 +194,8 @@ static void regenLog(unsigned encoding, InfoHdr* header, InfoHdr* state)
state->prologSize, state->epilogSize, state->epilogCount, state->epilogAtEnd, state->ediSaved,
state->esiSaved, state->ebxSaved, state->ebpSaved, state->ebpFrame, state->interruptible,
state->doubleAlign, state->security, state->handlers, state->localloc, state->editNcontinue, state->varargs,
- state->profCallbacks, state->genericsContext, state->genericsContextIsMethodDesc,
- state->returnKind, state->argCount, state->frameSize,
+ state->profCallbacks, state->genericsContext, state->genericsContextIsMethodDesc, state->returnKind,
+ state->argCount, state->frameSize,
(state->untrackedCnt <= SET_UNTRACKED_MAX) ? state->untrackedCnt : HAS_UNTRACKED,
(state->varPtrTableSize == 0) ? 0 : HAS_VARPTR,
(state->gsCookieOffset == INVALID_GS_COOKIE_OFFSET) ? 0 : HAS_GS_COOKIE_OFFSET,
@@ -351,11 +351,11 @@ static int bigEncoding4(unsigned cur, unsigned tgt, unsigned max)
return cnt;
}
-BYTE FASTCALL encodeHeaderNext(const InfoHdr& header, InfoHdr* state, BYTE &codeSet)
+BYTE FASTCALL encodeHeaderNext(const InfoHdr& header, InfoHdr* state, BYTE& codeSet)
{
BYTE encoding = 0xff;
- codeSet = 1; // codeSet is 1 or 2, depending on whether the returned encoding
- // corresponds to InfoHdrAdjust, or InfoHdrAdjust2 enumerations.
+ codeSet = 1; // codeSet is 1 or 2, depending on whether the returned encoding
+ // corresponds to InfoHdrAdjust, or InfoHdrAdjust2 enumerations.
if (state->argCount != header.argCount)
{
@@ -638,8 +638,8 @@ BYTE FASTCALL encodeHeaderNext(const InfoHdr& header, InfoHdr* state, BYTE &code
if (GCInfoEncodesReturnKind() && (state->returnKind != header.returnKind))
{
state->returnKind = header.returnKind;
- codeSet = 2; // Two byte encoding
- encoding = header.returnKind;
+ codeSet = 2; // Two byte encoding
+ encoding = header.returnKind;
_ASSERTE(encoding < SET_RET_KIND_MAX);
goto DO_RETURN;
}
@@ -686,19 +686,20 @@ BYTE FASTCALL encodeHeaderNext(const InfoHdr& header, InfoHdr* state, BYTE &code
if (GCInfoEncodesRevPInvokeFrame() && (state->revPInvokeOffset != header.revPInvokeOffset))
{
- assert(state->revPInvokeOffset == INVALID_REV_PINVOKE_OFFSET || state->revPInvokeOffset == HAS_REV_PINVOKE_FRAME_OFFSET);
+ assert(state->revPInvokeOffset == INVALID_REV_PINVOKE_OFFSET ||
+ state->revPInvokeOffset == HAS_REV_PINVOKE_FRAME_OFFSET);
if (state->revPInvokeOffset == INVALID_REV_PINVOKE_OFFSET)
{
- // header.revPInvokeOffset is non-zero.
+ // header.revPInvokeOffset is non-zero.
state->revPInvokeOffset = HAS_REV_PINVOKE_FRAME_OFFSET;
- encoding = FLIP_REV_PINVOKE_FRAME;
+ encoding = FLIP_REV_PINVOKE_FRAME;
goto DO_RETURN;
}
else if (header.revPInvokeOffset == INVALID_REV_PINVOKE_OFFSET)
{
state->revPInvokeOffset = INVALID_REV_PINVOKE_OFFSET;
- encoding = FLIP_REV_PINVOKE_FRAME;
+ encoding = FLIP_REV_PINVOKE_FRAME;
goto DO_RETURN;
}
}
@@ -1297,7 +1298,7 @@ size_t GCInfo::gcInfoBlockHdrSave(
header->genericsContextIsMethodDesc =
header->genericsContext && (compiler->info.compMethodInfo->options & (CORINFO_GENERICS_CTXT_FROM_METHODDESC));
- if (GCInfoEncodesReturnKind())
+ if (GCInfoEncodesReturnKind())
{
ReturnKind returnKind = getReturnKind();
_ASSERTE(IsValidReturnKind(returnKind) && "Return Kind must be valid");
@@ -1368,7 +1369,7 @@ size_t GCInfo::gcInfoBlockHdrSave(
*dest++ = headerEncoding;
BYTE encoding = headerEncoding;
- BYTE codeSet = 1;
+ BYTE codeSet = 1;
while (encoding & MORE_BYTES_TO_FOLLOW)
{
encoding = encodeHeaderNext(*header, &state, codeSet);
@@ -1376,8 +1377,7 @@ size_t GCInfo::gcInfoBlockHdrSave(
#if REGEN_SHORTCUTS
regenLog(headerEncoding, header, &state);
#endif
- _ASSERTE(codeSet == 1 || codeSet == 2 &&
- "Encoding must correspond to InfoHdrAdjust or InfoHdrAdjust2");
+ _ASSERTE(codeSet == 1 || codeSet == 2 && "Encoding must correspond to InfoHdrAdjust or InfoHdrAdjust2");
if (codeSet == 2)
{
*dest++ = NEXT_OPCODE | MORE_BYTES_TO_FOLLOW;
@@ -1925,12 +1925,12 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un
}
else
{
- /* Stack-passed arguments which are not enregistered
- * are always reported in this "untracked stack
- * pointers" section of the GC info even if lvTracked==true
- */
+/* Stack-passed arguments which are not enregistered
+ * are always reported in this "untracked stack
+ * pointers" section of the GC info even if lvTracked==true
+ */
- /* Has this argument been enregistered? */
+/* Has this argument been enregistered? */
#ifndef LEGACY_BACKEND
if (!varDsc->lvOnFrame)
#else // LEGACY_BACKEND
diff --git a/src/jit/gentree.cpp b/src/jit/gentree.cpp
index fa84ae226b..a605741201 100644
--- a/src/jit/gentree.cpp
+++ b/src/jit/gentree.cpp
@@ -241,8 +241,8 @@ const char* GenTree::OpName(genTreeOps op)
#if MEASURE_NODE_SIZE && SMALL_TREE_NODES
static const char* opStructNames[] = {
- #define GTNODE(en, sn, st, cm, ok) #st,
- #include "gtlist.h"
+#define GTNODE(en, sn, st, cm, ok) #st,
+#include "gtlist.h"
};
const char* GenTree::OpStructName(genTreeOps op)
@@ -269,16 +269,15 @@ unsigned char GenTree::s_gtNodeSizes[GT_COUNT + 1];
#if NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
-unsigned char GenTree::s_gtTrueSizes[GT_COUNT+1]
-{
- #define GTNODE(en, sn, st, cm, ok) sizeof(st),
- #include "gtlist.h"
+unsigned char GenTree::s_gtTrueSizes[GT_COUNT + 1]{
+#define GTNODE(en, sn, st, cm, ok) sizeof(st),
+#include "gtlist.h"
};
#endif // NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
#if COUNT_AST_OPERS
-LONG GenTree::s_gtNodeCounts[GT_COUNT+1] = {0};
+LONG GenTree::s_gtNodeCounts[GT_COUNT + 1] = {0};
#endif // COUNT_AST_OPERS
/* static */
@@ -298,7 +297,7 @@ void GenTree::InitNodeSize()
// Now set all of the appropriate entries to 'large'
CLANG_FORMAT_COMMENT_ANCHOR;
- // clang-format off
+// clang-format off
#if defined(FEATURE_HFA) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
// On ARM32, ARM64 and System V for struct returning
// there is code that does GT_ASG-tree.CopyObj call.
@@ -451,37 +450,40 @@ bool GenTree::IsNodeProperlySized() const
#define BASH_HASH_SIZE 211
-inline hashme(genTreeOps op1, genTreeOps op2) { return ((op1 * 104729) ^ (op2 * 56569)) % BASH_HASH_SIZE; }
+inline hashme(genTreeOps op1, genTreeOps op2)
+{
+ return ((op1 * 104729) ^ (op2 * 56569)) % BASH_HASH_SIZE;
+}
struct BashHashDsc
{
- unsigned __int32 bhFullHash; // the hash value (unique for all old->new pairs)
- unsigned __int32 bhCount; // the same old->new bashings seen so far
- unsigned __int8 bhOperOld; // original gtOper
- unsigned __int8 bhOperNew; // new gtOper
+ unsigned __int32 bhFullHash; // the hash value (unique for all old->new pairs)
+ unsigned __int32 bhCount; // the same old->new bashings seen so far
+ unsigned __int8 bhOperOld; // original gtOper
+ unsigned __int8 bhOperNew; // new gtOper
};
-static BashHashDsc BashHash[BASH_HASH_SIZE];
+static BashHashDsc BashHash[BASH_HASH_SIZE];
-void GenTree::RecordOperBashing(genTreeOps operOld, genTreeOps operNew)
+void GenTree::RecordOperBashing(genTreeOps operOld, genTreeOps operNew)
{
- unsigned hash = hashme(operOld, operNew);
- BashHashDsc *desc = BashHash + hash;
+ unsigned hash = hashme(operOld, operNew);
+ BashHashDsc* desc = BashHash + hash;
if (desc->bhFullHash != hash)
{
- noway_assert(desc->bhCount == 0); // if this ever fires, need fix the hash fn
+ noway_assert(desc->bhCount == 0); // if this ever fires, need fix the hash fn
desc->bhFullHash = hash;
}
- desc->bhCount += 1;
- desc->bhOperOld = operOld;
- desc->bhOperNew = operNew;
+ desc->bhCount += 1;
+ desc->bhOperOld = operOld;
+ desc->bhOperNew = operNew;
}
-void GenTree::ReportOperBashing(FILE *f)
+void GenTree::ReportOperBashing(FILE* f)
{
- unsigned total = 0;
+ unsigned total = 0;
fflush(f);
@@ -493,19 +495,16 @@ void GenTree::ReportOperBashing(FILE *f)
for (unsigned h = 0; h < BASH_HASH_SIZE; h++)
{
- unsigned count = BashHash[h].bhCount;
+ unsigned count = BashHash[h].bhCount;
if (count == 0)
continue;
- unsigned opOld = BashHash[h].bhOperOld;
- unsigned opNew = BashHash[h].bhOperNew;
+ unsigned opOld = BashHash[h].bhOperOld;
+ unsigned opNew = BashHash[h].bhOperNew;
fprintf(f, " GT_%-13s -> GT_%-13s [size: %3u->%3u] %c %7u\n", OpName((genTreeOps)opOld),
- OpName((genTreeOps)opNew),
- s_gtTrueSizes[opOld],
- s_gtTrueSizes[opNew],
- (s_gtTrueSizes[opOld] < s_gtTrueSizes[opNew]) ? 'X' : ' ',
- count);
+ OpName((genTreeOps)opNew), s_gtTrueSizes[opOld], s_gtTrueSizes[opNew],
+ (s_gtTrueSizes[opOld] < s_gtTrueSizes[opNew]) ? 'X' : ' ', count);
total += count;
}
fprintf(f, "\n");
@@ -515,7 +514,7 @@ void GenTree::ReportOperBashing(FILE *f)
fflush(f);
}
-#endif// NODEBASH_STATS
+#endif // NODEBASH_STATS
#else // SMALL_TREE_NODES
@@ -534,7 +533,7 @@ bool GenTree::IsNodeProperlySized() const
void GenTree::DumpNodeSizes(FILE* fp)
{
- // Dump the sizes of the various GenTree flavors
+// Dump the sizes of the various GenTree flavors
#if SMALL_TREE_NODES
fprintf(fp, "Small tree node size = %3u bytes\n", TREE_NODE_SZ_SMALL);
@@ -545,18 +544,18 @@ void GenTree::DumpNodeSizes(FILE* fp)
#if SMALL_TREE_NODES
// Verify that node sizes are set kosherly and dump sizes
- for (unsigned op = GT_NONE+1; op < GT_COUNT; op++)
+ for (unsigned op = GT_NONE + 1; op < GT_COUNT; op++)
{
- unsigned needSize = s_gtTrueSizes[op];
- unsigned nodeSize = s_gtNodeSizes[op];
+ unsigned needSize = s_gtTrueSizes[op];
+ unsigned nodeSize = s_gtNodeSizes[op];
const char* structNm = OpStructName((genTreeOps)op);
- const char* operName = OpName((genTreeOps)op);
+ const char* operName = OpName((genTreeOps)op);
- bool repeated = false;
+ bool repeated = false;
// Have we seen this struct flavor before?
- for (unsigned mop = GT_NONE+1; mop < op; mop++)
+ for (unsigned mop = GT_NONE + 1; mop < op; mop++)
{
if (strcmp(structNm, OpStructName((genTreeOps)mop)) == 0)
{
@@ -568,17 +567,14 @@ void GenTree::DumpNodeSizes(FILE* fp)
// Don't repeat the same GenTree flavor unless we have an error
if (!repeated || needSize > nodeSize)
{
- unsigned sizeChar = '?';
+ unsigned sizeChar = '?';
- if (nodeSize == TREE_NODE_SZ_SMALL)
+ if (nodeSize == TREE_NODE_SZ_SMALL)
sizeChar = 'S';
else if (nodeSize == TREE_NODE_SZ_LARGE)
sizeChar = 'L';
- fprintf(fp, "GT_%-16s ... %-19s = %3u bytes (%c)", operName,
- structNm,
- needSize,
- sizeChar);
+ fprintf(fp, "GT_%-16s ... %-19s = %3u bytes (%c)", operName, structNm, needSize, sizeChar);
if (needSize > nodeSize)
{
fprintf(fp, " -- ERROR -- allocation is only %u bytes!", nodeSize);
@@ -593,7 +589,6 @@ void GenTree::DumpNodeSizes(FILE* fp)
}
#endif
-
}
#endif // MEASURE_NODE_SIZE
@@ -1946,9 +1941,10 @@ void GenTreeCall::ReplaceCallOperand(GenTree** useEdge, GenTree* replacement)
assert(TryGetUse(*useEdge, &useEdge));
GenTree* originalOperand = *useEdge;
- *useEdge = replacement;
+ *useEdge = replacement;
- const bool isArgument = (replacement != gtControlExpr) &&
+ const bool isArgument =
+ (replacement != gtControlExpr) &&
((gtCallType != CT_INDIRECT) || ((replacement != gtCallCookie) && (replacement != gtCallAddr)));
if (isArgument)
@@ -2295,7 +2291,9 @@ AGAIN:
#ifdef FEATURE_READYTORUN_COMPILER
if (op1->gtCall.gtEntryPoint.addr != op2->gtCall.gtEntryPoint.addr)
+ {
return false;
+ }
#endif
}
else
@@ -2784,8 +2782,8 @@ AGAIN:
hash = genTreeHashAdd(hash, tree->gtAllocObj.gtNewHelper);
break;
case GT_OBJ:
- hash = genTreeHashAdd(hash, static_cast<unsigned>(
- reinterpret_cast<uintptr_t>(tree->gtObj.gtClass)));
+ hash =
+ genTreeHashAdd(hash, static_cast<unsigned>(reinterpret_cast<uintptr_t>(tree->gtObj.gtClass)));
break;
// For the ones below no extra argument matters for comparison.
@@ -3423,7 +3421,7 @@ GenTreePtr Compiler::gtReverseCond(GenTree* tree)
else if (tree->OperGet() == GT_JCC)
{
GenTreeJumpCC* jcc = tree->AsJumpCC();
- jcc->gtCondition = GenTree::ReverseRelop(jcc->gtCondition);
+ jcc->gtCondition = GenTree::ReverseRelop(jcc->gtCondition);
}
else
{
@@ -3486,24 +3484,24 @@ bool GenTree::gtIsValid64RsltMul()
#endif // DEBUG
- //------------------------------------------------------------------------------
- // gtSetListOrder : Figure out the evaluation order for a list of values.
- //
- //
- // Arguments:
- // list - List to figure out the evaluation order for
- // isListCallArgs - True iff the list is a list of call arguments
- // callArgsInRegs - True iff the list is a list of call arguments and they are passed in registers
- //
- // Return Value:
- // True if the operation can be a root of a bitwise rotation tree; false otherwise.
-
-unsigned Compiler::gtSetListOrder(GenTree *list, bool isListCallArgs, bool callArgsInRegs)
+//------------------------------------------------------------------------------
+// gtSetListOrder : Figure out the evaluation order for a list of values.
+//
+//
+// Arguments:
+// list - List to figure out the evaluation order for
+// isListCallArgs - True iff the list is a list of call arguments
+// callArgsInRegs - True iff the list is a list of call arguments and they are passed in registers
+//
+// Return Value:
+// True if the operation can be a root of a bitwise rotation tree; false otherwise.
+
+unsigned Compiler::gtSetListOrder(GenTree* list, bool isListCallArgs, bool callArgsInRegs)
{
assert((list != nullptr) && list->OperIsAnyList());
assert(!callArgsInRegs || isListCallArgs);
- ArrayStack<GenTree *> listNodes(this);
+ ArrayStack<GenTree*> listNodes(this);
do
{
@@ -3511,27 +3509,27 @@ unsigned Compiler::gtSetListOrder(GenTree *list, bool isListCallArgs,
list = list->gtOp.gtOp2;
} while ((list != nullptr) && (list->OperIsAnyList()));
- unsigned nxtlvl = (list == nullptr) ? 0 : gtSetEvalOrder(list);
+ unsigned nxtlvl = (list == nullptr) ? 0 : gtSetEvalOrder(list);
while (listNodes.Height() > 0)
{
#if FEATURE_STACK_FP_X87
/* Save the current FP stack level since an argument list
* will implicitly pop the FP stack when pushing the argument */
- unsigned FPlvlSave = codeGen->genGetFPstkLevel();
+ unsigned FPlvlSave = codeGen->genGetFPstkLevel();
#endif // FEATURE_STACK_FP_X87
list = listNodes.Pop();
assert(list && list->OperIsAnyList());
- GenTreePtr next = list->gtOp.gtOp2;
+ GenTreePtr next = list->gtOp.gtOp2;
- unsigned level = 0;
- unsigned ftreg = 0;
+ unsigned level = 0;
+ unsigned ftreg = 0;
// TODO: Do we have to compute costs differently for argument lists and
// all other lists?
// https://github.com/dotnet/coreclr/issues/7095
- unsigned costSz = (isListCallArgs || (next == nullptr)) ? 0 : 1;
- unsigned costEx = (isListCallArgs || (next == nullptr)) ? 0 : 1;
+ unsigned costSz = (isListCallArgs || (next == nullptr)) ? 0 : 1;
+ unsigned costEx = (isListCallArgs || (next == nullptr)) ? 0 : 1;
if (next != nullptr)
{
@@ -3547,8 +3545,8 @@ unsigned Compiler::gtSetListOrder(GenTree *list, bool isListCallArgs,
costSz += next->gtCostSz;
}
- GenTreePtr op1 = list->gtOp.gtOp1;
- unsigned lvl = gtSetEvalOrder(op1);
+ GenTreePtr op1 = list->gtOp.gtOp1;
+ unsigned lvl = gtSetEvalOrder(op1);
#if FEATURE_STACK_FP_X87
// restore the FP level
@@ -3562,8 +3560,8 @@ unsigned Compiler::gtSetListOrder(GenTree *list, bool isListCallArgs,
{
unsigned tmpl;
- tmpl = lvl;
- lvl = nxtlvl;
+ tmpl = lvl;
+ lvl = nxtlvl;
nxtlvl = tmpl;
}
@@ -3603,7 +3601,7 @@ unsigned Compiler::gtSetListOrder(GenTree *list, bool isListCallArgs,
{
costSz += op1->gtCostSz;
#ifdef _TARGET_XARCH_
- if (callArgsInRegs) // push is smaller than mov to reg
+ if (callArgsInRegs) // push is smaller than mov to reg
#endif
{
costSz += 1;
@@ -4962,11 +4960,11 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree)
case GT_LIST:
case GT_FIELD_LIST:
- {
- const bool isListCallArgs = false;
- const bool callArgsInRegs = false;
- return gtSetListOrder(tree, isListCallArgs, callArgsInRegs);
- }
+ {
+ const bool isListCallArgs = false;
+ const bool callArgsInRegs = false;
+ return gtSetListOrder(tree, isListCallArgs, callArgsInRegs);
+ }
default:
break;
@@ -5423,7 +5421,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree)
#endif // FEATURE_STACK_FP_X87
const bool isListCallArgs = true;
const bool callArgsInRegs = false;
- lvl2 = gtSetListOrder(tree->gtCall.gtCallArgs, isListCallArgs, callArgsInRegs);
+ lvl2 = gtSetListOrder(tree->gtCall.gtCallArgs, isListCallArgs, callArgsInRegs);
if (level < lvl2)
{
level = lvl2;
@@ -5447,7 +5445,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree)
#endif // FEATURE_STACK_FP_X87
const bool isListCallArgs = true;
const bool callArgsInRegs = true;
- lvl2 = gtSetListOrder(tree->gtCall.gtCallLateArgs, isListCallArgs, callArgsInRegs);
+ lvl2 = gtSetListOrder(tree->gtCall.gtCallLateArgs, isListCallArgs, callArgsInRegs);
if (level < lvl2)
{
level = lvl2;
@@ -7287,7 +7285,10 @@ GenTree* Compiler::gtNewBlockVal(GenTreePtr addr, unsigned size)
// if FEATURE_SIMD is enabled and the source has a SIMD type.
// isVolatile - Is this marked as volatile memory?
-GenTree* Compiler::gtNewCpObjNode(GenTreePtr dstAddr, GenTreePtr srcAddr, CORINFO_CLASS_HANDLE structHnd, bool isVolatile)
+GenTree* Compiler::gtNewCpObjNode(GenTreePtr dstAddr,
+ GenTreePtr srcAddr,
+ CORINFO_CLASS_HANDLE structHnd,
+ bool isVolatile)
{
GenTreePtr lhs = gtNewStructVal(structHnd, dstAddr);
GenTree* src = nullptr;
@@ -7881,14 +7882,15 @@ GenTreePtr Compiler::gtCloneExpr(GenTree* tree,
case GT_LIST:
assert((tree->gtOp.gtOp2 == nullptr) || tree->gtOp.gtOp2->OperIsList());
- copy = new (this, GT_LIST) GenTreeArgList(tree->gtOp.gtOp1);
+ copy = new (this, GT_LIST) GenTreeArgList(tree->gtOp.gtOp1);
copy->gtOp.gtOp2 = tree->gtOp.gtOp2;
break;
case GT_FIELD_LIST:
- copy = new (this, GT_FIELD_LIST) GenTreeFieldList(tree->gtOp.gtOp1, tree->AsFieldList()->gtFieldOffset, tree->AsFieldList()->gtFieldType, nullptr);
+ copy = new (this, GT_FIELD_LIST) GenTreeFieldList(tree->gtOp.gtOp1, tree->AsFieldList()->gtFieldOffset,
+ tree->AsFieldList()->gtFieldType, nullptr);
copy->gtOp.gtOp2 = tree->gtOp.gtOp2;
- copy->gtFlags = (copy->gtFlags & ~GTF_FIELD_LIST_HEAD) | (tree->gtFlags & GTF_FIELD_LIST_HEAD);
+ copy->gtFlags = (copy->gtFlags & ~GTF_FIELD_LIST_HEAD) | (tree->gtFlags & GTF_FIELD_LIST_HEAD);
break;
case GT_INDEX:
@@ -9057,19 +9059,12 @@ GenTreePtr GenTree::GetChild(unsigned childNum)
}
}
-GenTreeUseEdgeIterator::GenTreeUseEdgeIterator()
- : m_node(nullptr)
- , m_edge(nullptr)
- , m_argList(nullptr)
- , m_state(-1)
+GenTreeUseEdgeIterator::GenTreeUseEdgeIterator() : m_node(nullptr), m_edge(nullptr), m_argList(nullptr), m_state(-1)
{
}
GenTreeUseEdgeIterator::GenTreeUseEdgeIterator(GenTree* node)
- : m_node(node)
- , m_edge(nullptr)
- , m_argList(nullptr)
- , m_state(0)
+ : m_node(node), m_edge(nullptr), m_argList(nullptr), m_state(0)
{
assert(m_node != nullptr);
@@ -9269,13 +9264,13 @@ void GenTreeUseEdgeIterator::MoveToNextCallUseEdge()
{
enum
{
- CALL_INSTANCE = 0,
- CALL_ARGS = 1,
- CALL_LATE_ARGS = 2,
+ CALL_INSTANCE = 0,
+ CALL_ARGS = 1,
+ CALL_LATE_ARGS = 2,
CALL_CONTROL_EXPR = 3,
- CALL_COOKIE = 4,
- CALL_ADDRESS = 5,
- CALL_TERMINAL = 6,
+ CALL_COOKIE = 4,
+ CALL_ADDRESS = 5,
+ CALL_TERMINAL = 6,
};
GenTreeCall* call = m_node->AsCall();
@@ -11225,7 +11220,8 @@ void Compiler::gtDispTree(GenTreePtr tree,
}
else if (tree->OperIsFieldList())
{
- printf(" %s at offset %d", varTypeName(tree->AsFieldList()->gtFieldType), tree->AsFieldList()->gtFieldOffset);
+ printf(" %s at offset %d", varTypeName(tree->AsFieldList()->gtFieldType),
+ tree->AsFieldList()->gtFieldOffset);
}
IndirectAssignmentAnnotation* pIndirAnnote;
@@ -11847,8 +11843,8 @@ void Compiler::gtDispLIRNode(GenTree* node)
}
// Visit operands
- IndentInfo operandArc = IIArcTop;
- int callArgNumber = 0;
+ IndentInfo operandArc = IIArcTop;
+ int callArgNumber = 0;
for (GenTree* operand : node->Operands())
{
if (operand->IsArgPlaceHolderNode() || !operand->IsValue())
@@ -12816,7 +12812,8 @@ GenTreePtr Compiler::gtFoldExprConst(GenTreePtr tree)
if (d1 <= -1.0 && varTypeIsUnsigned(tree->CastToType()))
{
// Don't fold conversions of these cases becasue the result is unspecified per ECMA spec
- // and the native math doing the fold doesn't match the run-time computation on all platforms.
+ // and the native math doing the fold doesn't match the run-time computation on all
+ // platforms.
// We want the behavior to be same with or without folding.
return tree;
}
diff --git a/src/jit/gentree.h b/src/jit/gentree.h
index 6d50cf435c..bad017c6db 100644
--- a/src/jit/gentree.h
+++ b/src/jit/gentree.h
@@ -1516,7 +1516,7 @@ public:
bool OperIsAnyList() const
{
- return OperIsAnyList(gtOper);
+ return OperIsAnyList(gtOper);
}
inline GenTreePtr MoveNext();
@@ -1594,7 +1594,7 @@ public:
static bool Compare(GenTreePtr op1, GenTreePtr op2, bool swapOK = false);
- //---------------------------------------------------------------------
+//---------------------------------------------------------------------
#if defined(DEBUG)
static const char* NodeName(genTreeOps op);
@@ -1645,10 +1645,14 @@ public:
#if SMALL_TREE_NODES
#if NODEBASH_STATS
static void RecordOperBashing(genTreeOps operOld, genTreeOps operNew);
- static void ReportOperBashing(FILE *fp);
+ static void ReportOperBashing(FILE* fp);
#else
- static void RecordOperBashing(genTreeOps operOld, genTreeOps operNew) { /* do nothing */ }
- static void ReportOperBashing(FILE *fp) { /* do nothing */ }
+ static void RecordOperBashing(genTreeOps operOld, genTreeOps operNew)
+ { /* do nothing */
+ }
+ static void ReportOperBashing(FILE* fp)
+ { /* do nothing */
+ }
#endif
#endif
@@ -2760,7 +2764,7 @@ struct GenTreeFieldList : public GenTreeArgList
// While GT_FIELD_LIST can be in a GT_LIST, GT_FIELD_LISTs cannot be nested or have GT_LISTs.
assert(!arg->OperIsAnyList());
gtFieldOffset = fieldOffset;
- gtFieldType = fieldType;
+ gtFieldType = fieldType;
if (prevList == nullptr)
{
gtFlags |= GTF_FIELD_LIST_HEAD;
@@ -4751,21 +4755,18 @@ struct GenTreeAllocObj final : public GenTreeUnOp
#endif
};
-
struct GenTreeJumpCC final : public GenTree
{
genTreeOps gtCondition; // any relop
GenTreeJumpCC(genTreeOps condition)
- : GenTree(GT_JCC, TYP_VOID DEBUGARG(/*largeNode*/ FALSE))
- , gtCondition(condition)
+ : GenTree(GT_JCC, TYP_VOID DEBUGARG(/*largeNode*/ FALSE)), gtCondition(condition)
{
assert(OperIsCompare(condition));
}
#if DEBUGGABLE_GENTREE
- GenTreeJumpCC()
- : GenTree()
+ GenTreeJumpCC() : GenTree()
{
}
#endif // DEBUGGABLE_GENTREE
diff --git a/src/jit/gschecks.cpp b/src/jit/gschecks.cpp
index 43cbb892e9..9255d8fd36 100644
--- a/src/jit/gschecks.cpp
+++ b/src/jit/gschecks.cpp
@@ -40,9 +40,9 @@ const unsigned NO_SHADOW_COPY = UINT_MAX;
* The current function has an unsafe buffer on the stack. Search for vulnerable
* parameters which could be used to modify a code address and take over the process
* in the case of a buffer overrun. Create a safe local copy for each vulnerable parameter,
- * which will be allocated bellow the unsafe buffer. Change uses of the param to the
+ * which will be allocated bellow the unsafe buffer. Change uses of the param to the
* shadow copy.
- *
+ *
* A pointer under indirection is considered vulnerable. A malicious user could read from
* protected memory or write to it. If a parameter is assigned/computed into another variable,
* and is a pointer (i.e., under indirection), then we consider the variable to be part of the
@@ -58,7 +58,7 @@ void Compiler::gsCopyShadowParams()
// Allocate array for shadow param info
gsShadowVarInfo = new (this, CMK_Unknown) ShadowParamVarInfo[lvaCount]();
- // Find groups of variables assigned to each other, and also
+ // Find groups of variables assigned to each other, and also
// tracks variables which are dereferenced and marks them as ptrs.
// Look for assignments to *p, and ptrs passed to functions
if (gsFindVulnerableParams())
@@ -83,7 +83,7 @@ struct MarkPtrsInfo
{
printf(
"[MarkPtrsInfo] = {comp = %p, lvAssignDef = %d, isAssignSrc = %d, isUnderIndir = %d, skipNextNode = %d}\n",
- comp, lvAssignDef, isAssignSrc, isUnderIndir, skipNextNode);
+ comp, lvAssignDef, isAssignSrc, isUnderIndir, skipNextNode);
}
#endif
};
@@ -129,7 +129,7 @@ Compiler::fgWalkResult Compiler::gsMarkPtrsAndAssignGroups(GenTreePtr* pTree, fg
newState.isUnderIndir = true;
{
newState.skipNextNode = true; // Don't have to worry about which kind of node we're dealing with
- comp->fgWalkTreePre(&tree, comp->gsMarkPtrsAndAssignGroups, (void *)&newState);
+ comp->fgWalkTreePre(&tree, comp->gsMarkPtrsAndAssignGroups, (void*)&newState);
}
return WALK_SKIP_SUBTREES;
@@ -160,50 +160,50 @@ Compiler::fgWalkResult Compiler::gsMarkPtrsAndAssignGroups(GenTreePtr* pTree, fg
{
shadowVarInfo[pState->lvAssignDef].assignGroup->bitVectSet(lclNum);
}
-
+
// Point both to the same bit vector
shadowVarInfo[lclNum].assignGroup = shadowVarInfo[pState->lvAssignDef].assignGroup;
}
else if (shadowVarInfo[lclNum].assignGroup)
{
shadowVarInfo[lclNum].assignGroup->bitVectSet(pState->lvAssignDef);
-
+
// Point both to the same bit vector
shadowVarInfo[pState->lvAssignDef].assignGroup = shadowVarInfo[lclNum].assignGroup;
}
else
{
- FixedBitVect* bv = FixedBitVect::bitVectInit(pState->comp->lvaCount, pState->comp);
+ FixedBitVect* bv = FixedBitVect::bitVectInit(pState->comp->lvaCount, pState->comp);
// (shadowVarInfo[pState->lvAssignDef] == NULL && shadowVarInfo[lclNew] == NULL);
// Neither of them has an assign group yet. Make a new one.
shadowVarInfo[pState->lvAssignDef].assignGroup = bv;
- shadowVarInfo[lclNum].assignGroup = bv;
+ shadowVarInfo[lclNum].assignGroup = bv;
bv->bitVectSet(pState->lvAssignDef);
bv->bitVectSet(lclNum);
}
}
return WALK_CONTINUE;
-
+
// Calls - Mark arg variables
case GT_CALL:
newState.isUnderIndir = false;
- newState.isAssignSrc = false;
+ newState.isAssignSrc = false;
{
if (tree->gtCall.gtCallObjp)
{
newState.isUnderIndir = true;
- comp->fgWalkTreePre(&tree->gtCall.gtCallObjp, gsMarkPtrsAndAssignGroups, (void*)&newState);
+ comp->fgWalkTreePre(&tree->gtCall.gtCallObjp, gsMarkPtrsAndAssignGroups, (void*)&newState);
}
for (GenTreeArgList* args = tree->gtCall.gtCallArgs; args; args = args->Rest())
{
- comp->fgWalkTreePre(&args->Current(), gsMarkPtrsAndAssignGroups, (void*)&newState);
+ comp->fgWalkTreePre(&args->Current(), gsMarkPtrsAndAssignGroups, (void*)&newState);
}
for (GenTreeArgList* args = tree->gtCall.gtCallLateArgs; args; args = args->Rest())
{
- comp->fgWalkTreePre(&args->Current(), gsMarkPtrsAndAssignGroups, (void*)&newState);
+ comp->fgWalkTreePre(&args->Current(), gsMarkPtrsAndAssignGroups, (void*)&newState);
}
if (tree->gtCall.gtCallType == CT_INDIRECT)
@@ -213,7 +213,7 @@ Compiler::fgWalkResult Compiler::gsMarkPtrsAndAssignGroups(GenTreePtr* pTree, fg
// A function pointer is treated like a write-through pointer since
// it controls what code gets executed, and so indirectly can cause
// a write to memory.
- comp->fgWalkTreePre(&tree->gtCall.gtCallAddr, gsMarkPtrsAndAssignGroups, (void*)&newState);
+ comp->fgWalkTreePre(&tree->gtCall.gtCallAddr, gsMarkPtrsAndAssignGroups, (void*)&newState);
}
}
return WALK_SKIP_SUBTREES;
@@ -223,7 +223,7 @@ Compiler::fgWalkResult Compiler::gsMarkPtrsAndAssignGroups(GenTreePtr* pTree, fg
// We'll assume p in "**p = " can be vulnerable because by changing 'p', someone
// could control where **p stores to.
{
- comp->fgWalkTreePre(&tree->gtOp.gtOp1, comp->gsMarkPtrsAndAssignGroups, (void*)&newState);
+ comp->fgWalkTreePre(&tree->gtOp.gtOp1, comp->gsMarkPtrsAndAssignGroups, (void*)&newState);
}
return WALK_SKIP_SUBTREES;
@@ -251,7 +251,7 @@ Compiler::fgWalkResult Compiler::gsMarkPtrsAndAssignGroups(GenTreePtr* pTree, fg
{
// Walk dst side
comp->fgWalkTreePre(&tree->gtOp.gtOp1, comp->gsMarkPtrsAndAssignGroups, (void*)&newState);
-
+
// Now handle src side
isLocVar = tree->gtOp.gtOp1->OperGet() == GT_LCL_VAR;
isLocFld = tree->gtOp.gtOp1->OperGet() == GT_LCL_FLD;
@@ -262,7 +262,7 @@ Compiler::fgWalkResult Compiler::gsMarkPtrsAndAssignGroups(GenTreePtr* pTree, fg
newState.lvAssignDef = lclNum;
newState.isAssignSrc = true;
}
-
+
comp->fgWalkTreePre(&tree->gtOp.gtOp2, comp->gsMarkPtrsAndAssignGroups, (void*)&newState);
}
@@ -377,7 +377,7 @@ bool Compiler::gsFindVulnerableParams()
*/
void Compiler::gsParamsToShadows()
{
- // Cache old count since we'll add new variables, and
+ // Cache old count since we'll add new variables, and
// gsShadowVarInfo will not grow to accomodate the new ones.
UINT lvaOldCount = lvaCount;
@@ -513,7 +513,7 @@ void Compiler::gsParamsToShadows()
GenTreePtr src = gtNewLclvNode(shadowVar, lvaTable[shadowVar].TypeGet());
GenTreePtr dst = gtNewLclvNode(lclNum, varDsc->TypeGet());
-
+
src->gtFlags |= GTF_DONT_CSE;
dst->gtFlags |= GTF_DONT_CSE;
@@ -530,7 +530,7 @@ void Compiler::gsParamsToShadows()
{
opAssign = gtNewAssignNode(dst, src);
}
-
+
(void)fgInsertStmtNearEnd(block, fgMorphTree(opAssign));
}
}
@@ -552,8 +552,8 @@ Compiler::fgWalkResult Compiler::gsReplaceShadowParams(GenTreePtr* pTree, fgWalk
{
asg = tree; // "asg" is the assignment tree.
tree = tree->gtOp.gtOp1; // "tree" is the local var tree at the left-hand size of the assignment.
- }
-
+ }
+
if (tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_LCL_FLD)
{
UINT paramNum = tree->gtLclVarCommon.gtLclNum;
@@ -571,7 +571,7 @@ Compiler::fgWalkResult Compiler::gsReplaceShadowParams(GenTreePtr* pTree, fgWalk
if (varTypeIsSmall(comp->lvaTable[paramNum].TypeGet()))
{
tree->gtType = TYP_INT;
- if (asg)
+ if (asg)
{
// If this is an assignment tree, propagate the type to it as well.
asg->gtType = TYP_INT;
diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp
index 001a529315..b80d06417b 100644
--- a/src/jit/importer.cpp
+++ b/src/jit/importer.cpp
@@ -1777,15 +1777,19 @@ GenTreePtr Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
unsigned handleFlags,
void* compileTimeHandle)
{
- CORINFO_GENERIC_HANDLE handle = 0;
- void* pIndirection = 0;
+ CORINFO_GENERIC_HANDLE handle = nullptr;
+ void* pIndirection = nullptr;
assert(pLookup->accessType != IAT_PPVALUE);
if (pLookup->accessType == IAT_VALUE)
+ {
handle = pLookup->handle;
+ }
else if (pLookup->accessType == IAT_PVALUE)
+ {
pIndirection = pLookup->addr;
- return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, 0, compileTimeHandle);
+ }
+ return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
}
GenTreePtr Compiler::impReadyToRunHelperToTree(
@@ -1798,7 +1802,9 @@ GenTreePtr Compiler::impReadyToRunHelperToTree(
CORINFO_CONST_LOOKUP lookup;
#if COR_JIT_EE_VERSION > 460
if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
- return NULL;
+ {
+ return nullptr;
+ }
#else
info.compCompHnd->getReadyToRunHelper(pResolvedToken, helper, &lookup);
#endif
@@ -1828,7 +1834,9 @@ GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CO
*op1->gtFptrVal.gtLdftnResolvedToken = *pResolvedToken;
}
else
+ {
op1->gtFptrVal.gtEntryPoint.addr = nullptr;
+ }
#endif
break;
@@ -5001,7 +5009,7 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
if (opts.IsReadyToRun())
{
op1 = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
- usingReadyToRunHelper = (op1 != NULL);
+ usingReadyToRunHelper = (op1 != nullptr);
}
if (!usingReadyToRunHelper)
@@ -7035,7 +7043,7 @@ var_types Compiler::impImportCall(OPCODE opcode,
{
instParam =
impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
- if (instParam == NULL)
+ if (instParam == nullptr)
{
return callRetTyp;
}
@@ -12403,7 +12411,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
if (opts.IsReadyToRun())
{
op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
- usingReadyToRunHelper = (op1 != NULL);
+ usingReadyToRunHelper = (op1 != nullptr);
}
if (!usingReadyToRunHelper)
@@ -12832,7 +12840,9 @@ void Compiler::impImportBlockCode(BasicBlock* block)
#ifdef FEATURE_READYTORUN_COMPILER
if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
+ {
op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
+ }
#endif
op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
@@ -13138,7 +13148,9 @@ void Compiler::impImportBlockCode(BasicBlock* block)
#ifdef FEATURE_READYTORUN_COMPILER
if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
+ {
op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
+ }
#endif
op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
@@ -13380,7 +13392,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
{
op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
gtNewArgList(op2));
- usingReadyToRunHelper = (op1 != NULL);
+ usingReadyToRunHelper = (op1 != nullptr);
if (!usingReadyToRunHelper)
{
@@ -13392,9 +13404,11 @@ void Compiler::impImportBlockCode(BasicBlock* block)
// Reason: performance (today, we'll always use the slow helper for the R2R generics case)
// Need to restore array classes before creating array objects on the heap
- op1 = impTokenToHandle(&resolvedToken, NULL, TRUE /*mustRestoreHandle*/);
- if (op1 == NULL) // compDonotInline()
+ op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
+ if (op1 == nullptr)
+ { // compDonotInline()
return;
+ }
}
}
@@ -13502,7 +13516,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
GenTreePtr opLookup =
impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
gtNewArgList(op1));
- usingReadyToRunHelper = (opLookup != NULL);
+ usingReadyToRunHelper = (opLookup != nullptr);
op1 = (usingReadyToRunHelper ? opLookup : op1);
if (!usingReadyToRunHelper)
@@ -13514,9 +13528,11 @@ void Compiler::impImportBlockCode(BasicBlock* block)
// 3) Perform the 'is instance' check on the input object
// Reason: performance (today, we'll always use the slow helper for the R2R generics case)
- op2 = impTokenToHandle(&resolvedToken, NULL, FALSE);
- if (op2 == NULL) // compDonotInline()
+ op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
+ if (op2 == nullptr)
+ { // compDonotInline()
return;
+ }
}
}
@@ -14030,7 +14046,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
{
GenTreePtr opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST,
TYP_REF, gtNewArgList(op1));
- usingReadyToRunHelper = (opLookup != NULL);
+ usingReadyToRunHelper = (opLookup != nullptr);
op1 = (usingReadyToRunHelper ? opLookup : op1);
if (!usingReadyToRunHelper)
@@ -14042,9 +14058,11 @@ void Compiler::impImportBlockCode(BasicBlock* block)
// 3) Check the object on the stack for the type-cast
// Reason: performance (today, we'll always use the slow helper for the R2R generics case)
- op2 = impTokenToHandle(&resolvedToken, NULL, FALSE);
- if (op2 == NULL) // compDonotInline()
+ op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
+ if (op2 == nullptr)
+ { // compDonotInline()
return;
+ }
}
}
diff --git a/src/jit/jit.h b/src/jit/jit.h
index 7485b8b388..10a0adaee7 100644
--- a/src/jit/jit.h
+++ b/src/jit/jit.h
@@ -259,7 +259,7 @@ struct CLRConfig
#define FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY(x)
#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
-#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)|| (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
+#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
#define FEATURE_PUT_STRUCT_ARG_STK 1
#define PUT_STRUCT_ARG_STK_ONLY_ARG(x) , x
#define PUT_STRUCT_ARG_STK_ONLY(x) x
@@ -477,15 +477,15 @@ typedef ptrdiff_t ssize_t;
#define NODEBASH_STATS 0 // Collect stats on changed gtOper values in GenTree's.
#define COUNT_AST_OPERS 0 // Display use counts for GenTree operators.
-#define VERBOSE_SIZES 0 // Always display GC info sizes. If set, DISPLAY_SIZES must also be set.
-#define VERBOSE_VERIFY 0 // Dump additional information when verifying code. Useful to debug verification bugs.
+#define VERBOSE_SIZES 0 // Always display GC info sizes. If set, DISPLAY_SIZES must also be set.
+#define VERBOSE_VERIFY 0 // Dump additional information when verifying code. Useful to debug verification bugs.
#ifdef DEBUG
-#define MEASURE_MEM_ALLOC 1 // Collect memory allocation stats.
-#define LOOP_HOIST_STATS 1 // Collect loop hoisting stats.
+#define MEASURE_MEM_ALLOC 1 // Collect memory allocation stats.
+#define LOOP_HOIST_STATS 1 // Collect loop hoisting stats.
#else
-#define MEASURE_MEM_ALLOC 0 // You can set this to 1 to get memory stats in retail, as well
-#define LOOP_HOIST_STATS 0 // You can set this to 1 to get loop hoist stats in retail, as well
+#define MEASURE_MEM_ALLOC 0 // You can set this to 1 to get memory stats in retail, as well
+#define LOOP_HOIST_STATS 0 // You can set this to 1 to get loop hoist stats in retail, as well
#endif
/*****************************************************************************/
diff --git a/src/jit/jitconfigvalues.h b/src/jit/jitconfigvalues.h
index d03a31f305..954e863504 100644
--- a/src/jit/jitconfigvalues.h
+++ b/src/jit/jitconfigvalues.h
@@ -215,7 +215,7 @@ CONFIG_INTEGER(JitEnableNoWayAssert, W("JitEnableNoWayAssert"), 1)
// (normally MEASURE_MEM_ALLOC is off for release builds but if it's toggled on
// for release in "jit.h" the flag would be missing for some includers).
// TODO-Cleanup: need to make 'MEASURE_MEM_ALLOC' well-defined here at all times.
-CONFIG_INTEGER(DisplayMemStats, W("JitMemStats"), 0) // Display JIT memory usage statistics
+CONFIG_INTEGER(DisplayMemStats, W("JitMemStats"), 0) // Display JIT memory usage statistics
CONFIG_INTEGER(JitAggressiveInlining, W("JitAggressiveInlining"), 0) // Aggressive inlining of all methods
CONFIG_INTEGER(JitELTHookEnabled, W("JitELTHookEnabled"), 0) // On ARM, setting this will emit Enter/Leave/TailCall
diff --git a/src/jit/lclvars.cpp b/src/jit/lclvars.cpp
index 31e982b78d..e0ebc6a6dd 100644
--- a/src/jit/lclvars.cpp
+++ b/src/jit/lclvars.cpp
@@ -1448,13 +1448,13 @@ void Compiler::lvaCanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd,
#if 1 // TODO-Cleanup: Consider removing this entire #if block in the future
- // This method has two callers. The one in Importer.cpp passes sortFields == false
- // and the other passes sortFields == true.
- // This is a workaround that leaves the inlining behavior the same as before while still
- // performing extra struct promotions when compiling the method.
- //
- // The x86 legacy back-end can't handle the more general RyuJIT struct promotion (notably structs
- // with holes), in genPushArgList(), so in that case always check for custom layout.
+// This method has two callers. The one in Importer.cpp passes sortFields == false
+// and the other passes sortFields == true.
+// This is a workaround that leaves the inlining behavior the same as before while still
+// performing extra struct promotions when compiling the method.
+//
+// The x86 legacy back-end can't handle the more general RyuJIT struct promotion (notably structs
+// with holes), in genPushArgList(), so in that case always check for custom layout.
#if FEATURE_FIXED_OUT_ARGS || !defined(LEGACY_BACKEND)
if (!sortFields) // the condition "!sortFields" really means "we are inlining"
#endif
diff --git a/src/jit/lir.cpp b/src/jit/lir.cpp
index 22a2f1c00d..bc9111eb32 100644
--- a/src/jit/lir.cpp
+++ b/src/jit/lir.cpp
@@ -679,7 +679,7 @@ void LIR::Range::FinishInsertBefore(GenTree* insertionPoint, GenTree* first, Gen
assert(m_lastNode != nullptr);
assert(m_lastNode->gtNext == nullptr);
m_lastNode->gtNext = first;
- first->gtPrev = m_lastNode;
+ first->gtPrev = m_lastNode;
}
m_lastNode = last;
}
@@ -867,7 +867,7 @@ void LIR::Range::FinishInsertAfter(GenTree* insertionPoint, GenTree* first, GenT
assert(m_firstNode != nullptr);
assert(m_firstNode->gtPrev == nullptr);
m_firstNode->gtPrev = last;
- last->gtNext = m_firstNode;
+ last->gtNext = m_firstNode;
}
m_firstNode = first;
}
@@ -1158,7 +1158,6 @@ void LIR::Range::Delete(Compiler* compiler, BasicBlock* block, ReadOnlyRange&& r
Delete(compiler, block, range.m_firstNode, range.m_lastNode);
}
-
//------------------------------------------------------------------------
// LIR::Range::TryGetUse: Try to find the use for a given node.
//
@@ -1617,22 +1616,21 @@ void LIR::InsertBeforeTerminator(BasicBlock* block, LIR::Range&& range)
#if DEBUG
switch (block->bbJumpKind)
{
- case BBJ_COND:
- assert(insertionPoint->OperIsConditionalJump());
- break;
+ case BBJ_COND:
+ assert(insertionPoint->OperIsConditionalJump());
+ break;
- case BBJ_SWITCH:
- assert((insertionPoint->OperGet() == GT_SWITCH) || (insertionPoint->OperGet() == GT_SWITCH_TABLE));
- break;
+ case BBJ_SWITCH:
+ assert((insertionPoint->OperGet() == GT_SWITCH) || (insertionPoint->OperGet() == GT_SWITCH_TABLE));
+ break;
- case BBJ_RETURN:
- assert((insertionPoint->OperGet() == GT_RETURN) ||
- (insertionPoint->OperGet() == GT_JMP) ||
- (insertionPoint->OperGet() == GT_CALL));
- break;
+ case BBJ_RETURN:
+ assert((insertionPoint->OperGet() == GT_RETURN) || (insertionPoint->OperGet() == GT_JMP) ||
+ (insertionPoint->OperGet() == GT_CALL));
+ break;
- default:
- unreached();
+ default:
+ unreached();
}
#endif
}
diff --git a/src/jit/liveness.cpp b/src/jit/liveness.cpp
index a8c674f51d..67f1ba21f6 100644
--- a/src/jit/liveness.cpp
+++ b/src/jit/liveness.cpp
@@ -1113,7 +1113,7 @@ void Compiler::fgExtendDbgLifetimes()
// Create initialization node
if (!block->IsLIR())
{
- GenTree* varNode = gtNewLclvNode(varNum, type);
+ GenTree* varNode = gtNewLclvNode(varNum, type);
GenTree* initNode = gtNewAssignNode(varNode, zero);
// Create a statement for the initializer, sequence it, and append it to the current BB.
@@ -1124,7 +1124,8 @@ void Compiler::fgExtendDbgLifetimes()
}
else
{
- GenTree* store = new (this, GT_STORE_LCL_VAR) GenTreeLclVar(GT_STORE_LCL_VAR, type, varNum, BAD_IL_OFFSET);
+ GenTree* store =
+ new (this, GT_STORE_LCL_VAR) GenTreeLclVar(GT_STORE_LCL_VAR, type, varNum, BAD_IL_OFFSET);
store->gtOp.gtOp1 = zero;
store->gtFlags |= (GTF_VAR_DEF | GTF_ASG);
diff --git a/src/jit/lower.cpp b/src/jit/lower.cpp
index f21073d287..1b21855c33 100644
--- a/src/jit/lower.cpp
+++ b/src/jit/lower.cpp
@@ -238,7 +238,7 @@ GenTree* Lowering::LowerNode(GenTree* node)
#if FEATURE_MULTIREG_RET
GenTree* src = node->gtGetOp1();
assert((src->OperGet() == GT_CALL) && src->AsCall()->HasMultiRegRetVal());
-#else // !FEATURE_MULTIREG_RET
+#else // !FEATURE_MULTIREG_RET
assert(!"Unexpected struct local store in Lowering");
#endif // !FEATURE_MULTIREG_RET
}
@@ -917,19 +917,17 @@ GenTreePtr Lowering::NewPutArg(GenTreeCall* call, GenTreePtr arg, fgArgTabEntryP
// instead of in out-going arg area slot.
PUT_STRUCT_ARG_STK_ONLY(assert(info->isStruct == varTypeIsStruct(type))); // Make sure state is
- // correct
+ // correct
#if FEATURE_FASTTAILCALL
putArg = new (comp, GT_PUTARG_STK)
- GenTreePutArgStk(GT_PUTARG_STK, type, arg,
- info->slotNum PUT_STRUCT_ARG_STK_ONLY_ARG(info->numSlots)
- PUT_STRUCT_ARG_STK_ONLY_ARG(info->isStruct),
+ GenTreePutArgStk(GT_PUTARG_STK, type, arg, info->slotNum PUT_STRUCT_ARG_STK_ONLY_ARG(info->numSlots)
+ PUT_STRUCT_ARG_STK_ONLY_ARG(info->isStruct),
call->IsFastTailCall() DEBUGARG(call));
#else
putArg = new (comp, GT_PUTARG_STK)
- GenTreePutArgStk(GT_PUTARG_STK, type, arg,
- info->slotNum PUT_STRUCT_ARG_STK_ONLY_ARG(info->numSlots)
- PUT_STRUCT_ARG_STK_ONLY_ARG(info->isStruct) DEBUGARG(call));
+ GenTreePutArgStk(GT_PUTARG_STK, type, arg, info->slotNum PUT_STRUCT_ARG_STK_ONLY_ARG(info->numSlots)
+ PUT_STRUCT_ARG_STK_ONLY_ARG(info->isStruct) DEBUGARG(call));
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
@@ -1730,8 +1728,8 @@ GenTree* Lowering::LowerTailCallViaHelper(GenTreeCall* call, GenTree* callTarget
assert(!comp->compLocallocUsed); // tail call from methods that also do localloc
#ifdef _TARGET_AMD64_
- assert(!comp->getNeedsGSSecurityCookie()); // jit64 compat: tail calls from methods that need GS check
-#endif // _TARGET_AMD64_
+ assert(!comp->getNeedsGSSecurityCookie()); // jit64 compat: tail calls from methods that need GS check
+#endif // _TARGET_AMD64_
// We expect to see a call that meets the following conditions
assert(call->IsTailCallViaHelper());
@@ -1950,7 +1948,7 @@ void Lowering::LowerCompare(GenTree* cmp)
}
LIR::Use cmpUse;
-
+
if (!BlockRange().TryGetUse(cmp, &cmpUse) || cmpUse.User()->OperGet() != GT_JTRUE)
{
return;
@@ -1967,7 +1965,7 @@ void Lowering::LowerCompare(GenTree* cmp)
{
loSrc1.ReplaceWithLclVar(comp, weight);
}
-
+
if (loSrc2.Def()->OperGet() != GT_CNS_INT && loSrc2.Def()->OperGet() != GT_LCL_VAR)
{
loSrc2.ReplaceWithLclVar(comp, weight);
@@ -2007,8 +2005,8 @@ void Lowering::LowerCompare(GenTree* cmp)
BlockRange().Remove(loSrc1.Def());
BlockRange().Remove(loSrc2.Def());
- GenTree* loCmp = comp->gtNewOperNode(cmp->OperGet(), TYP_INT, loSrc1.Def(), loSrc2.Def());
- loCmp->gtFlags = cmp->gtFlags;
+ GenTree* loCmp = comp->gtNewOperNode(cmp->OperGet(), TYP_INT, loSrc1.Def(), loSrc2.Def());
+ loCmp->gtFlags = cmp->gtFlags;
GenTree* loJtrue = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, loCmp);
LIR::AsRange(newBlock).InsertAfter(nullptr, loSrc1.Def(), loSrc2.Def(), loCmp, loJtrue);
@@ -2065,31 +2063,31 @@ void Lowering::LowerCompare(GenTree* cmp)
genTreeOps hiCmpOper;
genTreeOps loCmpOper;
-
+
switch (cmp->OperGet())
{
- case GT_LT:
- cmp->gtOper = GT_GT;
- hiCmpOper = GT_LT;
- loCmpOper = GT_LT;
- break;
- case GT_LE:
- cmp->gtOper = GT_GT;
- hiCmpOper = GT_LT;
- loCmpOper = GT_LE;
- break;
- case GT_GT:
- cmp->gtOper = GT_LT;
- hiCmpOper = GT_GT;
- loCmpOper = GT_GT;
- break;
- case GT_GE:
- cmp->gtOper = GT_LT;
- hiCmpOper = GT_GT;
- loCmpOper = GT_GE;
- break;
- default:
- unreached();
+ case GT_LT:
+ cmp->gtOper = GT_GT;
+ hiCmpOper = GT_LT;
+ loCmpOper = GT_LT;
+ break;
+ case GT_LE:
+ cmp->gtOper = GT_GT;
+ hiCmpOper = GT_LT;
+ loCmpOper = GT_LE;
+ break;
+ case GT_GT:
+ cmp->gtOper = GT_LT;
+ hiCmpOper = GT_GT;
+ loCmpOper = GT_GT;
+ break;
+ case GT_GE:
+ cmp->gtOper = GT_LT;
+ hiCmpOper = GT_GT;
+ loCmpOper = GT_GE;
+ break;
+ default:
+ unreached();
}
BasicBlock* newBlock2 = comp->fgSplitBlockAtEnd(newBlock);
@@ -2100,8 +2098,8 @@ void Lowering::LowerCompare(GenTree* cmp)
BlockRange().Remove(loSrc1.Def());
BlockRange().Remove(loSrc2.Def());
- GenTree* loCmp = comp->gtNewOperNode(loCmpOper, TYP_INT, loSrc1.Def(), loSrc2.Def());
- loCmp->gtFlags = cmp->gtFlags | GTF_UNSIGNED;
+ GenTree* loCmp = comp->gtNewOperNode(loCmpOper, TYP_INT, loSrc1.Def(), loSrc2.Def());
+ loCmp->gtFlags = cmp->gtFlags | GTF_UNSIGNED;
GenTree* loJtrue = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, loCmp);
LIR::AsRange(newBlock2).InsertAfter(nullptr, loSrc1.Def(), loSrc2.Def(), loCmp, loJtrue);
@@ -3195,7 +3193,7 @@ GenTree* Lowering::LowerVirtualStubCall(GenTreeCall* call)
// So we don't use a register.
#ifndef _TARGET_X86_
// on x64 we must materialize the target using specific registers.
- addr->gtRegNum = REG_VIRTUAL_STUB_PARAM;
+ addr->gtRegNum = REG_VIRTUAL_STUB_PARAM;
indir->gtRegNum = REG_JUMP_THUNK_PARAM;
indir->gtFlags |= GTF_IND_VSD_TGT;
@@ -3479,12 +3477,12 @@ void Lowering::LowerUnsignedDivOrMod(GenTree* node)
{
assert((node->OperGet() == GT_UDIV) || (node->OperGet() == GT_UMOD));
- GenTree* divisor = node->gtGetOp2();
+ GenTree* divisor = node->gtGetOp2();
GenTree* dividend = node->gtGetOp1();
if (divisor->IsCnsIntOrI()
#ifdef _TARGET_X86_
- && (dividend->OperGet() != GT_LONG)
+ && (dividend->OperGet() != GT_LONG)
#endif
)
{
@@ -4054,16 +4052,16 @@ void Lowering::CheckCallArg(GenTree* arg)
#endif
case GT_FIELD_LIST:
- {
- GenTreeFieldList* list = arg->AsFieldList();
- assert(list->IsFieldListHead());
+ {
+ GenTreeFieldList* list = arg->AsFieldList();
+ assert(list->IsFieldListHead());
- for (; list != nullptr; list = list->Rest())
- {
- assert(list->Current()->OperIsPutArg());
- }
+ for (; list != nullptr; list = list->Rest())
+ {
+ assert(list->Current()->OperIsPutArg());
}
- break;
+ }
+ break;
default:
assert(arg->OperIsPutArg());
diff --git a/src/jit/lowerarm64.cpp b/src/jit/lowerarm64.cpp
index 7c6164f963..c16da58e0d 100644
--- a/src/jit/lowerarm64.cpp
+++ b/src/jit/lowerarm64.cpp
@@ -1306,9 +1306,9 @@ void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
{
// CopyObj or CopyBlk
// Sources are src and dest and size if not constant.
- unsigned size = blkNode->gtBlkSize;
- GenTreePtr source = blkNode->Data();
- GenTree* srcAddr = nullptr;
+ unsigned size = blkNode->gtBlkSize;
+ GenTreePtr source = blkNode->Data();
+ GenTree* srcAddr = nullptr;
if (source->gtOper == GT_IND)
{
diff --git a/src/jit/lowerxarch.cpp b/src/jit/lowerxarch.cpp
index 5d6fc0a603..fb459e57fd 100644
--- a/src/jit/lowerxarch.cpp
+++ b/src/jit/lowerxarch.cpp
@@ -1048,17 +1048,17 @@ void Lowering::TreeNodeInfoInitShiftRotate(GenTree* tree)
if (tree->OperGet() == GT_LSH_HI)
{
- GenTreePtr sourceLo = source->gtOp.gtOp1;
+ GenTreePtr sourceLo = source->gtOp.gtOp1;
sourceLo->gtLsraInfo.isDelayFree = true;
}
else
{
- GenTreePtr sourceHi = source->gtOp.gtOp2;
+ GenTreePtr sourceHi = source->gtOp.gtOp2;
sourceHi->gtLsraInfo.isDelayFree = true;
}
source->gtLsraInfo.hasDelayFreeSrc = true;
- info->hasDelayFreeSrc = true;
+ info->hasDelayFreeSrc = true;
}
#endif
@@ -1160,7 +1160,7 @@ void Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
}
else
#endif // _TARGET_X86_
- if (ctrlExpr->isIndir())
+ if (ctrlExpr->isIndir())
{
MakeSrcContained(call, ctrlExpr);
}
@@ -1333,7 +1333,7 @@ void Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
// There could be up to 2 PUTARG_REGs in the list
GenTreeFieldList* fieldListPtr = argNode->AsFieldList();
- unsigned iterationNum = 0;
+ unsigned iterationNum = 0;
for (; fieldListPtr; fieldListPtr = fieldListPtr->Rest())
{
GenTreePtr putArgRegNode = fieldListPtr->Current();
@@ -1911,7 +1911,7 @@ void Lowering::TreeNodeInfoInitPutArgStk(GenTree* tree)
#ifdef _TARGET_X86_
if (tree->gtOp.gtOp1->gtOper == GT_FIELD_LIST)
{
- GenTreeFieldList* fieldListPtr = tree->gtOp.gtOp1->AsFieldList();
+ GenTreeFieldList* fieldListPtr = tree->gtOp.gtOp1->AsFieldList();
for (; fieldListPtr; fieldListPtr = fieldListPtr->Rest())
{
GenTree* fieldNode = fieldListPtr->Current();
@@ -2005,7 +2005,7 @@ void Lowering::TreeNodeInfoInitPutArgStk(GenTree* tree)
#ifdef _TARGET_X86_
if (size >= 8)
-#else // !_TARGET_X86_
+#else // !_TARGET_X86_
if (size >= XMM_REGSIZE_BYTES)
#endif // !_TARGET_X86_
{
@@ -2289,7 +2289,7 @@ void Lowering::TreeNodeInfoInitModDiv(GenTree* tree)
// To avoid reg move would like to have op1's low part in RAX and high part in RDX.
GenTree* loVal = op1->gtGetOp1();
GenTree* hiVal = op1->gtGetOp2();
-
+
// Src count is actually 3, so increment.
assert(op2->IsCnsIntOrI());
info->srcCount++;
@@ -2884,8 +2884,8 @@ void Lowering::SetIndirAddrOpCounts(GenTreePtr indirTree)
assert(base != addr);
m_lsra->clearOperandCounts(addr);
- const bool hasBase = base != nullptr;
- const bool hasIndex = index != nullptr;
+ const bool hasBase = base != nullptr;
+ const bool hasIndex = index != nullptr;
assert(hasBase || hasIndex); // At least one of a base or an index must be present.
// If the addressing mode has both a base and an index, bump its source count by one. If it only has one or the
@@ -2905,7 +2905,7 @@ void Lowering::SetIndirAddrOpCounts(GenTreePtr indirTree)
bool foundBase = !hasBase;
bool foundIndex = !hasIndex;
- for (GenTree* child = addr, *nextChild = nullptr; child != nullptr && !child->OperIsLeaf(); child = nextChild)
+ for (GenTree *child = addr, *nextChild = nullptr; child != nullptr && !child->OperIsLeaf(); child = nextChild)
{
nextChild = nullptr;
GenTree* op1 = child->gtOp.gtOp1;
@@ -3330,8 +3330,9 @@ void Lowering::TreeNodeInfoInitCmp(GenTreePtr tree)
#ifdef DEBUG
if (comp->verbose)
{
- printf("TreeNodeInfoInitCmp: Removing a GT_CAST to TYP_UBYTE and changing castOp1->gtType to "
- "TYP_UBYTE\n");
+ printf(
+ "TreeNodeInfoInitCmp: Removing a GT_CAST to TYP_UBYTE and changing castOp1->gtType to "
+ "TYP_UBYTE\n");
comp->gtDispTreeRange(BlockRange(), tree);
}
#endif
@@ -3358,9 +3359,9 @@ void Lowering::TreeNodeInfoInitCmp(GenTreePtr tree)
}
else if (op1->IsCnsIntOrI())
{
- // TODO-CQ: We should be able to support swapping op1 and op2 to generate cmp reg, imm,
- // but there is currently an assert in CodeGen::genCompareInt().
- // https://github.com/dotnet/coreclr/issues/7270
+ // TODO-CQ: We should be able to support swapping op1 and op2 to generate cmp reg, imm,
+ // but there is currently an assert in CodeGen::genCompareInt().
+ // https://github.com/dotnet/coreclr/issues/7270
SetRegOptional(op2);
}
else
@@ -3893,14 +3894,14 @@ void Lowering::SetMulOpCounts(GenTreePtr tree)
GenTreeIntConCommon* imm = nullptr;
GenTreePtr other = nullptr;
- // There are three forms of x86 multiply:
- // one-op form: RDX:RAX = RAX * r/m
- // two-op form: reg *= r/m
- // three-op form: reg = r/m * imm
+// There are three forms of x86 multiply:
+// one-op form: RDX:RAX = RAX * r/m
+// two-op form: reg *= r/m
+// three-op form: reg = r/m * imm
- // This special widening 32x32->64 MUL is not used on x64
+// This special widening 32x32->64 MUL is not used on x64
#if defined(_TARGET_X86_)
- if(tree->OperGet() != GT_MUL_LONG)
+ if (tree->OperGet() != GT_MUL_LONG)
#endif
{
assert((tree->gtFlags & GTF_MUL_64RSLT) == 0);
@@ -3924,9 +3925,9 @@ void Lowering::SetMulOpCounts(GenTreePtr tree)
}
else if (tree->gtOper == GT_MULHI
#if defined(_TARGET_X86_)
- || tree->OperGet() == GT_MUL_LONG
+ || tree->OperGet() == GT_MUL_LONG
#endif
- )
+ )
{
// have to use the encoding:RDX:RAX = RAX * rm
info->setDstCandidates(m_lsra, RBM_RAX);
diff --git a/src/jit/lsra.cpp b/src/jit/lsra.cpp
index fa774b4196..3b477ca12d 100644
--- a/src/jit/lsra.cpp
+++ b/src/jit/lsra.cpp
@@ -358,12 +358,12 @@ RegRecord* LinearScan::getRegisterRecord(regNumber regNum)
//----------------------------------------------------------------------------
// getConstrainedRegMask: Returns new regMask which is the intersection of
-// regMaskActual and regMaskConstraint if the new regMask has at least
+// regMaskActual and regMaskConstraint if the new regMask has at least
// minRegCount registers, otherwise returns regMaskActual.
//
// Arguments:
// regMaskActual - regMask that needs to be constrained
-// regMaskConstraint - regMask constraint that needs to be
+// regMaskConstraint - regMask constraint that needs to be
// applied to regMaskActual
// minRegCount - Minimum number of regs that should be
// be present in new regMask.
@@ -371,9 +371,7 @@ RegRecord* LinearScan::getRegisterRecord(regNumber regNum)
// Return Value:
// New regMask that has minRegCount registers after instersection.
// Otherwise returns regMaskActual.
-regMaskTP LinearScan::getConstrainedRegMask(regMaskTP regMaskActual,
- regMaskTP regMaskConstraint,
- unsigned minRegCount)
+regMaskTP LinearScan::getConstrainedRegMask(regMaskTP regMaskActual, regMaskTP regMaskConstraint, unsigned minRegCount)
{
regMaskTP newMask = regMaskActual & regMaskConstraint;
if (genCountBits(newMask) >= minRegCount)
@@ -402,42 +400,33 @@ regMaskTP LinearScan::stressLimitRegs(RefPosition* refPosition, regMaskTP mask)
{
if (getStressLimitRegs() != LSRA_LIMIT_NONE)
{
- // The refPosition could be null, for example when called
+ // The refPosition could be null, for example when called
// by getTempRegForResolution().
- int minRegCount = (refPosition != nullptr) ?
- refPosition->minRegCandidateCount : 1;
+ int minRegCount = (refPosition != nullptr) ? refPosition->minRegCandidateCount : 1;
switch (getStressLimitRegs())
{
case LSRA_LIMIT_CALLEE:
if (!compiler->opts.compDbgEnC)
{
- mask = getConstrainedRegMask(mask,
- RBM_CALLEE_SAVED,
- minRegCount);
+ mask = getConstrainedRegMask(mask, RBM_CALLEE_SAVED, minRegCount);
}
break;
case LSRA_LIMIT_CALLER:
- {
- mask = getConstrainedRegMask(mask,
- RBM_CALLEE_TRASH,
- minRegCount);
- }
- break;
+ {
+ mask = getConstrainedRegMask(mask, RBM_CALLEE_TRASH, minRegCount);
+ }
+ break;
case LSRA_LIMIT_SMALL_SET:
if ((mask & LsraLimitSmallIntSet) != RBM_NONE)
{
- mask = getConstrainedRegMask(mask,
- LsraLimitSmallIntSet,
- minRegCount);
+ mask = getConstrainedRegMask(mask, LsraLimitSmallIntSet, minRegCount);
}
else if ((mask & LsraLimitSmallFPSet) != RBM_NONE)
{
- mask = getConstrainedRegMask(mask,
- LsraLimitSmallFPSet,
- minRegCount);
+ mask = getConstrainedRegMask(mask, LsraLimitSmallFPSet, minRegCount);
}
break;
@@ -709,9 +698,8 @@ void LinearScan::applyCalleeSaveHeuristics(RefPosition* rp)
regMaskTP calleeSaveMask = calleeSaveRegs(getRegisterType(theInterval, rp));
if (doReverseCallerCallee())
{
- rp->registerAssignment = getConstrainedRegMask(rp->registerAssignment,
- calleeSaveMask,
- rp->minRegCandidateCount);
+ rp->registerAssignment =
+ getConstrainedRegMask(rp->registerAssignment, calleeSaveMask, rp->minRegCandidateCount);
}
else
#endif // DEBUG
@@ -2826,30 +2814,23 @@ bool LinearScan::buildKillPositionsForNode(GenTree* tree, LsraLocation currentLo
//----------------------------------------------------------------------------
// defineNewInternalTemp: Defines a ref position for an internal temp.
-//
+//
// Arguments:
// tree - Gentree node requiring an internal register
// regType - Register type
// currentLoc - Location of the temp Def position
// regMask - register mask of candidates for temp
// minRegCandidateCount - Minimum registers to be ensured in candidate
-// set under LSRA stress mode. This is a
+// set under LSRA stress mode. This is a
// DEBUG only arg.
RefPosition* LinearScan::defineNewInternalTemp(GenTree* tree,
RegisterType regType,
LsraLocation currentLoc,
- regMaskTP regMask
- DEBUGARG(unsigned minRegCandidateCount))
+ regMaskTP regMask DEBUGARG(unsigned minRegCandidateCount))
{
Interval* current = newInterval(regType);
current->isInternal = true;
- return newRefPosition(current,
- currentLoc,
- RefTypeDef,
- tree,
- regMask,
- 0
- DEBUG_ARG(minRegCandidateCount));
+ return newRefPosition(current, currentLoc, RefTypeDef, tree, regMask, 0 DEBUG_ARG(minRegCandidateCount));
}
//------------------------------------------------------------------------
@@ -2894,16 +2875,16 @@ int LinearScan::buildInternalRegisterDefsForNode(GenTree* tree,
internalIntCands = genFindLowestBit(internalIntCands);
internalCands &= ~internalIntCands;
}
- temps[count] = defineNewInternalTemp(tree, IntRegisterType, currentLoc,
- internalIntCands DEBUG_ARG(minRegCandidateCount));
+ temps[count] =
+ defineNewInternalTemp(tree, IntRegisterType, currentLoc, internalIntCands DEBUG_ARG(minRegCandidateCount));
}
int internalFloatCount = tree->gtLsraInfo.internalFloatCount;
for (int i = 0; i < internalFloatCount; i++)
{
regMaskTP internalFPCands = (internalCands & internalFloatRegCandidates());
- temps[count++] = defineNewInternalTemp(tree, FloatRegisterType, currentLoc,
- internalFPCands DEBUG_ARG(minRegCandidateCount));
+ temps[count++] =
+ defineNewInternalTemp(tree, FloatRegisterType, currentLoc, internalFPCands DEBUG_ARG(minRegCandidateCount));
}
noway_assert(count < MaxInternalRegisters);
@@ -2918,7 +2899,7 @@ int LinearScan::buildInternalRegisterDefsForNode(GenTree* tree,
// Arguments:
// tree - Gentree node that needs internal registers
// currentLoc - Location at which Use positions need to be defined
-// defs - int array containing Def positions of internal
+// defs - int array containing Def positions of internal
// registers.
// total - Total number of Def positions in 'defs' array.
// minRegCandidateCount - Minimum registers to be ensured in candidate
@@ -2930,8 +2911,7 @@ int LinearScan::buildInternalRegisterDefsForNode(GenTree* tree,
void LinearScan::buildInternalRegisterUsesForNode(GenTree* tree,
LsraLocation currentLoc,
RefPosition* defs[],
- int total
- DEBUGARG(unsigned minRegCandidateCount))
+ int total DEBUGARG(unsigned minRegCandidateCount))
{
assert(total < MaxInternalRegisters);
@@ -2948,14 +2928,9 @@ void LinearScan::buildInternalRegisterUsesForNode(GenTree* tree,
}
else
{
- RefPosition* newest = newRefPosition(defs[i]->getInterval(),
- currentLoc,
- RefTypeUse,
- tree,
- mask,
- 0
- DEBUG_ARG(minRegCandidateCount));
- newest->lastUse = true;
+ RefPosition* newest = newRefPosition(defs[i]->getInterval(), currentLoc, RefTypeUse, tree, mask,
+ 0 DEBUG_ARG(minRegCandidateCount));
+ newest->lastUse = true;
}
}
}
@@ -3525,7 +3500,7 @@ void LinearScan::buildRefPositionsForNode(GenTree* tree,
{
// Get the location info for the register defined by the first operand.
LocationInfoList operandDefs;
- bool found = operandToLocationInfoMap.TryGetValue(*(tree->OperandsBegin()), &operandDefs);
+ bool found = operandToLocationInfoMap.TryGetValue(*(tree->OperandsBegin()), &operandDefs);
assert(found);
// Since we only expect to consume one register, we should only have a single register to
@@ -3646,18 +3621,12 @@ void LinearScan::buildRefPositionsForNode(GenTree* tree,
// consume + produce + internalCount. This is the minimum
// set of registers that needs to be ensured in candidate
// set of ref positions created.
- unsigned minRegCount = consume +
- produce +
- info.internalIntCount +
- info.internalFloatCount;
-#endif //DEBUG
+ unsigned minRegCount = consume + produce + info.internalIntCount + info.internalFloatCount;
+#endif // DEBUG
// make intervals for all the 'internal' register requirements for this node
// where internal means additional registers required temporarily
- int internalCount = buildInternalRegisterDefsForNode(tree,
- currentLoc,
- internalRefs
- DEBUG_ARG(minRegCount));
+ int internalCount = buildInternalRegisterDefsForNode(tree, currentLoc, internalRefs DEBUG_ARG(minRegCount));
// pop all ref'd tree temps
GenTreeOperandIterator iterator = tree->OperandsBegin();
@@ -3775,9 +3744,9 @@ void LinearScan::buildRefPositionsForNode(GenTree* tree,
// of GT_DIV node.
//
// Assume further JitStressRegs=2, which would constrain
- // candidates to callee trashable regs { eax, ecx, edx } on
+ // candidates to callee trashable regs { eax, ecx, edx } on
// use positions of v01 and v02. LSRA allocates ecx for v01.
- // Use position of v02 cannot be allocated a regs since it
+ // Use position of v02 cannot be allocated a regs since it
// is marked delay-reg free and {eax,edx} are getting killed
// before the def of GT_DIV. For this reason, minRegCount
// for Use position of v02 also needs to take into account
@@ -3790,10 +3759,10 @@ void LinearScan::buildRefPositionsForNode(GenTree* tree,
{
minRegCountForUsePos += genCountBits(killMask);
}
- }
+ }
#endif // DEBUG
- RefPosition* pos;
+ RefPosition* pos;
if ((candidates & allRegs(i->registerType)) == 0)
{
// This should only occur where we've got a type mismatch due to SIMD
@@ -3806,13 +3775,13 @@ void LinearScan::buildRefPositionsForNode(GenTree* tree,
regNumber physicalReg = genRegNumFromMask(fixedAssignment);
RefPosition* pos = newRefPosition(physicalReg, currentLoc, RefTypeFixedReg, nullptr, fixedAssignment);
}
- pos = newRefPosition(i, currentLoc, RefTypeUse, useNode, allRegs(i->registerType),
- multiRegIdx DEBUG_ARG(minRegCountForUsePos));
+ pos = newRefPosition(i, currentLoc, RefTypeUse, useNode, allRegs(i->registerType),
+ multiRegIdx DEBUG_ARG(minRegCountForUsePos));
pos->registerAssignment = candidates;
}
else
{
- pos = newRefPosition(i, currentLoc, RefTypeUse, useNode, candidates,
+ pos = newRefPosition(i, currentLoc, RefTypeUse, useNode, candidates,
multiRegIdx DEBUG_ARG(minRegCountForUsePos));
}
@@ -3839,8 +3808,7 @@ void LinearScan::buildRefPositionsForNode(GenTree* tree,
listNodePool.ReturnNodes(operandDefs);
}
- buildInternalRegisterUsesForNode(tree, currentLoc, internalRefs,
- internalCount DEBUG_ARG(minRegCount));
+ buildInternalRegisterUsesForNode(tree, currentLoc, internalRefs, internalCount DEBUG_ARG(minRegCount));
RegisterType registerType = getDefType(tree);
regMaskTP candidates = getDefCandidates(tree);
@@ -3944,8 +3912,8 @@ void LinearScan::buildRefPositionsForNode(GenTree* tree,
locationInfoList.Append(listNodePool.GetNode(defLocation, interval, tree, (unsigned)i));
}
- RefPosition* pos = newRefPosition(interval, defLocation, defRefType, defNode,
- currCandidates, (unsigned)i DEBUG_ARG(minRegCount));
+ RefPosition* pos = newRefPosition(interval, defLocation, defRefType, defNode, currCandidates,
+ (unsigned)i DEBUG_ARG(minRegCount));
if (info.isLocalDefUse)
{
pos->isLocalDefUse = true;
@@ -3960,8 +3928,8 @@ void LinearScan::buildRefPositionsForNode(GenTree* tree,
buildUpperVectorRestoreRefPositions(tree, currentLoc, liveLargeVectors);
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
- bool isContainedNode =
- !noAdd && consume == 0 && produce == 0 && (tree->OperIsFieldListHead() || ((tree->TypeGet() != TYP_VOID) && !tree->OperIsStore()));
+ bool isContainedNode = !noAdd && consume == 0 && produce == 0 &&
+ (tree->OperIsFieldListHead() || ((tree->TypeGet() != TYP_VOID) && !tree->OperIsStore()));
if (isContainedNode)
{
// Contained nodes map to the concatenated lists of their operands.
@@ -5567,7 +5535,7 @@ regNumber LinearScan::allocateBusyReg(Interval* current, RefPosition* refPositio
// on x86 under LSRA stress.
if (!allocateIfProfitable)
{
- physRegNextLocation = MaxLocation;
+ physRegNextLocation = MaxLocation;
farthestRefPosWeight = BB_MAX_WEIGHT;
}
}
diff --git a/src/jit/lsra.h b/src/jit/lsra.h
index 54bee3024d..1b23e9a655 100644
--- a/src/jit/lsra.h
+++ b/src/jit/lsra.h
@@ -771,22 +771,19 @@ private:
regMaskTP getDefCandidates(GenTree* tree);
var_types getDefType(GenTree* tree);
- RefPosition* defineNewInternalTemp(GenTree* tree,
- RegisterType regType,
- LsraLocation currentLoc,
- regMaskTP regMask
- DEBUGARG(unsigned minRegCandidateCount));
-
- int buildInternalRegisterDefsForNode(GenTree* tree,
- LsraLocation currentLoc,
- RefPosition* defs[]
- DEBUGARG(unsigned minRegCandidateCount));
-
- void buildInternalRegisterUsesForNode(GenTree* tree,
- LsraLocation currentLoc,
- RefPosition* defs[],
- int total
- DEBUGARG(unsigned minRegCandidateCount));
+ RefPosition* defineNewInternalTemp(GenTree* tree,
+ RegisterType regType,
+ LsraLocation currentLoc,
+ regMaskTP regMask DEBUGARG(unsigned minRegCandidateCount));
+
+ int buildInternalRegisterDefsForNode(GenTree* tree,
+ LsraLocation currentLoc,
+ RefPosition* defs[] DEBUGARG(unsigned minRegCandidateCount));
+
+ void buildInternalRegisterUsesForNode(GenTree* tree,
+ LsraLocation currentLoc,
+ RefPosition* defs[],
+ int total DEBUGARG(unsigned minRegCandidateCount));
void resolveLocalRef(BasicBlock* block, GenTreePtr treeNode, RefPosition* currentRefPosition);
@@ -837,8 +834,7 @@ private:
RefType theRefType,
GenTree* theTreeNode,
regMaskTP mask,
- unsigned multiRegIdx = 0
- DEBUGARG(unsigned minRegCandidateCount = 1));
+ unsigned multiRegIdx = 0 DEBUGARG(unsigned minRegCandidateCount = 1));
RefPosition* newRefPosition(
regNumber reg, LsraLocation theLocation, RefType theRefType, GenTree* theTreeNode, regMaskTP mask);
@@ -950,11 +946,8 @@ private:
char* operandString,
unsigned operandStringLength);
void lsraDispNode(GenTreePtr tree, LsraTupleDumpMode mode, bool hasDest);
- void DumpOperandDefs(GenTree* operand,
- bool& first,
- LsraTupleDumpMode mode,
- char* operandString,
- const unsigned operandStringLength);
+ void DumpOperandDefs(
+ GenTree* operand, bool& first, LsraTupleDumpMode mode, char* operandString, const unsigned operandStringLength);
void TupleStyleDump(LsraTupleDumpMode mode);
bool dumpTerse;
@@ -1395,7 +1388,7 @@ public:
, isLocalDefUse(false)
, delayRegFree(false)
, outOfOrder(false)
-#ifdef DEBUG
+#ifdef DEBUG
, minRegCandidateCount(1)
, rpNum(0)
#endif
@@ -1572,13 +1565,13 @@ public:
#ifdef DEBUG
// Minimum number registers that needs to be ensured while
// constraining candidates for this ref position under
- // LSRA stress.
+ // LSRA stress.
unsigned minRegCandidateCount;
// The unique RefPosition number, equal to its index in the
// refPositions list. Only used for debugging dumps.
- unsigned rpNum;
-#endif // DEBUG
+ unsigned rpNum;
+#endif // DEBUG
bool isIntervalRef()
{
diff --git a/src/jit/morph.cpp b/src/jit/morph.cpp
index 42fbb0775c..d1a85a1c96 100644
--- a/src/jit/morph.cpp
+++ b/src/jit/morph.cpp
@@ -2014,7 +2014,7 @@ GenTreePtr Compiler::fgMakeTmpArgNode(
arg->gtFlags |= GTF_DONT_CSE;
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
// Can this type be passed in a single register?
// If so, the following call will return the corresponding primitive type.
// Otherwise, it will return TYP_UNKNOWN and we will pass by reference.
@@ -2759,18 +2759,18 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
// so we record the stack depth on the first morph call when reMorphing
// was false (via RecordStkLevel) and then retrieve that value here (via RetrieveStkLevel)
//
- unsigned callStkLevel = call->fgArgInfo->RetrieveStkLevel();
- if (call->gtCallLateArgs != nullptr)
- {
- fgPtrArgCntCur += callStkLevel;
- call->gtCallLateArgs = fgMorphTree(call->gtCallLateArgs)->AsArgList();
- flagsSummary |= call->gtCallLateArgs->gtFlags;
- fgPtrArgCntCur -= callStkLevel;
- }
- assert(call->fgArgInfo != nullptr);
- call->fgArgInfo->RemorphReset();
-
- numArgs = call->fgArgInfo->ArgCount();
+ unsigned callStkLevel = call->fgArgInfo->RetrieveStkLevel();
+ if (call->gtCallLateArgs != nullptr)
+ {
+ fgPtrArgCntCur += callStkLevel;
+ call->gtCallLateArgs = fgMorphTree(call->gtCallLateArgs)->AsArgList();
+ flagsSummary |= call->gtCallLateArgs->gtFlags;
+ fgPtrArgCntCur -= callStkLevel;
+ }
+ assert(call->fgArgInfo != nullptr);
+ call->fgArgInfo->RemorphReset();
+
+ numArgs = call->fgArgInfo->ArgCount();
}
else
{
@@ -2807,19 +2807,20 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
}
// The x86 shift helpers have custom calling conventions and expect the lo part of the long to be in EAX and the
// hi part to be in EDX. This sets the argument registers up correctly.
- else if (call->IsHelperCall(this, CORINFO_HELP_LLSH) || call->IsHelperCall(this, CORINFO_HELP_LRSH) || call->IsHelperCall(this, CORINFO_HELP_LRSZ))
+ else if (call->IsHelperCall(this, CORINFO_HELP_LLSH) || call->IsHelperCall(this, CORINFO_HELP_LRSH) ||
+ call->IsHelperCall(this, CORINFO_HELP_LRSZ))
{
GenTreeArgList* args = call->gtCallArgs;
- GenTree* arg1 = args->Current();
+ GenTree* arg1 = args->Current();
assert(arg1 != nullptr);
nonStandardArgs.Add(arg1, REG_LNGARG_LO);
- args = args->Rest();
+ args = args->Rest();
GenTree* arg2 = args->Current();
assert(arg2 != nullptr);
nonStandardArgs.Add(arg2, REG_LNGARG_HI);
}
-#else // !defined(_TARGET_X86_)
+#else // !defined(_TARGET_X86_)
// TODO-X86-CQ: Currently RyuJIT/x86 passes args on the stack, so this is not needed.
// If/when we change that, the following code needs to be changed to correctly support the (TBD) managed calling
// convention for x86/SSE.
@@ -3307,7 +3308,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
}
}
#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
- size = 1; // On AMD64, all primitives fit in a single (64-bit) 'slot'
+ size = 1; // On AMD64, all primitives fit in a single (64-bit) 'slot'
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
#elif defined(_TARGET_ARM64_)
if (isStructArg)
@@ -4134,7 +4135,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
}
#endif // !LEGACY_BACKEND
-#if defined (_TARGET_X86_) && !defined(LEGACY_BACKEND)
+#if defined(_TARGET_X86_) && !defined(LEGACY_BACKEND)
if (isStructArg)
{
GenTree* lclNode = fgIsIndirOfAddrOfLocal(argx);
@@ -4142,9 +4143,9 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
(lvaGetPromotionType(lclNode->AsLclVarCommon()->gtLclNum) == Compiler::PROMOTION_TYPE_INDEPENDENT))
{
// Make a GT_FIELD_LIST of the field lclVars.
- GenTreeLclVarCommon* lcl = lclNode->AsLclVarCommon();
- LclVarDsc* varDsc = &(lvaTable[lcl->gtLclNum]);
- GenTreeFieldList* fieldList = nullptr;
+ GenTreeLclVarCommon* lcl = lclNode->AsLclVarCommon();
+ LclVarDsc* varDsc = &(lvaTable[lcl->gtLclNum]);
+ GenTreeFieldList* fieldList = nullptr;
for (unsigned fieldLclNum = varDsc->lvFieldLclStart;
fieldLclNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++fieldLclNum)
{
@@ -4154,15 +4155,17 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
lcl->SetLclNum(fieldLclNum);
lcl->ChangeOper(GT_LCL_VAR);
lcl->gtType = fieldVarDsc->lvType;
- fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList(lcl, fieldVarDsc->lvFldOffset, fieldVarDsc->lvType, nullptr);
+ fieldList = new (this, GT_FIELD_LIST)
+ GenTreeFieldList(lcl, fieldVarDsc->lvFldOffset, fieldVarDsc->lvType, nullptr);
fgArgTabEntryPtr fp = Compiler::gtArgEntryByNode(call, argx);
fp->node = fieldList;
- args->gtOp.gtOp1 = fieldList;
+ args->gtOp.gtOp1 = fieldList;
}
else
{
GenTree* fieldLcl = gtNewLclvNode(fieldLclNum, fieldVarDsc->lvType);
- fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList(fieldLcl, fieldVarDsc->lvFldOffset, fieldVarDsc->lvType, fieldList);
+ fieldList = new (this, GT_FIELD_LIST)
+ GenTreeFieldList(fieldLcl, fieldVarDsc->lvFldOffset, fieldVarDsc->lvType, fieldList);
}
}
}
@@ -4291,9 +4294,9 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
// is added to make sure to call EvalArgsToTemp.
if (!reMorphing && (call->fgArgInfo->HasRegArgs()
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
- || hasStackArgCopy
+ || hasStackArgCopy
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
- ))
+ ))
{
// This is the first time that we morph this call AND it has register arguments.
// Follow into the code below and do the 'defer or eval to temp' analysis.
@@ -4442,8 +4445,8 @@ void Compiler::fgMorphSystemVStructArgs(GenTreeCall* call, bool hasStructArgumen
fgEntryPtr->structDesc.eightByteSizes[0]);
GenTreeFieldList* fieldList =
new (this, GT_FIELD_LIST) GenTreeFieldList(arg, 0, originalType, nullptr);
- fieldList->gtType = originalType; // Preserve the type. It is a special case.
- arg = fieldList;
+ fieldList->gtType = originalType; // Preserve the type. It is a special case.
+ arg = fieldList;
// Second eightbyte.
GenTreeLclFld* newLclField = new (this, GT_LCL_FLD)
@@ -4453,8 +4456,8 @@ void Compiler::fgMorphSystemVStructArgs(GenTreeCall* call, bool hasStructArgumen
lclCommon->gtLclNum, fgEntryPtr->structDesc.eightByteOffsets[1]);
fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList(newLclField, 0, originalType, fieldList);
- fieldList->gtType = originalType; // Preserve the type. It is a special case.
- newLclField->gtFieldSeq = FieldSeqStore::NotAField();
+ fieldList->gtType = originalType; // Preserve the type. It is a special case.
+ newLclField->gtFieldSeq = FieldSeqStore::NotAField();
}
else
{
@@ -4871,7 +4874,7 @@ GenTreePtr Compiler::fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPtr f
// with a FIELD_LIST(LCLVAR-LO, FIELD_LIST(LCLVAR-HI, nullptr))
//
newArg = new (this, GT_FIELD_LIST) GenTreeFieldList(loLclVar, 0, loType, nullptr);
- (void) new (this, GT_FIELD_LIST) GenTreeFieldList(hiLclVar, TARGET_POINTER_SIZE, hiType, newArg);
+ (void)new (this, GT_FIELD_LIST) GenTreeFieldList(hiLclVar, TARGET_POINTER_SIZE, hiType, newArg);
}
}
}
@@ -4941,11 +4944,11 @@ GenTreePtr Compiler::fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPtr f
// replace the existing LDOBJ(ADDR(LCLVAR))
// with a FIELD_LIST(LCLFLD-LO, FIELD_LIST(LCLFLD-HI, nullptr) ...)
//
- unsigned offset = 0;
+ unsigned offset = 0;
GenTreeFieldList* listEntry = nullptr;
for (unsigned inx = 0; inx < elemCount; inx++)
{
- elemSize = genTypeSize(type[inx]);
+ elemSize = genTypeSize(type[inx]);
GenTreePtr nextLclFld = gtNewLclFldNode(varNum, type[inx], offset);
listEntry = new (this, GT_FIELD_LIST) GenTreeFieldList(nextLclFld, offset, type[inx], listEntry);
if (newArg == nullptr)
@@ -4968,11 +4971,11 @@ GenTreePtr Compiler::fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPtr f
// with a FIELD_LIST(IND(EXPR), FIELD_LIST(IND(EXPR+8), nullptr) ...)
//
- unsigned offset = 0;
+ unsigned offset = 0;
GenTreeFieldList* listEntry = nullptr;
for (unsigned inx = 0; inx < elemCount; inx++)
{
- elemSize = genTypeSize(type[inx]);
+ elemSize = genTypeSize(type[inx]);
GenTreePtr curAddr = baseAddr;
if (offset != 0)
{
@@ -4985,7 +4988,7 @@ GenTreePtr Compiler::fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPtr f
curAddr = baseAddr;
}
GenTreePtr curItem = gtNewOperNode(GT_IND, type[inx], curAddr);
- listEntry = new (this, GT_FIELD_LIST) GenTreeFieldList(curItem, offset, type[inx], listEntry);
+ listEntry = new (this, GT_FIELD_LIST) GenTreeFieldList(curItem, offset, type[inx], listEntry);
if (newArg == nullptr)
{
newArg = listEntry;
@@ -5716,8 +5719,7 @@ GenTreePtr Compiler::fgMorphArrayIndex(GenTreePtr tree)
addr = gtNewOperNode(GT_ADD, TYP_BYREF, addr, cns);
#if SMALL_TREE_NODES
- assert((tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE) ||
- GenTree::s_gtNodeSizes[GT_IND] == TREE_NODE_SZ_SMALL);
+ assert((tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE) || GenTree::s_gtNodeSizes[GT_IND] == TREE_NODE_SZ_SMALL);
#endif
// Change the orginal GT_INDEX node into a GT_IND node
@@ -6248,7 +6250,9 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* mac)
GenTreePtr baseOffset = gtNewIconEmbHndNode(tree->gtField.gtFieldLookup.addr, nullptr, GTF_ICON_FIELD_HDL);
if (tree->gtField.gtFieldLookup.accessType == IAT_PVALUE)
+ {
baseOffset = gtNewOperNode(GT_IND, TYP_I_IMPL, baseOffset);
+ }
addr =
gtNewOperNode(GT_ADD, (var_types)(objRefType == TYP_I_IMPL ? TYP_I_IMPL : TYP_BYREF), addr, baseOffset);
diff --git a/src/jit/rationalize.cpp b/src/jit/rationalize.cpp
index 7e75354380..1a9c38aa5f 100644
--- a/src/jit/rationalize.cpp
+++ b/src/jit/rationalize.cpp
@@ -617,9 +617,8 @@ Compiler::fgWalkResult Rationalizer::RewriteNode(GenTree** useEdge, ArrayStack<G
// First, remove any preceeding list nodes, which are not otherwise visited by the tree walk.
//
// NOTE: GT_FIELD_LIST head nodes, and GT_LIST nodes used by phi nodes will in fact be visited.
- for (GenTree* prev = node->gtPrev;
- prev != nullptr && prev->OperIsAnyList() && !(prev->OperIsFieldListHead());
- prev = node->gtPrev)
+ for (GenTree* prev = node->gtPrev; prev != nullptr && prev->OperIsAnyList() && !(prev->OperIsFieldListHead());
+ prev = node->gtPrev)
{
BlockRange().Remove(prev);
}
diff --git a/src/jit/valuenum.h b/src/jit/valuenum.h
index 17dacfbb54..78adfd971e 100644
--- a/src/jit/valuenum.h
+++ b/src/jit/valuenum.h
@@ -450,7 +450,7 @@ public:
// Get a new, unique value number for an expression that we're not equating to some function,
// which is the value of a tree in the given block.
- ValueNum VNForExpr(BasicBlock *block, var_types typ = TYP_UNKNOWN);
+ ValueNum VNForExpr(BasicBlock* block, var_types typ = TYP_UNKNOWN);
// This controls extra tracing of the "evaluation" of "VNF_MapSelect" functions.
#define FEATURE_VN_TRACE_APPLY_SELECTORS 1
@@ -485,13 +485,11 @@ public:
ValueNumPair VNPairApplySelectors(ValueNumPair map, FieldSeqNode* fieldSeq, var_types indType);
- ValueNumPair VNPairApplySelectorsAssign(ValueNumPair map,
- FieldSeqNode* fieldSeq,
- ValueNumPair rhs,
- var_types indType,
- BasicBlock* block)
+ ValueNumPair VNPairApplySelectorsAssign(
+ ValueNumPair map, FieldSeqNode* fieldSeq, ValueNumPair rhs, var_types indType, BasicBlock* block)
{
- return ValueNumPair(VNApplySelectorsAssign(VNK_Liberal, map.GetLiberal(), fieldSeq, rhs.GetLiberal(), indType, block),
+ return ValueNumPair(VNApplySelectorsAssign(VNK_Liberal, map.GetLiberal(), fieldSeq, rhs.GetLiberal(), indType,
+ block),
VNApplySelectorsAssign(VNK_Conservative, map.GetConservative(), fieldSeq,
rhs.GetConservative(), indType, block));
}
@@ -883,9 +881,14 @@ private:
ChunkExtraAttribs m_attribs;
BasicBlock::loopNumber m_loopNum;
- // Initialize a chunk, starting at "*baseVN", for the given "typ", "attribs", and "loopNum" (using "alloc" for allocations).
+ // Initialize a chunk, starting at "*baseVN", for the given "typ", "attribs", and "loopNum" (using "alloc" for
+ // allocations).
// (Increments "*baseVN" by ChunkSize.)
- Chunk(IAllocator* alloc, ValueNum* baseVN, var_types typ, ChunkExtraAttribs attribs, BasicBlock::loopNumber loopNum);
+ Chunk(IAllocator* alloc,
+ ValueNum* baseVN,
+ var_types typ,
+ ChunkExtraAttribs attribs,
+ BasicBlock::loopNumber loopNum);
// Requires that "m_numUsed < ChunkSize." Returns the offset of the allocated VN within the chunk; the
// actual VN is this added to the "m_baseVN" of the chunk.