author    Andy Ayers <andya@microsoft.com>  2019-01-04 17:18:18 -0800
committer Andy Ayers <andya@microsoft.com>  2019-01-04 17:18:18 -0800
commit    4c68562cbd2f7a33bad60705db9b11e3121d40c2 (patch)
tree      6652d9a31af13772f9978b0c40388651cc45cd1e /src/jit
parent    df4a1854aa42c1e97874b00fe49339e216e30af9 (diff)
JIT: encapsulate general checks for optimization
Add methods that answer the general question of whether or not the jit is optimizing the code it produces. Use these to replace the composite checks for minopts and debug codegen (the two modes where the jit is not optimizing).
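The pattern, in miniature: both new predicates live on the options object, so every call site asks one named question instead of re-deriving "MinOpts() || compDbgCode" by hand. Below is a minimal standalone C++ sketch of that refactoring -- the Options struct, its fields, and main() are illustrative stand-ins for this page, not the actual CoreCLR types.

    // Sketch only: names mirror the commit, but this is not CoreCLR source.
    #include <cstdio>

    struct Options
    {
        bool compMinOpts = false; // stand-in for the jit's MinOpts state
        bool compDbgCode = false; // stand-in for the debuggable-codegen flag

        bool MinOpts() const
        {
            return compMinOpts;
        }

        // The two predicates the commit adds: one canonical definition of
        // "not optimizing", and its negation for the common positive check.
        bool OptimizationDisabled() const
        {
            return MinOpts() || compDbgCode;
        }
        bool OptimizationEnabled() const
        {
            return !OptimizationDisabled();
        }
    };

    int main()
    {
        Options opts;
        opts.compDbgCode = true;

        // Before: if (!opts.MinOpts() && !opts.compDbgCode) { ... }
        // After, the call site reads as the question it is asking:
        if (opts.OptimizationEnabled())
        {
            std::printf("optimizing\n");
        }
        else
        {
            std::printf("not optimizing\n"); // taken here: debug codegen
        }
        return 0;
    }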
Diffstat (limited to 'src/jit')
-rw-r--r--  src/jit/codegenxarch.cpp  |  2
-rw-r--r--  src/jit/compiler.cpp      | 14
-rw-r--r--  src/jit/compiler.h        | 10
-rw-r--r--  src/jit/compiler.hpp      |  4
-rw-r--r--  src/jit/flowgraph.cpp     | 10
-rw-r--r--  src/jit/gentree.cpp       |  4
-rw-r--r--  src/jit/importer.cpp      | 32
-rw-r--r--  src/jit/lclvars.cpp       |  6
-rw-r--r--  src/jit/liveness.cpp      |  2
-rw-r--r--  src/jit/lower.cpp         |  4
-rw-r--r--  src/jit/morph.cpp         | 13
-rw-r--r--  src/jit/optimizer.cpp     |  8
-rw-r--r--  src/jit/regalloc.cpp      |  2
-rw-r--r--  src/jit/regset.cpp        |  2
-rw-r--r--  src/jit/scopeinfo.cpp     |  2
-rw-r--r--  src/jit/simd.cpp          |  2
16 files changed, 61 insertions(+), 56 deletions(-)
diff --git a/src/jit/codegenxarch.cpp b/src/jit/codegenxarch.cpp
index cf9055c31d..5c4d7bd9cb 100644
--- a/src/jit/codegenxarch.cpp
+++ b/src/jit/codegenxarch.cpp
@@ -5713,7 +5713,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call)
// If there is nothing next, that means the result is thrown away, so this value is not live.
// However, for minopts or debuggable code, we keep it live to support managed return value debugging.
- if ((call->gtNext == nullptr) && !compiler->opts.MinOpts() && !compiler->opts.compDbgCode)
+ if ((call->gtNext == nullptr) && compiler->opts.OptimizationEnabled())
{
gcInfo.gcMarkRegSetNpt(RBM_INTRET);
}
diff --git a/src/jit/compiler.cpp b/src/jit/compiler.cpp
index 6813f6b787..40c31c6870 100644
--- a/src/jit/compiler.cpp
+++ b/src/jit/compiler.cpp
@@ -4164,7 +4164,7 @@ _SetMinOpts:
/* Control the optimizations */
- if (opts.MinOpts() || opts.compDbgCode)
+ if (opts.OptimizationDisabled())
{
opts.compFlags &= ~CLFLG_MAXOPT;
opts.compFlags |= CLFLG_MINOPT;
@@ -4175,7 +4175,7 @@ _SetMinOpts:
codeGen->setFramePointerRequired(false);
codeGen->setFrameRequired(false);
- if (opts.MinOpts() || opts.compDbgCode)
+ if (opts.OptimizationDisabled())
{
codeGen->setFrameRequired(true);
}
@@ -4205,7 +4205,7 @@ _SetMinOpts:
}
}
- info.compUnwrapContextful = !opts.MinOpts() && !opts.compDbgCode;
+ info.compUnwrapContextful = opts.OptimizationEnabled();
fgCanRelocateEHRegions = true;
}
@@ -4637,7 +4637,7 @@ void Compiler::compCompile(void** methodCodePtr, ULONG* methodCodeSize, JitFlags
#endif // FEATURE_EH_FUNCLETS
- if (!opts.MinOpts() && !opts.compDbgCode)
+ if (opts.OptimizationEnabled())
{
optOptimizeLayout();
EndPhase(PHASE_OPTIMIZE_LAYOUT);
@@ -4647,7 +4647,7 @@ void Compiler::compCompile(void** methodCodePtr, ULONG* methodCodeSize, JitFlags
EndPhase(PHASE_COMPUTE_REACHABILITY);
}
- if (!opts.MinOpts() && !opts.compDbgCode)
+ if (opts.OptimizationEnabled())
{
/* Perform loop inversion (i.e. transform "while" loops into
"repeat" loops) and discover and classify natural loops
@@ -4684,7 +4684,7 @@ void Compiler::compCompile(void** methodCodePtr, ULONG* methodCodeSize, JitFlags
//
assert(lvaLocalVarRefCounted());
- if (!opts.MinOpts() && !opts.compDbgCode)
+ if (opts.OptimizationEnabled())
{
/* Optimize boolean conditions */
@@ -4721,7 +4721,7 @@ void Compiler::compCompile(void** methodCodePtr, ULONG* methodCodeSize, JitFlags
#endif
// At this point we know if we are fully interruptible or not
- if (!opts.MinOpts() && !opts.compDbgCode)
+ if (opts.OptimizationEnabled())
{
bool doSsa = true;
bool doEarlyProp = true;
diff --git a/src/jit/compiler.h b/src/jit/compiler.h
index 5b04b687dd..69a7cc9ca7 100644
--- a/src/jit/compiler.h
+++ b/src/jit/compiler.h
@@ -8279,6 +8279,16 @@ public:
return compMinOptsIsSet;
}
#endif // !DEBUG
+
+ inline bool OptimizationDisabled()
+ {
+ return MinOpts() || compDbgCode;
+ }
+ inline bool OptimizationEnabled()
+ {
+ return !OptimizationDisabled();
+ }
+
inline void SetMinOpts(bool val)
{
assert(!compMinOptsIsUsed);
diff --git a/src/jit/compiler.hpp b/src/jit/compiler.hpp
index 3fceff729a..b15190983f 100644
--- a/src/jit/compiler.hpp
+++ b/src/jit/compiler.hpp
@@ -1681,7 +1681,7 @@ inline unsigned Compiler::lvaGrabTemp(bool shortLifetime DEBUGARG(const char* re
// this new local will be referenced.
if (lvaLocalVarRefCounted())
{
- if (opts.MinOpts() || opts.compDbgCode)
+ if (opts.OptimizationDisabled())
{
lvaTable[tempNum].lvImplicitlyReferenced = 1;
}
@@ -1818,7 +1818,7 @@ inline unsigned Compiler::lvaGrabTempWithImplicitUse(bool shortLifetime DEBUGARG
inline void LclVarDsc::incRefCnts(BasicBlock::weight_t weight, Compiler* comp, RefCountState state, bool propagate)
{
// In minopts and debug codegen, we don't maintain normal ref counts.
- if ((state == RCS_NORMAL) && (comp->opts.MinOpts() || comp->opts.compDbgCode))
+ if ((state == RCS_NORMAL) && comp->opts.OptimizationDisabled())
{
// Note, at least, that there is at least one reference.
lvImplicitlyReferenced = 1;
diff --git a/src/jit/flowgraph.cpp b/src/jit/flowgraph.cpp
index d2bd44e9dc..4e2fc40bba 100644
--- a/src/jit/flowgraph.cpp
+++ b/src/jit/flowgraph.cpp
@@ -3572,7 +3572,7 @@ void Compiler::fgCreateGCPolls()
}
#endif // DEBUG
- if (!(opts.MinOpts() || opts.compDbgCode))
+ if (opts.OptimizationEnabled())
{
// Remove polls from well formed loops with a constant upper bound.
for (unsigned lnum = 0; lnum < optLoopCount; ++lnum)
@@ -3785,7 +3785,7 @@ void Compiler::fgCreateGCPolls()
// can't or don't want to emit an inline check. Check all of those. If after all of that we still
// have INLINE, then emit an inline check.
- if (opts.MinOpts() || opts.compDbgCode)
+ if (opts.OptimizationDisabled())
{
#ifdef DEBUG
if (verbose)
@@ -3832,7 +3832,7 @@ void Compiler::fgCreateGCPolls()
// past the epilog. We should never split blocks unless we're optimizing.
if (createdPollBlocks)
{
- noway_assert(!opts.MinOpts() && !opts.compDbgCode);
+ noway_assert(opts.OptimizationEnabled());
fgReorderBlocks();
}
}
@@ -13023,7 +13023,7 @@ void Compiler::fgComputeBlockAndEdgeWeights()
JITDUMP("*************** In fgComputeBlockAndEdgeWeights()\n");
const bool usingProfileWeights = fgIsUsingProfileWeights();
- const bool isOptimizing = !opts.MinOpts() && !opts.compDbgCode;
+ const bool isOptimizing = opts.OptimizationEnabled();
fgHaveValidEdgeWeights = false;
fgCalledCount = BB_UNITY_WEIGHT;
@@ -16444,7 +16444,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication)
/* This should never be called for debuggable code */
- noway_assert(!opts.MinOpts() && !opts.compDbgCode);
+ noway_assert(opts.OptimizationEnabled());
#ifdef DEBUG
if (verbose)
diff --git a/src/jit/gentree.cpp b/src/jit/gentree.cpp
index 5d00ebb06e..893695a194 100644
--- a/src/jit/gentree.cpp
+++ b/src/jit/gentree.cpp
@@ -11673,7 +11673,7 @@ GenTree* Compiler::gtFoldExpr(GenTree* tree)
}
else if ((kind & GTK_BINOP) && op1 && tree->gtOp.gtOp2 &&
// Don't take out conditionals for debugging
- !((opts.compDbgCode || opts.MinOpts()) && tree->OperIsCompare()))
+ (opts.OptimizationEnabled() || !tree->OperIsCompare()))
{
GenTree* op2 = tree->gtOp.gtOp2;
@@ -11772,7 +11772,7 @@ GenTree* Compiler::gtFoldExprCall(GenTreeCall* call)
}
// Defer folding if not optimizing.
- if (opts.compDbgCode || opts.MinOpts())
+ if (opts.OptimizationDisabled())
{
return call;
}
diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp
index 22bc79e7d6..30812737ad 100644
--- a/src/jit/importer.cpp
+++ b/src/jit/importer.cpp
@@ -3533,7 +3533,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
GenTree* retNode = nullptr;
// Under debug and minopts, only expand what is required.
- if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
+ if (!mustExpand && opts.OptimizationDisabled())
{
*pIntrinsicID = CORINFO_INTRINSIC_Illegal;
return retNode;
@@ -3654,7 +3654,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
case CORINFO_INTRINSIC_StringLength:
op1 = impPopStack().val;
- if (!opts.MinOpts() && !opts.compDbgCode)
+ if (opts.OptimizationEnabled())
{
GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_String__stringLen);
op1 = arrLen;
@@ -6263,7 +6263,7 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
// structs is cheap.
JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via");
bool canExpandInline = (boxHelper == CORINFO_HELP_BOX);
- bool optForSize = !exprToBox->IsCall() && (operCls != nullptr) && (opts.compDbgCode || opts.MinOpts());
+ bool optForSize = !exprToBox->IsCall() && (operCls != nullptr) && opts.OptimizationDisabled();
bool expandInline = canExpandInline && !optForSize;
if (expandInline)
@@ -6281,7 +6281,7 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
// and the other you get
// *(temp+4) = expr
- if (opts.MinOpts() || opts.compDbgCode)
+ if (opts.OptimizationDisabled())
{
// For minopts/debug code, try and minimize the total number
// of box temps by reusing an existing temp when possible.
@@ -7508,7 +7508,7 @@ bool Compiler::impIsImplicitTailCallCandidate(
return false;
}
- if (opts.compDbgCode || opts.MinOpts())
+ if (opts.OptimizationDisabled())
{
return false;
}
@@ -10530,7 +10530,7 @@ GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_T
assert(op1->TypeGet() == TYP_REF);
// Don't optimize for minopts or debug codegen.
- if (opts.compDbgCode || opts.MinOpts())
+ if (opts.OptimizationDisabled())
{
return nullptr;
}
@@ -10637,7 +10637,7 @@ GenTree* Compiler::impCastClassOrIsInstToTree(GenTree* op1,
// Don't bother with inline expansion when jit is trying to
// generate code quickly, or the cast is in code that won't run very
// often, or the method already is pretty big.
- if (compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts())
+ if (compCurBB->isRunRarely() || opts.OptimizationDisabled())
{
// not worth the code expansion if jitting fast or in a rarely run block
shouldExpandInline = false;
@@ -12616,7 +12616,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
type = op1->TypeGet();
// brfalse and brtrue is only allowed on I4, refs, and byrefs.
- if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
+ if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext))
{
block->bbJumpKind = BBJ_NONE;
@@ -12850,7 +12850,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
- if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
+ if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext))
{
block->bbJumpKind = BBJ_NONE;
@@ -15415,7 +15415,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
// Check legality and profitability of inline expansion for unboxing.
const bool canExpandInline = (helper == CORINFO_HELP_UNBOX);
- const bool shouldExpandInline = !(compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts());
+ const bool shouldExpandInline = !compCurBB->isRunRarely() && opts.OptimizationEnabled();
if (canExpandInline && shouldExpandInline)
{
@@ -16237,7 +16237,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
}
op1 = impPopStack().val;
- if (!opts.MinOpts() && !opts.compDbgCode)
+ if (opts.OptimizationEnabled())
{
/* Use GT_ARR_LENGTH operator so rng check opts see this */
GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_Array__length);
@@ -20225,13 +20225,7 @@ void Compiler::impDevirtualizeCall(GenTreeCall* call,
assert(call->IsVirtual());
// Bail if not optimizing
- if (opts.MinOpts())
- {
- return;
- }
-
- // Bail if debuggable codegen
- if (opts.compDbgCode)
+ if (opts.OptimizationDisabled())
{
return;
}
@@ -20937,7 +20931,7 @@ void Compiler::addGuardedDevirtualizationCandidate(GenTreeCall* call,
}
// Bail if not optimizing or the call site is very likely cold
- if (compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts())
+ if (compCurBB->isRunRarely() || opts.OptimizationDisabled())
{
JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- rare / dbg / minopts\n",
dspTreeID(call));
diff --git a/src/jit/lclvars.cpp b/src/jit/lclvars.cpp
index e58130bd5b..c9029443b8 100644
--- a/src/jit/lclvars.cpp
+++ b/src/jit/lclvars.cpp
@@ -4021,13 +4021,13 @@ void Compiler::lvaMarkLocalVars()
lvaComputeRefCounts(isRecompute, setSlotNumbers);
// If we're not optimizing, we're done.
- if (opts.MinOpts() || opts.compDbgCode)
+ if (opts.OptimizationDisabled())
{
return;
}
#if ASSERTION_PROP
- assert(!opts.MinOpts() && !opts.compDbgCode);
+ assert(opts.OptimizationEnabled());
// Note: optAddCopies() depends on lvaRefBlks, which is set in lvaMarkLocalVars(BasicBlock*), called above.
optAddCopies();
@@ -4076,7 +4076,7 @@ void Compiler::lvaComputeRefCounts(bool isRecompute, bool setSlotNumbers)
//
// On first compute: mark all locals as implicitly referenced and untracked.
// On recompute: do nothing.
- if (opts.MinOpts() || opts.compDbgCode)
+ if (opts.OptimizationDisabled())
{
if (isRecompute)
{
diff --git a/src/jit/liveness.cpp b/src/jit/liveness.cpp
index df28bc330f..039f8641aa 100644
--- a/src/jit/liveness.cpp
+++ b/src/jit/liveness.cpp
@@ -165,7 +165,7 @@ void Compiler::fgLocalVarLivenessInit()
JITDUMP("In fgLocalVarLivenessInit\n");
// Sort locals first, if we're optimizing
- if (!opts.MinOpts() && !opts.compDbgCode)
+ if (opts.OptimizationEnabled())
{
lvaSortByRefCount();
}
diff --git a/src/jit/lower.cpp b/src/jit/lower.cpp
index e563609d3c..3c38a5775f 100644
--- a/src/jit/lower.cpp
+++ b/src/jit/lower.cpp
@@ -457,7 +457,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node)
if (targetCnt == 1)
{
JITDUMP("Lowering switch " FMT_BB ": single target; converting to BBJ_ALWAYS\n", originalSwitchBB->bbNum);
- noway_assert(comp->opts.MinOpts() || comp->opts.compDbgCode);
+ noway_assert(comp->opts.OptimizationDisabled());
if (originalSwitchBB->bbNext == jumpTab[0])
{
originalSwitchBB->bbJumpKind = BBJ_NONE;
@@ -5229,7 +5229,7 @@ void Lowering::DoPhase()
comp->fgLocalVarLiveness();
// local var liveness can delete code, which may create empty blocks
- if (!comp->opts.MinOpts() && !comp->opts.compDbgCode)
+ if (comp->opts.OptimizationEnabled())
{
comp->optLoopsMarked = false;
bool modified = comp->fgUpdateFlowGraph();
diff --git a/src/jit/morph.cpp b/src/jit/morph.cpp
index df95c9344b..b2cb5859cb 100644
--- a/src/jit/morph.cpp
+++ b/src/jit/morph.cpp
@@ -5055,7 +5055,7 @@ void Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call,
//
// We can't determine that all of the time, but if there is only
// one use and the method has no loops, then this use must be the last.
- if (!(opts.compDbgCode || opts.MinOpts()))
+ if (opts.OptimizationEnabled())
{
GenTreeLclVarCommon* lcl = nullptr;
@@ -13208,7 +13208,7 @@ DONE_MORPHING_CHILDREN:
/* Try to change *(&lcl + cns) into lcl[cns] to prevent materialization of &lcl */
if (op1->gtOp.gtOp1->OperGet() == GT_ADDR && op1->gtOp.gtOp2->OperGet() == GT_CNS_INT &&
- (!(opts.MinOpts() || opts.compDbgCode)))
+ opts.OptimizationEnabled())
{
// No overflow arithmetic with pointers
noway_assert(!op1->gtOverflow());
@@ -15109,7 +15109,7 @@ bool Compiler::fgFoldConditional(BasicBlock* block)
bool result = false;
// We don't want to make any code unreachable
- if (opts.compDbgCode || opts.MinOpts())
+ if (opts.OptimizationDisabled())
{
return false;
}
@@ -15611,7 +15611,8 @@ void Compiler::fgMorphStmts(BasicBlock* block, bool* lnot, bool* loadw)
continue;
}
#ifdef FEATURE_SIMD
- if (!opts.MinOpts() && stmt->gtStmtExpr->TypeGet() == TYP_FLOAT && stmt->gtStmtExpr->OperGet() == GT_ASG)
+ if (opts.OptimizationEnabled() && stmt->gtStmtExpr->TypeGet() == TYP_FLOAT &&
+ stmt->gtStmtExpr->OperGet() == GT_ASG)
{
fgMorphCombineSIMDFieldAssignments(block, stmt);
}
@@ -15834,7 +15835,7 @@ void Compiler::fgMorphBlocks()
//
// Local assertion prop is enabled if we are optimized
//
- optLocalAssertionProp = (!opts.compDbgCode && !opts.MinOpts());
+ optLocalAssertionProp = opts.OptimizationEnabled();
if (optLocalAssertionProp)
{
@@ -16862,7 +16863,7 @@ void Compiler::fgMorph()
// TODO-ObjectStackAllocation: Enable the optimization for architectures using
// JIT32_GCENCODER (i.e., x86).
#ifndef JIT32_GCENCODER
- if (JitConfig.JitObjectStackAllocation() && !opts.MinOpts() && !opts.compDbgCode)
+ if (JitConfig.JitObjectStackAllocation() && opts.OptimizationEnabled())
{
objectAllocator.EnableObjectStackAllocation();
}
diff --git a/src/jit/optimizer.cpp b/src/jit/optimizer.cpp
index 1f57e053d0..bf9140af91 100644
--- a/src/jit/optimizer.cpp
+++ b/src/jit/optimizer.cpp
@@ -51,7 +51,7 @@ DataFlow::DataFlow(Compiler* pCompiler) : m_pCompiler(pCompiler)
void Compiler::optSetBlockWeights()
{
- noway_assert(!opts.MinOpts() && !opts.compDbgCode);
+ noway_assert(opts.OptimizationEnabled());
assert(fgDomsComputed);
#ifdef DEBUG
@@ -4050,7 +4050,7 @@ static GenTree* optFindLoopTermTest(BasicBlock* bottom)
void Compiler::fgOptWhileLoop(BasicBlock* block)
{
- noway_assert(!opts.MinOpts() && !opts.compDbgCode);
+ noway_assert(opts.OptimizationEnabled());
noway_assert(compCodeOpt() != SMALL_CODE);
/*
@@ -4360,7 +4360,7 @@ void Compiler::fgOptWhileLoop(BasicBlock* block)
void Compiler::optOptimizeLayout()
{
- noway_assert(!opts.MinOpts() && !opts.compDbgCode);
+ noway_assert(opts.OptimizationEnabled());
#ifdef DEBUG
if (verbose)
@@ -4414,7 +4414,7 @@ void Compiler::optOptimizeLayout()
void Compiler::optOptimizeLoops()
{
- noway_assert(!opts.MinOpts() && !opts.compDbgCode);
+ noway_assert(opts.OptimizationEnabled());
#ifdef DEBUG
if (verbose)
diff --git a/src/jit/regalloc.cpp b/src/jit/regalloc.cpp
index eb6ddf8921..196c3727c0 100644
--- a/src/jit/regalloc.cpp
+++ b/src/jit/regalloc.cpp
@@ -207,7 +207,7 @@ bool Compiler::rpMustCreateEBPFrame(INDEBUG(const char** wbReason))
#endif
#if ETW_EBP_FRAMED
- if (!result && (opts.MinOpts() || opts.compDbgCode))
+ if (!result && opts.OptimizationDisabled())
{
INDEBUG(reason = "Debug Code");
result = true;
diff --git a/src/jit/regset.cpp b/src/jit/regset.cpp
index 94e3252baf..ad6763e306 100644
--- a/src/jit/regset.cpp
+++ b/src/jit/regset.cpp
@@ -90,7 +90,7 @@ void RegSet::verifyRegUsed(regNumber reg)
void RegSet::verifyRegistersUsed(regMaskTP regMask)
{
- if (m_rsCompiler->opts.MinOpts() || m_rsCompiler->opts.compDbgCode)
+ if (m_rsCompiler->opts.OptimizationDisabled())
{
return;
}
diff --git a/src/jit/scopeinfo.cpp b/src/jit/scopeinfo.cpp
index d70a33b082..f152bf5bc6 100644
--- a/src/jit/scopeinfo.cpp
+++ b/src/jit/scopeinfo.cpp
@@ -492,7 +492,7 @@ void CodeGen::siBeginBlock(BasicBlock* block)
// For debuggable or minopts code, scopes can begin only on block boundaries.
// For other codegen modes (eg minopts/tier0) we currently won't report any
// untracked locals.
- if (compiler->opts.compDbgCode || compiler->opts.MinOpts())
+ if (compiler->opts.OptimizationDisabled())
{
// Check if there are any scopes on the current block's start boundary.
VarScopeDsc* varScope = nullptr;
diff --git a/src/jit/simd.cpp b/src/jit/simd.cpp
index 4941f56cd1..3d265ee12d 100644
--- a/src/jit/simd.cpp
+++ b/src/jit/simd.cpp
@@ -2261,7 +2261,7 @@ GenTree* Compiler::createAddressNodeForSIMDInit(GenTree* tree, unsigned simdSize
void Compiler::impMarkContiguousSIMDFieldAssignments(GenTree* stmt)
{
- if (!featureSIMD || opts.MinOpts())
+ if (!featureSIMD || opts.OptimizationDisabled())
{
return;
}