summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--src/ToolBox/SOS/Strike/util.cpp22
-rw-r--r--src/debug/daccess/request.cpp36
-rw-r--r--src/inc/corinfo.h3
-rw-r--r--src/inc/dacprivate.h14
-rw-r--r--src/inc/eventtracebase.h107
-rw-r--r--src/jit/compiler.cpp11
-rw-r--r--src/jit/compiler.h4
-rw-r--r--src/jit/flowgraph.cpp29
-rw-r--r--src/vm/ClrEtwAll.man186
-rw-r--r--src/vm/ClrEtwAllMeta.lst20
-rw-r--r--src/vm/callcounter.cpp12
-rw-r--r--src/vm/callcounter.h1
-rw-r--r--src/vm/codeversion.h3
-rw-r--r--src/vm/common.h1
-rw-r--r--src/vm/eeconfig.cpp69
-rw-r--r--src/vm/eventtrace.cpp204
-rw-r--r--src/vm/eventtrace.inl49
-rw-r--r--src/vm/jitinterface.cpp24
-rw-r--r--src/vm/method.cpp28
-rw-r--r--src/vm/method.hpp61
-rw-r--r--src/vm/methodtablebuilder.cpp6
-rw-r--r--src/vm/prestub.cpp50
-rw-r--r--src/vm/threads.cpp2
-rw-r--r--src/vm/threads.h27
-rw-r--r--src/vm/threads.inl33
-rw-r--r--src/vm/tieredcompilation.cpp91
-rw-r--r--src/vm/tieredcompilation.h2
27 files changed, 933 insertions, 162 deletions
diff --git a/src/ToolBox/SOS/Strike/util.cpp b/src/ToolBox/SOS/Strike/util.cpp
index f9fe957922..9ff51d45f6 100644
--- a/src/ToolBox/SOS/Strike/util.cpp
+++ b/src/ToolBox/SOS/Strike/util.cpp
@@ -3252,21 +3252,27 @@ void DumpTieredNativeCodeAddressInfo(struct DacpTieredVersionData * pTieredVersi
for(int i = cTieredVersionData - 1; i >= 0; --i)
{
const char *descriptor = NULL;
- switch(pTieredVersionData[i].TieredInfo)
+ switch(pTieredVersionData[i].OptimizationTier)
{
- case DacpTieredVersionData::TIERED_UNKNOWN:
+ case DacpTieredVersionData::OptimizationTier_Unknown:
default:
_ASSERTE(!"Update SOS to understand the new tier");
descriptor = "Unknown Tier";
break;
- case DacpTieredVersionData::NON_TIERED:
- descriptor = "Non-Tiered";
+ case DacpTieredVersionData::OptimizationTier_MinOptJitted:
+ descriptor = "MinOptJitted";
break;
- case DacpTieredVersionData::TIERED_0:
- descriptor = "Tier 0";
+ case DacpTieredVersionData::OptimizationTier_Optimized:
+ descriptor = "Optimized";
break;
- case DacpTieredVersionData::TIERED_1:
- descriptor = "Tier 1";
+ case DacpTieredVersionData::OptimizationTier_QuickJitted:
+ descriptor = "QuickJitted";
+ break;
+ case DacpTieredVersionData::OptimizationTier_OptimizedTier1:
+ descriptor = "OptimizedTier1";
+ break;
+ case DacpTieredVersionData::OptimizationTier_ReadyToRun:
+ descriptor = "ReadyToRun";
break;
}
diff --git a/src/debug/daccess/request.cpp b/src/debug/daccess/request.cpp
index dff24c8eb6..c78b5b2f34 100644
--- a/src/debug/daccess/request.cpp
+++ b/src/debug/daccess/request.cpp
@@ -1131,32 +1131,56 @@ HRESULT ClrDataAccess::GetTieredVersions(
goto cleanup;
}
+ TADDR r2rImageBase = NULL;
+ TADDR r2rImageEnd = NULL;
+ {
+ PTR_Module pModule = (PTR_Module)pMD->GetModule();
+ if (pModule->IsReadyToRun())
+ {
+ PTR_PEImageLayout pImage = pModule->GetReadyToRunInfo()->GetImage();
+ r2rImageBase = dac_cast<TADDR>(pImage->GetBase());
+ r2rImageEnd = r2rImageBase + pImage->GetSize();
+ }
+ }
+
NativeCodeVersionCollection nativeCodeVersions = ilCodeVersion.GetNativeCodeVersions(pMD);
int count = 0;
for (NativeCodeVersionIterator iter = nativeCodeVersions.Begin(); iter != nativeCodeVersions.End(); iter++)
{
- nativeCodeAddrs[count].NativeCodeAddr = (*iter).GetNativeCode();
+ TADDR pNativeCode = PCODEToPINSTR((*iter).GetNativeCode());
+ nativeCodeAddrs[count].NativeCodeAddr = pNativeCode;
PTR_NativeCodeVersionNode pNode = (*iter).AsNode();
nativeCodeAddrs[count].NativeCodeVersionNodePtr = TO_CDADDR(PTR_TO_TADDR(pNode));
- if (pMD->IsEligibleForTieredCompilation())
+ if (r2rImageBase <= pNativeCode && pNativeCode < r2rImageEnd)
+ {
+ nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_ReadyToRun;
+ }
+ else if (pMD->IsEligibleForTieredCompilation())
{
switch ((*iter).GetOptimizationTier())
{
default:
- nativeCodeAddrs[count].TieredInfo = DacpTieredVersionData::TIERED_UNKNOWN;
+ nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_Unknown;
break;
case NativeCodeVersion::OptimizationTier0:
- nativeCodeAddrs[count].TieredInfo = DacpTieredVersionData::TIERED_0;
+ nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_QuickJitted;
break;
case NativeCodeVersion::OptimizationTier1:
- nativeCodeAddrs[count].TieredInfo = DacpTieredVersionData::TIERED_1;
+ nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_OptimizedTier1;
+ break;
+ case NativeCodeVersion::OptimizationTierOptimized:
+ nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_Optimized;
break;
}
}
+ else if (pMD->IsJitOptimizationDisabled())
+ {
+ nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_MinOptJitted;
+ }
else
{
- nativeCodeAddrs[count].TieredInfo = DacpTieredVersionData::NON_TIERED;
+ nativeCodeAddrs[count].OptimizationTier = DacpTieredVersionData::OptimizationTier_Optimized;
}
++count;
diff --git a/src/inc/corinfo.h b/src/inc/corinfo.h
index 1a223a1f8b..083450b016 100644
--- a/src/inc/corinfo.h
+++ b/src/inc/corinfo.h
@@ -864,7 +864,8 @@ enum CorInfoMethodRuntimeFlags
CORINFO_FLG_BAD_INLINEE = 0x00000001, // The method is not suitable for inlining
CORINFO_FLG_VERIFIABLE = 0x00000002, // The method has verifiable code
CORINFO_FLG_UNVERIFIABLE = 0x00000004, // The method has unverifiable code
- CORINFO_FLG_SWITCHED_TO_TIER1 = 0x00000008, // The JIT decided to switch to tier 1 for this method, when a different tier was requested
+ CORINFO_FLG_SWITCHED_TO_MIN_OPT = 0x00000008, // The JIT decided to switch to MinOpt for this method, when it was not requested
+ CORINFO_FLG_SWITCHED_TO_OPTIMIZED = 0x00000010, // The JIT decided to switch to tier 1 for this method, when a different tier was requested
};
diff --git a/src/inc/dacprivate.h b/src/inc/dacprivate.h
index 978d8a8c51..7a12c642e4 100644
--- a/src/inc/dacprivate.h
+++ b/src/inc/dacprivate.h
@@ -587,16 +587,18 @@ struct MSLAYOUT DacpMethodDescTransparencyData : ZeroInit<DacpMethodDescTranspar
struct MSLAYOUT DacpTieredVersionData
{
- enum TieredState
+ enum OptimizationTier
{
- NON_TIERED,
- TIERED_0,
- TIERED_1,
- TIERED_UNKNOWN
+ OptimizationTier_Unknown,
+ OptimizationTier_MinOptJitted,
+ OptimizationTier_Optimized,
+ OptimizationTier_QuickJitted,
+ OptimizationTier_OptimizedTier1,
+ OptimizationTier_ReadyToRun,
};
CLRDATA_ADDRESS NativeCodeAddr;
- TieredState TieredInfo;
+ OptimizationTier OptimizationTier;
CLRDATA_ADDRESS NativeCodeVersionNodePtr;
};
diff --git a/src/inc/eventtracebase.h b/src/inc/eventtracebase.h
index 5a92d98fd9..b4be4dfbc6 100644
--- a/src/inc/eventtracebase.h
+++ b/src/inc/eventtracebase.h
@@ -31,7 +31,7 @@
struct EventStructTypeData;
void InitializeEventTracing();
-typedef DWORD NativeCodeVersionId; // keep in sync with codeversion.h
+class PrepareCodeConfig;
// !!!!!!! NOTE !!!!!!!!
// The flags must match those in the ETW manifest exactly
@@ -582,7 +582,7 @@ namespace ETW
static VOID SendEventsForNgenMethods(Module *pModule, DWORD dwEventOptions);
static VOID SendMethodJitStartEvent(MethodDesc *pMethodDesc, SString *namespaceOrClassName=NULL, SString *methodName=NULL, SString *methodSignature=NULL);
static VOID SendMethodILToNativeMapEvent(MethodDesc * pMethodDesc, DWORD dwEventOptions, PCODE pNativeCodeStartAddress, ReJITID ilCodeId);
- static VOID SendMethodEvent(MethodDesc *pMethodDesc, DWORD dwEventOptions, BOOL bIsJit, SString *namespaceOrClassName=NULL, SString *methodName=NULL, SString *methodSignature=NULL, PCODE pNativeCodeStartAddress = 0, NativeCodeVersionId nativeCodeId = 0, BOOL bProfilerRejectedPrecompiledCode = FALSE, BOOL bReadyToRunRejectedPrecompiledCode = FALSE);
+ static VOID SendMethodEvent(MethodDesc *pMethodDesc, DWORD dwEventOptions, BOOL bIsJit, SString *namespaceOrClassName=NULL, SString *methodName=NULL, SString *methodSignature=NULL, PCODE pNativeCodeStartAddress = 0, PrepareCodeConfig *pConfig = NULL);
static VOID SendHelperEvent(ULONGLONG ullHelperStartAddress, ULONG ulHelperSize, LPCWSTR pHelperName);
public:
typedef union _MethodStructs
@@ -596,6 +596,7 @@ namespace ETW
JitHelperMethod=0x10,
ProfilerRejectedPrecompiledCode=0x20,
ReadyToRunRejectedPrecompiledCode=0x40,
+ // 0x80 to 0x200 are used for the optimization tier
}MethodFlags;
typedef enum _MethodExtent
@@ -606,9 +607,23 @@ namespace ETW
}MethodStructs;
+ enum class JitOptimizationTier
+ {
+ Unknown, // to identify older runtimes that would send this value
+ MinOptJitted,
+ Optimized,
+ QuickJitted,
+ OptimizedTier1,
+
+ Count
+ };
+
+ static const UINT8 MethodFlagsJitOptimizationTierShift = 7;
+ static const unsigned int MethodFlagsJitOptimizationTierLowMask = 0x7;
+
static VOID GetR2RGetEntryPoint(MethodDesc *pMethodDesc, PCODE pEntryPoint);
- static VOID MethodJitting(MethodDesc *pMethodDesc, SString *namespaceOrClassName=NULL, SString *methodName=NULL, SString *methodSignature=NULL);
- static VOID MethodJitted(MethodDesc *pMethodDesc, SString *namespaceOrClassName=NULL, SString *methodName=NULL, SString *methodSignature=NULL, PCODE pNativeCodeStartAddress = 0, ReJITID ilCodeId = 0, NativeCodeVersionId nativeCodeId = 0, BOOL bProfilerRejectedPrecompiledCode = FALSE, BOOL bReadyToRunRejectedPrecompiledCode = FALSE);
+ static VOID MethodJitting(MethodDesc *pMethodDesc, SString *namespaceOrClassName, SString *methodName, SString *methodSignature);
+ static VOID MethodJitted(MethodDesc *pMethodDesc, SString *namespaceOrClassName, SString *methodName, SString *methodSignature, PCODE pNativeCodeStartAddress, PrepareCodeConfig *pConfig);
static VOID StubInitialized(ULONGLONG ullHelperStartAddress, LPCWSTR pHelperName);
static VOID StubsInitialized(PVOID *pHelperStartAddresss, PVOID *pHelperNames, LONG ulNoOfHelpers);
static VOID MethodRestored(MethodDesc * pMethodDesc);
@@ -617,8 +632,8 @@ namespace ETW
#else // FEATURE_EVENT_TRACE
public:
static VOID GetR2RGetEntryPoint(MethodDesc *pMethodDesc, PCODE pEntryPoint) {};
- static VOID MethodJitting(MethodDesc *pMethodDesc, SString *namespaceOrClassName=NULL, SString *methodName=NULL, SString *methodSignature=NULL) {};
- static VOID MethodJitted(MethodDesc *pMethodDesc, SString *namespaceOrClassName=NULL, SString *methodName=NULL, SString *methodSignature=NULL, PCODE pNativeCodeStartAddress = 0, ReJITID ilCodeId = 0, NativeCodeVersionId nativeCodeId = 0, BOOL bProfilerRejectedPrecompiledCode = FALSE, BOOL bReadyToRunRejectedPrecompiledCode = FALSE) {};
+ static VOID MethodJitting(MethodDesc *pMethodDesc, SString *namespaceOrClassName, SString *methodName, SString *methodSignature);
+ static VOID MethodJitted(MethodDesc *pMethodDesc, SString *namespaceOrClassName, SString *methodName, SString *methodSignature, PCODE pNativeCodeStartAddress, PrepareCodeConfig *pConfig);
static VOID StubInitialized(ULONGLONG ullHelperStartAddress, LPCWSTR pHelperName) {};
static VOID StubsInitialized(PVOID *pHelperStartAddresss, PVOID *pHelperNames, LONG ulNoOfHelpers) {};
static VOID MethodRestored(MethodDesc * pMethodDesc) {};
@@ -873,6 +888,86 @@ namespace ETW
DWORD countSymbolBytes, DWORD* pCountSymbolBytesRead) { return S_OK; }
#endif // FEATURE_EVENT_TRACE
};
+
+#define DISABLE_CONSTRUCT_COPY(T) \
+ T() = delete; \
+ T(const T &) = delete; \
+ T &operator =(const T &) = delete
+
+ // Class to wrap all Compilation logic for ETW
+ class CompilationLog
+ {
+ public:
+ class Runtime
+ {
+ public:
+#ifdef FEATURE_EVENT_TRACE
+ static bool IsEnabled();
+#else
+ static bool IsEnabled() { return false; }
+#endif
+
+ DISABLE_CONSTRUCT_COPY(Runtime);
+ };
+
+ class Rundown
+ {
+ public:
+#ifdef FEATURE_EVENT_TRACE
+ static bool IsEnabled();
+#else
+ static bool IsEnabled() { return false; }
+#endif
+
+ DISABLE_CONSTRUCT_COPY(Rundown);
+ };
+
+ // Class to wrap all TieredCompilation logic for ETW
+ class TieredCompilation
+ {
+ private:
+ static void GetSettings(UINT32 *flagsRef);
+
+ public:
+ class Runtime
+ {
+ public:
+#ifdef FEATURE_EVENT_TRACE
+ static bool IsEnabled();
+ static void SendSettings();
+ static void SendPause();
+ static void SendResume(UINT32 newMethodCount);
+ static void SendBackgroundJitStart(UINT32 pendingMethodCount);
+ static void SendBackgroundJitStop(UINT32 pendingMethodCount, UINT32 jittedMethodCount);
+#else
+ static bool IsEnabled() { return false; }
+ static void SendSettings() {}
+#endif
+
+ DISABLE_CONSTRUCT_COPY(Runtime);
+ };
+
+ class Rundown
+ {
+ public:
+#ifdef FEATURE_EVENT_TRACE
+ static bool IsEnabled();
+ static void SendSettings();
+#else
+ static bool IsEnabled() { return false; }
+ static void SendSettings() {}
+#endif
+
+ DISABLE_CONSTRUCT_COPY(Rundown);
+ };
+
+ DISABLE_CONSTRUCT_COPY(TieredCompilation);
+ };
+
+ DISABLE_CONSTRUCT_COPY(CompilationLog);
+ };
+
+#undef DISABLE_CONSTRUCT_COPY
};
diff --git a/src/jit/compiler.cpp b/src/jit/compiler.cpp
index 4c4a90d937..25249e19bb 100644
--- a/src/jit/compiler.cpp
+++ b/src/jit/compiler.cpp
@@ -4050,6 +4050,13 @@ _SetMinOpts:
// Set the MinOpts value
opts.SetMinOpts(theMinOptsValue);
+ // Notify the VM if MinOpts is being used when not requested
+ if (theMinOptsValue && !compIsForInlining() && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) &&
+ !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT) && !opts.compDbgCode)
+ {
+ info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_SWITCHED_TO_MIN_OPT);
+ }
+
#ifdef DEBUG
if (verbose && !compIsForInlining())
{
@@ -5949,14 +5956,14 @@ int Compiler::compCompileHelper(CORINFO_MODULE_HANDLE classPtr,
}
#ifdef FEATURE_CORECLR
- if (fgHasBackwardJump && (info.compFlags & CORINFO_FLG_DISABLE_TIER0_FOR_LOOPS) != 0 && fgCanSwitchToTier1())
+ if (fgHasBackwardJump && (info.compFlags & CORINFO_FLG_DISABLE_TIER0_FOR_LOOPS) != 0 && fgCanSwitchToOptimized())
#else // !FEATURE_CORECLR
// We may want to use JitConfig value here to support DISABLE_TIER0_FOR_LOOPS
if (fgHasBackwardJump && fgCanSwitchToTier1())
#endif
{
// Method likely has a loop, switch to the OptimizedTier to avoid spending too much time running slower code
- fgSwitchToTier1();
+ fgSwitchToOptimized();
}
compSetOptimizationLevel();
diff --git a/src/jit/compiler.h b/src/jit/compiler.h
index 1db70d1612..c73ca30c16 100644
--- a/src/jit/compiler.h
+++ b/src/jit/compiler.h
@@ -5105,8 +5105,8 @@ protected:
bool fgHasBackwardJump;
- bool fgCanSwitchToTier1();
- void fgSwitchToTier1();
+ bool fgCanSwitchToOptimized();
+ void fgSwitchToOptimized();
bool fgMayExplicitTailCall();
diff --git a/src/jit/flowgraph.cpp b/src/jit/flowgraph.cpp
index d8b12188ec..62d958fa88 100644
--- a/src/jit/flowgraph.cpp
+++ b/src/jit/flowgraph.cpp
@@ -4253,10 +4253,10 @@ private:
};
//------------------------------------------------------------------------
-// fgCanSwitchToTier1: Determines if conditions are met to allow switching the opt level to tier 1
+// fgCanSwitchToOptimized: Determines if conditions are met to allow switching the opt level to optimized
//
// Return Value:
-// True if the opt level may be switched to tier 1, false otherwise
+// True if the opt level may be switched from tier 0 to optimized, false otherwise
//
// Assumptions:
// - compInitOptions() has been called
@@ -4266,7 +4266,7 @@ private:
// This method is to be called at some point before compSetOptimizationLevel() to determine if the opt level may be
// changed based on information gathered in early phases.
-bool Compiler::fgCanSwitchToTier1()
+bool Compiler::fgCanSwitchToOptimized()
{
bool result = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT) &&
!opts.compDbgCode && !compIsForInlining();
@@ -4281,28 +4281,27 @@ bool Compiler::fgCanSwitchToTier1()
}
//------------------------------------------------------------------------
-// fgSwitchToTier1: Switch the opt level to tier 1
+// fgSwitchToOptimized: Switch the opt level from tier 0 to optimized
//
// Assumptions:
-// - fgCanSwitchToTier1() is true
+// - fgCanSwitchToOptimized() is true
// - compSetOptimizationLevel() has not been called
//
// Notes:
-// This method is to be called at some point before compSetOptimizationLevel() to switch the opt level to tier 1
+// This method is to be called at some point before compSetOptimizationLevel() to switch the opt level to optimized
// based on information gathered in early phases.
-void Compiler::fgSwitchToTier1()
+void Compiler::fgSwitchToOptimized()
{
- assert(fgCanSwitchToTier1());
+ assert(fgCanSwitchToOptimized());
- // Switch to tier 1 and re-init options
+ // Switch to optimized and re-init options
assert(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0));
opts.jitFlags->Clear(JitFlags::JIT_FLAG_TIER0);
- opts.jitFlags->Set(JitFlags::JIT_FLAG_TIER1);
compInitOptions(opts.jitFlags);
// Notify the VM of the change
- info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_SWITCHED_TO_TIER1);
+ info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_SWITCHED_TO_OPTIMIZED);
}
//------------------------------------------------------------------------
@@ -5605,12 +5604,12 @@ unsigned Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, F
#endif // !FEATURE_CORECLR && _TARGET_AMD64_
}
- if (fgCanSwitchToTier1() && fgMayExplicitTailCall())
+ if (fgCanSwitchToOptimized() && fgMayExplicitTailCall())
{
// Method has an explicit tail call that may run like a loop or may not be generated as a tail
- // call in tier 0, switch to tier 1 to avoid spending too much time running slower code and to
- // avoid stack overflow from recursion
- fgSwitchToTier1();
+ // call in tier 0, switch to optimized to avoid spending too much time running slower code and
+ // to avoid stack overflow from recursion
+ fgSwitchToOptimized();
}
#if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
diff --git a/src/vm/ClrEtwAll.man b/src/vm/ClrEtwAll.man
index 3c81d22c43..817febbee4 100644
--- a/src/vm/ClrEtwAll.man
+++ b/src/vm/ClrEtwAll.man
@@ -77,6 +77,8 @@
message="$(string.RuntimePublisher.CodeSymbolsKeywordMessage)" symbol="CLR_CODESYMBOLS_KEYWORD" />
<keyword name="EventSourceKeyword" mask="0x800000000"
message="$(string.RuntimePublisher.EventSourceKeywordMessage)" symbol="CLR_EVENTSOURCE_KEYWORD" />
+ <keyword name="CompilationKeyword" mask="0x1000000000"
+ message="$(string.RuntimePublisher.CompilationKeywordMessage)" symbol="CLR_COMPILATION_KEYWORD" />
</keywords>
<!--Tasks-->
<tasks>
@@ -364,16 +366,25 @@
<task name="DebugExceptionProcessing" symbol="CLR_EXCEPTION_PROCESSING_TASK"
value="26" eventGUID="{C4412198-EF03-47F1-9BD1-11C6637A2062}"
message="$(string.RuntimePublisher.DebugExceptionProcessingTaskMessage)">
- <opcodes>
- </opcodes>
- </task>
+ <opcodes>
+ </opcodes>
+ </task>
<task name="CodeSymbols" symbol="CLR_CODE_SYMBOLS_TASK"
value="30" eventGUID="{53aedf69-2049-4f7d-9345-d3018b5c4d80}"
message="$(string.RuntimePublisher.CodeSymbolsTaskMessage)">
- <opcodes>
- </opcodes>
- </task>
- <!--Next available ID is 31-->
+ <opcodes>
+ </opcodes>
+ </task>
+ <task name="TieredCompilation" symbol="CLR_TIERED_COMPILATION_TASK"
+ value="31" eventGUID="{A77F474D-9D0D-4311-B98E-CFBCF84B9E0F}"
+ message="$(string.RuntimePublisher.TieredCompilationTaskMessage)">
+ <opcodes>
+ <opcode name="Settings" message="$(string.RuntimePublisher.TieredCompilationSettingsOpcodeMessage)" symbol="CLR_TIERED_COMPILATION_SETTINGS_OPCODE" value="11"/>
+ <opcode name="Pause" message="$(string.RuntimePublisher.TieredCompilationPauseOpcodeMessage)" symbol="CLR_TIERED_COMPILATION_PAUSE_OPCODE" value="12"/>
+ <opcode name="Resume" message="$(string.RuntimePublisher.TieredCompilationResumeOpcodeMessage)" symbol="CLR_TIERED_COMPILATION_RESUME_OPCODE" value="13"/>
+ </opcodes>
+ </task>
+ <!--Next available ID is 32-->
</tasks>
<!--Maps-->
<maps>
@@ -483,6 +494,10 @@
<map value="0x2" message="$(string.RuntimePublisher.Method.GenericMapMessage)"/>
<map value="0x4" message="$(string.RuntimePublisher.Method.HasSharedGenericCodeMapMessage)"/>
<map value="0x8" message="$(string.RuntimePublisher.Method.JittedMapMessage)"/>
+ <map value="0x10" message="$(string.RuntimePublisher.Method.JitHelperMapMessage)"/>
+ <map value="0x20" message="$(string.RuntimePublisher.Method.ProfilerRejectedPrecompiledCodeMapMessage)"/>
+ <map value="0x40" message="$(string.RuntimePublisher.Method.ReadyToRunRejectedPrecompiledCodeMapMessage)"/>
+ <!-- 0x80 to 0x200 are used for the optimization tier -->
</bitMap>
<bitMap name="StartupModeMap">
<map value="0x1" message="$(string.RuntimePublisher.StartupMode.ManagedExeMapMessage)"/>
@@ -551,6 +566,11 @@
<map value="0x2" message="$(string.RuntimePublisher.ThreadFlags.Finalizer)"/>
<map value="0x4" message="$(string.RuntimePublisher.ThreadFlags.ThreadPoolWorker)"/>
</bitMap>
+ <bitMap name="TieredCompilationSettingsFlagsMap">
+ <map value="0x0" message="$(string.RuntimePublisher.TieredCompilationSettingsFlags.NoneMapMessage)"/>
+ <map value="0x1" message="$(string.RuntimePublisher.TieredCompilationSettingsFlags.QuickJitMapMessage)"/>
+ <map value="0x2" message="$(string.RuntimePublisher.TieredCompilationSettingsFlags.QuickJitForLoopsMapMessage)"/>
+ </bitMap>
</maps>
<!--Templates-->
@@ -1415,12 +1435,12 @@
<template tid="ContentionStop_V1">
<data name="ContentionFlags" inType="win:UInt8" map="ContentionFlagsMap" />
<data name="ClrInstanceID" inType="win:UInt16" />
- <data name="Duration" inType="win:Double" />
+ <data name="DurationNs" inType="win:Double" />
<UserData>
<Contention xmlns="myNs">
<ContentionFlags> %1 </ContentionFlags>
<ClrInstanceID> %2 </ClrInstanceID>
- <Duration> %3 </Duration>
+ <DurationNs> %3 </DurationNs>
</Contention>
</UserData>
</template>
@@ -2400,6 +2420,60 @@
</UserData>
</template>
+ <template tid="TieredCompilationEmpty">
+ <data name="ClrInstanceID" inType="win:UInt16"/>
+ <UserData>
+ <Settings xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ </Settings>
+ </UserData>
+ </template>
+
+ <template tid="TieredCompilationSettings">
+ <data name="ClrInstanceID" inType="win:UInt16"/>
+ <data name="Flags" inType="win:UInt32" outType="win:HexInt32" map="TieredCompilationSettingsFlagsMap"/>
+ <UserData>
+ <Settings xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <Flags> %2 </Flags>
+ </Settings>
+ </UserData>
+ </template>
+
+ <template tid="TieredCompilationResume">
+ <data name="ClrInstanceID" inType="win:UInt16"/>
+ <data name="NewMethodCount" inType="win:UInt32"/>
+ <UserData>
+ <Settings xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <NewMethodCount> %2 </NewMethodCount>
+ </Settings>
+ </UserData>
+ </template>
+
+ <template tid="TieredCompilationBackgroundJitStart">
+ <data name="ClrInstanceID" inType="win:UInt16"/>
+ <data name="PendingMethodCount" inType="win:UInt32"/>
+ <UserData>
+ <Settings xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <PendingMethodCount> %2 </PendingMethodCount>
+ </Settings>
+ </UserData>
+ </template>
+
+ <template tid="TieredCompilationBackgroundJitStop">
+ <data name="ClrInstanceID" inType="win:UInt16"/>
+ <data name="PendingMethodCount" inType="win:UInt32"/>
+ <data name="JittedMethodCount" inType="win:UInt32"/>
+ <UserData>
+ <Settings xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <PendingMethodCount> %2 </PendingMethodCount>
+ <JittedMethodCount> %3 </JittedMethodCount>
+ </Settings>
+ </UserData>
+ </template>
</templates>
<events>
@@ -3307,9 +3381,26 @@
task="CodeSymbols"
symbol="CodeSymbols" message="$(string.RuntimePublisher.CodeSymbolsEventMessage)"/>
- <event value="270" version="0" level="win:Informational" template="EventSource"
+ <event value="270" version="0" level="win:Informational" template="EventSource"
keywords="EventSourceKeyword"
symbol="EventSource" />
+
+ <!-- Tiered compilation events 280-289 -->
+ <event value="280" version="0" level="win:Informational" template="TieredCompilationSettings"
+ keywords="CompilationKeyword" task="TieredCompilation" opcode="Settings"
+ symbol="TieredCompilationSettings" message="$(string.RuntimePublisher.TieredCompilationSettingsEventMessage)"/>
+ <event value="281" version="0" level="win:Informational" template="TieredCompilationEmpty"
+ keywords="CompilationKeyword" task="TieredCompilation" opcode="Pause"
+ symbol="TieredCompilationPause" message="$(string.RuntimePublisher.TieredCompilationPauseEventMessage)"/>
+ <event value="282" version="0" level="win:Informational" template="TieredCompilationResume"
+ keywords="CompilationKeyword" task="TieredCompilation" opcode="Resume"
+ symbol="TieredCompilationResume" message="$(string.RuntimePublisher.TieredCompilationResumeEventMessage)"/>
+ <event value="283" version="0" level="win:Informational" template="TieredCompilationBackgroundJitStart"
+ keywords="CompilationKeyword" task="TieredCompilation" opcode="win:Start"
+ symbol="TieredCompilationBackgroundJitStart" message="$(string.RuntimePublisher.TieredCompilationBackgroundJitStartEventMessage)"/>
+ <event value="284" version="0" level="win:Informational" template="TieredCompilationBackgroundJitStop"
+ keywords="CompilationKeyword" task="TieredCompilation" opcode="win:Stop"
+ symbol="TieredCompilationBackgroundJitStop" message="$(string.RuntimePublisher.TieredCompilationBackgroundJitStopEventMessage)"/>
</events>
</provider>
@@ -3346,6 +3437,8 @@
message="$(string.RundownPublisher.PerfTrackRundownKeywordMessage)" symbol="CLR_RUNDOWNPERFTRACK_KEYWORD"/>
<keyword name="StackKeyword" mask="0x40000000"
message="$(string.RundownPublisher.StackKeywordMessage)" symbol="CLR_RUNDOWNSTACK_KEYWORD"/>
+ <keyword name="CompilationKeyword" mask="0x1000000000"
+ message="$(string.RundownPublisher.CompilationKeywordMessage)" symbol="CLR_COMPILATION_RUNDOWN_KEYWORD" />
</keywords>
<!--Tasks-->
@@ -3406,6 +3499,14 @@
<opcode name="ModuleRangeDCEnd" message="$(string.RundownPublisher.ModuleRangeDCEndOpcodeMessage)" symbol="CLR_PERFTRACKRUNDOWN_MODULERANGEDCEND_OPCODE" value="11"> </opcode>
</opcodes>
</task>
+
+ <task name="TieredCompilationRundown" symbol="CLR_TIERED_COMPILATION_RUNDOWN_TASK"
+ value="31" eventGUID="{A1673472-0564-48EA-A95D-B49D4173F105}"
+ message="$(string.RundownPublisher.TieredCompilationTaskMessage)">
+ <opcodes>
+ <opcode name="SettingsDCStart" message="$(string.RundownPublisher.TieredCompilationSettingsDCStartOpcodeMessage)" symbol="CLR_TIERED_COMPILATION_SETTINGS_DCSTART_OPCODE" value="11"/>
+ </opcodes>
+ </task>
</tasks>
<maps>
@@ -3439,6 +3540,10 @@
<map value="0x2" message="$(string.RundownPublisher.Method.GenericMapMessage)"/>
<map value="0x4" message="$(string.RundownPublisher.Method.HasSharedGenericCodeMapMessage)"/>
<map value="0x8" message="$(string.RundownPublisher.Method.JittedMapMessage)"/>
+ <map value="0x10" message="$(string.RuntimePublisher.Method.JitHelperMapMessage)"/>
+ <map value="0x20" message="$(string.RuntimePublisher.Method.ProfilerRejectedPrecompiledCodeMapMessage)"/>
+ <map value="0x40" message="$(string.RuntimePublisher.Method.ReadyToRunRejectedPrecompiledCodeMapMessage)"/>
+ <!-- 0x80 to 0x200 are used for the optimization tier -->
</bitMap>
<bitMap name="StartupModeMap">
<map value="0x1" message="$(string.RundownPublisher.StartupMode.ManagedExeMapMessage)"/>
@@ -3473,6 +3578,11 @@
<map value="0x2" message="$(string.RundownPublisher.ThreadFlags.Finalizer)"/>
<map value="0x4" message="$(string.RundownPublisher.ThreadFlags.ThreadPoolWorker)"/>
</bitMap>
+ <bitMap name="TieredCompilationSettingsFlagsMap">
+ <map value="0x0" message="$(string.RundownPublisher.TieredCompilationSettingsFlags.NoneMapMessage)"/>
+ <map value="0x1" message="$(string.RundownPublisher.TieredCompilationSettingsFlags.QuickJitMapMessage)"/>
+ <map value="0x2" message="$(string.RundownPublisher.TieredCompilationSettingsFlags.QuickJitForLoopsMapMessage)"/>
+ </bitMap>
</maps>
<!--Templates-->
@@ -3920,6 +4030,17 @@
</ModuleRangeRundown>
</UserData>
</template>
+
+ <template tid="TieredCompilationSettings">
+ <data name="ClrInstanceID" inType="win:UInt16"/>
+ <data name="Flags" inType="win:UInt32" outType="win:HexInt32" map="TieredCompilationSettingsFlagsMap"/>
+ <UserData>
+ <Settings xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <Flags> %2 </Flags>
+ </Settings>
+ </UserData>
+ </template>
</templates>
<events>
@@ -4153,6 +4274,11 @@
opcode="win:Start"
task="CLRRuntimeInformationRundown"
symbol="RuntimeInformationDCStart" message="$(string.RundownPublisher.RuntimeInformationEventMessage)"/>
+
+ <!-- Tiered compilation events 280-289 -->
+ <event value="280" version="0" level="win:Informational" template="TieredCompilationSettings"
+ keywords="CompilationKeyword" task="TieredCompilationRundown" opcode="SettingsDCStart"
+ symbol="TieredCompilationSettingsDCStart" message="$(string.RundownPublisher.TieredCompilationSettingsDCStartEventMessage)"/>
</events>
</provider>
@@ -6426,7 +6552,7 @@
<string id="RuntimePublisher.ContentionStartEventMessage" value="NONE" />
<string id="RuntimePublisher.ContentionStart_V1EventMessage" value="ContentionFlags=%1;%nClrInstanceID=%2"/>
<string id="RuntimePublisher.ContentionStopEventMessage" value="ContentionFlags=%1;%nClrInstanceID=%2"/>
- <string id="RuntimePublisher.ContentionStop_V1EventMessage" value="ContentionFlags=%1;%nClrInstanceID=%2;Duration=%3"/>
+ <string id="RuntimePublisher.ContentionStop_V1EventMessage" value="ContentionFlags=%1;%nClrInstanceID=%2;DurationNs=%3"/>
<string id="RuntimePublisher.DCStartCompleteEventMessage" value="NONE" />
<string id="RuntimePublisher.DCEndCompleteEventMessage" value="NONE" />
<string id="RuntimePublisher.MethodDCStartEventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6" />
@@ -6494,6 +6620,12 @@
<string id="RuntimePublisher.SetGCHandleEventMessage" value="HandleID=%1;%nObjectID=%2;%nKind=%3;%nGeneration=%4;%nAppDomainID=%5;%nClrInstanceID=%6" />
<string id="RuntimePublisher.DestroyGCHandleEventMessage" value="HandleID=%1;%nClrInstanceID=%2" />
<string id="RuntimePublisher.CodeSymbolsEventMessage" value="%nClrInstanceId=%1;%nModuleId=%2;%nTotalChunks=%3;%nChunkNumber=%4;%nChunkLength=%5;%nChunk=%6" />
+ <string id="RuntimePublisher.TieredCompilationSettingsEventMessage" value="ClrInstanceID=%1;%nFlags=%2" />
+ <string id="RuntimePublisher.TieredCompilationPauseEventMessage" value="ClrInstanceID=%1" />
+ <string id="RuntimePublisher.TieredCompilationResumeEventMessage" value="ClrInstanceID=%1;%nNewMethodCount=%2" />
+ <string id="RuntimePublisher.TieredCompilationBackgroundJitStartEventMessage" value="ClrInstanceID=%1;%nPendingMethodCount=%2" />
+ <string id="RuntimePublisher.TieredCompilationBackgroundJitStopEventMessage" value="ClrInstanceID=%1;%nPendingMethodCount=%2;%nJittedMethodCount=%3" />
+
<string id="RundownPublisher.MethodDCStartEventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6" />
<string id="RundownPublisher.MethodDCStart_V1EventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nClrInstanceID=%7" />
<string id="RundownPublisher.MethodDCStart_V2EventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nClrInstanceID=%7;%nReJITID=%8" />
@@ -6536,9 +6668,12 @@
<string id="RundownPublisher.StackEventMessage" value="ClrInstanceID=%1;%nReserved1=%2;%nReserved2=%3;%nFrameCount=%4;%nStack=%5" />
<string id="RundownPublisher.ModuleRangeDCStartEventMessage" value="ClrInstanceID=%1;%ModuleID=%2;%nRangeBegin=%3;%nRangeSize=%4;%nRangeType=%5" />
<string id="RundownPublisher.ModuleRangeDCEndEventMessage" value= "ClrInstanceID=%1;%ModuleID=%2;%nRangeBegin=%3;%nRangeSize=%4;%nRangeType=%5" />
+ <string id="RundownPublisher.TieredCompilationSettingsDCStartEventMessage" value="ClrInstanceID=%1;%nFlags=%2" />
+
<string id="StressPublisher.StressLogEventMessage" value="Facility=%1;%nLevel=%2;%nMessage=%3" />
<string id="StressPublisher.StressLog_V1EventMessage" value="Facility=%1;%nLevel=%2;%nMessage=%3;%nClrInstanceID=%4" />
<string id="StressPublisher.StackEventMessage" value="ClrInstanceID=%1;%nReserved1=%2;%nReserved2=%3;%nFrameCount=%4;%nStack=%5" />
+
<string id="PrivatePublisher.FailFastEventMessage" value="FailFastUserMessage=%1;%nFailedEIP=%2;%nOSExitCode=%3;%nClrExitCode=%4;%nClrInstanceID=%5" />
<string id="PrivatePublisher.FinalizeObjectEventMessage" value="TypeName=%1;%nTypeID=%2;%nObjectID=%3;%nClrInstanceID=%4" />
<string id="PrivatePublisher.SetGCHandleEventMessage" value="HandleID=%1;%nObjectID=%2;%n;%nClrInstanceID=%3" />
@@ -6650,11 +6785,15 @@
<string id="RuntimePublisher.DebugIPCEventTaskMessage" value="DebugIPCEvent" />
<string id="RuntimePublisher.DebugExceptionProcessingTaskMessage" value="DebugExceptionProcessing" />
<string id="RuntimePublisher.CodeSymbolsTaskMessage" value="CodeSymbols" />
+ <string id="RuntimePublisher.TieredCompilationTaskMessage" value="TieredCompilation" />
+
<string id="RundownPublisher.EEStartupTaskMessage" value="Runtime" />
<string id="RundownPublisher.MethodTaskMessage" value="Method" />
<string id="RundownPublisher.LoaderTaskMessage" value="Loader" />
<string id="RundownPublisher.StackTaskMessage" value="ClrStack" />
<string id="RundownPublisher.PerfTrackTaskMessage" value="ClrPerfTrack" />
+ <string id="RundownPublisher.TieredCompilationTaskMessage" value="TieredCompilation" />
+
<string id="PrivatePublisher.GarbageCollectionTaskMessage" value="GC" />
<string id="PrivatePublisher.StartupTaskMessage" value="Startup"/>
<string id="PrivatePublisher.StackTaskMessage" value="ClrStack" />
@@ -6691,6 +6830,9 @@
<string id="RuntimePublisher.Method.GenericMapMessage" value="Generic" />
<string id="RuntimePublisher.Method.HasSharedGenericCodeMapMessage" value="HasSharedGenericCode" />
<string id="RuntimePublisher.Method.JittedMapMessage" value="Jitted" />
+ <string id="RuntimePublisher.Method.JitHelperMapMessage" value="JitHelper" />
+ <string id="RuntimePublisher.Method.ProfilerRejectedPrecompiledCodeMapMessage" value="ProfilerRejectedPrecompiledCode" />
+ <string id="RuntimePublisher.Method.ReadyToRunRejectedPrecompiledCodeMapMessage" value="ReadyToRunRejectedPrecompiledCode" />
<string id="RuntimePublisher.GCSegment.SmallObjectHeapMapMessage" value="SmallObjectHeap" />
<string id="RuntimePublisher.GCSegment.LargeObjectHeapMapMessage" value="LargeObjectHeap" />
<string id="RuntimePublisher.GCSegment.ReadOnlyHeapMapMessage" value="ReadOnlyHeap" />
@@ -6794,6 +6936,10 @@
<string id="RuntimePublisher.GCHandleKind.DependentMessage" value="Dependent" />
<string id="RuntimePublisher.GCHandleKind.AsyncPinnedMessage" value="AsyncPinned" />
<string id="RuntimePublisher.GCHandleKind.SizedRefMessage" value="SizedRef" />
+ <string id="RuntimePublisher.TieredCompilationSettingsFlags.NoneMapMessage" value="None" />
+ <string id="RuntimePublisher.TieredCompilationSettingsFlags.QuickJitMapMessage" value="QuickJit" />
+ <string id="RuntimePublisher.TieredCompilationSettingsFlags.QuickJitForLoopsMapMessage" value="QuickJitForLoops" />
+
<string id="RundownPublisher.AppDomain.ExecutableMapMessage" value="Executable" />
<string id="RundownPublisher.AppDomain.SharedMapMessage" value="Shared" />
<string id="RundownPublisher.Assembly.DomainNeutralMapMessage" value="DomainNeutral" />
@@ -6811,6 +6957,9 @@
<string id="RundownPublisher.Method.GenericMapMessage" value="Generic" />
<string id="RundownPublisher.Method.HasSharedGenericCodeMapMessage" value="HasSharedGenericCode" />
<string id="RundownPublisher.Method.JittedMapMessage" value="Jitted" />
+ <string id="RundownPublisher.Method.JitHelperMapMessage" value="JitHelper" />
+ <string id="RundownPublisher.Method.ProfilerRejectedPrecompiledCodeMapMessage" value="ProfilerRejectedPrecompiledCode" />
+ <string id="RundownPublisher.Method.ReadyToRunRejectedPrecompiledCodeMapMessage" value="ReadyToRunRejectedPrecompiledCode" />
<string id="RundownPublisher.StartupMode.ManagedExeMapMessage" value="ManagedExe" />
<string id="RundownPublisher.StartupMode.HostedCLRMapMessage" value="HostedClr" />
<string id="RundownPublisher.StartupMode.IjwDllMapMessage" value="IjwDll" />
@@ -6837,6 +6986,10 @@
<string id="RundownPublisher.ThreadFlags.GCSpecial" value="GCSpecial"/>
<string id="RundownPublisher.ThreadFlags.Finalizer" value="Finalizer"/>
<string id="RundownPublisher.ThreadFlags.ThreadPoolWorker" value="ThreadPoolWorker"/>
+ <string id="RundownPublisher.TieredCompilationSettingsFlags.NoneMapMessage" value="None" />
+ <string id="RundownPublisher.TieredCompilationSettingsFlags.QuickJitMapMessage" value="QuickJit" />
+ <string id="RundownPublisher.TieredCompilationSettingsFlags.QuickJitForLoopsMapMessage" value="QuickJitForLoops" />
+
<string id="PrivatePublisher.ModuleRangeSectionTypeMap.ModuleSection" value="ModuleSection"/>
<string id="PrivatePublisher.ModuleRangeSectionTypeMap.EETableSection" value="EETableSection"/>
<string id="PrivatePublisher.ModuleRangeSectionTypeMap.WriteDataSection" value="WriteDataSection"/>
@@ -6931,6 +7084,8 @@
<string id="RuntimePublisher.MonitoringKeywordMessage" value="Monitoring" />
<string id="RuntimePublisher.CodeSymbolsKeywordMessage" value="CodeSymbols" />
<string id="RuntimePublisher.EventSourceKeywordMessage" value="EventSource" />
+ <string id="RuntimePublisher.CompilationKeywordMessage" value="Compilation" />
+
<string id="RundownPublisher.LoaderKeywordMessage" value="Loader" />
<string id="RundownPublisher.JitKeywordMessage" value="Jit" />
<string id="RundownPublisher.JittedMethodILToNativeMapRundownKeywordMessage" value="JittedMethodILToNativeMapRundown" />
@@ -6942,6 +7097,8 @@
<string id="RundownPublisher.OverrideAndSuppressNGenEventsRundownKeywordMessage" value="OverrideAndSuppressNGenEvents" />
<string id="RundownPublisher.PerfTrackRundownKeywordMessage" value="PerfTrack" />
<string id="RundownPublisher.StackKeywordMessage" value="Stack" />
+ <string id="RundownPublisher.CompilationKeywordMessage" value="Compilation" />
+
<string id="PrivatePublisher.GCPrivateKeywordMessage" value="GC" />
<string id="PrivatePublisher.StartupKeywordMessage" value="Startup" />
<string id="PrivatePublisher.StackKeywordMessage" value="Stack" />
@@ -7051,6 +7208,10 @@
<string id="RuntimePublisher.DebugIPCEventEndOpcodeMessage" value="IPCEventEnd" />
<string id="RuntimePublisher.DebugExceptionProcessingStartOpcodeMessage" value="ExceptionProcessingStart" />
<string id="RuntimePublisher.DebugExceptionProcessingEndOpcodeMessage" value="ExceptionProcessingEnd" />
+
+ <string id="RuntimePublisher.TieredCompilationSettingsOpcodeMessage" value="Settings" />
+ <string id="RuntimePublisher.TieredCompilationPauseOpcodeMessage" value="Pause" />
+ <string id="RuntimePublisher.TieredCompilationResumeOpcodeMessage" value="Resume" />
<string id="RundownPublisher.MethodDCStartOpcodeMessage" value="DCStart" />
<string id="RundownPublisher.MethodDCEndOpcodeMessage" value="DCStop" />
@@ -7074,9 +7235,10 @@
<string id="RundownPublisher.CLRStackWalkOpcodeMessage" value="Walk" />
<string id="RundownPublisher.ModuleRangeDCStartOpcodeMessage" value="ModuleRangeDCStart" />
<string id="RundownPublisher.ModuleRangeDCEndOpcodeMessage" value="ModuleRangeDCEnd" />
+ <string id="RundownPublisher.TieredCompilationSettingsDCStartOpcodeMessage" value="SettingsDCStart" />
+
<string id="PrivatePublisher.FailFastOpcodeMessage" value="FailFast" />
-
<string id="PrivatePublisher.GCDecisionOpcodeMessage" value="Decision" />
<string id="PrivatePublisher.GCSettingsOpcodeMessage" value="Settings" />
<string id="PrivatePublisher.GCOptimizedOpcodeMessage" value="Optimized" />
diff --git a/src/vm/ClrEtwAllMeta.lst b/src/vm/ClrEtwAllMeta.lst
index ba4f892fcf..39737798e3 100644
--- a/src/vm/ClrEtwAllMeta.lst
+++ b/src/vm/ClrEtwAllMeta.lst
@@ -297,6 +297,20 @@ nomac:CLRAuthenticodeVerification:::AuthenticodeVerificationStop_V1
####################
nostack:CLRRuntimeInformation:::RuntimeInformationStart
+###########################
+# Tiered compilation events
+###########################
+nomac:TieredCompilation:::TieredCompilationSettings
+nostack:TieredCompilation:::TieredCompilationSettings
+nomac:TieredCompilation:::TieredCompilationPause
+nostack:TieredCompilation:::TieredCompilationPause
+nomac:TieredCompilation:::TieredCompilationResume
+nostack:TieredCompilation:::TieredCompilationResume
+nomac:TieredCompilation:::TieredCompilationBackgroundJitStart
+nostack:TieredCompilation:::TieredCompilationBackgroundJitStart
+nomac:TieredCompilation:::TieredCompilationBackgroundJitStop
+nostack:TieredCompilation:::TieredCompilationBackgroundJitStop
+
##################################
# Events from the rundown provider
##################################
@@ -375,6 +389,12 @@ nostack:CLRPerfTrack:::ModuleRangeDCEnd
####################
nomac:CLRRuntimeInformationRundown:::RuntimeInformationDCStart
+###########################
+# Tiered compilation events
+###########################
+nomac:TieredCompilationRundown:::TieredCompilationSettingsDCStart
+nostack:TieredCompilationRundown:::TieredCompilationSettingsDCStart
+
##################################
# Events from the private provider
##################################
diff --git a/src/vm/callcounter.cpp b/src/vm/callcounter.cpp
index 6b94f7303e..ecfd880a68 100644
--- a/src/vm/callcounter.cpp
+++ b/src/vm/callcounter.cpp
@@ -91,6 +91,18 @@ void CallCounter::DisableCallCounting(MethodDesc* pMethodDesc)
m_methodToCallCount.Add(CallCounterEntry::CreateWithCallCountingDisabled(pMethodDesc));
}
+bool CallCounter::WasCalledAtMostOnce(MethodDesc* pMethodDesc)
+{
+ WRAPPER_NO_CONTRACT;
+
+ SpinLockHolder holder(&m_lock);
+
+ const CallCounterEntry *existingEntry = m_methodToCallCount.LookupPtr(pMethodDesc);
+ return
+ existingEntry == nullptr ||
+ existingEntry->callCountLimit >= (int)g_pConfig->TieredCompilation_CallCountThreshold() - 1;
+}
+
// This is called by the prestub each time the method is invoked in a particular
// AppDomain (the AppDomain for which AppDomain.GetCallCounter() == this). These
// calls continue until we backpatch the prestub to avoid future calls. This allows
diff --git a/src/vm/callcounter.h b/src/vm/callcounter.h
index 6bcefd3845..4646a40de6 100644
--- a/src/vm/callcounter.h
+++ b/src/vm/callcounter.h
@@ -94,6 +94,7 @@ public:
bool IsCallCountingEnabled(PTR_MethodDesc pMethodDesc);
#ifndef DACCESS_COMPILE
void DisableCallCounting(MethodDesc* pMethodDesc);
+ bool WasCalledAtMostOnce(MethodDesc* pMethodDesc);
#endif
void OnMethodCalled(MethodDesc* pMethodDesc, TieredCompilationManager *pTieredCompilationManager, BOOL* shouldStopCountingCallsRef, BOOL* wasPromotedToNextTierRef);
diff --git a/src/vm/codeversion.h b/src/vm/codeversion.h
index 84673c6406..23fb908575 100644
--- a/src/vm/codeversion.h
+++ b/src/vm/codeversion.h
@@ -67,7 +67,8 @@ public:
enum OptimizationTier
{
OptimizationTier0,
- OptimizationTier1
+ OptimizationTier1,
+ OptimizationTierOptimized, // may do less optimizations than tier 1
};
#ifdef FEATURE_TIERED_COMPILATION
OptimizationTier GetOptimizationTier() const;
diff --git a/src/vm/common.h b/src/vm/common.h
index c273b52a3d..18044b5b13 100644
--- a/src/vm/common.h
+++ b/src/vm/common.h
@@ -467,6 +467,7 @@ extern DummyGlobalContract ___contract;
#include "WinRTRedirector.h"
#include "winrtredirector.inl"
#endif // FEATURE_COMINTEROP
+#include "eventtrace.inl"
#if defined(COMMON_TURNED_FPO_ON)
#pragma optimize("", on) // Go back to command line default optimizations
diff --git a/src/vm/eeconfig.cpp b/src/vm/eeconfig.cpp
index 83c1f1e904..83b579bb75 100644
--- a/src/vm/eeconfig.cpp
+++ b/src/vm/eeconfig.cpp
@@ -1205,47 +1205,56 @@ fTrackDynamicMethodDebugInfo = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_
#if defined(FEATURE_TIERED_COMPILATION)
fTieredCompilation = Configuration::GetKnobBooleanValue(W("System.Runtime.TieredCompilation"), CLRConfig::EXTERNAL_TieredCompilation);
+ if (fTieredCompilation)
+ {
+ fTieredCompilation_QuickJit =
+ Configuration::GetKnobBooleanValue(
+ W("System.Runtime.TieredCompilation.QuickJit"),
+ CLRConfig::EXTERNAL_TC_QuickJit);
+ if (fTieredCompilation_QuickJit)
+ {
+ fTieredCompilation_QuickJitForLoops =
+ Configuration::GetKnobBooleanValue(
+ W("System.Runtime.TieredCompilation.QuickJitForLoops"),
+ CLRConfig::UNSUPPORTED_TC_QuickJitForLoops);
+ }
- fTieredCompilation_QuickJit =
- Configuration::GetKnobBooleanValue(
- W("System.Runtime.TieredCompilation.QuickJit"),
- CLRConfig::EXTERNAL_TC_QuickJit);
- fTieredCompilation_QuickJitForLoops =
- Configuration::GetKnobBooleanValue(
- W("System.Runtime.TieredCompilation.QuickJitForLoops"),
- CLRConfig::UNSUPPORTED_TC_QuickJitForLoops);
-
- fTieredCompilation_CallCounting = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TC_CallCounting) != 0;
+ fTieredCompilation_CallCounting = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TC_CallCounting) != 0;
- tieredCompilation_CallCountThreshold = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TC_CallCountThreshold);
- if (tieredCompilation_CallCountThreshold < 1)
- {
- tieredCompilation_CallCountThreshold = 1;
- }
- else if (tieredCompilation_CallCountThreshold > INT_MAX) // CallCounter uses 'int'
- {
- tieredCompilation_CallCountThreshold = INT_MAX;
- }
+ tieredCompilation_CallCountThreshold = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TC_CallCountThreshold);
+ if (tieredCompilation_CallCountThreshold < 1)
+ {
+ tieredCompilation_CallCountThreshold = 1;
+ }
+ else if (tieredCompilation_CallCountThreshold > INT_MAX) // CallCounter uses 'int'
+ {
+ tieredCompilation_CallCountThreshold = INT_MAX;
+ }
- tieredCompilation_CallCountingDelayMs = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TC_CallCountingDelayMs);
+ tieredCompilation_CallCountingDelayMs = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TC_CallCountingDelayMs);
#ifndef FEATURE_PAL
- bool hadSingleProcessorAtStartup = CPUGroupInfo::HadSingleProcessorAtStartup();
+ bool hadSingleProcessorAtStartup = CPUGroupInfo::HadSingleProcessorAtStartup();
#else // !FEATURE_PAL
- bool hadSingleProcessorAtStartup = g_SystemInfo.dwNumberOfProcessors == 1;
+ bool hadSingleProcessorAtStartup = g_SystemInfo.dwNumberOfProcessors == 1;
#endif // !FEATURE_PAL
-
- if (hadSingleProcessorAtStartup)
- {
- DWORD delayMultiplier = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TC_DelaySingleProcMultiplier);
- if (delayMultiplier > 1)
+ if (hadSingleProcessorAtStartup)
{
- DWORD newDelay = tieredCompilation_CallCountingDelayMs * delayMultiplier;
- if (newDelay / delayMultiplier == tieredCompilation_CallCountingDelayMs)
+ DWORD delayMultiplier = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TC_DelaySingleProcMultiplier);
+ if (delayMultiplier > 1)
{
- tieredCompilation_CallCountingDelayMs = newDelay;
+ DWORD newDelay = tieredCompilation_CallCountingDelayMs * delayMultiplier;
+ if (newDelay / delayMultiplier == tieredCompilation_CallCountingDelayMs)
+ {
+ tieredCompilation_CallCountingDelayMs = newDelay;
+ }
}
}
+
+ if (ETW::CompilationLog::TieredCompilation::Runtime::IsEnabled())
+ {
+ ETW::CompilationLog::TieredCompilation::Runtime::SendSettings();
+ }
}
#endif
diff --git a/src/vm/eventtrace.cpp b/src/vm/eventtrace.cpp
index e15709129b..cd110eca34 100644
--- a/src/vm/eventtrace.cpp
+++ b/src/vm/eventtrace.cpp
@@ -4464,6 +4464,11 @@ extern "C"
// Fire the runtime information event
ETW::InfoLog::RuntimeInformation(ETW::InfoLog::InfoStructs::Callback);
+ if (ETW::CompilationLog::TieredCompilation::Rundown::IsEnabled() && g_pConfig->TieredCompilation())
+ {
+ ETW::CompilationLog::TieredCompilation::Rundown::SendSettings();
+ }
+
// Start and End Method/Module Rundowns
// Used to fire events that we missed since we started the controller after the process started
// flags for immediate start rundown
@@ -5212,7 +5217,7 @@ VOID ETW::MethodLog::GetR2RGetEntryPoint(MethodDesc *pMethodDesc, PCODE pEntryPo
/*******************************************************/
/* This is called by the runtime when a method is jitted completely */
/*******************************************************/
-VOID ETW::MethodLog::MethodJitted(MethodDesc *pMethodDesc, SString *namespaceOrClassName, SString *methodName, SString *methodSignature, PCODE pNativeCodeStartAddress, ReJITID ilCodeId, NativeCodeVersionId nativeCodeId, BOOL bProfilerRejectedPrecompiledCode, BOOL bReadyToRunRejectedPrecompiledCode)
+VOID ETW::MethodLog::MethodJitted(MethodDesc *pMethodDesc, SString *namespaceOrClassName, SString *methodName, SString *methodSignature, PCODE pNativeCodeStartAddress, PrepareCodeConfig *pConfig)
{
CONTRACTL {
NOTHROW;
@@ -5225,7 +5230,7 @@ VOID ETW::MethodLog::MethodJitted(MethodDesc *pMethodDesc, SString *namespaceOrC
TRACE_LEVEL_INFORMATION,
CLR_JIT_KEYWORD))
{
- ETW::MethodLog::SendMethodEvent(pMethodDesc, ETW::EnumerationLog::EnumerationStructs::JitMethodLoad, TRUE, namespaceOrClassName, methodName, methodSignature, pNativeCodeStartAddress, nativeCodeId, bProfilerRejectedPrecompiledCode, bReadyToRunRejectedPrecompiledCode);
+ ETW::MethodLog::SendMethodEvent(pMethodDesc, ETW::EnumerationLog::EnumerationStructs::JitMethodLoad, TRUE, namespaceOrClassName, methodName, methodSignature, pNativeCodeStartAddress, pConfig);
}
if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
@@ -5240,7 +5245,7 @@ VOID ETW::MethodLog::MethodJitted(MethodDesc *pMethodDesc, SString *namespaceOrC
_ASSERTE(g_pDebugInterface != NULL);
g_pDebugInterface->InitializeLazyDataIfNecessary();
- ETW::MethodLog::SendMethodILToNativeMapEvent(pMethodDesc, ETW::EnumerationLog::EnumerationStructs::JitMethodILToNativeMap, pNativeCodeStartAddress, ilCodeId);
+ ETW::MethodLog::SendMethodILToNativeMapEvent(pMethodDesc, ETW::EnumerationLog::EnumerationStructs::JitMethodILToNativeMap, pNativeCodeStartAddress, pConfig->GetCodeVersion().GetILCodeVersionId());
}
} EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
@@ -6248,7 +6253,7 @@ VOID ETW::MethodLog::SendMethodJitStartEvent(MethodDesc *pMethodDesc, SString *n
/****************************************************************************/
/* This routine is used to send a method load/unload or rundown event */
/****************************************************************************/
-VOID ETW::MethodLog::SendMethodEvent(MethodDesc *pMethodDesc, DWORD dwEventOptions, BOOL bIsJit, SString *namespaceOrClassName, SString *methodName, SString *methodSignature, PCODE pNativeCodeStartAddress, NativeCodeVersionId nativeCodeId, BOOL bProfilerRejectedPrecompiledCode, BOOL bReadyToRunRejectedPrecompiledCode)
+VOID ETW::MethodLog::SendMethodEvent(MethodDesc *pMethodDesc, DWORD dwEventOptions, BOOL bIsJit, SString *namespaceOrClassName, SString *methodName, SString *methodSignature, PCODE pNativeCodeStartAddress, PrepareCodeConfig *pConfig)
{
CONTRACTL {
THROWS;
@@ -6317,13 +6322,77 @@ VOID ETW::MethodLog::SendMethodEvent(MethodDesc *pMethodDesc, DWORD dwEventOptio
if(pMethodDesc->GetMethodTable_NoLogging())
bIsGenericMethod = pMethodDesc->HasClassOrMethodInstantiation_NoLogging();
- ulMethodFlags = ((ulMethodFlags |
+ int jitOptimizationTier = -1;
+ NativeCodeVersionId nativeCodeId = 0;
+ ulMethodFlags = ulMethodFlags |
(bHasSharedGenericCode ? ETW::MethodLog::MethodStructs::SharedGenericCode : 0) |
(bIsGenericMethod ? ETW::MethodLog::MethodStructs::GenericMethod : 0) |
(bIsDynamicMethod ? ETW::MethodLog::MethodStructs::DynamicMethod : 0) |
- (bIsJit ? ETW::MethodLog::MethodStructs::JittedMethod : 0) |
- (bProfilerRejectedPrecompiledCode ? ETW::MethodLog::MethodStructs::ProfilerRejectedPrecompiledCode : 0) |
- (bReadyToRunRejectedPrecompiledCode ? ETW::MethodLog::MethodStructs::ReadyToRunRejectedPrecompiledCode : 0)));
+ (bIsJit ? ETW::MethodLog::MethodStructs::JittedMethod : 0);
+ if (pConfig != nullptr)
+ {
+ if (pConfig->ProfilerRejectedPrecompiledCode())
+ {
+ ulMethodFlags |= ETW::MethodLog::MethodStructs::ProfilerRejectedPrecompiledCode;
+ }
+ if (pConfig->ReadyToRunRejectedPrecompiledCode())
+ {
+ ulMethodFlags |= ETW::MethodLog::MethodStructs::ReadyToRunRejectedPrecompiledCode;
+ }
+
+ if (pConfig->JitSwitchedToMinOpt())
+ {
+ jitOptimizationTier = (int)JitOptimizationTier::MinOptJitted;
+ }
+#ifdef FEATURE_TIERED_COMPILATION
+ else if (pConfig->JitSwitchedToOptimized())
+ {
+ _ASSERTE(pMethodDesc->IsEligibleForTieredCompilation());
+ _ASSERTE(pConfig->GetCodeVersion().GetOptimizationTier() == NativeCodeVersion::OptimizationTierOptimized);
+ jitOptimizationTier = (int)JitOptimizationTier::Optimized;
+ }
+ else if (pMethodDesc->IsEligibleForTieredCompilation())
+ {
+ switch (pConfig->GetCodeVersion().GetOptimizationTier())
+ {
+ case NativeCodeVersion::OptimizationTier0:
+ jitOptimizationTier = (int)JitOptimizationTier::QuickJitted;
+ break;
+
+ case NativeCodeVersion::OptimizationTier1:
+ jitOptimizationTier = (int)JitOptimizationTier::OptimizedTier1;
+ break;
+
+ case NativeCodeVersion::OptimizationTierOptimized:
+ jitOptimizationTier = (int)JitOptimizationTier::Optimized;
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+#endif
+
+#ifdef FEATURE_CODE_VERSIONING
+ nativeCodeId = pConfig->GetCodeVersion().GetVersionId();
+#endif
+ }
+
+ if (jitOptimizationTier < 0)
+ {
+ if (pMethodDesc->IsJitOptimizationDisabled())
+ {
+ jitOptimizationTier = (int)JitOptimizationTier::MinOptJitted;
+ }
+ else
+ {
+ jitOptimizationTier = (int)JitOptimizationTier::Optimized;
+ }
+ }
+ static_assert_no_msg((unsigned int)JitOptimizationTier::Count - 1 <= MethodFlagsJitOptimizationTierLowMask);
+ _ASSERTE((unsigned int)jitOptimizationTier <= MethodFlagsJitOptimizationTierLowMask);
+ _ASSERTE(((ulMethodFlags >> MethodFlagsJitOptimizationTierShift) & MethodFlagsJitOptimizationTierLowMask) == 0);
+ ulMethodFlags |= (unsigned int)jitOptimizationTier << MethodFlagsJitOptimizationTierShift;
// Intentionally set the extent flags (cold vs. hot) only after all the other common
// flags (above) have been set.
@@ -6813,18 +6882,18 @@ VOID ETW::MethodLog::SendEventsForJitMethodsHelper(LoaderAllocator *pLoaderAlloc
PCODE codeStart = PINSTRToPCODE(heapIterator.GetMethodCode());
- // Get the IL and native code IDs. In some cases, such as collectible loader
+ // Get info relevant to the native code version. In some cases, such as collectible loader
// allocators, we don't support code versioning so we need to short circuit the call.
// This also allows our caller to avoid having to pre-enter the relevant locks.
// see code:#TableLockHolder
ReJITID ilCodeId = 0;
- NativeCodeVersionId nativeCodeId = 0;
+ NativeCodeVersion nativeCodeVersion;
#ifdef FEATURE_CODE_VERSIONING
if (fGetCodeIds)
{
CodeVersionManager *pCodeVersionManager = pMD->GetCodeVersionManager();
_ASSERTE(pCodeVersionManager->LockOwnedByCurrentThread());
- NativeCodeVersion nativeCodeVersion = pCodeVersionManager->GetNativeCodeVersion(pMD, codeStart);
+ nativeCodeVersion = pCodeVersionManager->GetNativeCodeVersion(pMD, codeStart);
if (nativeCodeVersion.IsNull())
{
// The code version manager hasn't been updated with the jitted code
@@ -6835,7 +6904,6 @@ VOID ETW::MethodLog::SendEventsForJitMethodsHelper(LoaderAllocator *pLoaderAlloc
}
else
{
- nativeCodeId = nativeCodeVersion.GetVersionId();
ilCodeId = nativeCodeVersion.GetILCodeVersionId();
}
}
@@ -6846,6 +6914,8 @@ VOID ETW::MethodLog::SendEventsForJitMethodsHelper(LoaderAllocator *pLoaderAlloc
continue;
}
+ PrepareCodeConfig config(!nativeCodeVersion.IsNull() ? nativeCodeVersion : NativeCodeVersion(pMD), FALSE, FALSE);
+
// When we're called to announce loads, then the methodload event itself must
// precede any supplemental events, so that the method load or method jitting
// event is the first event the profiler sees for that MethodID (and not, say,
@@ -6862,7 +6932,7 @@ VOID ETW::MethodLog::SendEventsForJitMethodsHelper(LoaderAllocator *pLoaderAlloc
NULL, // methodName
NULL, // methodSignature
codeStart,
- nativeCodeId);
+ &config);
}
}
@@ -6885,7 +6955,7 @@ VOID ETW::MethodLog::SendEventsForJitMethodsHelper(LoaderAllocator *pLoaderAlloc
NULL, // methodName
NULL, // methodSignature
codeStart,
- nativeCodeId);
+ &config);
}
}
}
@@ -7332,6 +7402,112 @@ VOID ETW::EnumerationLog::EnumerationHelper(Module *moduleFilter, BaseDomain *do
}
}
+void ETW::CompilationLog::TieredCompilation::GetSettings(UINT32 *flagsRef)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+ _ASSERTE(g_pConfig->TieredCompilation());
+ _ASSERTE(flagsRef != nullptr);
+
+ enum class Flags : UINT32
+ {
+ None = 0x0,
+ QuickJit = 0x1,
+ QuickJitForLoops = 0x2,
+ };
+
+ UINT32 flags = (UINT32)Flags::None;
+ if (g_pConfig->TieredCompilation_QuickJit())
+ {
+ flags |= (UINT32)Flags::QuickJit;
+ if (g_pConfig->TieredCompilation_QuickJitForLoops())
+ {
+ flags |= (UINT32)Flags::QuickJitForLoops;
+ }
+ }
+ *flagsRef = flags;
+}
+
+void ETW::CompilationLog::TieredCompilation::Runtime::SendSettings()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+ _ASSERTE(IsEnabled());
+ _ASSERTE(g_pConfig->TieredCompilation());
+
+ UINT32 flags;
+ GetSettings(&flags);
+
+ FireEtwTieredCompilationSettings(GetClrInstanceId(), flags);
+}
+
+void ETW::CompilationLog::TieredCompilation::Rundown::SendSettings()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+ _ASSERTE(IsEnabled());
+ _ASSERTE(g_pConfig->TieredCompilation());
+
+ UINT32 flags;
+ GetSettings(&flags);
+
+ FireEtwTieredCompilationSettingsDCStart(GetClrInstanceId(), flags);
+}
+
+void ETW::CompilationLog::TieredCompilation::Runtime::SendPause()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+ _ASSERTE(IsEnabled());
+ _ASSERTE(g_pConfig->TieredCompilation());
+
+ FireEtwTieredCompilationPause(GetClrInstanceId());
+}
+
+void ETW::CompilationLog::TieredCompilation::Runtime::SendResume(UINT32 newMethodCount)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+ _ASSERTE(IsEnabled());
+ _ASSERTE(g_pConfig->TieredCompilation());
+
+ FireEtwTieredCompilationResume(GetClrInstanceId(), newMethodCount);
+}
+
+void ETW::CompilationLog::TieredCompilation::Runtime::SendBackgroundJitStart(UINT32 pendingMethodCount)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+ _ASSERTE(IsEnabled());
+ _ASSERTE(g_pConfig->TieredCompilation());
+
+ FireEtwTieredCompilationBackgroundJitStart(GetClrInstanceId(), pendingMethodCount);
+}
+
+void ETW::CompilationLog::TieredCompilation::Runtime::SendBackgroundJitStop(UINT32 pendingMethodCount, UINT32 jittedMethodCount)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+ _ASSERTE(IsEnabled());
+ _ASSERTE(g_pConfig->TieredCompilation());
+
+ FireEtwTieredCompilationBackgroundJitStop(GetClrInstanceId(), pendingMethodCount, jittedMethodCount);
+}
+
#endif // !FEATURE_REDHAWK
#ifdef FEATURE_PERFTRACING
diff --git a/src/vm/eventtrace.inl b/src/vm/eventtrace.inl
new file mode 100644
index 0000000000..af5481b4de
--- /dev/null
+++ b/src/vm/eventtrace.inl
@@ -0,0 +1,49 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#pragma once
+
+#ifdef FEATURE_EVENT_TRACE
+
+FORCEINLINE bool ETW::CompilationLog::Runtime::IsEnabled()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ return
+ ETW_TRACING_CATEGORY_ENABLED(
+ MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_COMPILATION_KEYWORD);
+}
+
+FORCEINLINE bool ETW::CompilationLog::Rundown::IsEnabled()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ return
+ ETW_TRACING_CATEGORY_ENABLED(
+ MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_COMPILATION_RUNDOWN_KEYWORD);
+}
+
+FORCEINLINE bool ETW::CompilationLog::TieredCompilation::Runtime::IsEnabled()
+{
+ WRAPPER_NO_CONTRACT;
+ return CompilationLog::Runtime::IsEnabled();
+}
+
+FORCEINLINE bool ETW::CompilationLog::TieredCompilation::Rundown::IsEnabled()
+{
+ WRAPPER_NO_CONTRACT;
+ return CompilationLog::Rundown::IsEnabled();
+}
+
+#endif // FEATURE_EVENT_TRACE
diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
index cf177a3b4d..c38c53d60b 100644
--- a/src/vm/jitinterface.cpp
+++ b/src/vm/jitinterface.cpp
@@ -6916,13 +6916,27 @@ void CEEInfo::setMethodAttribs (
}
}
-#ifdef FEATURE_TIERED_COMPILATION
- if (attribs & CORINFO_FLG_SWITCHED_TO_TIER1)
+#ifndef CROSSGEN_COMPILE
+ if (attribs & (CORINFO_FLG_SWITCHED_TO_OPTIMIZED | CORINFO_FLG_SWITCHED_TO_MIN_OPT))
{
- _ASSERTE(ftn->IsEligibleForTieredCompilation());
- ftn->GetCallCounter()->DisableCallCounting(ftn);
- }
+ PrepareCodeConfig *config = GetThread()->GetCurrentPrepareCodeConfig();
+ if (config != nullptr)
+ {
+ if (attribs & CORINFO_FLG_SWITCHED_TO_MIN_OPT)
+ {
+ _ASSERTE(!ftn->IsJitOptimizationDisabled());
+ config->SetJitSwitchedToMinOpt();
+ }
+#ifdef FEATURE_TIERED_COMPILATION
+ else if (attribs & CORINFO_FLG_SWITCHED_TO_OPTIMIZED)
+ {
+ _ASSERTE(ftn->IsEligibleForTieredCompilation());
+ config->SetJitSwitchedToOptimized();
+ }
#endif
+ }
+ }
+#endif // !CROSSGEN_COMPILE
EE_TO_JIT_TRANSITION();
}
diff --git a/src/vm/method.cpp b/src/vm/method.cpp
index d8688e14a4..06cdc06c00 100644
--- a/src/vm/method.cpp
+++ b/src/vm/method.cpp
@@ -4802,12 +4802,12 @@ bool MethodDesc::DetermineAndSetIsEligibleForTieredCompilation()
// Functional requirement
CodeVersionManager::IsMethodSupported(this) &&
- // Policy - If quick JIT is disabled for the startup tier and the module is not ReadyToRun, the method would effectively
- // not be tiered currently, so make the method ineligible for tiering to avoid some unnecessary overhead
- (g_pConfig->TieredCompilation_QuickJit() || GetModule()->IsReadyToRun()) &&
+ // Policy - If QuickJit is disabled and the module does not have any pregenerated code, the method would effectively not
+ // be tiered currently, so make the method ineligible for tiering to avoid some unnecessary overhead
+ (g_pConfig->TieredCompilation_QuickJit() || GetModule()->HasNativeOrReadyToRunImage()) &&
- // Policy - Debugging works much better with unoptimized code
- !CORDisableJITOptimizations(GetModule()->GetDebuggerInfoBits()) &&
+ // Policy - Generating optimized code is not disabled
+ !IsJitOptimizationDisabled() &&
// Policy - Tiered compilation is not disabled by the profiler
!CORProfilerDisableTieredCompilation())
@@ -4821,6 +4821,24 @@ bool MethodDesc::DetermineAndSetIsEligibleForTieredCompilation()
return false;
}
+#endif // !DACCESS_COMPILE
+
+#ifndef CROSSGEN_COMPILE
+bool MethodDesc::IsJitOptimizationDisabled()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return
+ g_pConfig->JitMinOpts() ||
+#ifdef _DEBUG
+ g_pConfig->GenDebuggableCode() ||
+#endif
+ CORDisableJITOptimizations(GetModule()->GetDebuggerInfoBits()) ||
+ (!IsNoMetadata() && IsMiNoOptimization(GetImplAttrs()));
+}
+#endif
+
+#ifndef DACCESS_COMPILE
#ifndef CROSSGEN_COMPILE
void MethodDesc::RecordAndBackpatchEntryPointSlot(
diff --git a/src/vm/method.hpp b/src/vm/method.hpp
index 66aacb94d2..09f57cd6f4 100644
--- a/src/vm/method.hpp
+++ b/src/vm/method.hpp
@@ -1253,6 +1253,8 @@ public:
// can optimize its performance? Eligibility is invariant for the lifetime of a method.
bool DetermineAndSetIsEligibleForTieredCompilation();
+ bool IsJitOptimizationDisabled();
+
private:
// This function is not intended to be called in most places, and is named as such to discourage calling it accidentally
bool Helper_IsEligibleForVersioningWithVtableSlotBackpatch()
@@ -2065,6 +2067,56 @@ public:
BOOL ReadyToRunRejectedPrecompiledCode();
void SetProfilerRejectedPrecompiledCode();
void SetReadyToRunRejectedPrecompiledCode();
+
+#ifndef CROSSGEN_COMPILE
+ bool JitSwitchedToMinOpt() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_jitSwitchedToMinOpt;
+ }
+
+ void SetJitSwitchedToMinOpt()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef FEATURE_TIERED_COMPILATION
+ m_jitSwitchedToOptimized = false;
+#endif
+ m_jitSwitchedToMinOpt = true;
+ }
+
+#ifdef FEATURE_TIERED_COMPILATION
+ bool JitSwitchedToOptimized() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_jitSwitchedToOptimized;
+ }
+
+ void SetJitSwitchedToOptimized()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (!m_jitSwitchedToMinOpt)
+ {
+ m_jitSwitchedToOptimized = true;
+ }
+ }
+#endif
+
+ PrepareCodeConfig *GetNextInSameThread() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_nextInSameThread;
+ }
+
+ void SetNextInSameThread(PrepareCodeConfig *config)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(config == nullptr || m_nextInSameThread == nullptr);
+
+ m_nextInSameThread = config;
+ }
+#endif // !CROSSGEN_COMPILE
protected:
MethodDesc* m_pMethodDesc;
@@ -2073,6 +2125,15 @@ protected:
BOOL m_mayUsePrecompiledCode;
BOOL m_ProfilerRejectedPrecompiledCode;
BOOL m_ReadyToRunRejectedPrecompiledCode;
+
+#ifndef CROSSGEN_COMPILE
+private:
+ bool m_jitSwitchedToMinOpt; // when it wasn't requested
+#ifdef FEATURE_TIERED_COMPILATION
+ bool m_jitSwitchedToOptimized; // when a different tier was requested
+#endif
+ PrepareCodeConfig *m_nextInSameThread;
+#endif // !CROSSGEN_COMPILE
};
#ifdef FEATURE_CODE_VERSIONING
diff --git a/src/vm/methodtablebuilder.cpp b/src/vm/methodtablebuilder.cpp
index 0bafe59f8a..e1075cb035 100644
--- a/src/vm/methodtablebuilder.cpp
+++ b/src/vm/methodtablebuilder.cpp
@@ -6957,9 +6957,9 @@ MethodTableBuilder::NeedsNativeCodeSlot(bmtMDMethod * pMDMethod)
// Keep in-sync with MethodDesc::DetermineAndSetIsEligibleForTieredCompilation()
if (g_pConfig->TieredCompilation() &&
- // Policy - If QuickJit is disabled and the module is not ReadyToRun, the method would be ineligible for tiering
- // currently to avoid some unnecessary overhead
- (g_pConfig->TieredCompilation_QuickJit() || GetModule()->IsReadyToRun()) &&
+ // Policy - If QuickJit is disabled and the module does not have any pregenerated code, the method would be ineligible
+ // for tiering currently to avoid some unnecessary overhead
+ (g_pConfig->TieredCompilation_QuickJit() || GetModule()->HasNativeOrReadyToRunImage()) &&
(pMDMethod->GetMethodType() == METHOD_TYPE_NORMAL || pMDMethod->GetMethodType() == METHOD_TYPE_INSTANTIATED))
{
diff --git a/src/vm/prestub.cpp b/src/vm/prestub.cpp
index 0e45b96cad..056fdb7a01 100644
--- a/src/vm/prestub.cpp
+++ b/src/vm/prestub.cpp
@@ -377,7 +377,7 @@ PCODE MethodDesc::PrepareILBasedCode(PrepareCodeConfig* pConfig)
CallCounter::IsEligibleForCallCounting(this))
{
GetCallCounter()->DisableCallCounting(this);
- pConfig->GetCodeVersion().SetOptimizationTier(NativeCodeVersion::OptimizationTier1);
+ pConfig->GetCodeVersion().SetOptimizationTier(NativeCodeVersion::OptimizationTierOptimized);
}
#endif
@@ -811,10 +811,7 @@ PCODE MethodDesc::JitCompileCodeLockedEventWrapper(PrepareCodeConfig* pConfig, J
&methodName,
&methodSignature,
pCode,
- pConfig->GetCodeVersion().GetILCodeVersionId(),
- pConfig->GetCodeVersion().GetVersionId(),
- pConfig->ProfilerRejectedPrecompiledCode(),
- pConfig->ReadyToRunRejectedPrecompiledCode());
+ pConfig);
}
}
@@ -915,6 +912,10 @@ PCODE MethodDesc::JitCompileCodeLocked(PrepareCodeConfig* pConfig, JitListLockEn
PCODE pOtherCode = NULL;
EX_TRY
{
+#ifndef CROSSGEN_COMPILE
+ Thread::CurrentPrepareCodeConfigHolder threadPrepareCodeConfigHolder(GetThread(), pConfig);
+#endif
+
pCode = UnsafeJitFunction(pConfig->GetCodeVersion(), pilHeader, *pFlags, pSizeOfCode);
}
EX_CATCH
@@ -940,15 +941,15 @@ PCODE MethodDesc::JitCompileCodeLocked(PrepareCodeConfig* pConfig, JitListLockEn
}
EX_END_CATCH(RethrowTerminalExceptions)
- if (pOtherCode != NULL)
- {
- // Somebody finished jitting recursively while we were jitting the method.
- // Just use their method & leak the one we finished. (Normally we hope
- // not to finish our JIT in this case, as we will abort early if we notice
- // a reentrant jit has occurred. But we may not catch every place so we
- // do a definitive final check here.
- return pOtherCode;
- }
+ if (pOtherCode != NULL)
+ {
+ // Somebody finished jitting recursively while we were jitting the method.
+ // Just use their method & leak the one we finished. (Normally we hope
+ // not to finish our JIT in this case, as we will abort early if we notice
+ // a reentrant jit has occurred. But we may not catch every place so we
+ // do a definitive final check here.
+ return pOtherCode;
+ }
_ASSERTE(pCode != NULL);
@@ -992,17 +993,17 @@ PCODE MethodDesc::JitCompileCodeLocked(PrepareCodeConfig* pConfig, JitListLockEn
}
#ifdef FEATURE_TIERED_COMPILATION
- if (pFlags->IsSet(CORJIT_FLAGS::CORJIT_FLAG_TIER0))
+ if (pConfig->JitSwitchedToOptimized())
{
+ _ASSERTE(pFlags->IsSet(CORJIT_FLAGS::CORJIT_FLAG_TIER0));
+
MethodDesc *methodDesc = pConfig->GetMethodDesc();
_ASSERTE(methodDesc->IsEligibleForTieredCompilation());
- // Update the tier in the code version. The JIT may have decided to switch from tier 0 to tier 1, in which case call
- // counting would have been disabled for the method.
- if (!methodDesc->GetCallCounter()->IsCallCountingEnabled(methodDesc))
- {
- pConfig->GetCodeVersion().SetOptimizationTier(NativeCodeVersion::OptimizationTier1);
- }
+ // Update the tier in the code version. The JIT may have decided to switch from tier 0 to optimized, in which case call
+ // counting would have to be disabled for the method.
+ methodDesc->GetCallCounter()->DisableCallCounting(methodDesc);
+ pConfig->GetCodeVersion().SetOptimizationTier(NativeCodeVersion::OptimizationTierOptimized);
}
#endif
@@ -1026,7 +1027,12 @@ PrepareCodeConfig::PrepareCodeConfig(NativeCodeVersion codeVersion, BOOL needsMu
m_needsMulticoreJitNotification(needsMulticoreJitNotification),
m_mayUsePrecompiledCode(mayUsePrecompiledCode),
m_ProfilerRejectedPrecompiledCode(FALSE),
- m_ReadyToRunRejectedPrecompiledCode(FALSE)
+ m_ReadyToRunRejectedPrecompiledCode(FALSE),
+ m_jitSwitchedToMinOpt(false),
+#ifdef FEATURE_TIERED_COMPILATION
+ m_jitSwitchedToOptimized(false),
+#endif
+ m_nextInSameThread(nullptr)
{}
MethodDesc* PrepareCodeConfig::GetMethodDesc()
diff --git a/src/vm/threads.cpp b/src/vm/threads.cpp
index 4dab2896fd..7632bda555 100644
--- a/src/vm/threads.cpp
+++ b/src/vm/threads.cpp
@@ -1549,6 +1549,8 @@ Thread::Thread()
#endif // FEATURE_PERFTRACING
m_HijackReturnKind = RT_Illegal;
m_DeserializationTracker = NULL;
+
+ m_currentPrepareCodeConfig = nullptr;
}
//--------------------------------------------------------------------
diff --git a/src/vm/threads.h b/src/vm/threads.h
index 633cb36bfc..7dac8017de 100644
--- a/src/vm/threads.h
+++ b/src/vm/threads.h
@@ -167,6 +167,7 @@ enum BinderMethodID : int;
class CRWLock;
struct LockEntry;
class PendingTypeLoadHolder;
+class PrepareCodeConfig;
struct ThreadLocalBlock;
typedef DPTR(struct ThreadLocalBlock) PTR_ThreadLocalBlock;
@@ -5008,6 +5009,32 @@ private:
public:
static uint64_t dead_threads_non_alloc_bytes;
+
+#ifndef DACCESS_COMPILE
+public:
+ class CurrentPrepareCodeConfigHolder
+ {
+ private:
+ Thread *const m_thread;
+#ifdef _DEBUG
+ PrepareCodeConfig *const m_config;
+#endif
+
+ public:
+ CurrentPrepareCodeConfigHolder(Thread *thread, PrepareCodeConfig *config);
+ ~CurrentPrepareCodeConfigHolder();
+ };
+
+public:
+ PrepareCodeConfig *GetCurrentPrepareCodeConfig() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_currentPrepareCodeConfig;
+ }
+#endif // !DACCESS_COMPILE
+
+private:
+ PrepareCodeConfig *m_currentPrepareCodeConfig;
};
// End of class Thread
diff --git a/src/vm/threads.inl b/src/vm/threads.inl
index 2e81b9e967..c63a916ef8 100644
--- a/src/vm/threads.inl
+++ b/src/vm/threads.inl
@@ -183,4 +183,37 @@ inline void Thread::SetGCSpecial(bool fGCSpecial)
m_fGCSpecial = fGCSpecial;
}
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+
+inline Thread::CurrentPrepareCodeConfigHolder::CurrentPrepareCodeConfigHolder(Thread *thread, PrepareCodeConfig *config)
+ : m_thread(thread)
+#ifdef _DEBUG
+ , m_config(config)
+#endif
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(thread != nullptr);
+ _ASSERTE(thread == GetThread());
+ _ASSERTE(config != nullptr);
+
+ PrepareCodeConfig *previousConfig = thread->m_currentPrepareCodeConfig;
+ if (previousConfig != nullptr)
+ {
+ config->SetNextInSameThread(previousConfig);
+ }
+ thread->m_currentPrepareCodeConfig = config;
+}
+
+inline Thread::CurrentPrepareCodeConfigHolder::~CurrentPrepareCodeConfigHolder()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ PrepareCodeConfig *config = m_thread->m_currentPrepareCodeConfig;
+ _ASSERTE(config == m_config);
+ m_thread->m_currentPrepareCodeConfig = config->GetNextInSameThread();
+ config->SetNextInSameThread(nullptr);
+}
+
+#endif // !DACCESS_COMPILE && !CROSSGEN_COMPILE
+
#endif
diff --git a/src/vm/tieredcompilation.cpp b/src/vm/tieredcompilation.cpp
index d2df5a270d..d8f90e57cd 100644
--- a/src/vm/tieredcompilation.cpp
+++ b/src/vm/tieredcompilation.cpp
@@ -63,9 +63,9 @@
// Called at AppDomain construction
TieredCompilationManager::TieredCompilationManager() :
m_lock(CrstTieredCompilation),
+ m_countOfMethodsToOptimize(0),
m_isAppDomainShuttingDown(FALSE),
m_countOptimizationThreadsRunning(0),
- m_optimizationQuantumMs(50),
m_methodsPendingCountingForTier1(nullptr),
m_tieringDelayTimerHandle(nullptr),
m_tier1CallCountingCandidateMethodRecentlyRecorded(false)
@@ -102,8 +102,7 @@ NativeCodeVersion::OptimizationTier TieredCompilationManager::GetInitialOptimiza
if (pMethodDesc->RequestedAggressiveOptimization())
{
- // Methods flagged with MethodImplOptions.AggressiveOptimization begin at tier 1, as a workaround to cold methods with
- // hot loops performing poorly (https://github.com/dotnet/coreclr/issues/19751)
+ // Methods flagged with MethodImplOptions.AggressiveOptimization start with and stay at tier 1
return NativeCodeVersion::OptimizationTier1;
}
@@ -115,9 +114,9 @@ NativeCodeVersion::OptimizationTier TieredCompilationManager::GetInitialOptimiza
if (!pMethodDesc->GetCallCounter()->IsCallCountingEnabled(pMethodDesc))
{
- // Tier 0 call counting may have been disabled based on information about precompiled code or for other reasons, the
- // intention is to begin at tier 1
- return NativeCodeVersion::OptimizationTier1;
+ // Tier 0 call counting may have been disabled for several reasons, the intention is to start with and stay at an
+ // optimized tier
+ return NativeCodeVersion::OptimizationTierOptimized;
}
#endif
@@ -240,7 +239,9 @@ void TieredCompilationManager::AsyncPromoteMethodToTier1(MethodDesc* pMethodDesc
NativeCodeVersionCollection nativeVersions = ilVersion.GetNativeCodeVersions(pMethodDesc);
for (NativeCodeVersionIterator cur = nativeVersions.Begin(), end = nativeVersions.End(); cur != end; cur++)
{
- if (cur->GetOptimizationTier() == NativeCodeVersion::OptimizationTier1)
+ NativeCodeVersion::OptimizationTier optimizationTier = cur->GetOptimizationTier();
+ if (optimizationTier == NativeCodeVersion::OptimizationTier1 ||
+ optimizationTier == NativeCodeVersion::OptimizationTierOptimized)
{
// we've already promoted
LOG((LF_TIEREDCOMPILATION, LL_INFO100000, "TieredCompilationManager::AsyncPromoteMethodToTier1 Method=0x%pM (%s::%s) ignoring already promoted method\n",
@@ -275,6 +276,7 @@ void TieredCompilationManager::AsyncPromoteMethodToTier1(MethodDesc* pMethodDesc
if (pMethodListItem != NULL)
{
m_methodsToOptimize.InsertTail(pMethodListItem);
+ ++m_countOfMethodsToOptimize;
}
LOG((LF_TIEREDCOMPILATION, LL_INFO10000, "TieredCompilationManager::AsyncPromoteMethodToTier1 Method=0x%pM (%s::%s), code version id=0x%x queued\n",
@@ -384,6 +386,10 @@ bool TieredCompilationManager::TryInitiateTieringDelay()
}
timerContextHolder.SuppressRelease(); // the timer context is automatically deleted by the timer infrastructure
+ if (ETW::CompilationLog::TieredCompilation::Runtime::IsEnabled())
+ {
+ ETW::CompilationLog::TieredCompilation::Runtime::SendPause();
+ }
return true;
}
@@ -479,9 +485,25 @@ void TieredCompilationManager::TieringDelayTimerCallbackWorker()
optimizeMethods = IncrementWorkerThreadCountIfNeeded();
}
- // Install call counters
MethodDesc** methods = methodsPendingCountingForTier1->GetElements();
COUNT_T methodCount = methodsPendingCountingForTier1->GetCount();
+
+ if (ETW::CompilationLog::TieredCompilation::Runtime::IsEnabled())
+ {
+ // TODO: Avoid scanning the list in the future
+ UINT32 newMethodCount = 0;
+ for (COUNT_T i = 0; i < methodCount; ++i)
+ {
+ MethodDesc *methodDesc = methods[i];
+ if (methodDesc->GetCallCounter()->WasCalledAtMostOnce(methodDesc))
+ {
+ ++newMethodCount;
+ }
+ }
+ ETW::CompilationLog::TieredCompilation::Runtime::SendResume(newMethodCount);
+ }
+
+ // Install call counters
for (COUNT_T i = 0; i < methodCount; ++i)
{
ResumeCountingCalls(methods[i]);
@@ -592,16 +614,23 @@ void TieredCompilationManager::OptimizeMethodsCallback()
// on a background thread. Each such method will be jitted with code
// optimizations enabled and then installed as the active implementation
// of the method entrypoint.
-//
-// We need to be carefuly not to work for too long in a single invocation
-// of this method or we could starve the threadpool and force
-// it to create unnecessary additional threads.
void TieredCompilationManager::OptimizeMethods()
{
WRAPPER_NO_CONTRACT;
_ASSERTE(DebugGetWorkerThreadCount() != 0);
- ULONGLONG startTickCount = CLRGetTickCount64();
+ // We need to be careful not to work for too long in a single invocation of this method or we could starve the thread pool
+ // and force it to create unnecessary additional threads. We will JIT for a minimum of this quantum, then schedule another
+ // work item to the thread pool and return this thread back to the pool.
+ const DWORD OptimizationQuantumMs = 50;
+
+ if (ETW::CompilationLog::TieredCompilation::Runtime::IsEnabled())
+ {
+ ETW::CompilationLog::TieredCompilation::Runtime::SendBackgroundJitStart(m_countOfMethodsToOptimize);
+ }
+
+ UINT32 jittedMethodCount = 0;
+ DWORD startTickCount = GetTickCount();
NativeCodeVersion nativeCodeVersion;
EX_TRY
{
@@ -624,13 +653,15 @@ void TieredCompilationManager::OptimizeMethods()
break;
}
}
+
OptimizeMethod(nativeCodeVersion);
+ ++jittedMethodCount;
// If we have been running for too long return the thread to the threadpool and queue another event
// This gives the threadpool a chance to service other requests on this thread before returning to
// this work.
- ULONGLONG currentTickCount = CLRGetTickCount64();
- if (currentTickCount >= startTickCount + m_optimizationQuantumMs)
+ DWORD currentTickCount = GetTickCount();
+ if (currentTickCount - startTickCount >= OptimizationQuantumMs)
{
if (!TryAsyncOptimizeMethods())
{
@@ -652,6 +683,11 @@ void TieredCompilationManager::OptimizeMethods()
GET_EXCEPTION()->GetHR(), nativeCodeVersion.GetMethodDesc());
}
EX_END_CATCH(RethrowTerminalExceptions);
+
+ if (ETW::CompilationLog::TieredCompilation::Runtime::IsEnabled())
+ {
+ ETW::CompilationLog::TieredCompilation::Runtime::SendBackgroundJitStop(m_countOfMethodsToOptimize, jittedMethodCount);
+ }
}
// Jit compiles and installs new optimized code for a method.
@@ -760,6 +796,7 @@ NativeCodeVersion TieredCompilationManager::GetNextMethodToOptimize()
{
NativeCodeVersion nativeCodeVersion = pElem->GetValue();
delete pElem;
+ --m_countOfMethodsToOptimize;
return nativeCodeVersion;
}
return NativeCodeVersion();
@@ -815,17 +852,25 @@ CORJIT_FLAGS TieredCompilationManager::GetJitFlags(NativeCodeVersion nativeCodeV
#endif
return flags;
}
-
- if (nativeCodeVersion.GetOptimizationTier() == NativeCodeVersion::OptimizationTier0)
- {
- flags.Set(CORJIT_FLAGS::CORJIT_FLAG_TIER0);
- }
- else
+
+ switch (nativeCodeVersion.GetOptimizationTier())
{
- flags.Set(CORJIT_FLAGS::CORJIT_FLAG_TIER1);
+ case NativeCodeVersion::OptimizationTier0:
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_TIER0);
+ break;
+
+ case NativeCodeVersion::OptimizationTier1:
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_TIER1);
+ // fall through
+
+ case NativeCodeVersion::OptimizationTierOptimized:
#ifdef FEATURE_INTERPRETER
- flags.Set(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE);
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE);
#endif
+ break;
+
+ default:
+ UNREACHABLE();
}
return flags;
}
diff --git a/src/vm/tieredcompilation.h b/src/vm/tieredcompilation.h
index 6cc21a9580..5012175a9f 100644
--- a/src/vm/tieredcompilation.h
+++ b/src/vm/tieredcompilation.h
@@ -65,9 +65,9 @@ private:
Crst m_lock;
SList<SListElem<NativeCodeVersion>> m_methodsToOptimize;
+ UINT32 m_countOfMethodsToOptimize;
BOOL m_isAppDomainShuttingDown;
DWORD m_countOptimizationThreadsRunning;
- DWORD m_optimizationQuantumMs;
SArray<MethodDesc*>* m_methodsPendingCountingForTier1;
HANDLE m_tieringDelayTimerHandle;
bool m_tier1CallCountingCandidateMethodRecentlyRecorded;