path: root/src/vm/prestub.cpp
author     Koundinya Veluri <kouvel@users.noreply.github.com>   2019-06-10 23:27:02 -0700
committer  GitHub <noreply@github.com>                          2019-06-10 23:27:02 -0700
commit     d5906ecce405c8ff9476a1ea3d9a297bf5d9991c (patch)
tree       7d9758b6426e190490f6cd729f92c51f4c692c08 /src/vm/prestub.cpp
parent     a6292a6573e1f7d6e218293773e443280683cd83 (diff)
Add optimization tiers to the Linux perf maps for perfcollect (#24967)
Fixes https://github.com/dotnet/coreclr/issues/23222:
- Although we have method JIT events from `lttng` with the code address and optimization tier, samples can only be associated with those events by matching the time range during which a module is loaded against the times of samples, and the event times from `lttng` would not necessarily correspond with the times of samples taken by `perf`. Module unloads also do not currently appear to be taken into account, and would need to be for that approach to work.
- Updated to include the optimization tier in the perf map entry for each jitted or R2R method code address
- Refactored the common code between eventtrace and perfmap for determining JIT tiers
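For reference, a Linux perf map is a plain-text file (/tmp/perf-<pid>.map) in which each line maps a jitted code range to a symbol name in the form "START SIZE symbolname". The sketch below is not the runtime's PerfMap writer; it only illustrates the kind of line this change aims to produce, with the optimization tier string appended to the method name, and the exact "[Tier]" suffix placement is an assumption for illustration.

// Minimal sketch, not the runtime's PerfMap code: compose one perf map line,
// "<start-address> <code-size> <symbol>", with the optimization tier appended
// to the symbol name. The "[QuickJitted]" suffix format is an assumption.
#include <cstdio>
#include <cstdint>

static void WritePerfMapLine(std::FILE *file, std::uintptr_t codeStart, std::size_t codeSize,
                             const char *methodName, const char *tierName)
{
    // Example output: "7f3a12340000 1a0 MyNamespace.MyClass::MyMethod[QuickJitted]"
    std::fprintf(file, "%zx %zx %s[%s]\n",
                 static_cast<std::size_t>(codeStart), codeSize, methodName, tierName);
}

int main()
{
    WritePerfMapLine(stdout, 0x7f3a12340000, 0x1a0,
                     "MyNamespace.MyClass::MyMethod", "QuickJitted");
    return 0;
}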
Diffstat (limited to 'src/vm/prestub.cpp')
-rw-r--r--   src/vm/prestub.cpp   63
1 file changed, 62 insertions(+), 1 deletion(-)
diff --git a/src/vm/prestub.cpp b/src/vm/prestub.cpp
index 056fdb7a01..d83c419e22 100644
--- a/src/vm/prestub.cpp
+++ b/src/vm/prestub.cpp
@@ -867,7 +867,7 @@ PCODE MethodDesc::JitCompileCodeLockedEventWrapper(PrepareCodeConfig* pConfig, J
{
#ifdef FEATURE_PERFMAP
// Save the JIT'd method information so that perf can resolve JIT'd call frames.
- PerfMap::LogJITCompiledMethod(this, pCode, sizeOfCode);
+ PerfMap::LogJITCompiledMethod(this, pCode, sizeOfCode, pConfig);
#endif
}
@@ -1143,6 +1143,67 @@ BOOL PrepareCodeConfig::MayUsePrecompiledCode()
return m_mayUsePrecompiledCode;
}
+PrepareCodeConfig::JitOptimizationTier PrepareCodeConfig::GetJitOptimizationTier(
+ PrepareCodeConfig *config,
+ MethodDesc *methodDesc)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(methodDesc != nullptr);
+ _ASSERTE(config == nullptr || methodDesc == config->GetMethodDesc());
+
+ if (config != nullptr)
+ {
+ if (config->JitSwitchedToMinOpt())
+ {
+ return JitOptimizationTier::MinOptJitted;
+ }
+ #ifdef FEATURE_TIERED_COMPILATION
+ else if (config->JitSwitchedToOptimized())
+ {
+ _ASSERTE(methodDesc->IsEligibleForTieredCompilation());
+ _ASSERTE(config->GetCodeVersion().GetOptimizationTier() == NativeCodeVersion::OptimizationTierOptimized);
+ return JitOptimizationTier::Optimized;
+ }
+ else if (methodDesc->IsEligibleForTieredCompilation())
+ {
+ switch (config->GetCodeVersion().GetOptimizationTier())
+ {
+ case NativeCodeVersion::OptimizationTier0:
+ return JitOptimizationTier::QuickJitted;
+
+ case NativeCodeVersion::OptimizationTier1:
+ return JitOptimizationTier::OptimizedTier1;
+
+ case NativeCodeVersion::OptimizationTierOptimized:
+ return JitOptimizationTier::Optimized;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+ #endif
+ }
+
+ return methodDesc->IsJitOptimizationDisabled() ? JitOptimizationTier::MinOptJitted : JitOptimizationTier::Optimized;
+}
+
+const char *PrepareCodeConfig::GetJitOptimizationTierStr(PrepareCodeConfig *config, MethodDesc *methodDesc)
+{
+ WRAPPER_NO_CONTRACT;
+
+ switch (GetJitOptimizationTier(config, methodDesc))
+ {
+ case JitOptimizationTier::Unknown: return "Unknown";
+ case JitOptimizationTier::MinOptJitted: return "MinOptJitted";
+ case JitOptimizationTier::Optimized: return "Optimized";
+ case JitOptimizationTier::QuickJitted: return "QuickJitted";
+ case JitOptimizationTier::OptimizedTier1: return "OptimizedTier1";
+
+ default:
+ UNREACHABLE();
+ }
+}
+
#ifdef FEATURE_CODE_VERSIONING
VersionedPrepareCodeConfig::VersionedPrepareCodeConfig() {}
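As a side note on the hunk above, the following standalone sketch mirrors the tier-selection order in GetJitOptimizationTier, using plain booleans and an optional integer as stand-ins for PrepareCodeConfig and NativeCodeVersion (VM-internal types). ConfigView, SelectTier, and the integer tier encoding are hypothetical names for illustration only.

// Standalone sketch of the tier-selection order in GetJitOptimizationTier above.
// ConfigView and SelectTier are hypothetical stand-ins, not runtime types.
#include <cstdio>
#include <optional>

enum class Tier { MinOptJitted, Optimized, QuickJitted, OptimizedTier1 };

struct ConfigView                               // stand-in for PrepareCodeConfig
{
    bool jitSwitchedToMinOpt;
    bool jitSwitchedToOptimized;
    std::optional<int> tieredCompilationTier;   // set only when the method is eligible for tiering
};

static Tier SelectTier(const ConfigView *config, bool jitOptimizationDisabled)
{
    if (config != nullptr)
    {
        if (config->jitSwitchedToMinOpt)
            return Tier::MinOptJitted;          // the JIT fell back to minimal optimization
        if (config->jitSwitchedToOptimized)
            return Tier::Optimized;             // the JIT was switched to full optimization
        if (config->tieredCompilationTier.has_value())
        {
            switch (*config->tieredCompilationTier)
            {
                case 0:  return Tier::QuickJitted;     // OptimizationTier0
                case 1:  return Tier::OptimizedTier1;  // OptimizationTier1
                default: return Tier::Optimized;       // OptimizationTierOptimized
            }
        }
    }
    // No config (e.g. an R2R method): classify from the method's optimization setting alone.
    return jitOptimizationDisabled ? Tier::MinOptJitted : Tier::Optimized;
}

int main()
{
    ConfigView config{ false, false, 0 };       // tier 0 -> QuickJitted
    std::printf("tier = %d\n", static_cast<int>(SelectTier(&config, false)));
    return 0;
}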