author    Koundinya Veluri <kouvel@users.noreply.github.com>    2018-10-03 08:52:40 -0700
committer GitHub <noreply@github.com>    2018-10-03 08:52:40 -0700
commit    b68296ce2c56188cf2a7bd263903e27c67717702 (patch)
tree      3c535c8fe4f92d91fc590a6cd25731ed5b222f25 /src/vm
parent    50567db6e3851f4c4680771424a354e2258333b4 (diff)
Add MethodImplOptions.AggressiveOptimization and use it for tiering (#20009)
Part of fix for https://github.com/dotnet/corefx/issues/32235
Workaround for https://github.com/dotnet/coreclr/issues/19751

- Added and set CORJIT_FLAG_AGGRESSIVE_OPT to indicate that a method is flagged with AggressiveOptimization
- For a method flagged with AggressiveOptimization, tiering uses a foreground tier 1 JIT on first call to the method, skipping the tier 0 JIT and call counting
- When tiering is disabled, a method flagged with AggressiveOptimization does not use r2r-pregenerated code
- R2r crossgen does not generate code for a method flagged with AggressiveOptimization
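For illustration only (not part of this commit), a minimal C# sketch of the managed-side usage this change enables; the class and method names below are hypothetical:

    using System.Runtime.CompilerServices;

    static class HotLoopExample
    {
        // With this change, a method marked AggressiveOptimization is jitted at
        // tier 1 on its first call: no tier 0 code, no call counting, and no use
        // of R2R-pregenerated code for the method.
        [MethodImpl(MethodImplOptions.AggressiveOptimization)]
        public static long Sum(int[] values)
        {
            long total = 0;
            for (int i = 0; i < values.Length; i++)
            {
                total += values[i];
            }
            return total;
        }
    }

Per the comment added in tieredcompilation.cpp, this is aimed at cold methods with hot loops, which otherwise perform poorly because they stay on tier 0 code (https://github.com/dotnet/coreclr/issues/19751).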
Diffstat (limited to 'src/vm')
-rw-r--r--  src/vm/CMakeLists.txt         |  2
-rw-r--r--  src/vm/codeversion.cpp        | 56
-rw-r--r--  src/vm/codeversion.h          | 16
-rw-r--r--  src/vm/jitinterface.cpp       | 13
-rw-r--r--  src/vm/method.hpp             | 11
-rw-r--r--  src/vm/prestub.cpp            | 22
-rw-r--r--  src/vm/tieredcompilation.cpp  | 39
-rw-r--r--  src/vm/tieredcompilation.h    | 14
8 files changed, 105 insertions(+), 68 deletions(-)
diff --git a/src/vm/CMakeLists.txt b/src/vm/CMakeLists.txt
index 54c73cce44..58de64694e 100644
--- a/src/vm/CMakeLists.txt
+++ b/src/vm/CMakeLists.txt
@@ -111,6 +111,7 @@ set(VM_SOURCES_DAC_AND_WKS_COMMON
threadpoolrequest.cpp
threads.cpp
threadstatics.cpp
+ tieredcompilation.cpp
typectxt.cpp
typedesc.cpp
typehandle.cpp
@@ -381,7 +382,6 @@ set(VM_SOURCES_WKS
synch.cpp
synchronizationcontextnative.cpp
testhookmgr.cpp
- tieredcompilation.cpp
threaddebugblockinginfo.cpp
threadsuspend.cpp
typeparse.cpp
diff --git a/src/vm/codeversion.cpp b/src/vm/codeversion.cpp
index 86e8558112..5f7ef16f7b 100644
--- a/src/vm/codeversion.cpp
+++ b/src/vm/codeversion.cpp
@@ -50,13 +50,20 @@ bool NativeCodeVersion::operator!=(const NativeCodeVersion & rhs) const { return
#define CORPROF_E_RUNTIME_SUSPEND_REQUIRED 0x80131381
#ifndef DACCESS_COMPILE
-NativeCodeVersionNode::NativeCodeVersionNode(NativeCodeVersionId id, MethodDesc* pMethodDesc, ReJITID parentId) :
+NativeCodeVersionNode::NativeCodeVersionNode(
+ NativeCodeVersionId id,
+ MethodDesc* pMethodDesc,
+ ReJITID parentId,
+ NativeCodeVersion::OptimizationTier optimizationTier)
+ :
m_pNativeCode(NULL),
m_pMethodDesc(pMethodDesc),
m_parentId(parentId),
m_pNextMethodDescSibling(NULL),
m_id(id),
- m_optTier(NativeCodeVersion::OptimizationTier0),
+#ifdef FEATURE_TIERED_COMPILATION
+ m_optTier(optimizationTier),
+#endif
m_flags(0)
{}
#endif
@@ -143,15 +150,8 @@ void NativeCodeVersionNode::SetActiveChildFlag(BOOL isActive)
NativeCodeVersion::OptimizationTier NativeCodeVersionNode::GetOptimizationTier() const
{
LIMITED_METHOD_DAC_CONTRACT;
- return m_optTier.Load();
-}
-#ifndef DACCESS_COMPILE
-void NativeCodeVersionNode::SetOptimizationTier(NativeCodeVersion::OptimizationTier tier)
-{
- LIMITED_METHOD_DAC_CONTRACT;
- m_optTier.Store(tier);
+ return m_optTier;
}
-#endif
#endif // FEATURE_TIERED_COMPILATION
NativeCodeVersion::NativeCodeVersion() :
@@ -336,24 +336,9 @@ NativeCodeVersion::OptimizationTier NativeCodeVersion::GetOptimizationTier() con
}
else
{
- return NativeCodeVersion::OptimizationTier0;
+ return TieredCompilationManager::GetInitialOptimizationTier(GetMethodDesc());
}
}
-
-#ifndef DACCESS_COMPILE
-void NativeCodeVersion::SetOptimizationTier(NativeCodeVersion::OptimizationTier tier)
-{
- LIMITED_METHOD_CONTRACT;
- if (m_storageKind == StorageKind::Explicit)
- {
- AsNode()->SetOptimizationTier(tier);
- }
- else
- {
- _ASSERTE(!"Do not call SetOptimizationTier on default code versions - these versions are immutable");
- }
-}
-#endif
#endif
PTR_NativeCodeVersionNode NativeCodeVersion::AsNode() const
@@ -881,11 +866,14 @@ void ILCodeVersion::SetInstrumentedILMap(SIZE_T cMap, COR_IL_MAP * rgMap)
AsNode()->SetInstrumentedILMap(cMap, rgMap);
}
-HRESULT ILCodeVersion::AddNativeCodeVersion(MethodDesc* pClosedMethodDesc, NativeCodeVersion* pNativeCodeVersion)
+HRESULT ILCodeVersion::AddNativeCodeVersion(
+ MethodDesc* pClosedMethodDesc,
+ NativeCodeVersion::OptimizationTier optimizationTier,
+ NativeCodeVersion* pNativeCodeVersion)
{
LIMITED_METHOD_CONTRACT;
CodeVersionManager* pManager = GetModule()->GetCodeVersionManager();
- HRESULT hr = pManager->AddNativeCodeVersion(*this, pClosedMethodDesc, pNativeCodeVersion);
+ HRESULT hr = pManager->AddNativeCodeVersion(*this, pClosedMethodDesc, optimizationTier, pNativeCodeVersion);
if (FAILED(hr))
{
_ASSERTE(hr == E_OUTOFMEMORY);
@@ -901,7 +889,9 @@ HRESULT ILCodeVersion::GetOrCreateActiveNativeCodeVersion(MethodDesc* pClosedMet
NativeCodeVersion activeNativeChild = GetActiveNativeCodeVersion(pClosedMethodDesc);
if (activeNativeChild.IsNull())
{
- if (FAILED(hr = AddNativeCodeVersion(pClosedMethodDesc, &activeNativeChild)))
+ NativeCodeVersion::OptimizationTier optimizationTier =
+ TieredCompilationManager::GetInitialOptimizationTier(pClosedMethodDesc);
+ if (FAILED(hr = AddNativeCodeVersion(pClosedMethodDesc, optimizationTier, &activeNativeChild)))
{
_ASSERTE(hr == E_OUTOFMEMORY);
return hr;
@@ -2098,7 +2088,11 @@ HRESULT CodeVersionManager::SetActiveILCodeVersions(ILCodeVersion* pActiveVersio
return S_OK;
}
-HRESULT CodeVersionManager::AddNativeCodeVersion(ILCodeVersion ilCodeVersion, MethodDesc* pClosedMethodDesc, NativeCodeVersion* pNativeCodeVersion)
+HRESULT CodeVersionManager::AddNativeCodeVersion(
+ ILCodeVersion ilCodeVersion,
+ MethodDesc* pClosedMethodDesc,
+ NativeCodeVersion::OptimizationTier optimizationTier,
+ NativeCodeVersion* pNativeCodeVersion)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(LockOwnedByCurrentThread());
@@ -2112,7 +2106,7 @@ HRESULT CodeVersionManager::AddNativeCodeVersion(ILCodeVersion ilCodeVersion, Me
}
NativeCodeVersionId newId = pMethodVersioningState->AllocateVersionId();
- NativeCodeVersionNode* pNativeCodeVersionNode = new (nothrow) NativeCodeVersionNode(newId, pClosedMethodDesc, ilCodeVersion.GetVersionId());
+ NativeCodeVersionNode* pNativeCodeVersionNode = new (nothrow) NativeCodeVersionNode(newId, pClosedMethodDesc, ilCodeVersion.GetVersionId(), optimizationTier);
if (pNativeCodeVersionNode == NULL)
{
return E_OUTOFMEMORY;
diff --git a/src/vm/codeversion.h b/src/vm/codeversion.h
index b9004a583f..6dddacc69d 100644
--- a/src/vm/codeversion.h
+++ b/src/vm/codeversion.h
@@ -64,16 +64,13 @@ public:
#ifndef DACCESS_COMPILE
BOOL SetNativeCodeInterlocked(PCODE pCode, PCODE pExpected = NULL);
#endif
-#ifdef FEATURE_TIERED_COMPILATION
enum OptimizationTier
{
OptimizationTier0,
OptimizationTier1
};
+#ifdef FEATURE_TIERED_COMPILATION
OptimizationTier GetOptimizationTier() const;
-#ifndef DACCESS_COMPILE
- void SetOptimizationTier(OptimizationTier tier);
-#endif
#endif // FEATURE_TIERED_COMPILATION
bool operator==(const NativeCodeVersion & rhs) const;
bool operator!=(const NativeCodeVersion & rhs) const;
@@ -152,7 +149,7 @@ public:
void SetIL(COR_ILMETHOD* pIL);
void SetJitFlags(DWORD flags);
void SetInstrumentedILMap(SIZE_T cMap, COR_IL_MAP * rgMap);
- HRESULT AddNativeCodeVersion(MethodDesc* pClosedMethodDesc, NativeCodeVersion* pNativeCodeVersion);
+ HRESULT AddNativeCodeVersion(MethodDesc* pClosedMethodDesc, NativeCodeVersion::OptimizationTier optimizationTier, NativeCodeVersion* pNativeCodeVersion);
HRESULT GetOrCreateActiveNativeCodeVersion(MethodDesc* pClosedMethodDesc, NativeCodeVersion* pNativeCodeVersion);
HRESULT SetActiveNativeCodeVersion(NativeCodeVersion activeNativeCodeVersion, BOOL fEESuspended);
#endif //DACCESS_COMPILE
@@ -223,7 +220,7 @@ class NativeCodeVersionNode
friend ILCodeVersionNode;
public:
#ifndef DACCESS_COMPILE
- NativeCodeVersionNode(NativeCodeVersionId id, MethodDesc* pMethod, ReJITID parentId);
+ NativeCodeVersionNode(NativeCodeVersionId id, MethodDesc* pMethod, ReJITID parentId, NativeCodeVersion::OptimizationTier optimizationTier);
#endif
#ifdef DEBUG
BOOL LockOwnedByCurrentThread() const;
@@ -240,9 +237,6 @@ public:
#endif
#ifdef FEATURE_TIERED_COMPILATION
NativeCodeVersion::OptimizationTier GetOptimizationTier() const;
-#ifndef DACCESS_COMPILE
- void SetOptimizationTier(NativeCodeVersion::OptimizationTier tier);
-#endif
#endif
private:
@@ -256,7 +250,7 @@ private:
PTR_NativeCodeVersionNode m_pNextMethodDescSibling;
NativeCodeVersionId m_id;
#ifdef FEATURE_TIERED_COMPILATION
- Volatile<NativeCodeVersion::OptimizationTier> m_optTier;
+ NativeCodeVersion::OptimizationTier m_optTier;
#endif
enum NativeCodeVersionNodeFlags
@@ -609,7 +603,7 @@ public:
};
HRESULT AddILCodeVersion(Module* pModule, mdMethodDef methodDef, ReJITID rejitId, ILCodeVersion* pILCodeVersion);
- HRESULT AddNativeCodeVersion(ILCodeVersion ilCodeVersion, MethodDesc* pClosedMethodDesc, NativeCodeVersion* pNativeCodeVersion);
+ HRESULT AddNativeCodeVersion(ILCodeVersion ilCodeVersion, MethodDesc* pClosedMethodDesc, NativeCodeVersion::OptimizationTier optimizationTier, NativeCodeVersion* pNativeCodeVersion);
HRESULT DoJumpStampIfNecessary(MethodDesc* pMD, PCODE pCode);
PCODE PublishVersionableCodeIfNecessary(MethodDesc* pMethodDesc, BOOL fCanBackpatchPrestub);
HRESULT PublishNativeCodeVersion(MethodDesc* pMethodDesc, NativeCodeVersion nativeCodeVersion, BOOL fEESuspended);
diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
index 2140d34bde..28e17e9e3e 100644
--- a/src/vm/jitinterface.cpp
+++ b/src/vm/jitinterface.cpp
@@ -6776,6 +6776,17 @@ DWORD CEEInfo::getMethodAttribsInternal (CORINFO_METHOD_HANDLE ftn)
result |= CORINFO_FLG_DONT_INLINE_CALLER;
}
+ // Check for the aggressive optimization directive. AggressiveOptimization only makes sense for IL methods.
+ DWORD ilMethodImplAttribs = 0;
+ if (pMD->IsIL())
+ {
+ ilMethodImplAttribs = pMD->GetImplAttrs();
+ if (IsMiAggressiveOptimization(ilMethodImplAttribs))
+ {
+ result |= CORINFO_FLG_AGGRESSIVE_OPT;
+ }
+ }
+
// Check for an inlining directive.
if (pMD->IsNotInline())
{
@@ -6783,7 +6794,7 @@ DWORD CEEInfo::getMethodAttribsInternal (CORINFO_METHOD_HANDLE ftn)
result |= CORINFO_FLG_DONT_INLINE;
}
// AggressiveInlining only makes sense for IL methods.
- else if (pMD->IsIL() && IsMiAggressiveInlining(pMD->GetImplAttrs()))
+ else if (pMD->IsIL() && IsMiAggressiveInlining(ilMethodImplAttribs))
{
result |= CORINFO_FLG_FORCEINLINE;
}
diff --git a/src/vm/method.hpp b/src/vm/method.hpp
index 5ed6869222..57561ef899 100644
--- a/src/vm/method.hpp
+++ b/src/vm/method.hpp
@@ -1267,12 +1267,13 @@ public:
}
#endif
- // Returns a code version that represents the first (default)
- // code body that this method would have.
- NativeCodeVersion GetInitialCodeVersion()
+ bool RequestedAggressiveOptimization()
{
- LIMITED_METHOD_DAC_CONTRACT;
- return NativeCodeVersion(dac_cast<PTR_MethodDesc>(this));
+ WRAPPER_NO_CONTRACT;
+
+ return
+ IsIL() && // only makes sense for IL methods, and this implies !IsNoMetadata()
+ IsMiAggressiveOptimization(GetImplAttrs());
}
// Does this method force the NativeCodeSlot to stay fixed after it
diff --git a/src/vm/prestub.cpp b/src/vm/prestub.cpp
index 378dc2e120..1893cf6c23 100644
--- a/src/vm/prestub.cpp
+++ b/src/vm/prestub.cpp
@@ -1745,20 +1745,16 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
// When the TieredCompilationManager has received enough call notifications
// for this method only then do we back-patch it.
BOOL fCanBackpatchPrestub = TRUE;
- BOOL fEligibleForCallCounting = FALSE;
#ifdef FEATURE_TIERED_COMPILATION
+ BOOL fNeedsCallCounting = FALSE;
TieredCompilationManager* pTieredCompilationManager = nullptr;
- BOOL fEligibleForTieredCompilation = IsEligibleForTieredCompilation();
- BOOL fWasPromotedToTier1 = FALSE;
- if (fEligibleForTieredCompilation)
+ if (IsEligibleForTieredCompilation() && TieredCompilationManager::RequiresCallCounting(this))
{
- fEligibleForCallCounting = g_pConfig->TieredCompilation_CallCounting();
- if (fEligibleForCallCounting)
- {
- pTieredCompilationManager = GetAppDomain()->GetTieredCompilationManager();
- CallCounter * pCallCounter = GetCallCounter();
- pCallCounter->OnMethodCalled(this, pTieredCompilationManager, &fCanBackpatchPrestub, &fWasPromotedToTier1);
- }
+ pTieredCompilationManager = GetAppDomain()->GetTieredCompilationManager();
+ CallCounter * pCallCounter = GetCallCounter();
+ BOOL fWasPromotedToTier1 = FALSE;
+ pCallCounter->OnMethodCalled(this, pTieredCompilationManager, &fCanBackpatchPrestub, &fWasPromotedToTier1);
+ fNeedsCallCounting = !fWasPromotedToTier1;
}
#endif
@@ -1771,10 +1767,12 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
{
pCode = GetCodeVersionManager()->PublishVersionableCodeIfNecessary(this, fCanBackpatchPrestub);
- if (pTieredCompilationManager != nullptr && fEligibleForCallCounting && fCanBackpatchPrestub && pCode != NULL && !fWasPromotedToTier1)
+#ifdef FEATURE_TIERED_COMPILATION
+ if (pTieredCompilationManager != nullptr && fNeedsCallCounting && fCanBackpatchPrestub && pCode != NULL)
{
pTieredCompilationManager->OnMethodCallCountingStoppedWithoutTier1Promotion(this);
}
+#endif
fIsPointingToPrestub = IsPointingToPrestub();
}
diff --git a/src/vm/tieredcompilation.cpp b/src/vm/tieredcompilation.cpp
index 9c9e76de18..2c24618bd8 100644
--- a/src/vm/tieredcompilation.cpp
+++ b/src/vm/tieredcompilation.cpp
@@ -58,7 +58,7 @@
// errors are limited to OS resource exhaustion or poorly behaved managed code
// (for example within an AssemblyResolve event or static constructor triggered by the JIT).
-#ifdef FEATURE_TIERED_COMPILATION
+#if defined(FEATURE_TIERED_COMPILATION) && !defined(DACCESS_COMPILE)
// Called at AppDomain construction
TieredCompilationManager::TieredCompilationManager() :
@@ -91,6 +91,38 @@ void TieredCompilationManager::Init(ADID appDomainId)
m_callCountOptimizationThreshhold = g_pConfig->TieredCompilation_Tier1CallCountThreshold();
}
+#endif // FEATURE_TIERED_COMPILATION && !DACCESS_COMPILE
+
+NativeCodeVersion::OptimizationTier TieredCompilationManager::GetInitialOptimizationTier(PTR_MethodDesc pMethodDesc)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(pMethodDesc != NULL);
+
+#ifdef FEATURE_TIERED_COMPILATION
+ if (pMethodDesc->RequestedAggressiveOptimization())
+ {
+ // Methods flagged with MethodImplOptions.AggressiveOptimization begin at tier 1, as a workaround to cold methods with
+ // hot loops performing poorly (https://github.com/dotnet/coreclr/issues/19751)
+ return NativeCodeVersion::OptimizationTier1;
+ }
+#endif // FEATURE_TIERED_COMPILATION
+
+ return NativeCodeVersion::OptimizationTier0;
+}
+
+#if defined(FEATURE_TIERED_COMPILATION) && !defined(DACCESS_COMPILE)
+
+bool TieredCompilationManager::RequiresCallCounting(MethodDesc* pMethodDesc)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(pMethodDesc != NULL);
+ _ASSERTE(pMethodDesc->IsEligibleForTieredCompilation());
+
+ return
+ g_pConfig->TieredCompilation_CallCounting() &&
+ GetInitialOptimizationTier(pMethodDesc) == NativeCodeVersion::OptimizationTier0;
+}
+
// Called each time code in this AppDomain has been run. This is our sole entrypoint to begin
// tiered compilation for now. Returns TRUE if no more notifications are necessary, but
// more notifications may come anyways.
@@ -213,7 +245,7 @@ void TieredCompilationManager::AsyncPromoteMethodToTier1(MethodDesc* pMethodDesc
}
HRESULT hr = S_OK;
- if (FAILED(hr = ilVersion.AddNativeCodeVersion(pMethodDesc, &t1NativeCodeVersion)))
+ if (FAILED(hr = ilVersion.AddNativeCodeVersion(pMethodDesc, NativeCodeVersion::OptimizationTier1, &t1NativeCodeVersion)))
{
// optimization didn't work for some reason (presumably OOM)
// just give up and continue on
@@ -222,7 +254,6 @@ void TieredCompilationManager::AsyncPromoteMethodToTier1(MethodDesc* pMethodDesc
hr, pMethodDesc);
return;
}
- t1NativeCodeVersion.SetOptimizationTier(NativeCodeVersion::OptimizationTier1);
}
// Insert the method into the optimization queue and trigger a thread to service
@@ -791,4 +822,4 @@ CORJIT_FLAGS TieredCompilationManager::GetJitFlags(NativeCodeVersion nativeCodeV
return flags;
}
-#endif // FEATURE_TIERED_COMPILATION
+#endif // FEATURE_TIERED_COMPILATION && !DACCESS_COMPILE
diff --git a/src/vm/tieredcompilation.h b/src/vm/tieredcompilation.h
index b208f26256..072f8e16c4 100644
--- a/src/vm/tieredcompilation.h
+++ b/src/vm/tieredcompilation.h
@@ -10,13 +10,13 @@
#ifndef TIERED_COMPILATION_H
#define TIERED_COMPILATION_H
-#ifdef FEATURE_TIERED_COMPILATION
-
// TieredCompilationManager determines which methods should be recompiled and
// how they should be recompiled to best optimize the running code. It then
// handles logistics of getting new code created and installed.
class TieredCompilationManager
{
+#ifdef FEATURE_TIERED_COMPILATION
+
public:
#if defined(DACCESS_COMPILE) || defined(CROSSGEN_COMPILE)
TieredCompilationManager() {}
@@ -26,7 +26,15 @@ public:
void Init(ADID appDomainId);
+#endif // FEATURE_TIERED_COMPILATION
+
public:
+ static NativeCodeVersion::OptimizationTier GetInitialOptimizationTier(PTR_MethodDesc pMethodDesc);
+
+#ifdef FEATURE_TIERED_COMPILATION
+
+public:
+ static bool RequiresCallCounting(MethodDesc* pMethodDesc);
void OnMethodCalled(MethodDesc* pMethodDesc, DWORD currentCallCount, BOOL* shouldStopCountingCallsRef, BOOL* wasPromotedToTier1Ref);
void OnMethodCallCountingStoppedWithoutTier1Promotion(MethodDesc* pMethodDesc);
void AsyncPromoteMethodToTier1(MethodDesc* pMethodDesc);
@@ -68,8 +76,8 @@ private:
bool m_tier1CallCountingCandidateMethodRecentlyRecorded;
CLREvent m_asyncWorkDoneEvent;
-};
#endif // FEATURE_TIERED_COMPILATION
+};
#endif // TIERED_COMPILATION_H