summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authornoahfalk <noahfalk@microsoft.com>2017-07-24 17:38:30 -0700
committernoahfalk <noahfalk@microsoft.com>2017-07-24 17:38:30 -0700
commitfd1998903d5eef356f27c54e5a9d490711cbc9e7 (patch)
treeb1d3d7ff1978483060a0f7c879b22006edd417b4 /src
parent39cd3cfcb078154f9b595ae476f2c5fb7b445e18 (diff)
downloadcoreclr-fd1998903d5eef356f27c54e5a9d490711cbc9e7.tar.gz
coreclr-fd1998903d5eef356f27c54e5a9d490711cbc9e7.tar.bz2
coreclr-fd1998903d5eef356f27c54e5a9d490711cbc9e7.zip
Add the runtime code versioning feature
This makes tiered compilation work properly with profiler ReJIT, and positions the runtime to integrate other versioning related features together in the future. See the newly added code-versioning design-doc in this commit for more information. Breaking changes for profilers: See code-versioning-profiler-breaking-changes.md for more details.
Diffstat (limited to 'src')
-rw-r--r--src/debug/daccess/dacdbiimpl.cpp123
-rw-r--r--src/debug/daccess/dacdbiimpl.h6
-rw-r--r--src/debug/daccess/request.cpp88
-rw-r--r--src/debug/di/module.cpp8
-rw-r--r--src/debug/di/rsfunction.cpp16
-rw-r--r--src/debug/di/rspriv.h4
-rw-r--r--src/debug/di/rsstackwalk.cpp15
-rw-r--r--src/debug/inc/dacdbiinterface.h78
-rw-r--r--src/debug/inc/dbgipcevents.h3
-rw-r--r--src/inc/corhlpr.h4
-rw-r--r--src/inc/corjit.h4
-rw-r--r--src/inc/shash.h1
-rw-r--r--src/jit/jitee.h6
-rw-r--r--src/vm/CMakeLists.txt3
-rw-r--r--src/vm/appdomain.cpp21
-rw-r--r--src/vm/appdomain.hpp35
-rw-r--r--src/vm/callcounter.cpp19
-rw-r--r--src/vm/callcounter.h3
-rw-r--r--src/vm/ceeload.cpp90
-rw-r--r--src/vm/ceeload.h116
-rw-r--r--src/vm/ceeload.inl14
-rw-r--r--src/vm/classcompat.cpp1
-rw-r--r--src/vm/codeversion.cpp2862
-rw-r--r--src/vm/codeversion.h689
-rw-r--r--src/vm/crossgen/CMakeLists.txt2
-rw-r--r--src/vm/crossgencompile.cpp4
-rw-r--r--src/vm/crst.h9
-rw-r--r--src/vm/dllimport.cpp3
-rw-r--r--src/vm/eventtrace.cpp6
-rw-r--r--src/vm/gccover.cpp6
-rw-r--r--src/vm/i386/stublinkerx86.cpp4
-rw-r--r--src/vm/ilinstrumentation.cpp90
-rw-r--r--src/vm/ilinstrumentation.h116
-rw-r--r--src/vm/interpreter.cpp104
-rw-r--r--src/vm/interpreter.h3
-rw-r--r--src/vm/jitinterface.cpp65
-rw-r--r--src/vm/jitinterface.h14
-rw-r--r--src/vm/listlock.cpp96
-rw-r--r--src/vm/listlock.h179
-rw-r--r--src/vm/listlock.inl51
-rw-r--r--src/vm/loaderallocator.hpp4
-rw-r--r--src/vm/memberload.cpp1
-rw-r--r--src/vm/method.cpp96
-rw-r--r--src/vm/method.hpp176
-rw-r--r--src/vm/method.inl14
-rw-r--r--src/vm/methodtable.cpp6
-rw-r--r--src/vm/multicorejit.cpp3
-rw-r--r--src/vm/multicorejit.h2
-rw-r--r--src/vm/multicorejitplayer.cpp27
-rw-r--r--src/vm/prestub.cpp1330
-rw-r--r--src/vm/profilingenumerators.cpp2
-rw-r--r--src/vm/proftoeeinterfaceimpl.cpp31
-rw-r--r--src/vm/rejit.cpp3248
-rw-r--r--src/vm/rejit.h482
-rw-r--r--src/vm/rejit.inl253
-rw-r--r--src/vm/tieredcompilation.cpp183
-rw-r--r--src/vm/tieredcompilation.h12
57 files changed, 5540 insertions, 5291 deletions
diff --git a/src/debug/daccess/dacdbiimpl.cpp b/src/debug/daccess/dacdbiimpl.cpp
index f48ecc0bd0..a5252a2c9c 100644
--- a/src/debug/daccess/dacdbiimpl.cpp
+++ b/src/debug/daccess/dacdbiimpl.cpp
@@ -903,14 +903,15 @@ void DacDbiInterfaceImpl::GetNativeVarData(MethodDesc * pMethodDesc,
// pEntryCount is the number of valid entries in nativeMap, and it may be adjusted downwards
// as part of the composition.
//-----------------------------------------------------------------------------
-void DacDbiInterfaceImpl::ComposeMapping(InstrumentedILOffsetMapping profilerILMap, ICorDebugInfo::OffsetMapping nativeMap[], ULONG32* pEntryCount)
+void DacDbiInterfaceImpl::ComposeMapping(const InstrumentedILOffsetMapping * pProfilerILMap, ICorDebugInfo::OffsetMapping nativeMap[], ULONG32* pEntryCount)
{
// Translate the IL offset if the profiler has provided us with a mapping.
// The ICD public API should always expose the original IL offsets, but GetBoundaries()
// directly accesses the debug info, which stores the instrumented IL offsets.
ULONG32 entryCount = *pEntryCount;
- if (!profilerILMap.IsNull())
+ // The map pointer could be NULL or there could be no entries in the map, in either case no work to do
+ if (pProfilerILMap && !pProfilerILMap->IsNull())
{
// If we did instrument, then we can't have any sequence points that
// are "in-between" the old-->new map that the profiler gave us.
@@ -925,7 +926,7 @@ void DacDbiInterfaceImpl::ComposeMapping(InstrumentedILOffsetMapping profilerILM
ULONG32 prevILOffset = (ULONG32)(ICorDebugInfo::MAX_ILNUM);
for (ULONG32 i = 0; i < entryCount; i++)
{
- ULONG32 origILOffset = TranslateInstrumentedILOffsetToOriginal(nativeMap[i].ilOffset, &profilerILMap);
+ ULONG32 origILOffset = TranslateInstrumentedILOffsetToOriginal(nativeMap[i].ilOffset, pProfilerILMap);
if (origILOffset == prevILOffset)
{
@@ -1003,12 +1004,12 @@ void DacDbiInterfaceImpl::GetSequencePoints(MethodDesc * pMethodDesc,
// if there is a rejit IL map for this function, apply that in preference to load-time mapping
#ifdef FEATURE_REJIT
- ReJitManager * pReJitMgr = pMethodDesc->GetReJitManager();
- ReJitInfo* pReJitInfo = pReJitMgr->FindReJitInfo(dac_cast<PTR_MethodDesc>(pMethodDesc), (PCODE)startAddr, 0);
- if (pReJitInfo != NULL)
+ CodeVersionManager * pCodeVersionManager = pMethodDesc->GetCodeVersionManager();
+ NativeCodeVersion nativeCodeVersion = pCodeVersionManager->GetNativeCodeVersion(dac_cast<PTR_MethodDesc>(pMethodDesc), (PCODE)startAddr);
+ if (!nativeCodeVersion.IsNull())
{
- InstrumentedILOffsetMapping rejitMapping = pReJitInfo->m_pShared->m_instrumentedILMap;
- ComposeMapping(rejitMapping, mapCopy, &entryCount);
+ const InstrumentedILOffsetMapping * pRejitMapping = nativeCodeVersion.GetILCodeVersion().GetInstrumentedILMap();
+ ComposeMapping(pRejitMapping, mapCopy, &entryCount);
}
else
{
@@ -1016,7 +1017,7 @@ void DacDbiInterfaceImpl::GetSequencePoints(MethodDesc * pMethodDesc,
// if there is a profiler load-time mapping and not a rejit mapping, apply that instead
InstrumentedILOffsetMapping loadTimeMapping =
pMethodDesc->GetModule()->GetInstrumentedILOffsetMapping(pMethodDesc->GetMemberDef());
- ComposeMapping(loadTimeMapping, mapCopy, &entryCount);
+ ComposeMapping(&loadTimeMapping, mapCopy, &entryCount);
#ifdef FEATURE_REJIT
}
#endif
@@ -7149,26 +7150,36 @@ HRESULT DacDbiInterfaceImpl::GetPEFileMDInternalRW(VMPTR_PEFile vmPEFile, OUT TA
HRESULT DacDbiInterfaceImpl::GetReJitInfo(VMPTR_Module vmModule, mdMethodDef methodTk, OUT VMPTR_ReJitInfo* pvmReJitInfo)
{
DD_ENTER_MAY_THROW;
- if (pvmReJitInfo == NULL)
+ _ASSERTE(!"You shouldn't be calling this - use GetActiveRejitILCodeVersionNode instead");
+ return S_OK;
+}
+
+HRESULT DacDbiInterfaceImpl::GetActiveRejitILCodeVersionNode(VMPTR_Module vmModule, mdMethodDef methodTk, OUT VMPTR_ILCodeVersionNode* pVmILCodeVersionNode)
+{
+ DD_ENTER_MAY_THROW;
+ if (pVmILCodeVersionNode == NULL)
return E_INVALIDARG;
#ifdef FEATURE_REJIT
PTR_Module pModule = vmModule.GetDacPtr();
- ReJitManager * pReJitMgr = pModule->GetReJitManager();
- PTR_ReJitInfo pReJitInfoCurrent = pReJitMgr->FindNonRevertedReJitInfo(pModule, methodTk);
- // if the token lookup failed, we need to search again by method desc
- // The rejit manager will index by token if the method isn't loaded when RequestReJIT runs
- // and by methoddesc if it was loaded
- if (pReJitInfoCurrent == NULL)
- {
- MethodDesc* pMD = pModule->LookupMethodDef(methodTk);
- if (pMD != NULL)
- {
- pReJitInfoCurrent = pReJitMgr->FindNonRevertedReJitInfo(dac_cast<PTR_MethodDesc>(pMD));
- }
+ CodeVersionManager * pCodeVersionManager = pModule->GetCodeVersionManager();
+ // Be careful, there are two different definitions of 'active' being used here
+ // For the CodeVersionManager, the active IL version is whatever one should be used in the next invocation of the method
+ // 'rejit active' narrows that to only include rejit IL bodies where the profiler has already provided the definition
+ // for the new IL (ilCodeVersion.GetRejitState()==ILCodeVersion::kStateActive). It is possible that the code version
+ // manager's active IL version hasn't yet asked the profiler for the IL body to use, in which case we want to filter it
+ // out from the return in this method.
+ ILCodeVersion activeILVersion = pCodeVersionManager->GetActiveILCodeVersion(pModule, methodTk);
+ if (activeILVersion.IsNull() || activeILVersion.GetRejitState() != ILCodeVersion::kStateActive)
+ {
+ pVmILCodeVersionNode->SetDacTargetPtr(0);
+ }
+ else
+ {
+ pVmILCodeVersionNode->SetDacTargetPtr(PTR_TO_TADDR(activeILVersion.AsNode()));
}
- pvmReJitInfo->SetDacTargetPtr(PTR_TO_TADDR(pReJitInfoCurrent));
#else
- pvmReJitInfo->SetDacTargetPtr(0);
+ _ASSERTE(!"You shouldn't be calling this - rejit is not supported in this build");
+ pVmILCodeVersionNode->SetDacTargetPtr(0);
#endif
return S_OK;
}
@@ -7176,15 +7187,22 @@ HRESULT DacDbiInterfaceImpl::GetReJitInfo(VMPTR_Module vmModule, mdMethodDef met
HRESULT DacDbiInterfaceImpl::GetReJitInfo(VMPTR_MethodDesc vmMethod, CORDB_ADDRESS codeStartAddress, OUT VMPTR_ReJitInfo* pvmReJitInfo)
{
DD_ENTER_MAY_THROW;
- if (pvmReJitInfo == NULL)
+ _ASSERTE(!"You shouldn't be calling this - use GetNativeCodeVersionNode instead");
+ return S_OK;
+}
+
+HRESULT DacDbiInterfaceImpl::GetNativeCodeVersionNode(VMPTR_MethodDesc vmMethod, CORDB_ADDRESS codeStartAddress, OUT VMPTR_NativeCodeVersionNode* pVmNativeCodeVersionNode)
+{
+ DD_ENTER_MAY_THROW;
+ if (pVmNativeCodeVersionNode == NULL)
return E_INVALIDARG;
#ifdef FEATURE_REJIT
PTR_MethodDesc pMD = vmMethod.GetDacPtr();
- ReJitManager * pReJitMgr = pMD->GetReJitManager();
- PTR_ReJitInfo pReJitInfoCurrent = pReJitMgr->FindReJitInfo(pMD, (PCODE)codeStartAddress, 0);
- pvmReJitInfo->SetDacTargetPtr(PTR_TO_TADDR(pReJitInfoCurrent));
+ CodeVersionManager * pCodeVersionManager = pMD->GetCodeVersionManager();
+ NativeCodeVersion codeVersion = pCodeVersionManager->GetNativeCodeVersion(pMD, (PCODE)codeStartAddress);
+ pVmNativeCodeVersionNode->SetDacTargetPtr(PTR_TO_TADDR(codeVersion.AsNode()));
#else
- pvmReJitInfo->SetDacTargetPtr(0);
+ pVmNativeCodeVersionNode->SetDacTargetPtr(0);
#endif
return S_OK;
}
@@ -7192,14 +7210,21 @@ HRESULT DacDbiInterfaceImpl::GetReJitInfo(VMPTR_MethodDesc vmMethod, CORDB_ADDRE
HRESULT DacDbiInterfaceImpl::GetSharedReJitInfo(VMPTR_ReJitInfo vmReJitInfo, OUT VMPTR_SharedReJitInfo* pvmSharedReJitInfo)
{
DD_ENTER_MAY_THROW;
- if (pvmSharedReJitInfo == NULL)
+ _ASSERTE(!"You shouldn't be calling this - use GetILCodeVersionNode instead");
+ return S_OK;
+}
+
+HRESULT DacDbiInterfaceImpl::GetILCodeVersionNode(VMPTR_NativeCodeVersionNode vmNativeCodeVersionNode, VMPTR_ILCodeVersionNode* pVmILCodeVersionNode)
+{
+ DD_ENTER_MAY_THROW;
+ if (pVmILCodeVersionNode == NULL)
return E_INVALIDARG;
#ifdef FEATURE_REJIT
- ReJitInfo* pReJitInfo = vmReJitInfo.GetDacPtr();
- pvmSharedReJitInfo->SetDacTargetPtr(PTR_TO_TADDR(pReJitInfo->m_pShared));
+ NativeCodeVersionNode* pNativeCodeVersionNode = vmNativeCodeVersionNode.GetDacPtr();
+ pVmILCodeVersionNode->SetDacTargetPtr(PTR_TO_TADDR(pNativeCodeVersionNode->GetILCodeVersion().AsNode()));
#else
- _ASSERTE(!"You shouldn't be calling this - how did you get a ReJitInfo?");
- pvmSharedReJitInfo->SetDacTargetPtr(0);
+ _ASSERTE(!"You shouldn't be calling this - rejit is not supported in this build");
+ pVmILCodeVersionNode->SetDacTargetPtr(0);
#endif
return S_OK;
}
@@ -7207,15 +7232,31 @@ HRESULT DacDbiInterfaceImpl::GetSharedReJitInfo(VMPTR_ReJitInfo vmReJitInfo, OUT
HRESULT DacDbiInterfaceImpl::GetSharedReJitInfoData(VMPTR_SharedReJitInfo vmSharedReJitInfo, DacSharedReJitInfo* pData)
{
DD_ENTER_MAY_THROW;
+ _ASSERTE(!"You shouldn't be calling this - use GetILCodeVersionNodeData instead");
+ return S_OK;
+}
+
+HRESULT DacDbiInterfaceImpl::GetILCodeVersionNodeData(VMPTR_ILCodeVersionNode vmILCodeVersionNode, DacSharedReJitInfo* pData)
+{
+ DD_ENTER_MAY_THROW;
#ifdef FEATURE_REJIT
- SharedReJitInfo* pSharedReJitInfo = vmSharedReJitInfo.GetDacPtr();
- pData->m_state = pSharedReJitInfo->GetState();
- pData->m_pbIL = PTR_TO_CORDB_ADDRESS(pSharedReJitInfo->m_pbIL);
- pData->m_dwCodegenFlags = pSharedReJitInfo->m_dwCodegenFlags;
- pData->m_cInstrumentedMapEntries = (ULONG)pSharedReJitInfo->m_instrumentedILMap.GetCount();
- pData->m_rgInstrumentedMapEntries = PTR_TO_CORDB_ADDRESS(dac_cast<ULONG_PTR>(pSharedReJitInfo->m_instrumentedILMap.GetOffsets()));
+ ILCodeVersionNode* pILCodeVersionNode = vmILCodeVersionNode.GetDacPtr();
+ pData->m_state = pILCodeVersionNode->GetRejitState();
+ pData->m_pbIL = PTR_TO_CORDB_ADDRESS(dac_cast<ULONG_PTR>(pILCodeVersionNode->GetIL()));
+ pData->m_dwCodegenFlags = pILCodeVersionNode->GetJitFlags();
+ const InstrumentedILOffsetMapping* pMapping = pILCodeVersionNode->GetInstrumentedILMap();
+ if (pMapping)
+ {
+ pData->m_cInstrumentedMapEntries = (ULONG)pMapping->GetCount();
+ pData->m_rgInstrumentedMapEntries = PTR_TO_CORDB_ADDRESS(dac_cast<ULONG_PTR>(pMapping->GetOffsets()));
+ }
+ else
+ {
+ pData->m_cInstrumentedMapEntries = 0;
+ pData->m_rgInstrumentedMapEntries = 0;
+ }
#else
- _ASSERTE(!"You shouldn't be calling this - how did you get a SharedReJitInfo?");
+ _ASSERTE(!"You shouldn't be calling this - rejit isn't supported in this build");
#endif
return S_OK;
}
diff --git a/src/debug/daccess/dacdbiimpl.h b/src/debug/daccess/dacdbiimpl.h
index a86072325c..b13c75a561 100644
--- a/src/debug/daccess/dacdbiimpl.h
+++ b/src/debug/daccess/dacdbiimpl.h
@@ -147,9 +147,13 @@ public:
void GetGCHeapInformation(COR_HEAPINFO * pHeapInfo);
HRESULT GetPEFileMDInternalRW(VMPTR_PEFile vmPEFile, OUT TADDR* pAddrMDInternalRW);
HRESULT GetReJitInfo(VMPTR_Module vmModule, mdMethodDef methodTk, OUT VMPTR_ReJitInfo* pReJitInfo);
+ HRESULT GetActiveRejitILCodeVersionNode(VMPTR_Module vmModule, mdMethodDef methodTk, OUT VMPTR_ILCodeVersionNode* pVmILCodeVersionNode);
HRESULT GetReJitInfo(VMPTR_MethodDesc vmMethod, CORDB_ADDRESS codeStartAddress, OUT VMPTR_ReJitInfo* pReJitInfo);
+ HRESULT GetNativeCodeVersionNode(VMPTR_MethodDesc vmMethod, CORDB_ADDRESS codeStartAddress, OUT VMPTR_NativeCodeVersionNode* pVmNativeCodeVersionNode);
HRESULT GetSharedReJitInfo(VMPTR_ReJitInfo vmReJitInfo, VMPTR_SharedReJitInfo* pSharedReJitInfo);
+ HRESULT GetILCodeVersionNode(VMPTR_NativeCodeVersionNode vmNativeCodeVersionNode, VMPTR_ILCodeVersionNode* pVmILCodeVersionNode);
HRESULT GetSharedReJitInfoData(VMPTR_SharedReJitInfo sharedReJitInfo, DacSharedReJitInfo* pData);
+ HRESULT GetILCodeVersionNodeData(VMPTR_ILCodeVersionNode vmILCodeVersionNode, DacSharedReJitInfo* pData);
HRESULT GetDefinesBitField(ULONG32 *pDefines);
HRESULT GetMDStructuresVersion(ULONG32* pMDStructuresVersion);
@@ -174,7 +178,7 @@ private:
SequencePoints * pNativeMap);
// Helper to compose a IL->IL and IL->Native mapping
- void ComposeMapping(InstrumentedILOffsetMapping profilerILMap, ICorDebugInfo::OffsetMapping nativeMap[], ULONG32* pEntryCount);
+ void ComposeMapping(const InstrumentedILOffsetMapping * pProfilerILMap, ICorDebugInfo::OffsetMapping nativeMap[], ULONG32* pEntryCount);
// Helper function to convert an instrumented IL offset to the corresponding original IL offset.
ULONG TranslateInstrumentedILOffsetToOriginal(ULONG ilOffset,
diff --git a/src/debug/daccess/request.cpp b/src/debug/daccess/request.cpp
index ebaa1f833f..0c198182c5 100644
--- a/src/debug/daccess/request.cpp
+++ b/src/debug/daccess/request.cpp
@@ -817,29 +817,32 @@ ClrDataAccess::GetThreadData(CLRDATA_ADDRESS threadAddr, struct DacpThreadData *
}
#ifdef FEATURE_REJIT
-void CopyReJitInfoToReJitData(ReJitInfo * pReJitInfo, DacpReJitData * pReJitData)
+void CopyNativeCodeVersionToReJitData(NativeCodeVersion nativeCodeVersion, NativeCodeVersion activeCodeVersion, DacpReJitData * pReJitData)
{
- pReJitData->rejitID = pReJitInfo->m_pShared->GetId();
- pReJitData->NativeCodeAddr = pReJitInfo->m_pCode;
+ pReJitData->rejitID = nativeCodeVersion.GetILCodeVersion().GetVersionId();
+ pReJitData->NativeCodeAddr = nativeCodeVersion.GetNativeCode();
- switch (pReJitInfo->m_pShared->GetState())
+ if (nativeCodeVersion != activeCodeVersion)
{
- default:
- _ASSERTE(!"Unknown SharedRejitInfo state. DAC should be updated to understand this new state.");
- pReJitData->flags = DacpReJitData::kUnknown;
- break;
-
- case SharedReJitInfo::kStateRequested:
- pReJitData->flags = DacpReJitData::kRequested;
- break;
+ pReJitData->flags = DacpReJitData::kReverted;
+ }
+ else
+ {
+ switch (nativeCodeVersion.GetILCodeVersion().GetRejitState())
+ {
+ default:
+ _ASSERTE(!"Unknown SharedRejitInfo state. DAC should be updated to understand this new state.");
+ pReJitData->flags = DacpReJitData::kUnknown;
+ break;
- case SharedReJitInfo::kStateActive:
- pReJitData->flags = DacpReJitData::kActive;
- break;
+ case ILCodeVersion::kStateRequested:
+ pReJitData->flags = DacpReJitData::kRequested;
+ break;
- case SharedReJitInfo::kStateReverted:
- pReJitData->flags = DacpReJitData::kReverted;
- break;
+ case ILCodeVersion::kStateActive:
+ pReJitData->flags = DacpReJitData::kActive;
+ break;
+ }
}
}
#endif // FEATURE_REJIT
@@ -944,33 +947,39 @@ HRESULT ClrDataAccess::GetMethodDescData(
EX_TRY
{
- ReJitManager * pReJitMgr = pMD->GetReJitManager();
+ CodeVersionManager * pCodeVersionManager = pMD->GetCodeVersionManager();
// Current ReJitInfo
- ReJitInfo * pReJitInfoCurrent = pReJitMgr->FindNonRevertedReJitInfo(pMD);
- if (pReJitInfoCurrent != NULL)
+ ILCodeVersion activeILCodeVersion = pCodeVersionManager->GetActiveILCodeVersion(pMD);
+ NativeCodeVersion activeChild = activeILCodeVersion.GetActiveNativeCodeVersion(pMD);
+ NativeCodeVersionCollection nativeCodeVersions = activeILCodeVersion.GetNativeCodeVersions(pMD);
+ for (NativeCodeVersionIterator iter = nativeCodeVersions.Begin(); iter != nativeCodeVersions.End(); iter++)
{
- CopyReJitInfoToReJitData(pReJitInfoCurrent, &methodDescData->rejitDataCurrent);
+ // This arbitrarily captures the first jitted version for the active IL version, but with
+ // tiered compilation there could be many such method bodies. Before tiered compilation is enabled in a broader set
+ // of scenarios we need to consider how this change goes all the way up to the UI - probably exposing the
+ // entire set of methods.
+ CopyNativeCodeVersionToReJitData(*iter, activeChild, &methodDescData->rejitDataCurrent);
+ break;
}
// Requested ReJitInfo
_ASSERTE(methodDescData->rejitDataRequested.rejitID == 0);
if (methodDescData->requestedIP != NULL)
{
- ReJitInfo * pReJitInfoRequested = pReJitMgr->FindReJitInfo(
+ NativeCodeVersion nativeCodeVersionRequested = pCodeVersionManager->GetNativeCodeVersion(
pMD,
- CLRDATA_ADDRESS_TO_TADDR(methodDescData->requestedIP),
- NULL /* reJitId */);
+ CLRDATA_ADDRESS_TO_TADDR(methodDescData->requestedIP));
- if (pReJitInfoRequested != NULL)
+ if (!nativeCodeVersionRequested.IsNull())
{
- CopyReJitInfoToReJitData(pReJitInfoRequested, &methodDescData->rejitDataRequested);
+ CopyNativeCodeVersionToReJitData(nativeCodeVersionRequested, activeChild, &methodDescData->rejitDataRequested);
}
}
// Total number of jitted rejit versions
ULONG cJittedRejitVersions;
- if (SUCCEEDED(pReJitMgr->GetReJITIDs(pMD, 0 /* cReJitIds */, &cJittedRejitVersions, NULL /* reJitIds */)))
+ if (SUCCEEDED(ReJitManager::GetReJITIDs(pMD, 0 /* cReJitIds */, &cJittedRejitVersions, NULL /* reJitIds */)))
{
methodDescData->cJittedRejitVersions = cJittedRejitVersions;
}
@@ -997,28 +1006,35 @@ HRESULT ClrDataAccess::GetMethodDescData(
ReJITID * rgReJitIds = reJitIds.OpenRawBuffer(cRevertedRejitVersions + 1);
if (rgReJitIds != NULL)
{
- hr = pReJitMgr->GetReJITIDs(pMD, cRevertedRejitVersions + 1, &cReJitIds, rgReJitIds);
+ hr = ReJitManager::GetReJITIDs(pMD, cRevertedRejitVersions + 1, &cReJitIds, rgReJitIds);
if (SUCCEEDED(hr))
{
// Go through rejitids. For each reverted one, populate a entry in rgRevertedRejitData
reJitIds.CloseRawBuffer(cReJitIds);
ULONG iRejitDataReverted = 0;
+ ILCodeVersion activeVersion = pCodeVersionManager->GetActiveILCodeVersion(pMD);
for (COUNT_T i=0;
(i < cReJitIds) && (iRejitDataReverted < cRevertedRejitVersions);
i++)
{
- ReJitInfo * pRejitInfo = pReJitMgr->FindReJitInfo(
- pMD,
- NULL /* pCodeStart */,
- reJitIds[i]);
+ ILCodeVersion ilCodeVersion = pCodeVersionManager->GetILCodeVersion(pMD, reJitIds[i]);
- if ((pRejitInfo == NULL) ||
- (pRejitInfo->m_pShared->GetState() != SharedReJitInfo::kStateReverted))
+ if ((ilCodeVersion.IsNull()) ||
+ (ilCodeVersion == activeVersion))
{
continue;
}
- CopyReJitInfoToReJitData(pRejitInfo, &rgRevertedRejitData[iRejitDataReverted]);
+ NativeCodeVersionCollection nativeCodeVersions = ilCodeVersion.GetNativeCodeVersions(pMD);
+ for (NativeCodeVersionIterator iter = nativeCodeVersions.Begin(); iter != nativeCodeVersions.End(); iter++)
+ {
+ // This arbitrarily captures the first jitted version for this reverted IL version, but with
+ // tiered compilation there could be many such method bodies. Before tiered compilation is enabled in a broader set
+ // of scenarios we need to consider how this change goes all the way up to the UI - probably exposing the
+ // entire set of methods.
+ CopyNativeCodeVersionToReJitData(*iter, activeChild, &rgRevertedRejitData[iRejitDataReverted]);
+ break;
+ }
iRejitDataReverted++;
}
// pcNeededRevertedRejitData != NULL as per condition at top of function (cuz rgRevertedRejitData !=
diff --git a/src/debug/di/module.cpp b/src/debug/di/module.cpp
index 36cc6f5f9e..5d1d3da427 100644
--- a/src/debug/di/module.cpp
+++ b/src/debug/di/module.cpp
@@ -3391,15 +3391,15 @@ mdSignature CordbILCode::GetLocalVarSigToken()
return m_localVarSigToken;
}
-CordbReJitILCode::CordbReJitILCode(CordbFunction *pFunction, SIZE_T encVersion, VMPTR_SharedReJitInfo vmSharedReJitInfo) :
-CordbILCode(pFunction, TargetBuffer(), encVersion, mdSignatureNil, VmPtrToCookie(vmSharedReJitInfo)),
+CordbReJitILCode::CordbReJitILCode(CordbFunction *pFunction, SIZE_T encVersion, VMPTR_ILCodeVersionNode vmILCodeVersionNode) :
+CordbILCode(pFunction, TargetBuffer(), encVersion, mdSignatureNil, VmPtrToCookie(vmILCodeVersionNode)),
m_cClauses(0),
m_cbLocalIL(0),
m_cILMap(0)
{
- _ASSERTE(!vmSharedReJitInfo.IsNull());
+ _ASSERTE(!vmILCodeVersionNode.IsNull());
DacSharedReJitInfo data = { 0 };
- IfFailThrow(GetProcess()->GetDAC()->GetSharedReJitInfoData(vmSharedReJitInfo, &data));
+ IfFailThrow(GetProcess()->GetDAC()->GetILCodeVersionNodeData(vmILCodeVersionNode, &data));
IfFailThrow(Init(&data));
}
diff --git a/src/debug/di/rsfunction.cpp b/src/debug/di/rsfunction.cpp
index 8621edcedc..bf3c49bb98 100644
--- a/src/debug/di/rsfunction.cpp
+++ b/src/debug/di/rsfunction.cpp
@@ -557,14 +557,12 @@ HRESULT CordbFunction::GetActiveReJitRequestILCode(ICorDebugILCode **ppReJitedIL
{
*ppReJitedILCode = NULL;
- VMPTR_ReJitInfo vmReJitInfo = VMPTR_ReJitInfo::NullPtr();
- GetProcess()->GetDAC()->GetReJitInfo(GetModule()->m_vmModule, m_MDToken, &vmReJitInfo);
- if (!vmReJitInfo.IsNull())
+ VMPTR_ILCodeVersionNode vmILCodeVersionNode = VMPTR_ILCodeVersionNode::NullPtr();
+ GetProcess()->GetDAC()->GetActiveRejitILCodeVersionNode(GetModule()->m_vmModule, m_MDToken, &vmILCodeVersionNode);
+ if (!vmILCodeVersionNode.IsNull())
{
- VMPTR_SharedReJitInfo vmSharedReJitInfo = VMPTR_SharedReJitInfo::NullPtr();
- GetProcess()->GetDAC()->GetSharedReJitInfo(vmReJitInfo, &vmSharedReJitInfo);
RSSmartPtr<CordbReJitILCode> pILCode;
- IfFailThrow(LookupOrCreateReJitILCode(vmSharedReJitInfo, &pILCode));
+ IfFailThrow(LookupOrCreateReJitILCode(vmILCodeVersionNode, &pILCode));
IfFailThrow(pILCode->QueryInterface(IID_ICorDebugILCode, (void**)ppReJitedILCode));
}
}
@@ -1165,21 +1163,21 @@ VOID CordbFunction::NotifyCodeCreated(CordbNativeCode* nativeCode)
// If the CordbReJitILCode doesn't exist, it creates it.
//
//
-HRESULT CordbFunction::LookupOrCreateReJitILCode(VMPTR_SharedReJitInfo vmSharedReJitInfo, CordbReJitILCode** ppILCode)
+HRESULT CordbFunction::LookupOrCreateReJitILCode(VMPTR_ILCodeVersionNode vmILCodeVersionNode, CordbReJitILCode** ppILCode)
{
INTERNAL_API_ENTRY(this);
HRESULT hr = S_OK;
_ASSERTE(GetProcess()->ThreadHoldsProcessLock());
- CordbReJitILCode * pILCode = m_reJitILCodes.GetBase(VmPtrToCookie(vmSharedReJitInfo));
+ CordbReJitILCode * pILCode = m_reJitILCodes.GetBase(VmPtrToCookie(vmILCodeVersionNode));
// special case non-existence as need to add to the hash table too
if (pILCode == NULL)
{
// we don't yet support ENC and ReJIT together, so the version should be 1
_ASSERTE(m_dwEnCVersionNumber == 1);
- RSInitHolder<CordbReJitILCode> pILCodeHolder(new CordbReJitILCode(this, 1, vmSharedReJitInfo));
+ RSInitHolder<CordbReJitILCode> pILCodeHolder(new CordbReJitILCode(this, 1, vmILCodeVersionNode));
IfFailRet(m_reJitILCodes.AddBase(pILCodeHolder));
pILCode = pILCodeHolder;
pILCodeHolder.ClearAndMarkDontNeuter();
diff --git a/src/debug/di/rspriv.h b/src/debug/di/rspriv.h
index 1abe087693..e0489c53ad 100644
--- a/src/debug/di/rspriv.h
+++ b/src/debug/di/rspriv.h
@@ -5431,7 +5431,7 @@ public:
HRESULT GetILCode(CordbILCode ** ppCode);
// Finds or creates an ILCode for a given rejit request
- HRESULT LookupOrCreateReJitILCode(VMPTR_SharedReJitInfo vmSharedRejitInfo,
+ HRESULT LookupOrCreateReJitILCode(VMPTR_ILCodeVersionNode vmILCodeVersionNode,
CordbReJitILCode** ppILCode);
@@ -5775,7 +5775,7 @@ class CordbReJitILCode : public CordbILCode, public ICorDebugILCode, public ICor
{
public:
// Initialize a new CordbILCode instance
- CordbReJitILCode(CordbFunction *pFunction, SIZE_T encVersion, VMPTR_SharedReJitInfo vmSharedReJitInfo);
+ CordbReJitILCode(CordbFunction *pFunction, SIZE_T encVersion, VMPTR_ILCodeVersionNode vmILCodeVersionNode);
//-----------------------------------------------------------
// IUnknown
diff --git a/src/debug/di/rsstackwalk.cpp b/src/debug/di/rsstackwalk.cpp
index 8ade4c9a74..466e113d8f 100644
--- a/src/debug/di/rsstackwalk.cpp
+++ b/src/debug/di/rsstackwalk.cpp
@@ -749,13 +749,16 @@ HRESULT CordbStackWalk::GetFrameWorker(ICorDebugFrame ** ppFrame)
RSSmartPtr<CordbReJitILCode> pReJitCode;
EX_TRY_ALLOW_DATATARGET_MISSING_MEMORY
{
- VMPTR_ReJitInfo reJitInfo = VMPTR_ReJitInfo::NullPtr();
- IfFailThrow(GetProcess()->GetDAC()->GetReJitInfo(pJITFuncData->vmNativeCodeMethodDescToken, pJITFuncData->nativeStartAddressPtr, &reJitInfo));
- if (!reJitInfo.IsNull())
+ VMPTR_NativeCodeVersionNode vmNativeCodeVersionNode = VMPTR_NativeCodeVersionNode::NullPtr();
+ IfFailThrow(GetProcess()->GetDAC()->GetNativeCodeVersionNode(pJITFuncData->vmNativeCodeMethodDescToken, pJITFuncData->nativeStartAddressPtr, &vmNativeCodeVersionNode));
+ if (!vmNativeCodeVersionNode.IsNull())
{
- VMPTR_SharedReJitInfo sharedReJitInfo = VMPTR_SharedReJitInfo::NullPtr();
- IfFailThrow(GetProcess()->GetDAC()->GetSharedReJitInfo(reJitInfo, &sharedReJitInfo));
- IfFailThrow(pFunction->LookupOrCreateReJitILCode(sharedReJitInfo, &pReJitCode));
+ VMPTR_ILCodeVersionNode vmILCodeVersionNode = VMPTR_ILCodeVersionNode::NullPtr();
+ IfFailThrow(GetProcess()->GetDAC()->GetILCodeVersionNode(vmNativeCodeVersionNode, &vmILCodeVersionNode));
+ if (!vmILCodeVersionNode.IsNull())
+ {
+ IfFailThrow(pFunction->LookupOrCreateReJitILCode(vmILCodeVersionNode, &pReJitCode));
+ }
}
}
EX_END_CATCH_ALLOW_DATATARGET_MISSING_MEMORY
diff --git a/src/debug/inc/dacdbiinterface.h b/src/debug/inc/dacdbiinterface.h
index 4077ad426a..5e765c94f3 100644
--- a/src/debug/inc/dacdbiinterface.h
+++ b/src/debug/inc/dacdbiinterface.h
@@ -2532,6 +2532,7 @@ public:
virtual
HRESULT GetPEFileMDInternalRW(VMPTR_PEFile vmPEFile, OUT TADDR* pAddrMDInternalRW) = 0;
+ // DEPRECATED - use GetActiveRejitILCodeVersionNode
// Retrieves the active ReJitInfo for a given module/methodDef, if it exists.
// Active is defined as after GetReJitParameters returns from the profiler dll and
// no call to Revert has completed yet.
@@ -2550,17 +2551,16 @@ public:
virtual
HRESULT GetReJitInfo(VMPTR_Module vmModule, mdMethodDef methodTk, OUT VMPTR_ReJitInfo* pReJitInfo) = 0;
- // Retrieves the active ReJitInfo for a given MethodDesc/code address, if it exists.
- // Active is defined as after GetReJitParameters returns from the profiler dll and
- // no call to Revert has completed yet.
+ // DEPRECATED - use GetNativeCodeVersionNode
+ // Retrieves the ReJitInfo for a given MethodDesc/code address, if it exists.
//
//
// Arguments:
// vmMethod - The method to look for
// codeStartAddress - The code start address disambiguates between multiple rejitted instances
// of the method.
- // pReJitInfo - [out] The RejitInfo request, if any, that is active on this method. If no request
- // is active this will be pReJitInfo->IsNull() == TRUE.
+ // pReJitInfo - [out] The RejitInfo request that corresponds to this MethodDesc/code address, if it exists.
+ // NULL otherwise.
//
// Returns:
// S_OK regardless of whether a rejit request is active or not, as long as the answer is certain
@@ -2569,7 +2569,7 @@ public:
virtual
HRESULT GetReJitInfo(VMPTR_MethodDesc vmMethod, CORDB_ADDRESS codeStartAddress, OUT VMPTR_ReJitInfo* pReJitInfo) = 0;
-
+ // DEPRECATED - use GetILCodeVersionNode
// Retrieves the SharedReJitInfo for a given ReJitInfo.
//
//
@@ -2584,6 +2584,7 @@ public:
virtual
HRESULT GetSharedReJitInfo(VMPTR_ReJitInfo vmReJitInfo, VMPTR_SharedReJitInfo* pSharedReJitInfo) = 0;
+ // DEPRECATED - use GetILCodeVersionNodeData
// Retrieves useful data from a SharedReJitInfo such as IL code and IL mapping.
//
//
@@ -2630,6 +2631,71 @@ public:
virtual
HRESULT GetMDStructuresVersion(ULONG32* pMDStructuresVersion) = 0;
+ // Retrieves the active rejit ILCodeVersionNode for a given module/methodDef, if it exists.
+ // Active is defined as after GetReJitParameters returns from the profiler dll and
+ // no call to Revert has completed yet.
+ //
+ //
+ // Arguments:
+ // vmModule - The module to search in
+ // methodTk - The methodDef token indicates the method within the module to check
+ // pILCodeVersionNode - [out] The Rejit request, if any, that is active on this method. If no request
+ // is active this will be pILCodeVersionNode->IsNull() == TRUE.
+ //
+ // Returns:
+ // S_OK regardless of whether a rejit request is active or not, as long as the answer is certain
+ // error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible
+ //
+ virtual
+ HRESULT GetActiveRejitILCodeVersionNode(VMPTR_Module vmModule, mdMethodDef methodTk, OUT VMPTR_ILCodeVersionNode* pVmILCodeVersionNode) = 0;
+
+ // Retrieves the NativeCodeVersionNode for a given MethodDesc/code address, if it exists.
+ // NOTE: The initial (default) code generated for a MethodDesc is a valid MethodDesc/code address pair but it won't have a corresponding
+ // NativeCodeVersionNode.
+ //
+ //
+ // Arguments:
+ // vmMethod - The method to look for
+ // codeStartAddress - The code start address disambiguates between multiple jitted instances of the method.
+ // pVmNativeCodeVersionNode - [out] The NativeCodeVersionNode request that corresponds to this MethodDesc/code address, if it exists.
+ // NULL otherwise.
+ //
+ // Returns:
+ // S_OK regardless of whether a rejit request is active or not, as long as the answer is certain
+ // error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible
+ //
+ virtual
+ HRESULT GetNativeCodeVersionNode(VMPTR_MethodDesc vmMethod, CORDB_ADDRESS codeStartAddress, OUT VMPTR_NativeCodeVersionNode* pVmNativeCodeVersionNode) = 0;
+
+ // Retrieves the ILCodeVersionNode for a given NativeCodeVersionNode.
+ // This may return a NULL node if the native code belongs to the default IL version for this method.
+ //
+ //
+ // Arguments:
+ // vmNativeCodeVersionNode - The NativeCodeVersionNode to inspect
+ // pVmILCodeVersionNode - [out] The ILCodeVersionNode that is pointed to by vmNativeCodeVersionNode, if any.
+ //
+ // Returns:
+ // S_OK if no error
+ // error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible
+ //
+ virtual
+ HRESULT GetILCodeVersionNode(VMPTR_NativeCodeVersionNode vmNativeCodeVersionNode, VMPTR_ILCodeVersionNode* pVmILCodeVersionNode) = 0;
+
+ // Retrieves useful data from an ILCodeVersion such as IL code and IL mapping.
+ //
+ //
+ // Arguments:
+ // ilCodeVersionNode - The ILCodeVersionNode to inspect
+ // pData - [out] Various properties of the ILCodeVersionNode such as IL code and IL mapping.
+ //
+ // Returns:
+ // S_OK if no error
+ // error HRESULTs such as CORDBG_READ_VIRTUAL_FAILURE are possible
+ //
+ virtual
+ HRESULT GetILCodeVersionNodeData(VMPTR_ILCodeVersionNode ilCodeVersionNode, DacSharedReJitInfo* pData) = 0;
+
// The following tag tells the DD-marshalling tool to stop scanning.
// END_MARSHAL
diff --git a/src/debug/inc/dbgipcevents.h b/src/debug/inc/dbgipcevents.h
index 1b4dec7c16..dc900660c3 100644
--- a/src/debug/inc/dbgipcevents.h
+++ b/src/debug/inc/dbgipcevents.h
@@ -879,7 +879,8 @@ DEFINE_VMPTR(class SimpleRWLock, PTR_SimpleRWLock, VMPTR_SimpleRWLock);
DEFINE_VMPTR(class SimpleRWLock, PTR_SimpleRWLock, VMPTR_RWLock);
DEFINE_VMPTR(struct ReJitInfo, PTR_ReJitInfo, VMPTR_ReJitInfo);
DEFINE_VMPTR(struct SharedReJitInfo, PTR_SharedReJitInfo, VMPTR_SharedReJitInfo);
-
+DEFINE_VMPTR(class NativeCodeVersionNode, PTR_NativeCodeVersionNode, VMPTR_NativeCodeVersionNode);
+DEFINE_VMPTR(class ILCodeVersionNode, PTR_ILCodeVersionNode, VMPTR_ILCodeVersionNode);
typedef CORDB_ADDRESS GENERICS_TYPE_TOKEN;
diff --git a/src/inc/corhlpr.h b/src/inc/corhlpr.h
index 02555c9ec3..5b263a5382 100644
--- a/src/inc/corhlpr.h
+++ b/src/inc/corhlpr.h
@@ -633,6 +633,10 @@ extern "C" {
class COR_ILMETHOD_DECODER : public COR_ILMETHOD_FAT
{
public:
+ // This returns an uninitialized decoder, suitable for placement new but nothing
+ // else. Use with caution.
+ COR_ILMETHOD_DECODER() {}
+
// Typically the ONLY way you should access COR_ILMETHOD is through
// this constructor so format changes are easier.
COR_ILMETHOD_DECODER(const COR_ILMETHOD* header)
diff --git a/src/inc/corjit.h b/src/inc/corjit.h
index e6e8257afe..39eafe2a89 100644
--- a/src/inc/corjit.h
+++ b/src/inc/corjit.h
@@ -152,8 +152,10 @@ public:
#if defined(_TARGET_ARM_)
CORJIT_FLAG_RELATIVE_CODE_RELOCS = 41, // JIT should generate PC-relative address computations instead of EE relocation records
#else // !defined(_TARGET_ARM_)
- CORJIT_FLAG_UNUSED11 = 41
+ CORJIT_FLAG_UNUSED11 = 41,
#endif // !defined(_TARGET_ARM_)
+
+ CORJIT_FLAG_NO_INLINING = 42 // JIT should not inline any called method into this method
};
CORJIT_FLAGS()
diff --git a/src/inc/shash.h b/src/inc/shash.h
index cece2dd345..1650ca15b4 100644
--- a/src/inc/shash.h
+++ b/src/inc/shash.h
@@ -327,6 +327,7 @@ class SHash : public TRAITS
count_t m_tableSize;
count_t m_index;
+
Index(const SHash *hash, BOOL begin)
: m_table(hash->m_table),
m_tableSize(hash->m_tableSize),
diff --git a/src/jit/jitee.h b/src/jit/jitee.h
index 7a03dd69a9..5fc2c2cd8b 100644
--- a/src/jit/jitee.h
+++ b/src/jit/jitee.h
@@ -84,8 +84,10 @@ public:
#if defined(_TARGET_ARM_)
JIT_FLAG_RELATIVE_CODE_RELOCS = 41, // JIT should generate PC-relative address computations instead of EE relocation records
#else // !defined(_TARGET_ARM_)
- JIT_FLAG_UNUSED11 = 41
+ JIT_FLAG_UNUSED11 = 41,
#endif // !defined(_TARGET_ARM_)
+
+ JIT_FLAG_NO_INLINING = 42, // JIT should not inline any called method into this method
};
// clang-format on
@@ -204,6 +206,8 @@ public:
#endif // _TARGET_ARM_
+ FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_NO_INLINING, JIT_FLAG_NO_INLINING);
+
#undef FLAGS_EQUAL
}
diff --git a/src/vm/CMakeLists.txt b/src/vm/CMakeLists.txt
index 8bb2292816..b0da9587c8 100644
--- a/src/vm/CMakeLists.txt
+++ b/src/vm/CMakeLists.txt
@@ -48,6 +48,7 @@ set(VM_SOURCES_DAC_AND_WKS_COMMON
classhash.cpp
clsload.cpp
codeman.cpp
+ codeversion.cpp
comdelegate.cpp
contractimpl.cpp
coreassemblyspec.cpp
@@ -79,6 +80,7 @@ set(VM_SOURCES_DAC_AND_WKS_COMMON
generics.cpp
hash.cpp
hillclimbing.cpp
+ ilinstrumentation.cpp
ilstubcache.cpp
ilstubresolver.cpp
inlinetracking.cpp
@@ -217,7 +219,6 @@ set(VM_SOURCES_WKS
interpreter.cpp
invokeutil.cpp
jithelpers.cpp
- listlock.cpp
managedmdimport.cpp
marshalnative.cpp
marvin32.cpp
diff --git a/src/vm/appdomain.cpp b/src/vm/appdomain.cpp
index 946009ac06..71635a3bf5 100644
--- a/src/vm/appdomain.cpp
+++ b/src/vm/appdomain.cpp
@@ -58,7 +58,6 @@
#include "typeequivalencehash.hpp"
#endif
-#include "listlock.inl"
#include "appdomain.inl"
#include "typeparse.h"
#include "mdaassistants.h"
@@ -758,8 +757,8 @@ BaseDomain::BaseDomain()
m_ClassInitLock.PreInit();
m_ILStubGenLock.PreInit();
-#ifdef FEATURE_REJIT
- m_reJitMgr.PreInit(this == (BaseDomain *) g_pSharedDomainMemory);
+#ifdef FEATURE_CODE_VERSIONING
+ m_codeVersionManager.PreInit(this == (BaseDomain *)g_pSharedDomainMemory);
#endif
} //BaseDomain::BaseDomain
@@ -874,22 +873,23 @@ void BaseDomain::Terminate()
m_DomainLocalBlockCrst.Destroy();
m_InteropDataCrst.Destroy();
+ JitListLockEntry* pJitElement;
ListLockEntry* pElement;
// All the threads that are in this domain had better be stopped by this
// point.
//
// We might be jitting or running a .cctor so we need to empty that queue.
- pElement = m_JITLock.Pop(TRUE);
- while (pElement)
+ pJitElement = m_JITLock.Pop(TRUE);
+ while (pJitElement)
{
#ifdef STRICT_JITLOCK_ENTRY_LEAK_DETECTION
_ASSERTE ((m_JITLock.m_pHead->m_dwRefCount == 1
&& m_JITLock.m_pHead->m_hrResultCode == E_FAIL) ||
dbg_fDrasticShutdown || g_fInControlC);
#endif // STRICT_JITLOCK_ENTRY_LEAK_DETECTION
- delete(pElement);
- pElement = m_JITLock.Pop(TRUE);
+ delete(pJitElement);
+ pJitElement = m_JITLock.Pop(TRUE);
}
m_JITLock.Destroy();
@@ -4280,7 +4280,6 @@ void AppDomain::Init()
#endif //FEATURE_COMINTEROP
#ifdef FEATURE_TIERED_COMPILATION
- m_callCounter.SetTieredCompilationManager(GetTieredCompilationManager());
m_tieredCompilationManager.Init(GetId());
#endif
#endif // CROSSGEN_COMPILE
@@ -5040,7 +5039,7 @@ FileLoadLock::~FileLoadLock()
MODE_ANY;
}
CONTRACTL_END;
- ((PEFile *) m_pData)->Release();
+ ((PEFile *) m_data)->Release();
}
DomainFile *FileLoadLock::GetDomainFile()
@@ -8145,14 +8144,14 @@ void AppDomain::Exit(BOOL fRunFinalizers, BOOL fAsyncExit)
LOG((LF_APPDOMAIN | LF_CORDB, LL_INFO10, "AppDomain::Domain [%d] %#08x %ls is exited.\n",
GetId().m_dwId, this, GetFriendlyNameForLogging()));
- ReJitManager::OnAppDomainExit(this);
-
// Send ETW events for this domain's unload and potentially iterate through this
// domain's modules & assemblies to send events for their unloads as well. This
// needs to occur before STAGE_FINALIZED (to ensure everything is there), so we do
// this before any finalization occurs at all.
ETW::LoaderLog::DomainUnload(this);
+ CodeVersionManager::OnAppDomainExit(this);
+
//
// Spin running finalizers until we flush them all. We need to make multiple passes
// in case the finalizers create more finalizable objects. This is important to clear
diff --git a/src/vm/appdomain.hpp b/src/vm/appdomain.hpp
index 18bc73e5a5..c5af6e79bc 100644
--- a/src/vm/appdomain.hpp
+++ b/src/vm/appdomain.hpp
@@ -49,6 +49,8 @@
#include "callcounter.h"
#endif
+#include "codeversion.h"
+
class BaseDomain;
class SystemDomain;
class SharedDomain;
@@ -839,7 +841,7 @@ public:
pEntry != NULL;
pEntry = pEntry->m_pNext)
{
- if (((PEFile *)pEntry->m_pData)->Equals(pFile))
+ if (((PEFile *)pEntry->m_data)->Equals(pFile))
{
return pEntry;
}
@@ -949,6 +951,9 @@ typedef FileLoadLock::Holder FileLoadLockHolder;
typedef ReleaseHolder<FileLoadLock> FileLoadLockRefHolder;
#endif // DACCESS_COMPILE
+ typedef ListLockBase<NativeCodeVersion> JitListLock;
+ typedef ListLockEntryBase<NativeCodeVersion> JitListLockEntry;
+
#ifdef _MSC_VER
#pragma warning(push)
@@ -1204,7 +1209,7 @@ public:
return &m_ClassInitLock;
}
- ListLock* GetJitLock()
+ JitListLock* GetJitLock()
{
LIMITED_METHOD_CONTRACT;
return &m_JITLock;
@@ -1398,7 +1403,7 @@ protected:
CrstExplicitInit m_crstAssemblyList;
BOOL m_fDisableInterfaceCache; // RCW COM interface cache
ListLock m_ClassInitLock;
- ListLock m_JITLock;
+ JitListLock m_JITLock;
ListLock m_ILStubGenLock;
// Fusion context, used for adding assemblies to the is domain. It defines
@@ -1547,12 +1552,21 @@ public:
return m_dwSizedRefHandles;
}
- // Profiler rejit
+#ifdef FEATURE_CODE_VERSIONING
+private:
+ CodeVersionManager m_codeVersionManager;
+
+public:
+ CodeVersionManager* GetCodeVersionManager() { return &m_codeVersionManager; }
+#endif //FEATURE_CODE_VERSIONING
+
+#ifdef FEATURE_TIERED_COMPILATION
private:
- ReJitManager m_reJitMgr;
+ CallCounter m_callCounter;
public:
- ReJitManager * GetReJitManager() { return &m_reJitMgr; }
+ CallCounter* GetCallCounter() { return &m_callCounter; }
+#endif
#ifdef DACCESS_COMPILE
public:
@@ -3823,15 +3837,6 @@ public:
private:
TieredCompilationManager m_tieredCompilationManager;
-public:
- CallCounter * GetCallCounter()
- {
- LIMITED_METHOD_CONTRACT;
- return &m_callCounter;
- }
-
-private:
- CallCounter m_callCounter;
#endif
#ifdef FEATURE_COMINTEROP
diff --git a/src/vm/callcounter.cpp b/src/vm/callcounter.cpp
index 90013c79fb..14d9e6e6a4 100644
--- a/src/vm/callcounter.cpp
+++ b/src/vm/callcounter.cpp
@@ -23,23 +23,6 @@ CallCounter::CallCounter()
m_lock.Init(LOCK_TYPE_DEFAULT);
}
-// Init our connection to the tiered compilation manager during
-// AppDomain startup. This pointer will remain valid for the lifetime
-// of the AppDomain.
-void CallCounter::SetTieredCompilationManager(TieredCompilationManager* pTieredCompilationManager)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- CAN_TAKE_LOCK;
- MODE_PREEMPTIVE;
- }
- CONTRACTL_END;
-
- m_pTieredCompilationManager.Store(pTieredCompilationManager);
-}
-
// This is called by the prestub each time the method is invoked in a particular
// AppDomain (the AppDomain for which AppDomain.GetCallCounter() == this). These
// calls continue until we backpatch the prestub to avoid future calls. This allows
@@ -92,7 +75,7 @@ BOOL CallCounter::OnMethodCalled(MethodDesc* pMethodDesc)
}
}
- return m_pTieredCompilationManager.Load()->OnMethodCalled(pMethodDesc, callCount);
+ return GetAppDomain()->GetTieredCompilationManager()->OnMethodCalled(pMethodDesc, callCount);
}
#endif // FEATURE_TIERED_COMPILATION
diff --git a/src/vm/callcounter.h b/src/vm/callcounter.h
index 82d14b76d9..ed98ccb1c8 100644
--- a/src/vm/callcounter.h
+++ b/src/vm/callcounter.h
@@ -70,13 +70,10 @@ public:
CallCounter();
#endif
- void SetTieredCompilationManager(TieredCompilationManager* pTieredCompilationManager);
BOOL OnMethodCalled(MethodDesc* pMethodDesc);
private:
- VolatilePtr<TieredCompilationManager> m_pTieredCompilationManager;
-
// fields protected by lock
SpinLock m_lock;
CallCounterHash m_methodToCallCount;
diff --git a/src/vm/ceeload.cpp b/src/vm/ceeload.cpp
index 68af979b2f..fd5d2b12c5 100644
--- a/src/vm/ceeload.cpp
+++ b/src/vm/ceeload.cpp
@@ -99,85 +99,6 @@
#define NGEN_STATICS_ALLCLASSES_WERE_LOADED -1
-
-//---------------------------------------------------------------------------------------
-InstrumentedILOffsetMapping::InstrumentedILOffsetMapping()
-{
- LIMITED_METHOD_DAC_CONTRACT;
-
- m_cMap = 0;
- m_rgMap = NULL;
- _ASSERTE(IsNull());
-}
-
-//---------------------------------------------------------------------------------------
-//
-// Check whether there is any mapping information stored in this object.
-//
-// Notes:
-// The memory should be alive throughout the process lifetime until
-// the Module containing the instrumented method is destructed.
-//
-
-BOOL InstrumentedILOffsetMapping::IsNull()
-{
- LIMITED_METHOD_DAC_CONTRACT;
-
- _ASSERTE((m_cMap == 0) == (m_rgMap == NULL));
- return (m_cMap == 0);
-}
-
-#if !defined(DACCESS_COMPILE)
-//---------------------------------------------------------------------------------------
-//
-// Release the memory used by the array of COR_IL_MAPs.
-//
-// Notes:
-// * The memory should be alive throughout the process lifetime until the Module containing
-// the instrumented method is destructed.
-// * This struct should be read-only in DAC builds.
-//
-
-void InstrumentedILOffsetMapping::Clear()
-{
- LIMITED_METHOD_CONTRACT;
-
- if (m_rgMap != NULL)
- {
- delete [] m_rgMap;
- }
-
- m_cMap = 0;
- m_rgMap = NULL;
-}
-#endif // !DACCESS_COMPILE
-
-#if !defined(DACCESS_COMPILE)
-void InstrumentedILOffsetMapping::SetMappingInfo(SIZE_T cMap, COR_IL_MAP * rgMap)
-{
- WRAPPER_NO_CONTRACT;
- _ASSERTE((cMap == 0) == (rgMap == NULL));
- m_cMap = cMap;
- m_rgMap = ARRAY_PTR_COR_IL_MAP(rgMap);
-}
-#endif // !DACCESS_COMPILE
-
-SIZE_T InstrumentedILOffsetMapping::GetCount() const
-{
- LIMITED_METHOD_DAC_CONTRACT;
-
- _ASSERTE((m_cMap == 0) == (m_rgMap == NULL));
- return m_cMap;
-}
-
-ARRAY_PTR_COR_IL_MAP InstrumentedILOffsetMapping::GetOffsets() const
-{
- LIMITED_METHOD_DAC_CONTRACT;
-
- _ASSERTE((m_cMap == 0) == (m_rgMap == NULL));
- return m_rgMap;
-}
-
BOOL Module::HasInlineTrackingMap()
{
LIMITED_METHOD_DAC_CONTRACT;
@@ -14235,16 +14156,7 @@ void Module::ExpandAll()
|| pMD->HasClassInstantiation())
&& (pMD->MayHaveNativeCode() && !pMD->IsFCallOrIntrinsic()))
{
- COR_ILMETHOD * ilHeader = pMD->GetILHeader();
- COR_ILMETHOD_DECODER::DecoderStatus ignored;
- NewHolder<COR_ILMETHOD_DECODER> pHeader(new COR_ILMETHOD_DECODER(ilHeader,
- pMD->GetMDImport(),
- &ignored));
-#ifdef FEATURE_INTERPRETER
- pMD->MakeJitWorker(pHeader, CORJIT_FLAGS(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE));
-#else
- pMD->MakeJitWorker(pHeader, CORJIT_FLAGS());
-#endif
+ pMD->PrepareInitialCode();
}
}
static void CompileMethodsForMethodTable(MethodTable * pMT)
diff --git a/src/vm/ceeload.h b/src/vm/ceeload.h
index bc9937a828..99b4ad8f35 100644
--- a/src/vm/ceeload.h
+++ b/src/vm/ceeload.h
@@ -47,6 +47,8 @@
#include "readytoruninfo.h"
#endif
+#include "ilinstrumentation.h"
+
class PELoader;
class Stub;
class MethodDesc;
@@ -77,7 +79,9 @@ class CerNgenRootTable;
struct MethodContextElement;
class TypeHandleList;
class ProfileEmitter;
-class ReJitManager;
+class CodeVersionManager;
+class CallCounter;
+class TieredCompilationManager;
class TrackingMap;
struct MethodInModule;
class PersistentInlineTrackingMapNGen;
@@ -1085,104 +1089,6 @@ typedef SHash<DynamicILBlobTraits> DynamicILBlobTable;
typedef DPTR(DynamicILBlobTable) PTR_DynamicILBlobTable;
-// declare an array type of COR_IL_MAP entries
-typedef ArrayDPTR(COR_IL_MAP) ARRAY_PTR_COR_IL_MAP;
-
-//---------------------------------------------------------------------------------------
-//
-// A profiler may instrument a method by changing the IL. This is typically done when the profiler receives
-// a JITCompilationStarted notification. The profiler also has the option to provide the runtime with
-// a mapping between original IL offsets and instrumented IL offsets. This struct is a simple container
-// for storing the mapping information. We store the mapping information on the Module class, where it can
-// be accessed by the debugger from out-of-process.
-//
-
-class InstrumentedILOffsetMapping
-{
-public:
- InstrumentedILOffsetMapping();
-
- // Check whether there is any mapping information stored in this object.
- BOOL IsNull();
-
-#if !defined(DACCESS_COMPILE)
- // Release the memory used by the array of COR_IL_MAPs.
- void Clear();
-
- void SetMappingInfo(SIZE_T cMap, COR_IL_MAP * rgMap);
-#endif // !DACCESS_COMPILE
-
- SIZE_T GetCount() const;
- ARRAY_PTR_COR_IL_MAP GetOffsets() const;
-
-private:
- SIZE_T m_cMap; // the number of elements in m_rgMap
- ARRAY_PTR_COR_IL_MAP m_rgMap; // an array of COR_IL_MAPs
-};
-
-//---------------------------------------------------------------------------------------
-//
-// Hash table entry for storing InstrumentedILOffsetMapping. This is keyed by the MethodDef token.
-//
-
-struct ILOffsetMappingEntry
-{
- ILOffsetMappingEntry()
- {
- LIMITED_METHOD_DAC_CONTRACT;
-
- m_methodToken = mdMethodDefNil;
- // No need to initialize m_mapping. The default ctor of InstrumentedILOffsetMapping does the job.
- }
-
- ILOffsetMappingEntry(mdMethodDef token, InstrumentedILOffsetMapping mapping)
- {
- LIMITED_METHOD_DAC_CONTRACT;
-
- m_methodToken = token;
- m_mapping = mapping;
- }
-
- mdMethodDef m_methodToken;
- InstrumentedILOffsetMapping m_mapping;
-};
-
-//---------------------------------------------------------------------------------------
-//
-// This class is used to create the hash table for the instrumented IL offset mapping.
-// It encapsulates the desired behaviour of the templated hash table and implements
-// the various functions needed by the hash table.
-//
-
-class ILOffsetMappingTraits : public NoRemoveSHashTraits<DefaultSHashTraits<ILOffsetMappingEntry> >
-{
-public:
- typedef mdMethodDef key_t;
-
- static key_t GetKey(element_t e)
- {
- LIMITED_METHOD_DAC_CONTRACT;
- return e.m_methodToken;
- }
- static BOOL Equals(key_t k1, key_t k2)
- {
- LIMITED_METHOD_DAC_CONTRACT;
- return (k1 == k2);
- }
- static count_t Hash(key_t k)
- {
- LIMITED_METHOD_DAC_CONTRACT;
- return (count_t)(size_t)k;
- }
- static const element_t Null()
- {
- LIMITED_METHOD_DAC_CONTRACT;
- ILOffsetMappingEntry e;
- return e;
- }
- static bool IsNull(const element_t &e) { LIMITED_METHOD_DAC_CONTRACT; return e.m_methodToken == mdMethodDefNil; }
-};
-
// ESymbolFormat specified the format used by a symbol stream
typedef enum
{
@@ -1192,11 +1098,6 @@ typedef enum
}ESymbolFormat;
-// Hash table of profiler-provided instrumented IL offset mapping, keyed by the MethodDef token
-typedef SHash<ILOffsetMappingTraits> ILOffsetMappingTable;
-typedef DPTR(ILOffsetMappingTable) PTR_ILOffsetMappingTable;
-
-
#ifdef FEATURE_COMINTEROP
//---------------------------------------------------------------------------------------
@@ -1885,7 +1786,12 @@ protected:
ClassLoader *GetClassLoader();
PTR_BaseDomain GetDomain();
- ReJitManager * GetReJitManager();
+#ifdef FEATURE_CODE_VERSIONING
+ CodeVersionManager * GetCodeVersionManager();
+#endif
+#ifdef FEATURE_TIERED_COMPILATION
+ CallCounter * GetCallCounter();
+#endif
mdFile GetModuleRef()
{
diff --git a/src/vm/ceeload.inl b/src/vm/ceeload.inl
index 8226dce7d7..3afef732cc 100644
--- a/src/vm/ceeload.inl
+++ b/src/vm/ceeload.inl
@@ -656,10 +656,20 @@ inline MethodTable* Module::GetDynamicClassMT(DWORD dynamicClassID)
return m_pDynamicStaticsInfo[dynamicClassID].pEnclosingMT;
}
-inline ReJitManager * Module::GetReJitManager()
+#ifdef FEATURE_CODE_VERSIONING
+inline CodeVersionManager * Module::GetCodeVersionManager()
{
LIMITED_METHOD_CONTRACT;
- return GetDomain()->GetReJitManager();
+ return GetDomain()->GetCodeVersionManager();
}
+#endif // FEATURE_CODE_VERSIONING
+
+#ifdef FEATURE_TIERED_COMPILATION
+inline CallCounter * Module::GetCallCounter()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetDomain()->GetCallCounter();
+}
+#endif // FEATURE_TIERED_COMPILATION
#endif // CEELOAD_INL_
diff --git a/src/vm/classcompat.cpp b/src/vm/classcompat.cpp
index 91004cdbc7..84ec3958ce 100644
--- a/src/vm/classcompat.cpp
+++ b/src/vm/classcompat.cpp
@@ -54,7 +54,6 @@
#include "clrtocomcall.h"
#include "runtimecallablewrapper.h"
-#include "listlock.inl"
#include "generics.h"
#include "contractimpl.h"
diff --git a/src/vm/codeversion.cpp b/src/vm/codeversion.cpp
new file mode 100644
index 0000000000..10d3013f35
--- /dev/null
+++ b/src/vm/codeversion.cpp
@@ -0,0 +1,2862 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+// ===========================================================================
+// File: CodeVersion.cpp
+//
+// ===========================================================================
+
+#include "common.h"
+#include "codeversion.h"
+
+#ifdef FEATURE_CODE_VERSIONING
+#include "threadsuspend.h"
+#include "methoditer.h"
+#include "../debug/ee/debugger.h"
+#include "../debug/ee/walker.h"
+#include "../debug/ee/controller.h"
+#endif // FEATURE_CODE_VERSIONING
+
+#ifndef FEATURE_CODE_VERSIONING
+
+//
+// When not using code versioning we've got a minimal implementation of
+// NativeCodeVersion that simply wraps a MethodDesc* with no additional
+// versioning information
+//
+
+NativeCodeVersion::NativeCodeVersion(const NativeCodeVersion & rhs) : m_pMethod(rhs.m_pMethod) {}
+NativeCodeVersion::NativeCodeVersion(PTR_MethodDesc pMethod) : m_pMethod(pMethod) {}
+BOOL NativeCodeVersion::IsNull() const { return m_pMethod == NULL; }
+PTR_MethodDesc NativeCodeVersion::GetMethodDesc() const { return m_pMethod; }
+PCODE NativeCodeVersion::GetNativeCode() const { return m_pMethod->GetNativeCode(); }
+NativeCodeVersionId NativeCodeVersion::GetVersionId() const { return 0; }
+ReJITID NativeCodeVersion::GetILCodeVersionId() const { return 0; } // without code versioning there is only the default IL version, id 0
+ILCodeVersion NativeCodeVersion::GetILCodeVersion() const { return ILCodeVersion(m_pMethod); }
+#ifndef DACCESS_COMPILE
+BOOL NativeCodeVersion::SetNativeCodeInterlocked(PCODE pCode, PCODE pExpected) { return m_pMethod->SetNativeCodeInterlocked(pCode, pExpected); }
+#endif
+bool NativeCodeVersion::operator==(const NativeCodeVersion & rhs) const { return m_pMethod == rhs.m_pMethod; }
+bool NativeCodeVersion::operator!=(const NativeCodeVersion & rhs) const { return !operator==(rhs); }
+
+
+#else // FEATURE_CODE_VERSIONING
+
+
+// This HRESULT is only used as a private implementation detail. If it escapes through public APIS
+// it is a bug. Corerror.xml has a comment in it reserving this value for our use but it doesn't
+// appear in the public headers.
+
+#define CORPROF_E_RUNTIME_SUSPEND_REQUIRED 0x80131381
+
+#ifndef DACCESS_COMPILE
+NativeCodeVersionNode::NativeCodeVersionNode(NativeCodeVersionId id, MethodDesc* pMethodDesc, ReJITID parentId) :
+ m_pNativeCode(NULL),
+ m_pMethodDesc(pMethodDesc),
+ m_parentId(parentId),
+ m_pNextMethodDescSibling(NULL),
+ m_id(id),
+ m_optTier(NativeCodeVersion::OptimizationTier0),
+ m_flags(0)
+{}
+#endif
+
+#ifdef DEBUG
+BOOL NativeCodeVersionNode::LockOwnedByCurrentThread() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetMethodDesc()->GetCodeVersionManager()->LockOwnedByCurrentThread();
+}
+#endif //DEBUG
+
+PTR_MethodDesc NativeCodeVersionNode::GetMethodDesc() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pMethodDesc;
+}
+
+PCODE NativeCodeVersionNode::GetNativeCode() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pNativeCode;
+}
+
+ReJITID NativeCodeVersionNode::GetILVersionId() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_parentId;
+}
+
+ILCodeVersion NativeCodeVersionNode::GetILCodeVersion() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+#ifdef DEBUG
+ if (GetILVersionId() != 0)
+ {
+ _ASSERTE(LockOwnedByCurrentThread());
+ }
+#endif
+ PTR_MethodDesc pMD = GetMethodDesc();
+ return pMD->GetCodeVersionManager()->GetILCodeVersion(pMD, GetILVersionId());
+}
+
+NativeCodeVersionId NativeCodeVersionNode::GetVersionId() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_id;
+}
+
+#ifndef DACCESS_COMPILE
+BOOL NativeCodeVersionNode::SetNativeCodeInterlocked(PCODE pCode, PCODE pExpected)
+{
+ LIMITED_METHOD_CONTRACT;
+ return FastInterlockCompareExchangePointer(&m_pNativeCode,
+ (TADDR&)pCode, (TADDR&)pExpected) == (TADDR&)pExpected;
+}
+#endif
+
+BOOL NativeCodeVersionNode::IsActiveChildVersion() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(LockOwnedByCurrentThread());
+ return (m_flags & IsActiveChildFlag) != 0;
+}
+
+#ifndef DACCESS_COMPILE
+void NativeCodeVersionNode::SetActiveChildFlag(BOOL isActive)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(LockOwnedByCurrentThread());
+ if (isActive)
+ {
+ m_flags |= IsActiveChildFlag;
+ }
+ else
+ {
+ m_flags &= ~IsActiveChildFlag;
+ }
+}
+#endif
+
+
+#ifdef FEATURE_TIERED_COMPILATION
+NativeCodeVersion::OptimizationTier NativeCodeVersionNode::GetOptimizationTier() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_optTier.Load();
+}
+#ifndef DACCESS_COMPILE
+void NativeCodeVersionNode::SetOptimizationTier(NativeCodeVersion::OptimizationTier tier)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ m_optTier.Store(tier);
+}
+#endif
+#endif // FEATURE_TIERED_COMPILATION
+
+NativeCodeVersion::NativeCodeVersion() :
+ m_storageKind(StorageKind::Unknown)
+{}
+
+NativeCodeVersion::NativeCodeVersion(const NativeCodeVersion & rhs) :
+ m_storageKind(rhs.m_storageKind)
+{
+ if(m_storageKind == StorageKind::Explicit)
+ {
+ m_pVersionNode = rhs.m_pVersionNode;
+ }
+ else if(m_storageKind == StorageKind::Synthetic)
+ {
+ m_synthetic = rhs.m_synthetic;
+ }
+}
+
+NativeCodeVersion::NativeCodeVersion(PTR_NativeCodeVersionNode pVersionNode) :
+ m_storageKind(pVersionNode != NULL ? StorageKind::Explicit : StorageKind::Unknown),
+ m_pVersionNode(pVersionNode)
+{}
+
+NativeCodeVersion::NativeCodeVersion(PTR_MethodDesc pMethod) :
+ m_storageKind(pMethod != NULL ? StorageKind::Synthetic : StorageKind::Unknown)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ m_synthetic.m_pMethodDesc = pMethod;
+}
+
+BOOL NativeCodeVersion::IsNull() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_storageKind == StorageKind::Unknown;
+}
+
+BOOL NativeCodeVersion::IsDefaultVersion() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_storageKind == StorageKind::Synthetic;
+}
+
+PTR_MethodDesc NativeCodeVersion::GetMethodDesc() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->GetMethodDesc();
+ }
+ else
+ {
+ return m_synthetic.m_pMethodDesc;
+ }
+}
+
+PCODE NativeCodeVersion::GetNativeCode() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->GetNativeCode();
+ }
+ else
+ {
+ return GetMethodDesc()->GetNativeCode();
+ }
+}
+
+ReJITID NativeCodeVersion::GetILCodeVersionId() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->GetILVersionId();
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+ILCodeVersion NativeCodeVersion::GetILCodeVersion() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->GetILCodeVersion();
+ }
+ else
+ {
+ PTR_MethodDesc pMethod = GetMethodDesc();
+ return ILCodeVersion(dac_cast<PTR_Module>(pMethod->GetModule()), pMethod->GetMemberDef());
+ }
+}
+
+NativeCodeVersionId NativeCodeVersion::GetVersionId() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->GetVersionId();
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+#ifndef DACCESS_COMPILE
+BOOL NativeCodeVersion::SetNativeCodeInterlocked(PCODE pCode, PCODE pExpected)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->SetNativeCodeInterlocked(pCode, pExpected);
+ }
+ else
+ {
+ return GetMethodDesc()->SetNativeCodeInterlocked(pCode, pExpected);
+ }
+}
+#endif
+
+BOOL NativeCodeVersion::IsActiveChildVersion() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->IsActiveChildVersion();
+ }
+ else
+ {
+ MethodDescVersioningState* pMethodVersioningState = GetMethodDescVersioningState();
+ if (pMethodVersioningState == NULL)
+ {
+ return TRUE;
+ }
+ return pMethodVersioningState->IsDefaultVersionActiveChild();
+ }
+}
+
+PTR_MethodDescVersioningState NativeCodeVersion::GetMethodDescVersioningState() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ PTR_MethodDesc pMethodDesc = GetMethodDesc();
+ CodeVersionManager* pCodeVersionManager = pMethodDesc->GetCodeVersionManager();
+ return pCodeVersionManager->GetMethodDescVersioningState(pMethodDesc);
+}
+
+#ifndef DACCESS_COMPILE
+void NativeCodeVersion::SetActiveChildFlag(BOOL isActive)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ AsNode()->SetActiveChildFlag(isActive);
+ }
+ else
+ {
+ MethodDescVersioningState* pMethodVersioningState = GetMethodDescVersioningState();
+ pMethodVersioningState->SetDefaultVersionActiveChildFlag(isActive);
+ }
+}
+
+MethodDescVersioningState* NativeCodeVersion::GetMethodDescVersioningState()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ MethodDesc* pMethodDesc = GetMethodDesc();
+ CodeVersionManager* pCodeVersionManager = pMethodDesc->GetCodeVersionManager();
+ return pCodeVersionManager->GetMethodDescVersioningState(pMethodDesc);
+}
+#endif
+
+#ifdef FEATURE_TIERED_COMPILATION
+NativeCodeVersion::OptimizationTier NativeCodeVersion::GetOptimizationTier() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->GetOptimizationTier();
+ }
+ else
+ {
+ return NativeCodeVersion::OptimizationTier0;
+ }
+}
+
+#ifndef DACCESS_COMPILE
+void NativeCodeVersion::SetOptimizationTier(NativeCodeVersion::OptimizationTier tier)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ AsNode()->SetOptimizationTier(tier);
+ }
+ else
+ {
+ _ASSERTE(!"Do not call SetOptimizationTier on default code versions - these versions are immutable");
+ }
+}
+#endif
+#endif
+
+PTR_NativeCodeVersionNode NativeCodeVersion::AsNode() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return m_pVersionNode;
+ }
+ else
+ {
+ return NULL;
+ }
+}
+
+#ifndef DACCESS_COMPILE
+PTR_NativeCodeVersionNode NativeCodeVersion::AsNode()
+{
+ LIMITED_METHOD_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return m_pVersionNode;
+ }
+ else
+ {
+ return NULL;
+ }
+}
+#endif
+
+bool NativeCodeVersion::operator==(const NativeCodeVersion & rhs) const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return (rhs.m_storageKind == StorageKind::Explicit) &&
+ (rhs.AsNode() == AsNode());
+ }
+ else if (m_storageKind == StorageKind::Synthetic)
+ {
+ return (rhs.m_storageKind == StorageKind::Synthetic) &&
+ (m_synthetic.m_pMethodDesc == rhs.m_synthetic.m_pMethodDesc);
+ }
+ else
+ {
+ return rhs.m_storageKind == StorageKind::Unknown;
+ }
+}
+bool NativeCodeVersion::operator!=(const NativeCodeVersion & rhs) const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return !operator==(rhs);
+}
+
+NativeCodeVersionCollection::NativeCodeVersionCollection(PTR_MethodDesc pMethodDescFilter, ILCodeVersion ilCodeFilter) :
+ m_pMethodDescFilter(pMethodDescFilter),
+ m_ilCodeFilter(ilCodeFilter)
+{
+}
+
+NativeCodeVersionIterator NativeCodeVersionCollection::Begin()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return NativeCodeVersionIterator(this);
+}
+NativeCodeVersionIterator NativeCodeVersionCollection::End()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return NativeCodeVersionIterator(NULL);
+}
+
+NativeCodeVersionIterator::NativeCodeVersionIterator(NativeCodeVersionCollection* pNativeCodeVersionCollection) :
+ m_stage(IterationStage::Initial),
+ m_pCollection(pNativeCodeVersionCollection),
+ m_pLinkedListCur(dac_cast<PTR_NativeCodeVersionNode>(nullptr))
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ First();
+}
+// Iterates the native code versions of a method. The sequence is:
+// (1) the implicit/default native code version stored on the MethodDesc
+//     itself, then
+// (2) any explicit versions recorded as NativeCodeVersionNodes in the
+//     MethodDescVersioningState sibling linked list.
+// An optional IL code version filter restricts the results to children of
+// that IL version.
+void NativeCodeVersionIterator::First()
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    if (m_pCollection == NULL)
+    {
+        // Nothing to enumerate - park the iterator at End. The Next() call
+        // below is then a no-op because no stage branch matches.
+        m_stage = IterationStage::End;
+    }
+    Next();
+}
+void NativeCodeVersionIterator::Next()
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    if (m_stage == IterationStage::Initial)
+    {
+        ILCodeVersion ilCodeFilter = m_pCollection->m_ilCodeFilter;
+        m_stage = IterationStage::ImplicitCodeVersion;
+        // The implicit (default) native code version is a child of the
+        // default IL code version only, so yield it unless an explicit IL
+        // filter excludes it.
+        if (ilCodeFilter.IsNull() || ilCodeFilter.IsDefaultVersion())
+        {
+            m_cur = NativeCodeVersion(m_pCollection->m_pMethodDescFilter);
+            return;
+        }
+    }
+    if (m_stage == IterationStage::ImplicitCodeVersion)
+    {
+        m_stage = IterationStage::LinkedList;
+        CodeVersionManager* pCodeVersionManager = m_pCollection->m_pMethodDescFilter->GetCodeVersionManager();
+        MethodDescVersioningState* pMethodDescVersioningState = pCodeVersionManager->GetMethodDescVersioningState(m_pCollection->m_pMethodDescFilter);
+        if (pMethodDescVersioningState == NULL)
+        {
+            // No explicit native code versions were ever created for this method.
+            m_pLinkedListCur = NULL;
+        }
+        else
+        {
+            ILCodeVersion ilCodeFilter = m_pCollection->m_ilCodeFilter;
+            // Advance to the first list node that satisfies the IL version
+            // filter (or simply the first node when no filter is set).
+            m_pLinkedListCur = pMethodDescVersioningState->GetFirstVersionNode();
+            while (m_pLinkedListCur != NULL && !ilCodeFilter.IsNull() && ilCodeFilter.GetVersionId() != m_pLinkedListCur->GetILVersionId())
+            {
+                m_pLinkedListCur = m_pLinkedListCur->m_pNextMethodDescSibling;
+            }
+        }
+        if (m_pLinkedListCur != NULL)
+        {
+            m_cur = NativeCodeVersion(m_pLinkedListCur);
+            return;
+        }
+    }
+    if (m_stage == IterationStage::LinkedList)
+    {
+        if (m_pLinkedListCur != NULL)
+        {
+            ILCodeVersion ilCodeFilter = m_pCollection->m_ilCodeFilter;
+            // Step past the current node, skipping any nodes the IL filter
+            // excludes.
+            do
+            {
+                m_pLinkedListCur = m_pLinkedListCur->m_pNextMethodDescSibling;
+            } while (m_pLinkedListCur != NULL && !ilCodeFilter.IsNull() && ilCodeFilter.GetVersionId() != m_pLinkedListCur->GetILVersionId());
+        }
+        if (m_pLinkedListCur != NULL)
+        {
+            m_cur = NativeCodeVersion(m_pLinkedListCur);
+            return;
+        }
+        else
+        {
+            // List exhausted - reset m_cur to the Null version so this
+            // iterator compares equal to End() (see Equal below).
+            m_stage = IterationStage::End;
+            m_cur = NativeCodeVersion();
+        }
+    }
+}
+// Returns the version the iterator is currently positioned on.
+const NativeCodeVersion & NativeCodeVersionIterator::Get() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return m_cur;
+}
+// Iterators compare equal when they refer to the same version; an exhausted
+// iterator holds the Null version and therefore equals End().
+bool NativeCodeVersionIterator::Equal(const NativeCodeVersionIterator &i) const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return m_cur == i.m_cur;
+}
+
+// Default constructor (also used by the DAC): node starts with no module,
+// rejit id 0, no IL body, no jit flags, and in the kStateRequested state.
+ILCodeVersionNode::ILCodeVersionNode() :
+    m_pModule(dac_cast<PTR_Module>(nullptr)),
+    m_methodDef(0),
+    m_rejitId(0),
+    m_pNextILVersionNode(dac_cast<PTR_ILCodeVersionNode>(nullptr)),
+    m_rejitState(ILCodeVersion::kStateRequested),
+    m_pIL(dac_cast<PTR_COR_ILMETHOD>(nullptr)),
+    m_jitFlags(0)
+{}
+
+#ifndef DACCESS_COMPILE
+// Creates a new explicit IL code version node for (pModule, methodDef) with
+// the given rejit id. The node begins life in the kStateRequested state.
+ILCodeVersionNode::ILCodeVersionNode(Module* pModule, mdMethodDef methodDef, ReJITID id) :
+    m_pModule(pModule),
+    m_methodDef(methodDef),
+    m_rejitId(id),
+    m_pNextILVersionNode(dac_cast<PTR_ILCodeVersionNode>(nullptr)),
+    m_rejitState(ILCodeVersion::kStateRequested),
+    m_pIL(nullptr),
+    m_jitFlags(0)
+{}
+#endif
+
+#ifdef DEBUG
+// Debug-only helper used by asserts: true when the current thread holds the
+// CodeVersionManager lock of the module that owns this node.
+BOOL ILCodeVersionNode::LockOwnedByCurrentThread() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return GetModule()->GetCodeVersionManager()->LockOwnedByCurrentThread();
+}
+#endif //DEBUG
+
+PTR_Module ILCodeVersionNode::GetModule() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return m_pModule;
+}
+
+mdMethodDef ILCodeVersionNode::GetMethodDef() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return m_methodDef;
+}
+
+// The ReJITID that identifies this IL version to the profiler API.
+ReJITID ILCodeVersionNode::GetVersionId() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return m_rejitId;
+}
+
+// The next three getters read via .Load(), suggesting volatile-style fields
+// that may be read without holding the version manager lock.
+ILCodeVersion::RejitFlags ILCodeVersionNode::GetRejitState() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return m_rejitState.Load();
+}
+
+PTR_COR_ILMETHOD ILCodeVersionNode::GetIL() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return dac_cast<PTR_COR_ILMETHOD>(m_pIL.Load());
+}
+
+DWORD ILCodeVersionNode::GetJitFlags() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return m_jitFlags.Load();
+}
+
+// Unlike the fields above, the instrumented IL map and the next-node link are
+// not updated atomically, so callers must hold the version manager lock.
+const InstrumentedILOffsetMapping* ILCodeVersionNode::GetInstrumentedILMap() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    _ASSERTE(LockOwnedByCurrentThread());
+    return &m_instrumentedILMap;
+}
+
+PTR_ILCodeVersionNode ILCodeVersionNode::GetNextILVersionNode() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    _ASSERTE(LockOwnedByCurrentThread());
+    return m_pNextILVersionNode;
+}
+
+#ifndef DACCESS_COMPILE
+// Mutators are runtime-only (not available to the DAC).
+void ILCodeVersionNode::SetRejitState(ILCodeVersion::RejitFlags newState)
+{
+    LIMITED_METHOD_CONTRACT;
+    m_rejitState.Store(newState);
+}
+
+void ILCodeVersionNode::SetIL(COR_ILMETHOD* pIL)
+{
+    LIMITED_METHOD_CONTRACT;
+    m_pIL.Store(pIL);
+}
+
+void ILCodeVersionNode::SetJitFlags(DWORD flags)
+{
+    LIMITED_METHOD_CONTRACT;
+    m_jitFlags.Store(flags);
+}
+
+void ILCodeVersionNode::SetInstrumentedILMap(SIZE_T cMap, COR_IL_MAP * rgMap)
+{
+    LIMITED_METHOD_CONTRACT;
+    _ASSERTE(LockOwnedByCurrentThread());
+    m_instrumentedILMap.SetMappingInfo(cMap, rgMap);
+}
+
+void ILCodeVersionNode::SetNextILVersionNode(ILCodeVersionNode* pNextILVersionNode)
+{
+    LIMITED_METHOD_CONTRACT;
+    _ASSERTE(LockOwnedByCurrentThread());
+    m_pNextILVersionNode = pNextILVersionNode;
+}
+#endif
+
+// ILCodeVersion is a small value-type handle with three storage kinds:
+// - Explicit:  refers to a heap-allocated ILCodeVersionNode
+// - Synthetic: represents the default IL version, identified only by a
+//              (Module, methodDef) pair with no backing node
+// - Unknown:   the Null handle
+ILCodeVersion::ILCodeVersion() :
+    m_storageKind(StorageKind::Unknown)
+{}
+
+ILCodeVersion::ILCodeVersion(const ILCodeVersion & ilCodeVersion) :
+    m_storageKind(ilCodeVersion.m_storageKind)
+{
+    // Copy only the union member that is active for this storage kind.
+    if(m_storageKind == StorageKind::Explicit)
+    {
+        m_pVersionNode = ilCodeVersion.m_pVersionNode;
+    }
+    else if(m_storageKind == StorageKind::Synthetic)
+    {
+        m_synthetic = ilCodeVersion.m_synthetic;
+    }
+}
+
+// Wraps an explicit node; a NULL node produces the Null handle.
+ILCodeVersion::ILCodeVersion(PTR_ILCodeVersionNode pILCodeVersionNode) :
+    m_storageKind(pILCodeVersionNode != NULL ? StorageKind::Explicit : StorageKind::Unknown),
+    m_pVersionNode(pILCodeVersionNode)
+{}
+
+// Builds the synthetic default version for (pModule, methodDef); a NULL
+// module produces the Null handle.
+ILCodeVersion::ILCodeVersion(PTR_Module pModule, mdMethodDef methodDef) :
+    m_storageKind(pModule != NULL ? StorageKind::Synthetic : StorageKind::Unknown)
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    m_synthetic.m_pModule = pModule;
+    m_synthetic.m_methodDef = methodDef;
+}
+
+// Equality requires matching storage kinds, then compares the identity that
+// is meaningful for that kind (node pointer, or module+methodDef pair).
+bool ILCodeVersion::operator==(const ILCodeVersion & rhs) const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    if (m_storageKind == StorageKind::Explicit)
+    {
+        return (rhs.m_storageKind == StorageKind::Explicit) &&
+            (AsNode() == rhs.AsNode());
+    }
+    else if (m_storageKind == StorageKind::Synthetic)
+    {
+        return (rhs.m_storageKind == StorageKind::Synthetic) &&
+            (m_synthetic.m_pModule == rhs.m_synthetic.m_pModule) &&
+            (m_synthetic.m_methodDef == rhs.m_synthetic.m_methodDef);
+    }
+    else
+    {
+        return rhs.m_storageKind == StorageKind::Unknown;
+    }
+}
+
+BOOL ILCodeVersion::IsNull() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return m_storageKind == StorageKind::Unknown;
+}
+
+// The default IL version is exactly the Synthetic storage kind; explicit
+// nodes are always non-default versions.
+BOOL ILCodeVersion::IsDefaultVersion() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return m_storageKind == StorageKind::Synthetic;
+}
+
+PTR_Module ILCodeVersion::GetModule() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    if (m_storageKind == StorageKind::Explicit)
+    {
+        return AsNode()->GetModule();
+    }
+    else
+    {
+        return m_synthetic.m_pModule;
+    }
+}
+
+mdMethodDef ILCodeVersion::GetMethodDef() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    if (m_storageKind == StorageKind::Explicit)
+    {
+        return AsNode()->GetMethodDef();
+    }
+    else
+    {
+        return m_synthetic.m_methodDef;
+    }
+}
+
+// The default (synthetic) version always reports ReJITID 0.
+ReJITID ILCodeVersion::GetVersionId() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    if (m_storageKind == StorageKind::Explicit)
+    {
+        return AsNode()->GetVersionId();
+    }
+    else
+    {
+        return 0;
+    }
+}
+
+// All native code versions whose IL parent is this version, for the given
+// closed (instantiated) MethodDesc.
+NativeCodeVersionCollection ILCodeVersion::GetNativeCodeVersions(PTR_MethodDesc pClosedMethodDesc) const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return NativeCodeVersionCollection(pClosedMethodDesc, *this);
+}
+
+// Linear scan for the child native code version marked active; returns the
+// Null version when no child is active (e.g. none has been created yet).
+NativeCodeVersion ILCodeVersion::GetActiveNativeCodeVersion(PTR_MethodDesc pClosedMethodDesc) const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    NativeCodeVersionCollection versions = GetNativeCodeVersions(pClosedMethodDesc);
+    for (NativeCodeVersionIterator cur = versions.Begin(), end = versions.End(); cur != end; cur++)
+    {
+        if (cur->IsActiveChildVersion())
+        {
+            return *cur;
+        }
+    }
+    return NativeCodeVersion();
+}
+
+// The default version never goes through the rejit state machine; it is
+// always reported as active.
+ILCodeVersion::RejitFlags ILCodeVersion::GetRejitState() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    if (m_storageKind == StorageKind::Explicit)
+    {
+        return AsNode()->GetRejitState();
+    }
+    else
+    {
+        return ILCodeVersion::kStateActive;
+    }
+}
+
+// Returns the IL body for this version. For explicit versions this is the
+// (possibly profiler-supplied) IL stored on the node; for the default version
+// it is looked up from the MethodDesc's original IL header.
+PTR_COR_ILMETHOD ILCodeVersion::GetIL() const
+{
+    CONTRACTL
+    {
+        THROWS; //GetILHeader throws
+        GC_NOTRIGGER;
+        FORBID_FAULT;
+        MODE_ANY;
+    }
+    CONTRACTL_END
+
+    if (m_storageKind == StorageKind::Explicit)
+    {
+        return AsNode()->GetIL();
+    }
+    else
+    {
+        PTR_Module pModule = GetModule();
+        PTR_MethodDesc pMethodDesc = dac_cast<PTR_MethodDesc>(pModule->LookupMethodDef(GetMethodDef()));
+        // NULL when the MethodDesc for this token has not been created/loaded.
+        if (pMethodDesc == NULL)
+        {
+            return NULL;
+        }
+        else
+        {
+            return dac_cast<PTR_COR_ILMETHOD>(pMethodDesc->GetILHeader(TRUE));
+        }
+    }
+}
+
+// Non-throwing wrapper over GetIL(); converts any exception into a NULL
+// result (terminal exceptions are still rethrown by the catch policy).
+PTR_COR_ILMETHOD ILCodeVersion::GetILNoThrow() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    PTR_COR_ILMETHOD ret;
+    EX_TRY
+    {
+        ret = GetIL();
+    }
+    EX_CATCH
+    {
+        ret = NULL;
+    }
+    EX_END_CATCH(RethrowTerminalExceptions);
+    return ret;
+}
+
+// Default version has no profiler-overridden jit flags.
+DWORD ILCodeVersion::GetJitFlags() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    if (m_storageKind == StorageKind::Explicit)
+    {
+        return AsNode()->GetJitFlags();
+    }
+    else
+    {
+        return 0;
+    }
+}
+
+// Default version has no instrumented IL map; callers must handle NULL.
+const InstrumentedILOffsetMapping* ILCodeVersion::GetInstrumentedILMap() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    if (m_storageKind == StorageKind::Explicit)
+    {
+        return AsNode()->GetInstrumentedILMap();
+    }
+    else
+    {
+        return NULL;
+    }
+}
+
+#ifndef DACCESS_COMPILE
+// The mutators below forward to the backing node; they are only valid on
+// Explicit versions (AsNode() would return NULL for Synthetic/Unknown).
+void ILCodeVersion::SetRejitState(RejitFlags newState)
+{
+    LIMITED_METHOD_CONTRACT;
+    AsNode()->SetRejitState(newState);
+}
+
+void ILCodeVersion::SetIL(COR_ILMETHOD* pIL)
+{
+    LIMITED_METHOD_CONTRACT;
+    AsNode()->SetIL(pIL);
+}
+
+void ILCodeVersion::SetJitFlags(DWORD flags)
+{
+    LIMITED_METHOD_CONTRACT;
+    AsNode()->SetJitFlags(flags);
+}
+
+void ILCodeVersion::SetInstrumentedILMap(SIZE_T cMap, COR_IL_MAP * rgMap)
+{
+    LIMITED_METHOD_CONTRACT;
+    AsNode()->SetInstrumentedILMap(cMap, rgMap);
+}
+
+// Creates a new native code version under this IL version via the
+// CodeVersionManager. The only expected failure is allocation failure.
+HRESULT ILCodeVersion::AddNativeCodeVersion(MethodDesc* pClosedMethodDesc, NativeCodeVersion* pNativeCodeVersion)
+{
+    LIMITED_METHOD_CONTRACT;
+    CodeVersionManager* pManager = GetModule()->GetCodeVersionManager();
+    HRESULT hr = pManager->AddNativeCodeVersion(*this, pClosedMethodDesc, pNativeCodeVersion);
+    if (FAILED(hr))
+    {
+        _ASSERTE(hr == E_OUTOFMEMORY);
+        return hr;
+    }
+    return S_OK;
+}
+
+// Returns the active native child of this IL version, creating one when
+// none exists yet.
+HRESULT ILCodeVersion::GetOrCreateActiveNativeCodeVersion(MethodDesc* pClosedMethodDesc, NativeCodeVersion* pActiveNativeCodeVersion)
+{
+    LIMITED_METHOD_CONTRACT;
+    HRESULT hr = S_OK;
+    NativeCodeVersion activeNativeChild = GetActiveNativeCodeVersion(pClosedMethodDesc);
+    if (activeNativeChild.IsNull())
+    {
+        if (FAILED(hr = AddNativeCodeVersion(pClosedMethodDesc, &activeNativeChild)))
+        {
+            _ASSERTE(hr == E_OUTOFMEMORY);
+            return hr;
+        }
+    }
+    // The first added child should automatically become active
+    _ASSERTE(GetActiveNativeCodeVersion(pClosedMethodDesc) == activeNativeChild);
+    *pActiveNativeCodeVersion = activeNativeChild;
+    return S_OK;
+}
+
+// Makes the given native code version the active child of this IL version,
+// flipping the active-child flags and, when this IL version is itself the
+// active one for the method, republishing the executable code body.
+HRESULT ILCodeVersion::SetActiveNativeCodeVersion(NativeCodeVersion activeNativeCodeVersion, BOOL fEESuspended)
+{
+    LIMITED_METHOD_CONTRACT;
+    HRESULT hr = S_OK;
+    MethodDesc* pMethodDesc = activeNativeCodeVersion.GetMethodDesc();
+    NativeCodeVersion prevActiveVersion = GetActiveNativeCodeVersion(pMethodDesc);
+    if (prevActiveVersion == activeNativeCodeVersion)
+    {
+        //nothing to do, this version is already active
+        return S_OK;
+    }
+
+    if (!prevActiveVersion.IsNull())
+    {
+        prevActiveVersion.SetActiveChildFlag(FALSE);
+    }
+    activeNativeCodeVersion.SetActiveChildFlag(TRUE);
+
+    // If needed update the published code body for this method
+    CodeVersionManager* pCodeVersionManager = GetModule()->GetCodeVersionManager();
+    if (pCodeVersionManager->GetActiveILCodeVersion(GetModule(), GetMethodDef()) == *this)
+    {
+        if (FAILED(hr = pCodeVersionManager->PublishNativeCodeVersion(pMethodDesc, activeNativeCodeVersion, fEESuspended)))
+        {
+            return hr;
+        }
+    }
+
+    return S_OK;
+}
+
+// Accessors for the backing node. Callers are expected to check the storage
+// kind first; m_pVersionNode is only meaningful for Explicit versions.
+ILCodeVersionNode* ILCodeVersion::AsNode()
+{
+    LIMITED_METHOD_CONTRACT;
+    return m_pVersionNode;
+}
+#endif //DACCESS_COMPILE
+
+PTR_ILCodeVersionNode ILCodeVersion::AsNode() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return m_pVersionNode;
+}
+
+// A lightweight enumerable over all IL code versions of (pModule, methodDef):
+// the synthetic default version first, then any explicit nodes recorded in
+// the ILCodeVersioningState linked list.
+ILCodeVersionCollection::ILCodeVersionCollection(PTR_Module pModule, mdMethodDef methodDef) :
+    m_pModule(pModule),
+    m_methodDef(methodDef)
+{}
+
+ILCodeVersionIterator ILCodeVersionCollection::Begin()
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return ILCodeVersionIterator(this);
+}
+
+// End() is represented by an iterator over a NULL collection.
+ILCodeVersionIterator ILCodeVersionCollection::End()
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return ILCodeVersionIterator(NULL);
+}
+
+ILCodeVersionIterator::ILCodeVersionIterator(const ILCodeVersionIterator & iter) :
+    m_stage(iter.m_stage),
+    m_cur(iter.m_cur),
+    m_pLinkedListCur(iter.m_pLinkedListCur),
+    m_pCollection(iter.m_pCollection)
+{}
+
+// A NULL collection yields the End iterator; otherwise First() positions the
+// iterator on the synthetic default version.
+ILCodeVersionIterator::ILCodeVersionIterator(ILCodeVersionCollection* pCollection) :
+    m_stage(pCollection != NULL ? IterationStage::Initial : IterationStage::End),
+    m_pLinkedListCur(dac_cast<PTR_ILCodeVersionNode>(nullptr)),
+    m_pCollection(pCollection)
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    First();
+}
+
+const ILCodeVersion & ILCodeVersionIterator::Get() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return m_cur;
+}
+
+void ILCodeVersionIterator::First()
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    Next();
+}
+
+void ILCodeVersionIterator::Next()
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    if (m_stage == IterationStage::Initial)
+    {
+        // First result is always the synthetic default IL version.
+        m_stage = IterationStage::ImplicitCodeVersion;
+        m_cur = ILCodeVersion(m_pCollection->m_pModule, m_pCollection->m_methodDef);
+        return;
+    }
+    if (m_stage == IterationStage::ImplicitCodeVersion)
+    {
+        CodeVersionManager* pCodeVersionManager = m_pCollection->m_pModule->GetCodeVersionManager();
+        // Walking the explicit version list requires the version manager lock.
+        _ASSERTE(pCodeVersionManager->LockOwnedByCurrentThread());
+        PTR_ILCodeVersioningState pILCodeVersioningState = pCodeVersionManager->GetILCodeVersioningState(m_pCollection->m_pModule, m_pCollection->m_methodDef);
+        if (pILCodeVersioningState != NULL)
+        {
+            m_pLinkedListCur = pILCodeVersioningState->GetFirstVersionNode();
+        }
+        m_stage = IterationStage::LinkedList;
+        if (m_pLinkedListCur != NULL)
+        {
+            m_cur = ILCodeVersion(m_pLinkedListCur);
+            return;
+        }
+    }
+    if (m_stage == IterationStage::LinkedList)
+    {
+        if (m_pLinkedListCur != NULL)
+        {
+            m_pLinkedListCur = m_pLinkedListCur->GetNextILVersionNode();
+        }
+        if (m_pLinkedListCur != NULL)
+        {
+            m_cur = ILCodeVersion(m_pLinkedListCur);
+            return;
+        }
+        else
+        {
+            // Exhausted - reset to the Null version so we compare equal to End().
+            m_stage = IterationStage::End;
+            m_cur = ILCodeVersion();
+            return;
+        }
+    }
+}
+
+bool ILCodeVersionIterator::Equal(const ILCodeVersionIterator &i) const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return m_cur == i.m_cur;
+}
+
+// Per-MethodDesc versioning bookkeeping: the explicit native code version
+// list, the next native version id to hand out, the jump-stamp state bits,
+// and the saved original code bytes (when FEATURE_JUMPSTAMP is enabled).
+// Initially the implicit default version is the active child.
+MethodDescVersioningState::MethodDescVersioningState(PTR_MethodDesc pMethodDesc) :
+    m_pMethodDesc(pMethodDesc),
+    m_flags(IsDefaultVersionActiveChildFlag),
+    m_nextId(1),
+    m_pFirstVersionNode(dac_cast<PTR_NativeCodeVersionNode>(nullptr))
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+#ifdef FEATURE_JUMPSTAMP
+    ZeroMemory(m_rgSavedCode, JumpStubSize);
+#endif
+}
+
+PTR_MethodDesc MethodDescVersioningState::GetMethodDesc() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return m_pMethodDesc;
+}
+
+#ifndef DACCESS_COMPILE
+// Hands out monotonically increasing native code version ids, starting at 1
+// (0 is implicitly the default version).
+NativeCodeVersionId MethodDescVersioningState::AllocateVersionId()
+{
+    LIMITED_METHOD_CONTRACT;
+    return m_nextId++;
+}
+#endif
+
+PTR_NativeCodeVersionNode MethodDescVersioningState::GetFirstVersionNode() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return m_pFirstVersionNode;
+}
+
+#ifdef FEATURE_JUMPSTAMP
+// The jump-stamp state is packed into the low bits of m_flags (JumpStampMask).
+MethodDescVersioningState::JumpStampFlags MethodDescVersioningState::GetJumpStampState()
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return (JumpStampFlags)(m_flags & JumpStampMask);
+}
+
+#ifndef DACCESS_COMPILE
+void MethodDescVersioningState::SetJumpStampState(JumpStampFlags newState)
+{
+    LIMITED_METHOD_CONTRACT;
+    // Replace only the jump-stamp bits, preserving the other flag bits.
+    m_flags = (m_flags & ~JumpStampMask) | (BYTE)newState;
+}
+#endif // DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+// Brings the method's jump stamp in sync with the given (new active) native
+// code version:
+// - default version active   -> remove the stamp (run original code)
+// - non-default, code ready  -> stamp jumps to the new code
+// - non-default, no code yet -> stamp jumps back to the prestub so the code
+//                               gets generated on next call (requires EE
+//                               suspension)
+HRESULT MethodDescVersioningState::SyncJumpStamp(NativeCodeVersion nativeCodeVersion, BOOL fEESuspended)
+{
+    LIMITED_METHOD_CONTRACT;
+    HRESULT hr = S_OK;
+    PCODE pCode = nativeCodeVersion.IsNull() ? NULL : nativeCodeVersion.GetNativeCode();
+    MethodDesc* pMethod = GetMethodDesc();
+    _ASSERTE(pMethod->IsVersionable() && pMethod->IsVersionableWithJumpStamp());
+
+    if (!pMethod->HasNativeCode())
+    {
+        //we'll set up the jump-stamp when the default native code is created
+        return S_OK;
+    }
+
+    if (!nativeCodeVersion.IsNull() && nativeCodeVersion.IsDefaultVersion())
+    {
+        return UndoJumpStampNativeCode(fEESuspended);
+    }
+    else
+    {
+        // We don't have new code ready yet, jumpstamp back to the prestub to let us generate it the next time
+        // the method is called
+        if (pCode == NULL)
+        {
+            if (!fEESuspended)
+            {
+                return CORPROF_E_RUNTIME_SUSPEND_REQUIRED;
+            }
+            return JumpStampNativeCode();
+        }
+        // We do know the new code body, install the jump stamp now
+        else
+        {
+            return UpdateJumpTarget(fEESuspended, pCode);
+        }
+    }
+}
+#endif // DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+// Simple, thin abstraction of debugger breakpoint patching. Given an address and a
+// previously procured DebuggerControllerPatch governing the code address, this decides
+// whether the code address is patched. If so, it returns a pointer to the debugger's
+// buffer (of what's "underneath" the int 3 patch); otherwise, it returns the code
+// address itself.
+//
+// Arguments:
+//      * pbCode - Code address to return if unpatched
+//      * dbgpatch - DebuggerControllerPatch to test
+//
+// Return Value:
+//      Either pbCode or the debugger's patch buffer, as per description above.
+//
+// Assumptions:
+//      Caller must manually grab (and hold) the ControllerLockHolder and get the
+//      DebuggerControllerPatch before calling this helper.
+//
+// Notes:
+//     pbCode need not equal the code address governed by dbgpatch, but is always
+//     "related" (and sometimes really is equal). For example, this helper may be used
+//     when writing a code byte to an internal rejit buffer (e.g., in preparation for an
+//     eventual 64-bit interlocked write into the code stream), and thus pbCode would
+//     point into the internal rejit buffer whereas dbgpatch governs the corresponding
+//     code byte in the live code stream. This function would then be used to determine
+//     whether a byte should be written into the internal rejit buffer OR into the
+//     debugger controller's breakpoint buffer.
+//
+
+LPBYTE FirstCodeByteAddr(LPBYTE pbCode, DebuggerControllerPatch * dbgpatch)
+{
+    LIMITED_METHOD_CONTRACT;
+
+    if (dbgpatch != NULL && dbgpatch->IsActivated())
+    {
+        // Debugger has patched the code, so return the address of the buffer
+        // holding the original opcode byte that the int 3 replaced.
+        return LPBYTE(&(dbgpatch->opcode));
+    }
+
+    // no active patch, just return the direct code address
+    return pbCode;
+}
+
+
+#ifdef _DEBUG
+#ifndef DACCESS_COMPILE
+// Debug-only check that the original code bytes were captured into
+// m_rgSavedCode before jump stamping. Relies on the invariant (asserted
+// elsewhere in this file) that saved code never starts with a 0 byte, so an
+// all-zero buffer means "nothing saved yet".
+BOOL MethodDescVersioningState::CodeIsSaved()
+{
+    LIMITED_METHOD_CONTRACT;
+
+    for (size_t i = 0; i < sizeof(m_rgSavedCode); i++)
+    {
+        if (m_rgSavedCode[i] != 0)
+            return TRUE;
+    }
+    return FALSE;
+}
+#endif //DACCESS_COMPILE
+#endif //_DEBUG
+
+//---------------------------------------------------------------------------------------
+//
+// Do the actual work of stamping the top of originally-jitted-code with a jmp that goes
+// to the prestub. This can be called in one of three ways:
+//     * Case 1: By RequestReJIT against an already-jitted function, in which case the
+//         PCODE may be inferred by the MethodDesc, and our caller will have suspended
+//         the EE for us, OR
+//     * Case 2: By the prestub worker after jitting the original code of a function
+//         (i.e., the "pre-rejit" scenario). In this case, the EE is not suspended. But
+//         that's ok, because the PCODE has not yet been published to the MethodDesc, and
+//         no thread can be executing inside the originally JITted function yet.
+//     * Case 3: At type/method restore time for an NGEN'ed assembly. This is also the pre-rejit
+//         scenario because we are guaranteed to do this before the code in the module
+//         is executable. EE suspend is not required.
+//
+// Arguments:
+//    * pCode - Case 1 (above): will be NULL, and we can infer the PCODE from the
+//        MethodDesc; Case 2+3 (above, pre-rejit): will be non-NULL, and we'll need to use
+//        this to find the code to stamp on top of.
+//
+// Return Value:
+//    * S_OK: Either we successfully did the jmp-stamp, or a racing thread took care of
+//        it for us.
+//    * Else, HRESULT indicating failure.
+//
+// Assumptions:
+//     The caller will have suspended the EE if necessary (case 1), before this is
+//     called.
+//
+#ifndef DACCESS_COMPILE
+HRESULT MethodDescVersioningState::JumpStampNativeCode(PCODE pCode /* = NULL */)
+{
+    CONTRACTL
+    {
+        NOTHROW;
+        GC_NOTRIGGER;
+        // It may seem dangerous to be stamping jumps over code while a GC is going on,
+        // but we're actually safe. As we assert below, either we're holding the thread
+        // store lock (and thus preventing a GC) OR we're stamping code that has not yet
+        // been published (and will thus not be executed by managed therads or examined
+        // by the GC).
+        MODE_ANY;
+    }
+    CONTRACTL_END;
+
+    PCODE pCodePublished = GetMethodDesc()->GetNativeCode();
+
+    _ASSERTE((pCode != NULL) || (pCodePublished != NULL));
+    _ASSERTE(GetMethodDesc()->GetCodeVersionManager()->LockOwnedByCurrentThread());
+
+    HRESULT hr = S_OK;
+
+    // We'll jump-stamp over pCode, or if pCode is NULL, jump-stamp over the published
+    // code for this's MethodDesc.
+    LPBYTE pbCode = (LPBYTE)pCode;
+    if (pbCode == NULL)
+    {
+        // If caller didn't specify a pCode, just use the one that was published after
+        // the original JIT. (A specific pCode would be passed in the pre-rejit case,
+        // to jump-stamp the original code BEFORE the PCODE gets published.)
+        pbCode = (LPBYTE)pCodePublished;
+    }
+    _ASSERTE(pbCode != NULL);
+
+    // The debugging API may also try to write to the very top of this function (though
+    // with an int 3 for breakpoint purposes). Coordinate with the debugger so we know
+    // whether we can safely patch the actual code, or instead write to the debugger's
+    // buffer.
+    DebuggerController::ControllerLockHolder lockController;
+
+    if (GetJumpStampState() == JumpStampToPrestub)
+    {
+        // The method has already been jump stamped so nothing left to do
+        _ASSERTE(CodeIsSaved());
+        return S_OK;
+    }
+
+    // Remember what we're stamping our jump on top of, so we can replace it during a
+    // revert.
+    // NOTE(review): loop index is a signed int compared against sizeof() -
+    // harmless here (JumpStubSize is small) but generates a sign-compare warning.
+    if (GetJumpStampState() == JumpStampNone)
+    {
+        for (int i = 0; i < sizeof(m_rgSavedCode); i++)
+        {
+            m_rgSavedCode[i] = *FirstCodeByteAddr(pbCode + i, DebuggerController::GetPatchTable()->GetPatch((CORDB_ADDRESS_TYPE *)(pbCode + i)));
+        }
+    }
+
+    EX_TRY
+    {
+        AllocMemTracker amt;
+
+        // This guy might throw on out-of-memory, so rely on the tracker to clean-up
+        Precode * pPrecode = Precode::Allocate(PRECODE_STUB, GetMethodDesc(), GetMethodDesc()->GetLoaderAllocator(), &amt);
+        PCODE target = pPrecode->GetEntryPoint();
+
+#if defined(_X86_) || defined(_AMD64_)
+
+        // Normal unpatched code never starts with a jump
+        _ASSERTE(GetJumpStampState() == JumpStampToActiveVersion ||
+            *FirstCodeByteAddr(pbCode, DebuggerController::GetPatchTable()->GetPatch((CORDB_ADDRESS_TYPE *)pbCode)) != X86_INSTR_JMP_REL32);
+
+        INT64 i64OldCode = *(INT64*)pbCode;
+        INT64 i64NewCode = i64OldCode;
+        LPBYTE pbNewValue = (LPBYTE)&i64NewCode;
+        *pbNewValue = X86_INSTR_JMP_REL32;
+        INT32 UNALIGNED * pOffset = reinterpret_cast<INT32 UNALIGNED *>(&pbNewValue[1]);
+        // This will throw for out-of-memory, so don't write anything until
+        // after he succeeds
+        // This guy will leak/cache/reuse the jumpstub
+        *pOffset = rel32UsingJumpStub(reinterpret_cast<INT32 UNALIGNED *>(pbCode + 1), target, GetMethodDesc(), GetMethodDesc()->GetLoaderAllocator());
+
+        // If we have the EE suspended or the code is unpublished there won't be contention on this code
+        hr = UpdateJumpStampHelper(pbCode, i64OldCode, i64NewCode, FALSE);
+        if (FAILED(hr))
+        {
+            ThrowHR(hr);
+        }
+
+        //
+        // No failure point after this!
+        //
+        amt.SuppressRelease();
+
+#else // _X86_ || _AMD64_
+#error "Need to define a way to jump-stamp the prolog in a safe way for this platform"
+#endif // _X86_ || _AMD64_
+
+        SetJumpStampState(JumpStampToPrestub);
+    }
+    EX_CATCH_HRESULT(hr);
+    _ASSERT(hr == S_OK || hr == E_OUTOFMEMORY);
+
+    if (SUCCEEDED(hr))
+    {
+        _ASSERTE(GetJumpStampState() == JumpStampToPrestub);
+        _ASSERTE(m_rgSavedCode[0] != 0); // saved code should not start with 0
+    }
+
+    return hr;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// After code has been rejitted, this is called to update the jump-stamp to go from
+// pointing to the prestub, to pointing to the newly rejitted code.
+//
+// Arguments:
+//     fEESuspended - TRUE if the caller keeps the EE suspended during this call
+//     pRejittedCode - jitted code for the updated IL this method should execute
+//
+// Assumptions:
+//      This rejit manager's table crst should be held by the caller
+//
+// Returns - S_OK if the jump target is updated
+//           CORPROF_E_RUNTIME_SUSPEND_REQUIRED if the ee isn't suspended and it
+//             will need to be in order to do the update safely
+HRESULT MethodDescVersioningState::UpdateJumpTarget(BOOL fEESuspended, PCODE pRejittedCode)
+{
+    CONTRACTL
+    {
+        NOTHROW;
+        GC_NOTRIGGER;
+        MODE_PREEMPTIVE;
+    }
+    CONTRACTL_END;
+
+    MethodDesc * pMD = GetMethodDesc();
+    _ASSERTE(pMD->GetCodeVersionManager()->LockOwnedByCurrentThread());
+
+    // It isn't safe to overwrite the original method prolog with a jmp because threads might
+    // be at an IP in the middle of the jump stamp already. However converting between different
+    // jump stamps is OK (when done atomically) because this only changes the jmp target, not
+    // instruction boundaries.
+    if (GetJumpStampState() == JumpStampNone && !fEESuspended)
+    {
+        return CORPROF_E_RUNTIME_SUSPEND_REQUIRED;
+    }
+
+    // Beginning of originally JITted code containing the jmp that we will redirect.
+    BYTE * pbCode = (BYTE*)pMD->GetNativeCode();
+
+    // Remember what we're stamping our jump on top of, so we can replace it during a
+    // revert.
+    if (GetJumpStampState() == JumpStampNone)
+    {
+        for (int i = 0; i < sizeof(m_rgSavedCode); i++)
+        {
+            m_rgSavedCode[i] = *FirstCodeByteAddr(pbCode + i, DebuggerController::GetPatchTable()->GetPatch((CORDB_ADDRESS_TYPE *)(pbCode + i)));
+        }
+    }
+
+#if defined(_X86_) || defined(_AMD64_)
+
+    HRESULT hr = S_OK;
+    {
+        DebuggerController::ControllerLockHolder lockController;
+
+        // This will throw for out-of-memory, so don't write anything until
+        // after he succeeds
+        // This guy will leak/cache/reuse the jumpstub
+        INT32 offset = 0;
+        EX_TRY
+        {
+            offset = rel32UsingJumpStub(
+                reinterpret_cast<INT32 UNALIGNED *>(&pbCode[1]),    // base of offset
+                pRejittedCode,                                      // target of jump
+                pMD,
+                pMD->GetLoaderAllocator());
+        }
+        EX_CATCH_HRESULT(hr);
+        _ASSERT(hr == S_OK || hr == E_OUTOFMEMORY);
+        if (FAILED(hr))
+        {
+            return hr;
+        }
+        // For validation later, remember what pbCode is right now
+        INT64 i64OldValue = *(INT64 *)pbCode;
+
+        // Assemble the INT64 of the new code bytes to write. Start with what's there now
+        INT64 i64NewValue = i64OldValue;
+        LPBYTE pbNewValue = (LPBYTE)&i64NewValue;
+
+        // First byte becomes a rel32 jmp instruction (if it wasn't already)
+        *pbNewValue = X86_INSTR_JMP_REL32;
+        // Next 4 bytes are the jmp target (offset to jmp stub)
+        INT32 UNALIGNED * pnOffset = reinterpret_cast<INT32 UNALIGNED *>(&pbNewValue[1]);
+        *pnOffset = offset;
+
+        // Contention is only possible when the EE is running; the helper falls
+        // back to requesting suspension when an atomic update isn't safe.
+        hr = UpdateJumpStampHelper(pbCode, i64OldValue, i64NewValue, !fEESuspended);
+        _ASSERTE(hr == S_OK || (hr == CORPROF_E_RUNTIME_SUSPEND_REQUIRED && !fEESuspended));
+    }
+    if (FAILED(hr))
+    {
+        return hr;
+    }
+
+#else // _X86_ || _AMD64_
+#error "Need to define a way to jump-stamp the prolog in a safe way for this platform"
+#endif // _X86_ || _AMD64_
+
+    // State transition
+    SetJumpStampState(JumpStampToActiveVersion);
+    return S_OK;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Poke the JITted code to satsify a revert request (or to perform an implicit revert as
+// part of a second, third, etc. rejit request). Reinstates the originally JITted code
+// that had been jump-stamped over to perform a prior rejit.
+//
+// Arguments
+//     fEESuspended - TRUE if the caller keeps the EE suspended during this call
+//
+//
+// Return Value:
+//     S_OK to indicate the revert succeeded,
+//     CORPROF_E_RUNTIME_SUSPEND_REQUIRED to indicate the jumpstamp hasn't been reverted
+//       and EE suspension will be needed for success
+//     other failure HRESULT indicating what went wrong.
+//
+// Assumptions:
+//     Caller must be holding the owning ReJitManager's table crst.
+//
+HRESULT MethodDescVersioningState::UndoJumpStampNativeCode(BOOL fEESuspended)
+{
+    CONTRACTL
+    {
+        NOTHROW;
+        GC_NOTRIGGER;
+        MODE_ANY;
+    }
+    CONTRACTL_END;
+
+    _ASSERTE(GetMethodDesc()->GetCodeVersionManager()->LockOwnedByCurrentThread());
+    if (GetJumpStampState() == JumpStampNone)
+    {
+        // Nothing stamped - revert is a no-op.
+        return S_OK;
+    }
+
+    _ASSERTE(m_rgSavedCode[0] != 0); // saved code should not start with 0
+
+    BYTE * pbCode = (BYTE*)GetMethodDesc()->GetNativeCode();
+    DebuggerController::ControllerLockHolder lockController;
+
+#if defined(_X86_) || defined(_AMD64_)
+    // Sanity: the saved bytes are original code (never a jmp), while the live
+    // code must currently begin with the jmp we stamped.
+    _ASSERTE(m_rgSavedCode[0] != X86_INSTR_JMP_REL32);
+    _ASSERTE(*FirstCodeByteAddr(pbCode, DebuggerController::GetPatchTable()->GetPatch((CORDB_ADDRESS_TYPE *)pbCode)) == X86_INSTR_JMP_REL32);
+#else
+#error "Need to define a way to jump-stamp the prolog in a safe way for this platform"
+#endif // _X86_ || _AMD64_
+
+    // For the interlocked compare, remember what pbCode is right now
+    INT64 i64OldValue = *(INT64 *)pbCode;
+    // Assemble the INT64 of the new code bytes to write. Start with what's there now
+    INT64 i64NewValue = i64OldValue;
+    memcpy(LPBYTE(&i64NewValue), m_rgSavedCode, sizeof(m_rgSavedCode));
+    HRESULT hr = UpdateJumpStampHelper(pbCode, i64OldValue, i64NewValue, !fEESuspended);
+    _ASSERTE(hr == S_OK || (hr == CORPROF_E_RUNTIME_SUSPEND_REQUIRED && !fEESuspended));
+    if (hr != S_OK)
+        return hr;
+
+    // Transition state of this ReJitInfo to indicate the MD no longer has any jump stamp
+    SetJumpStampState(JumpStampNone);
+    return S_OK;
+}
+#endif
+
+//---------------------------------------------------------------------------------------
+//
+// This is called to modify the jump-stamp area, the first ReJitInfo::JumpStubSize bytes
+// in the method's code.
+//
+// Notes:
+//      Callers use this method in a variety of circumstances:
+//      a) when the code is unpublished (fContentionPossible == FALSE)
+//      b) when the caller has taken the ThreadStoreLock and suspended the EE
+//         (fContentionPossible == FALSE)
+//      c) when the code is published, the EE isn't suspended, and the jumpstamp
+//         area consists of a single 5 byte long jump instruction
+//         (fContentionPossible == TRUE)
+//      This method will attempt to alter the jump-stamp even if the caller has not prevented
+//      contention, but there is no guarantee it will be succesful. When the caller has prevented
+//      contention, then success is assured. Callers may oportunistically try without
+//      EE suspension, and then upgrade to EE suspension if the first attempt fails.
+//
+// Assumptions:
+//      This rejit manager's table crst should be held by the caller or fContentionPossible==FALSE
+//      The debugger patch table lock should be held by the caller
+//
+// Arguments:
+//      pbCode - pointer to the code where the jump stamp is placed
+//      i64OldValue - the bytes which should currently be at the start of the method code
+//      i64NewValue - the new bytes which should be written at the start of the method code
+//      fContentionPossible - See the Notes section above.
+//
+// Returns:
+//      S_OK => the jumpstamp has been succesfully updated.
+//      CORPROF_E_RUNTIME_SUSPEND_REQUIRED => the jumpstamp remains unchanged (preventing contention will be necessary)
+//      other failing HR => VirtualProtect failed, the jumpstamp remains unchanged
+//
+#ifndef DACCESS_COMPILE
+HRESULT MethodDescVersioningState::UpdateJumpStampHelper(BYTE* pbCode, INT64 i64OldValue, INT64 i64NewValue, BOOL fContentionPossible)
+{
+    CONTRACTL
+    {
+        NOTHROW;
+        GC_NOTRIGGER;
+        MODE_ANY;
+    }
+    CONTRACTL_END;
+
+    MethodDesc * pMD = GetMethodDesc();
+    _ASSERTE(pMD->GetCodeVersionManager()->LockOwnedByCurrentThread() || !fContentionPossible);
+
+    // When ReJIT is enabled, method entrypoints are always at least 8-byte aligned (see
+    // code:EEJitManager::allocCode), so we can do a single 64-bit interlocked operation
+    // to update the jump target.  However, some code may have gotten compiled before
+    // the profiler had a chance to enable ReJIT (e.g., NGENd code, or code JITted
+    // before a profiler attaches).  In such cases, we cannot rely on a simple
+    // interlocked operation, and instead must suspend the runtime to ensure we can
+    // safely update the jmp instruction.
+    //
+    // This method doesn't verify that the method is actually safe to rejit, we expect
+    // callers to do that. At the moment NGEN'ed code is safe to rejit even if
+    // it is unaligned, but code generated before the profiler attaches is not.
+    if (fContentionPossible && !(IS_ALIGNED(pbCode, sizeof(INT64))))
+    {
+        return CORPROF_E_RUNTIME_SUSPEND_REQUIRED;
+    }
+
+    // The debugging API may also try to write to this function (though
+    // with an int 3 for breakpoint purposes). Coordinate with the debugger so we know
+    // whether we can safely patch the actual code, or instead write to the debugger's
+    // buffer.
+    if (fContentionPossible)
+    {
+        // A live debugger patch anywhere in the stamp area means we can't do a
+        // safe lock-free update; ask the caller to suspend the EE instead.
+        for (CORDB_ADDRESS_TYPE* pbProbeAddr = pbCode; pbProbeAddr < pbCode + MethodDescVersioningState::JumpStubSize; pbProbeAddr++)
+        {
+            if (NULL != DebuggerController::GetPatchTable()->GetPatch(pbProbeAddr))
+            {
+                return CORPROF_E_RUNTIME_SUSPEND_REQUIRED;
+            }
+        }
+    }
+
+#if defined(_X86_) || defined(_AMD64_)
+
+    DWORD oldProt;
+    if (!ClrVirtualProtect((LPVOID)pbCode, 8, PAGE_EXECUTE_READWRITE, &oldProt))
+    {
+        return HRESULT_FROM_WIN32(GetLastError());
+    }
+
+    if (fContentionPossible)
+    {
+        INT64 i64InterlockReportedOldValue = FastInterlockCompareExchangeLong((INT64 *)pbCode, i64NewValue, i64OldValue);
+        // Since changes to these bytes are protected by this rejitmgr's m_crstTable, we
+        // shouldn't have two writers conflicting.
+        _ASSERTE(i64InterlockReportedOldValue == i64OldValue);
+    }
+    else
+    {
+        // In this path the caller ensures:
+        //   a) no thread will execute through the prologue area we are modifying
+        //   b) no thread is stopped in a prologue such that it resumes in the middle of code we are modifying
+        //   c) no thread is doing a debugger patch skip operation in which an unmodified copy of the method's
+        //      code could be executed from a patch skip buffer.
+
+        // PERF: we might still want a faster path through here if we aren't debugging that doesn't do
+        // all the patch checks
+        for (int i = 0; i < MethodDescVersioningState::JumpStubSize; i++)
+        {
+            *FirstCodeByteAddr(pbCode + i, DebuggerController::GetPatchTable()->GetPatch(pbCode + i)) = ((BYTE*)&i64NewValue)[i];
+        }
+    }
+
+    if (oldProt != PAGE_EXECUTE_READWRITE)
+    {
+        // The CLR codebase in many locations simply ignores failures to restore the page protections
+        // Its true that it isn't a problem functionally, but it seems a bit sketchy?
+        // I am following the convention for now.
+        ClrVirtualProtect((LPVOID)pbCode, 8, oldProt, &oldProt);
+    }
+
+    FlushInstructionCache(GetCurrentProcess(), pbCode, MethodDescVersioningState::JumpStubSize);
+    return S_OK;
+
+#else // _X86_ || _AMD64_
+#error "Need to define a way to jump-stamp the prolog in a safe way for this platform"
+#endif // _X86_ || _AMD64_
+}
+#endif
+#endif // FEATURE_JUMPSTAMP
+
+// Returns TRUE when the default (initial) code version is currently the active
+// child version for this method, as tracked by the corresponding bit in m_flags.
+BOOL MethodDescVersioningState::IsDefaultVersionActiveChild() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return 0 != (m_flags & IsDefaultVersionActiveChildFlag);
+}
+#ifndef DACCESS_COMPILE
+// Records whether the default code version is the active child for this method.
+// Callers are expected to serialize updates; this is a plain read-modify-write.
+void MethodDescVersioningState::SetDefaultVersionActiveChildFlag(BOOL isActive)
+{
+    LIMITED_METHOD_CONTRACT;
+    // Clear the bit unconditionally, then set it back if requested.
+    m_flags &= ~IsDefaultVersionActiveChildFlag;
+    if (isActive)
+    {
+        m_flags |= IsDefaultVersionActiveChildFlag;
+    }
+}
+
+// Prepends pNativeCodeVersionNode to this MethodDesc's singly-linked list of
+// native code version nodes. The node becomes the new list head; the previous
+// head is chained behind it via m_pNextMethodDescSibling.
+void MethodDescVersioningState::LinkNativeCodeVersionNode(NativeCodeVersionNode* pNativeCodeVersionNode)
+{
+    LIMITED_METHOD_CONTRACT;
+    pNativeCodeVersionNode->m_pNextMethodDescSibling = m_pFirstVersionNode;
+    m_pFirstVersionNode = pNativeCodeVersionNode;
+}
+#endif
+
+// Creates the IL versioning bookkeeping for one (module, methodDef) pair. The
+// active version starts as ILCodeVersion(pModule, methodDef) — the synthesized
+// default IL version — with an empty list of explicit version nodes.
+ILCodeVersioningState::ILCodeVersioningState(PTR_Module pModule, mdMethodDef methodDef) :
+    m_activeVersion(ILCodeVersion(pModule,methodDef)),
+    m_pFirstVersionNode(dac_cast<PTR_ILCodeVersionNode>(nullptr)),
+    m_pModule(pModule),
+    m_methodDef(methodDef)
+{}
+
+
+// Default key: null module / nil token. Used by the hash table implementation.
+ILCodeVersioningState::Key::Key() :
+    m_pModule(dac_cast<PTR_Module>(nullptr)),
+    m_methodDef(0)
+{}
+
+// Key identifying one method definition: the module plus its methodDef token.
+ILCodeVersioningState::Key::Key(PTR_Module pModule, mdMethodDef methodDef) :
+    m_pModule(pModule),
+    m_methodDef(methodDef)
+{}
+
+// Hash for the (module, methodDef) pair: xor of the module address and the
+// token. Cheap and adequate for the SHash table this key feeds.
+size_t ILCodeVersioningState::Key::Hash() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    TADDR moduleAddr = dac_cast<TADDR>(m_pModule);
+    return (size_t)(moduleAddr ^ m_methodDef);
+}
+
+// Two keys are equal only when both the module and the methodDef token match.
+bool ILCodeVersioningState::Key::operator==(const Key & rhs) const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    if (m_pModule != rhs.m_pModule)
+    {
+        return false;
+    }
+    return m_methodDef == rhs.m_methodDef;
+}
+
+// Returns the hash-table key for this state (its module + methodDef identity).
+ILCodeVersioningState::Key ILCodeVersioningState::GetKey() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return Key(m_pModule, m_methodDef);
+}
+
+// Returns the currently active IL version for this method (the default version
+// until SetActiveVersion has been called with something else).
+ILCodeVersion ILCodeVersioningState::GetActiveVersion() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return m_activeVersion;
+}
+
+// Returns the head of the linked list of explicit IL version nodes (NULL when
+// only the default version exists).
+PTR_ILCodeVersionNode ILCodeVersioningState::GetFirstVersionNode() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return m_pFirstVersionNode;
+}
+
+#ifndef DACCESS_COMPILE
+// Records ilActiveCodeVersion as the version new/updated method instantiations
+// should bind to. Callers are expected to hold the code version manager lock.
+void ILCodeVersioningState::SetActiveVersion(ILCodeVersion ilActiveCodeVersion)
+{
+    LIMITED_METHOD_CONTRACT;
+    m_activeVersion = ilActiveCodeVersion;
+}
+
+// Prepends pILCodeVersionNode to this method's list of IL version nodes; the
+// node becomes the new head and the previous head is chained behind it.
+void ILCodeVersioningState::LinkILCodeVersionNode(ILCodeVersionNode* pILCodeVersionNode)
+{
+    LIMITED_METHOD_CONTRACT;
+    pILCodeVersionNode->SetNextILVersionNode(m_pFirstVersionNode);
+    m_pFirstVersionNode = pILCodeVersionNode;
+}
+#endif
+
+// Trivial constructor; real initialization (the Crst) happens in PreInit.
+CodeVersionManager::CodeVersionManager()
+{}
+
+//---------------------------------------------------------------------------------------
+//
+// Called from BaseDomain::BaseDomain to do any constructor-time initialization.
+// Presently, this takes care of initializing the Crst, choosing the type based on
+// whether this CodeVersionManager belongs to the SharedDomain.
+//
+// Arguments:
+//    * fSharedDomain - nonzero iff this CodeVersionManager belongs to the SharedDomain.
+//
+
+void CodeVersionManager::PreInit(BOOL fSharedDomain)
+{
+    CONTRACTL
+    {
+        THROWS;
+        GC_TRIGGERS;
+        CAN_TAKE_LOCK;
+        MODE_ANY;
+    }
+    CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+    // Initialize the table lock that guards all versioning state in this manager.
+    // In DAC builds this is a no-op (the lock is never taken from the debugger side).
+    m_crstTable.Init(
+        fSharedDomain ? CrstReJITSharedDomainTable : CrstReJITDomainTable,
+        CrstFlags(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD | CRST_REENTRANCY | CRST_TAKEN_DURING_SHUTDOWN));
+#endif // DACCESS_COMPILE
+}
+
+// RAII holder for the manager's table lock; acquires on construction and
+// releases when the holder goes out of scope.
+CodeVersionManager::TableLockHolder::TableLockHolder(CodeVersionManager* pCodeVersionManager) :
+    CrstHolder(&pCodeVersionManager->m_crstTable)
+{
+}
+#ifndef DACCESS_COMPILE
+// Explicit lock entry/exit for callers that can't use the RAII holder.
+// Every EnterLock must be paired with a LeaveLock on the same thread.
+void CodeVersionManager::EnterLock()
+{
+    m_crstTable.Enter();
+}
+void CodeVersionManager::LeaveLock()
+{
+    m_crstTable.Leave();
+}
+#endif
+
+#ifdef DEBUG
+// Debug-only check used in assertions to verify the table lock is (or is not)
+// held by the calling thread.
+BOOL CodeVersionManager::LockOwnedByCurrentThread() const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+#ifdef DACCESS_COMPILE
+    // Always TRUE under DAC — presumably because the target process is stopped,
+    // making lock-ownership assertions vacuous. TODO(review): confirm.
+    return TRUE;
+#else
+    return const_cast<CrstExplicitInit &>(m_crstTable).OwnedByCurrentThread();
+#endif
+}
+#endif
+
+// Looks up the IL versioning state for (pModule, methodDef). Returns NULL when
+// no versioning state has been created for that method yet.
+PTR_ILCodeVersioningState CodeVersionManager::GetILCodeVersioningState(PTR_Module pModule, mdMethodDef methodDef) const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return m_ilCodeVersioningStateMap.Lookup(ILCodeVersioningState::Key(pModule, methodDef));
+}
+
+// Looks up the per-MethodDesc versioning state; NULL when none has been created.
+PTR_MethodDescVersioningState CodeVersionManager::GetMethodDescVersioningState(PTR_MethodDesc pClosedMethodDesc) const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    PTR_MethodDescVersioningState pState = m_methodDescVersioningStateMap.Lookup(pClosedMethodDesc);
+    return pState;
+}
+
+#ifndef DACCESS_COMPILE
+// Returns the IL versioning state for (pModule, methodDef), lazily allocating
+// and registering one if it doesn't exist yet.
+//
+// Return value:
+//    * S_OK - *ppILCodeVersioningState holds the (possibly new) state
+//    * E_OUTOFMEMORY - allocation or table insertion failed; no state was added
+HRESULT CodeVersionManager::GetOrCreateILCodeVersioningState(Module* pModule, mdMethodDef methodDef, ILCodeVersioningState** ppILCodeVersioningState)
+{
+    LIMITED_METHOD_CONTRACT;
+    ILCodeVersioningState* pState = GetILCodeVersioningState(pModule, methodDef);
+    if (pState != NULL)
+    {
+        // Fast path: state already exists.
+        *ppILCodeVersioningState = pState;
+        return S_OK;
+    }
+
+    pState = new (nothrow) ILCodeVersioningState(pModule, methodDef);
+    if (pState == NULL)
+    {
+        return E_OUTOFMEMORY;
+    }
+
+    HRESULT hr = S_OK;
+    EX_TRY
+    {
+        // This throws when out of memory, but remains internally
+        // consistent (without adding the new element)
+        m_ilCodeVersioningStateMap.Add(pState);
+    }
+    EX_CATCH_HRESULT(hr);
+    if (FAILED(hr))
+    {
+        delete pState;
+        return hr;
+    }
+
+    *ppILCodeVersioningState = pState;
+    return S_OK;
+}
+
+// Returns the per-MethodDesc versioning state, lazily allocating and registering
+// one if it doesn't exist yet.
+//
+// Return value:
+//    * S_OK - *ppMethodVersioningState holds the (possibly new) state
+//    * E_OUTOFMEMORY - allocation or table insertion failed; no state was added
+HRESULT CodeVersionManager::GetOrCreateMethodDescVersioningState(MethodDesc* pMethod, MethodDescVersioningState** ppMethodVersioningState)
+{
+    LIMITED_METHOD_CONTRACT;
+    HRESULT hr = S_OK;
+    MethodDescVersioningState* pMethodVersioningState = m_methodDescVersioningStateMap.Lookup(pMethod);
+    if (pMethodVersioningState == NULL)
+    {
+        pMethodVersioningState = new (nothrow) MethodDescVersioningState(pMethod);
+        if (pMethodVersioningState == NULL)
+        {
+            return E_OUTOFMEMORY;
+        }
+        EX_TRY
+        {
+            // This throws when out of memory, but remains internally
+            // consistent (without adding the new element)
+            m_methodDescVersioningStateMap.Add(pMethodVersioningState);
+        }
+        EX_CATCH_HRESULT(hr);
+        if (FAILED(hr))
+        {
+            // Insertion failed; free the orphaned state so nothing leaks.
+            delete pMethodVersioningState;
+            return hr;
+        }
+    }
+    *ppMethodVersioningState = pMethodVersioningState;
+    return S_OK;
+}
+#endif // DACCESS_COMPILE
+#endif // DACCESS_COMPILE
+
+// Returns the number of methods that have explicit IL versioning state.
+DWORD CodeVersionManager::GetNonDefaultILVersionCount()
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+
+    //This function is legal to call WITHOUT taking the lock
+    //It is used to do a quick check if work might be needed without paying the overhead
+    //of acquiring the lock and doing dictionary lookups
+    return m_ilCodeVersioningStateMap.GetCount();
+}
+
+// Returns a collection of all IL versions for the method identified by pMethod.
+// The table lock must be held for the lifetime of the iteration.
+ILCodeVersionCollection CodeVersionManager::GetILCodeVersions(PTR_MethodDesc pMethod)
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    _ASSERTE(LockOwnedByCurrentThread());
+    return GetILCodeVersions(dac_cast<PTR_Module>(pMethod->GetModule()), pMethod->GetMemberDef());
+}
+
+// Returns a collection of all IL versions for (pModule, methodDef).
+// The table lock must be held for the lifetime of the iteration.
+ILCodeVersionCollection CodeVersionManager::GetILCodeVersions(PTR_Module pModule, mdMethodDef methodDef)
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    _ASSERTE(LockOwnedByCurrentThread());
+    return ILCodeVersionCollection(pModule, methodDef);
+}
+
+// Returns the active IL version for the method, delegating to the
+// (module, methodDef) overload. The table lock must be held.
+ILCodeVersion CodeVersionManager::GetActiveILCodeVersion(PTR_MethodDesc pMethod)
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    _ASSERTE(LockOwnedByCurrentThread());
+    return GetActiveILCodeVersion(dac_cast<PTR_Module>(pMethod->GetModule()), pMethod->GetMemberDef());
+}
+
+// Returns the active IL version for (pModule, methodDef). When the method has
+// versioning state, that state's recorded active version is returned; otherwise
+// a synthesized ILCodeVersion representing the default IL is returned.
+// The table lock must be held.
+ILCodeVersion CodeVersionManager::GetActiveILCodeVersion(PTR_Module pModule, mdMethodDef methodDef)
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    _ASSERTE(LockOwnedByCurrentThread());
+    ILCodeVersioningState* pState = GetILCodeVersioningState(pModule, methodDef);
+    if (pState != NULL)
+    {
+        return pState->GetActiveVersion();
+    }
+    return ILCodeVersion(pModule, methodDef);
+}
+
+// Finds the IL version of pMethod whose version id matches rejitId.
+// Returns a null ILCodeVersion when no version has that id (REJIT builds only).
+// The table lock must be held.
+ILCodeVersion CodeVersionManager::GetILCodeVersion(PTR_MethodDesc pMethod, ReJITID rejitId)
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    _ASSERTE(LockOwnedByCurrentThread());
+
+#ifdef FEATURE_REJIT
+    ILCodeVersionCollection collection = GetILCodeVersions(pMethod);
+    for (ILCodeVersionIterator cur = collection.Begin(), end = collection.End(); cur != end; cur++)
+    {
+        if (cur->GetVersionId() == rejitId)
+        {
+            return *cur;
+        }
+    }
+    return ILCodeVersion();
+#else // FEATURE_REJIT
+    // Without REJIT only the default version (id 0) can exist.
+    _ASSERTE(rejitId == 0);
+    return ILCodeVersion(dac_cast<PTR_Module>(pMethod->GetModule()), pMethod->GetMemberDef());
+#endif // FEATURE_REJIT
+}
+
+// Returns a collection of all native code versions of pMethod, across all of its
+// IL versions (the null ILCodeVersion filter matches every IL version).
+// The table lock must be held for the lifetime of the iteration.
+NativeCodeVersionCollection CodeVersionManager::GetNativeCodeVersions(PTR_MethodDesc pMethod) const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    _ASSERTE(LockOwnedByCurrentThread());
+    return NativeCodeVersionCollection(pMethod, ILCodeVersion());
+}
+
+// Finds the native code version of pMethod whose code begins at codeStartAddress.
+// Returns a null NativeCodeVersion when no version starts at that address.
+// The table lock must be held.
+NativeCodeVersion CodeVersionManager::GetNativeCodeVersion(PTR_MethodDesc pMethod, PCODE codeStartAddress) const
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    _ASSERTE(LockOwnedByCurrentThread());
+
+    NativeCodeVersionCollection nativeCodeVersions = GetNativeCodeVersions(pMethod);
+    for (NativeCodeVersionIterator cur = nativeCodeVersions.Begin(), end = nativeCodeVersions.End(); cur != end; cur++)
+    {
+        if (cur->GetNativeCode() == codeStartAddress)
+        {
+            return *cur;
+        }
+    }
+    return NativeCodeVersion();
+}
+
+#ifndef DACCESS_COMPILE
+// Creates a new IL code version node with the given rejitId and links it into
+// the method's versioning state. The node is returned via *pILCodeVersion; it is
+// NOT made active here (see SetActiveILCodeVersions). The table lock must be held.
+//
+// Return value: S_OK, or E_OUTOFMEMORY when allocation fails.
+HRESULT CodeVersionManager::AddILCodeVersion(Module* pModule, mdMethodDef methodDef, ReJITID rejitId, ILCodeVersion* pILCodeVersion)
+{
+    LIMITED_METHOD_CONTRACT;
+    _ASSERTE(LockOwnedByCurrentThread());
+
+    ILCodeVersioningState* pILCodeVersioningState;
+    HRESULT hr = GetOrCreateILCodeVersioningState(pModule, methodDef, &pILCodeVersioningState);
+    if (FAILED(hr))
+    {
+        _ASSERTE(hr == E_OUTOFMEMORY);
+        return hr;
+    }
+
+    ILCodeVersionNode* pILCodeVersionNode = new (nothrow) ILCodeVersionNode(pModule, methodDef, rejitId);
+    if (pILCodeVersionNode == NULL)
+    {
+        return E_OUTOFMEMORY;
+    }
+    pILCodeVersioningState->LinkILCodeVersionNode(pILCodeVersionNode);
+    *pILCodeVersion = ILCodeVersion(pILCodeVersionNode);
+    return S_OK;
+}
+
+// Marks a set of IL code versions as active and publishes the corresponding
+// active native code to all pre-existing method instantiations.
+//
+// Arguments:
+//    * pActiveVersions / cActiveVersions - the IL versions to make active (none may be null)
+//    * fEESuspended - TRUE if the caller already suspended the EE (forwarded to publishing)
+//    * pErrors - optional list that receives per-method publishing errors
+//
+// Return value: S_OK (individual publish failures are recorded in pErrors),
+//    or E_OUTOFMEMORY when bookkeeping allocations fail.
+HRESULT CodeVersionManager::SetActiveILCodeVersions(ILCodeVersion* pActiveVersions, DWORD cActiveVersions, BOOL fEESuspended, CDynArray<CodePublishError> * pErrors)
+{
+    // If the IL version is in the shared domain we need to iterate all domains
+    // looking for instantiations. The domain iterator lock is bigger than
+    // the code version manager lock so we can't do this atomically. In one atomic
+    // update the bookkeeping for IL versioning will happen and then in a second
+    // update the active native code versions will change/code jumpstamps+precodes
+    // will update.
+    //
+    // Note: For all domains other than the shared AppDomain we could do this
+    // atomically, but for now we use the lowest common denominator for all
+    // domains.
+    CONTRACTL
+    {
+        NOTHROW;
+        GC_NOTRIGGER;
+        MODE_PREEMPTIVE;
+        CAN_TAKE_LOCK;
+        PRECONDITION(CheckPointer(pActiveVersions));
+        PRECONDITION(CheckPointer(pErrors, NULL_OK));
+    }
+    CONTRACTL_END;
+    _ASSERTE(!LockOwnedByCurrentThread());
+    HRESULT hr = S_OK;
+
+#if DEBUG
+    for (DWORD i = 0; i < cActiveVersions; i++)
+    {
+        ILCodeVersion activeVersion = pActiveVersions[i];
+        if (activeVersion.IsNull())
+        {
+            _ASSERTE(!"The active IL version can't be NULL");
+        }
+    }
+#endif
+
+    // step 1 - mark the IL versions as being active, this ensures that
+    // any new method instantiations added after this point will bind to
+    // the correct version
+    {
+        // BUGFIX: this previously read "TableLockHolder(this);", which constructs
+        // an unnamed temporary that acquires and immediately releases the lock,
+        // leaving the loop below unsynchronized. The holder must be named so it
+        // lives until the end of this scope (matching step 3 below).
+        TableLockHolder lock(this);
+        for (DWORD i = 0; i < cActiveVersions; i++)
+        {
+            ILCodeVersion activeVersion = pActiveVersions[i];
+            ILCodeVersioningState* pILCodeVersioningState = NULL;
+            if (FAILED(hr = GetOrCreateILCodeVersioningState(activeVersion.GetModule(), activeVersion.GetMethodDef(), &pILCodeVersioningState)))
+            {
+                _ASSERTE(hr == E_OUTOFMEMORY);
+                return hr;
+            }
+            pILCodeVersioningState->SetActiveVersion(activeVersion);
+        }
+    }
+
+    // step 2 - determine the set of pre-existing method instantiations
+
+    // a parallel array to activeVersions
+    // for each ILCodeVersion in activeVersions, this lists the set
+    // MethodDescs that will need to be updated
+    CDynArray<CDynArray<MethodDesc*>> methodDescsToUpdate;
+    CDynArray<CodePublishError> errorRecords;
+    for (DWORD i = 0; i < cActiveVersions; i++)
+    {
+        CDynArray<MethodDesc*>* pMethodDescs = methodDescsToUpdate.Append();
+        if (pMethodDescs == NULL)
+        {
+            return E_OUTOFMEMORY;
+        }
+        *pMethodDescs = CDynArray<MethodDesc*>();
+
+        MethodDesc* pLoadedMethodDesc = pActiveVersions[i].GetModule()->LookupMethodDef(pActiveVersions[i].GetMethodDef());
+        if (FAILED(hr = CodeVersionManager::EnumerateClosedMethodDescs(pLoadedMethodDesc, pMethodDescs, &errorRecords)))
+        {
+            _ASSERTE(hr == E_OUTOFMEMORY);
+            return hr;
+        }
+    }
+
+    // step 3 - update each pre-existing method instantiation
+    {
+        TableLockHolder lock(this);
+        for (DWORD i = 0; i < cActiveVersions; i++)
+        {
+            // Its possible the active IL version has changed if
+            // another caller made an update while this method wasn't
+            // holding the lock. We will ensure that we synchronize
+            // publishing to whatever version is currently active, even
+            // if that isn't the IL version we set above.
+            //
+            // Note: Although we attempt to handle this case gracefully
+            // it isn't recommended for callers to do this. Racing two calls
+            // that set the IL version to different results means it will be
+            // completely arbitrary which version wins.
+            ILCodeVersion requestedActiveILVersion = pActiveVersions[i];
+            ILCodeVersion activeILVersion = GetActiveILCodeVersion(requestedActiveILVersion.GetModule(), requestedActiveILVersion.GetMethodDef());
+
+            CDynArray<MethodDesc*> methodDescs = methodDescsToUpdate[i];
+            for (int j = 0; j < methodDescs.Count(); j++)
+            {
+                // Get an the active child code version for this method instantiation (it might be NULL, that is OK)
+                NativeCodeVersion activeNativeChild = activeILVersion.GetActiveNativeCodeVersion(methodDescs[j]);
+
+                // Publish that child version, because it is the active native child of the active IL version
+                // Failing to publish is non-fatal, but we do record it so the caller is aware
+                if (FAILED(hr = PublishNativeCodeVersion(methodDescs[j], activeNativeChild, fEESuspended)))
+                {
+                    if (FAILED(hr = AddCodePublishError(activeILVersion.GetModule(), activeILVersion.GetMethodDef(), methodDescs[j], hr, &errorRecords)))
+                    {
+                        _ASSERTE(hr == E_OUTOFMEMORY);
+                        return hr;
+                    }
+                }
+            }
+        }
+    }
+
+    return S_OK;
+}
+
+// Allocates a new native code version node under ilCodeVersion for the given
+// method instantiation and links it into the method's versioning state. The
+// first native child of an IL version automatically becomes its active child.
+// The table lock must be held.
+//
+// Return value: S_OK (node returned via *pNativeCodeVersion), or E_OUTOFMEMORY.
+HRESULT CodeVersionManager::AddNativeCodeVersion(ILCodeVersion ilCodeVersion, MethodDesc* pClosedMethodDesc, NativeCodeVersion* pNativeCodeVersion)
+{
+    LIMITED_METHOD_CONTRACT;
+    _ASSERTE(LockOwnedByCurrentThread());
+
+    MethodDescVersioningState* pMethodVersioningState;
+    HRESULT hr = GetOrCreateMethodDescVersioningState(pClosedMethodDesc, &pMethodVersioningState);
+    if (FAILED(hr))
+    {
+        _ASSERTE(hr == E_OUTOFMEMORY);
+        return hr;
+    }
+
+    NativeCodeVersionId newId = pMethodVersioningState->AllocateVersionId();
+    NativeCodeVersionNode* pNativeCodeVersionNode = new (nothrow) NativeCodeVersionNode(newId, pClosedMethodDesc, ilCodeVersion.GetVersionId());
+    if (pNativeCodeVersionNode == NULL)
+    {
+        return E_OUTOFMEMORY;
+    }
+
+    pMethodVersioningState->LinkNativeCodeVersionNode(pNativeCodeVersionNode);
+
+    // the first child added is automatically considered the active one.
+    if (ilCodeVersion.GetActiveNativeCodeVersion(pClosedMethodDesc).IsNull())
+    {
+        pNativeCodeVersionNode->SetActiveChildFlag(TRUE);
+        _ASSERTE(!ilCodeVersion.GetActiveNativeCodeVersion(pClosedMethodDesc).IsNull());
+
+        // the new child shouldn't have any native code. If it did we might need to
+        // publish that code as part of adding the node which would require callers
+        // to pay attention to GC suspension and we'd need to report publishing errors
+        // back to them.
+        _ASSERTE(pNativeCodeVersionNode->GetNativeCode() == NULL);
+    }
+    *pNativeCodeVersion = NativeCodeVersion(pNativeCodeVersionNode);
+    return S_OK;
+}
+
+// Compiles (if needed) and publishes the active native code version for a
+// versionable method, looping until the publish succeeds against a stable
+// active version. Returns the code to run, or NULL on failure (the error is
+// reported to the profiler via ReportCodePublishError).
+//
+// Arguments:
+//    * pMethodDesc - the versionable method being invoked
+//    * fCanBackpatchPrestub - FALSE means the caller can't tolerate backpatching;
+//          we compile/pick the active version but skip publishing
+PCODE CodeVersionManager::PublishVersionableCodeIfNecessary(MethodDesc* pMethodDesc, BOOL fCanBackpatchPrestub)
+{
+    STANDARD_VM_CONTRACT;
+    _ASSERTE(!LockOwnedByCurrentThread());
+    _ASSERTE(pMethodDesc->IsVersionable());
+    _ASSERTE(!pMethodDesc->IsPointingToPrestub() || !pMethodDesc->IsVersionableWithJumpStamp());
+
+    HRESULT hr = S_OK;
+    PCODE pCode = NULL;
+
+    NativeCodeVersion activeVersion;
+    {
+        TableLockHolder lock(this);
+        if (FAILED(hr = GetActiveILCodeVersion(pMethodDesc).GetOrCreateActiveNativeCodeVersion(pMethodDesc, &activeVersion)))
+        {
+            _ASSERTE(hr == E_OUTOFMEMORY);
+            ReportCodePublishError(pMethodDesc->GetModule(), pMethodDesc->GetMemberDef(), pMethodDesc, hr);
+            return NULL;
+        }
+    }
+
+    // fEESuspend records that the NEXT publish attempt requires the EE to be
+    // suspended; fEESuspendedThisIteration records whether the current loop
+    // iteration actually performed the suspension, so that every RestartEE is
+    // paired with a preceding SuspendEE.
+    BOOL fEESuspend = FALSE;
+    BOOL fEESuspendedThisIteration = FALSE;
+    while (true)
+    {
+        // compile the code if needed
+        pCode = activeVersion.GetNativeCode();
+        if (pCode == NULL)
+        {
+            pCode = pMethodDesc->PrepareCode(activeVersion);
+        }
+
+        // suspend in preparation for publishing if needed
+        fEESuspendedThisIteration = fEESuspend;
+        if (fEESuspendedThisIteration)
+        {
+            ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_REJIT);
+        }
+
+        {
+            TableLockHolder lock(this);
+            // The common case is that newActiveCode == activeCode, however we did leave the lock so there is
+            // possibility that the active version has changed. If it has we need to restart the compilation
+            // and publishing process with the new active version instead.
+            //
+            // In theory it should be legitimate to break out of this loop and run the less recent active version,
+            // because ultimately this is a race between one thread that is updating the version and another thread
+            // trying to run the current version. However for back-compat with ReJIT we need to guarantee that
+            // a versioning update at least as late as the profiler JitCompilationFinished callback wins the race.
+            NativeCodeVersion newActiveVersion;
+            if (FAILED(hr = GetActiveILCodeVersion(pMethodDesc).GetOrCreateActiveNativeCodeVersion(pMethodDesc, &newActiveVersion)))
+            {
+                _ASSERTE(hr == E_OUTOFMEMORY);
+                ReportCodePublishError(pMethodDesc->GetModule(), pMethodDesc->GetMemberDef(), pMethodDesc, hr);
+                pCode = NULL;
+                break;
+            }
+            if (newActiveVersion != activeVersion)
+            {
+                activeVersion = newActiveVersion;
+            }
+            else
+            {
+                // if we aren't allowed to backpatch we are done
+                if (!fCanBackpatchPrestub)
+                {
+                    break;
+                }
+
+                // attempt to publish the active version still under the lock
+                if (FAILED(hr = PublishNativeCodeVersion(pMethodDesc, activeVersion, fEESuspend)))
+                {
+                    // if we need an EESuspend to publish then start over. We have to leave the lock in order to suspend,
+                    // and when we leave the lock the active version might change again. However now we know that suspend
+                    // is necessary before the next publish attempt.
+                    if (hr == CORPROF_E_RUNTIME_SUSPEND_REQUIRED)
+                    {
+                        _ASSERTE(!fEESuspend);
+                        fEESuspend = true;
+                    }
+                    else
+                    {
+                        ReportCodePublishError(pMethodDesc->GetModule(), pMethodDesc->GetMemberDef(), pMethodDesc, hr);
+                        pCode = NULL;
+                        break;
+                    }
+                }
+                else
+                {
+                    //success
+                    break;
+                }
+            }
+        } // exit lock
+
+        // BUGFIX: this previously tested fEESuspend, which is also set inside the
+        // lock when a publish attempt merely *requests* a suspension; on that
+        // iteration RestartEE would have been called without a matching SuspendEE.
+        // Only restart when this iteration actually suspended the runtime.
+        if (fEESuspendedThisIteration)
+        {
+            ThreadSuspend::RestartEE(FALSE, TRUE);
+            fEESuspendedThisIteration = FALSE;
+        }
+    }
+
+    // if the EE is still suspended from breaking in the middle of the loop, resume it
+    if (fEESuspendedThisIteration)
+    {
+        ThreadSuspend::RestartEE(FALSE, TRUE);
+    }
+    return pCode;
+}
+
+// Makes nativeCodeVersion the code that callers of pMethod execute, either by
+// retargeting the method's precode or by (un)applying a jump stamp. A null
+// version resets the precode back to the prestub. The table lock must be held.
+//
+// Return value:
+//    * S_OK on success
+//    * CORPROF_E_RUNTIME_SUSPEND_REQUIRED - jump-stamp path needs the EE suspended
+//      (propagated from SyncJumpStamp); caller should suspend and retry
+//    * other failing HRESULT on error
+HRESULT CodeVersionManager::PublishNativeCodeVersion(MethodDesc* pMethod, NativeCodeVersion nativeCodeVersion, BOOL fEESuspended)
+{
+    LIMITED_METHOD_CONTRACT;
+    _ASSERTE(LockOwnedByCurrentThread());
+    _ASSERTE(pMethod->IsVersionable());
+    HRESULT hr = S_OK;
+    PCODE pCode = nativeCodeVersion.IsNull() ? NULL : nativeCodeVersion.GetNativeCode();
+    if (pMethod->IsVersionableWithPrecode())
+    {
+        Precode* pPrecode = pMethod->GetOrCreatePrecode();
+        if (pCode == NULL)
+        {
+            // No native code yet: point the precode back at the prestub.
+            EX_TRY
+            {
+                pPrecode->Reset();
+            }
+            EX_CATCH_HRESULT(hr);
+            return hr;
+        }
+        else
+        {
+            EX_TRY
+            {
+                hr = pPrecode->SetTargetInterlocked(pCode, FALSE) ? S_OK : E_FAIL;
+            }
+            EX_CATCH_HRESULT(hr);
+            return hr;
+        }
+    }
+    else
+    {
+#ifndef FEATURE_JUMPSTAMP
+        _ASSERTE(!"This platform doesn't support JumpStamp but this method doesn't version with Precode,"
+            " this method can't be updated");
+        return E_FAIL;
+#else
+        MethodDescVersioningState* pVersioningState;
+        if (FAILED(hr = GetOrCreateMethodDescVersioningState(pMethod, &pVersioningState)))
+        {
+            _ASSERTE(hr == E_OUTOFMEMORY);
+            return hr;
+        }
+        return pVersioningState->SyncJumpStamp(nativeCodeVersion, fEESuspended);
+#endif
+    }
+}
+
+// static
+// Collects into pClosedMethodDescs every loaded closed MethodDesc for pMD: the
+// method itself when non-generic, or all loaded instantiations when generic
+// (searching the shared domain and/or all real app domains as appropriate).
+// Methods that can't be versioned are recorded in pUnsupportedMethodErrors.
+//
+// Return value: S_OK, or E_OUTOFMEMORY when array growth fails.
+HRESULT CodeVersionManager::EnumerateClosedMethodDescs(
+    MethodDesc* pMD,
+    CDynArray<MethodDesc*> * pClosedMethodDescs,
+    CDynArray<CodePublishError> * pUnsupportedMethodErrors)
+{
+    CONTRACTL
+    {
+        NOTHROW;
+        GC_NOTRIGGER;
+        MODE_PREEMPTIVE;
+        CAN_TAKE_LOCK;
+        PRECONDITION(CheckPointer(pMD, NULL_OK));
+        PRECONDITION(CheckPointer(pClosedMethodDescs));
+        PRECONDITION(CheckPointer(pUnsupportedMethodErrors));
+    }
+    CONTRACTL_END;
+    HRESULT hr = S_OK;
+    if (pMD == NULL)
+    {
+        // nothing is loaded yet so we're done for this method.
+        return S_OK;
+    }
+
+    // CLEANUP: the original code tested !HasClassOrMethodInstantiation() twice in
+    // a row (once to append, once to return); the two blocks are merged here.
+    if (!pMD->HasClassOrMethodInstantiation())
+    {
+        // We have a JITted non-generic; record it and we're done for this method.
+        MethodDesc ** ppMD = pClosedMethodDescs->Append();
+        if (ppMD == NULL)
+        {
+            return E_OUTOFMEMORY;
+        }
+        *ppMD = pMD;
+        return S_OK;
+    }
+
+    // Ok, now the case of a generic function (or function on generic class), which
+    // is loaded, and may thus have compiled instantiations.
+    // It's impossible to get to any other kind of domain from the profiling API
+    Module* pModule = pMD->GetModule();
+    mdMethodDef methodDef = pMD->GetMemberDef();
+    BaseDomain * pBaseDomainFromModule = pModule->GetDomain();
+    _ASSERTE(pBaseDomainFromModule->IsAppDomain() ||
+        pBaseDomainFromModule->IsSharedDomain());
+
+    if (pBaseDomainFromModule->IsSharedDomain())
+    {
+        // Iterate through all modules loaded into the shared domain, to
+        // find all instantiations living in the shared domain. This will
+        // include orphaned code (i.e., shared code used by ADs that have
+        // all unloaded), which is good, because orphaned code could get
+        // re-adopted if a new AD is created that can use that shared code
+        hr = EnumerateDomainClosedMethodDescs(
+            NULL,  // NULL means to search SharedDomain instead of an AD
+            pModule,
+            methodDef,
+            pClosedMethodDescs,
+            pUnsupportedMethodErrors);
+    }
+    else
+    {
+        // Module is unshared, so just use the module's domain to find instantiations.
+        hr = EnumerateDomainClosedMethodDescs(
+            pBaseDomainFromModule->AsAppDomain(),
+            pModule,
+            methodDef,
+            pClosedMethodDescs,
+            pUnsupportedMethodErrors);
+    }
+    if (FAILED(hr))
+    {
+        _ASSERTE(hr == E_OUTOFMEMORY);
+        return hr;
+    }
+
+    // We want to iterate through all compilations of existing instantiations to
+    // ensure they get marked for rejit.  Note: There may be zero instantiations,
+    // but we won't know until we try.
+    if (pBaseDomainFromModule->IsSharedDomain())
+    {
+        // Iterate through all real domains, to find shared instantiations.
+        AppDomainIterator appDomainIterator(TRUE);
+        while (appDomainIterator.Next())
+        {
+            AppDomain * pAppDomain = appDomainIterator.GetDomain();
+            if (pAppDomain->IsUnloading())
+            {
+                continue;
+            }
+            hr = EnumerateDomainClosedMethodDescs(
+                pAppDomain,
+                pModule,
+                methodDef,
+                pClosedMethodDescs,
+                pUnsupportedMethodErrors);
+            if (FAILED(hr))
+            {
+                _ASSERTE(hr == E_OUTOFMEMORY);
+                return hr;
+            }
+        }
+    }
+    return S_OK;
+}
+
+// static
+// Appends to pClosedMethodDescs every loaded instantiation of
+// (pModuleContainingMethodDef, methodDef) in the given domain (or the shared
+// domain when pAppDomainToSearch is NULL). Non-versionable instantiations are
+// recorded in pUnsupportedMethodErrors instead.
+//
+// Return value: S_OK, or E_OUTOFMEMORY when array growth fails.
+HRESULT CodeVersionManager::EnumerateDomainClosedMethodDescs(
+    AppDomain * pAppDomainToSearch,
+    Module* pModuleContainingMethodDef,
+    mdMethodDef methodDef,
+    CDynArray<MethodDesc*> * pClosedMethodDescs,
+    CDynArray<CodePublishError> * pUnsupportedMethodErrors)
+{
+    CONTRACTL
+    {
+        NOTHROW;
+        GC_NOTRIGGER;
+        MODE_PREEMPTIVE;
+        CAN_TAKE_LOCK;
+        PRECONDITION(CheckPointer(pAppDomainToSearch, NULL_OK));
+        PRECONDITION(CheckPointer(pModuleContainingMethodDef));
+        PRECONDITION(CheckPointer(pClosedMethodDescs));
+        PRECONDITION(CheckPointer(pUnsupportedMethodErrors));
+    }
+    CONTRACTL_END;
+
+    _ASSERTE(methodDef != mdTokenNil);
+
+    HRESULT hr;
+
+    BaseDomain * pDomainContainingGenericDefinition = pModuleContainingMethodDef->GetDomain();
+
+#ifdef _DEBUG
+    // If the generic definition is not loaded domain-neutral, then all its
+    // instantiations will also be non-domain-neutral and loaded into the same
+    // domain as the generic definition.  So the caller may only pass the
+    // domain containing the generic definition as pAppDomainToSearch
+    if (!pDomainContainingGenericDefinition->IsSharedDomain())
+    {
+        _ASSERTE(pDomainContainingGenericDefinition == pAppDomainToSearch);
+    }
+#endif //_DEBUG
+
+    // If pAppDomainToSearch is NULL, iterate through all existing
+    // instantiations loaded into the SharedDomain. If pAppDomainToSearch is non-NULL,
+    // iterate through all existing instantiations in pAppDomainToSearch, and only consider
+    // instantiations in non-domain-neutral assemblies (as we already covered domain
+    // neutral assemblies when we searched the SharedDomain).
+    LoadedMethodDescIterator::AssemblyIterationMode mode = LoadedMethodDescIterator::kModeSharedDomainAssemblies;
+    // these are the default flags which won't actually be used in shared mode other than
+    // asserting they were specified with their default values
+    AssemblyIterationFlags assemFlags = (AssemblyIterationFlags)(kIncludeLoaded | kIncludeExecution);
+    ModuleIterationOption moduleFlags = (ModuleIterationOption)kModIterIncludeLoaded;
+    if (pAppDomainToSearch != NULL)
+    {
+        mode = LoadedMethodDescIterator::kModeUnsharedADAssemblies;
+        assemFlags = (AssemblyIterationFlags)(kIncludeAvailableToProfilers | kIncludeExecution);
+        moduleFlags = (ModuleIterationOption)kModIterIncludeAvailableToProfilers;
+    }
+    LoadedMethodDescIterator it(
+        pAppDomainToSearch,
+        pModuleContainingMethodDef,
+        methodDef,
+        mode,
+        assemFlags,
+        moduleFlags);
+    CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+    while (it.Next(pDomainAssembly.This()))
+    {
+        MethodDesc * pLoadedMD = it.Current();
+
+        if (!pLoadedMD->IsVersionable())
+        {
+            // For compatibility with the rejit APIs we ensure certain errors are detected and reported using their
+            // original HRESULTS
+            HRESULT errorHR = GetNonVersionableError(pLoadedMD);
+            if (FAILED(errorHR))
+            {
+                // BUGFIX: record the error actually computed above (errorHR) rather
+                // than hard-coding CORPROF_E_FUNCTION_IS_COLLECTIBLE. Today that is
+                // the only failure GetNonVersionableError returns, so behavior is
+                // unchanged, but any future failure mode would have been misreported.
+                if (FAILED(hr = CodeVersionManager::AddCodePublishError(pModuleContainingMethodDef, methodDef, pLoadedMD, errorHR, pUnsupportedMethodErrors)))
+                {
+                    _ASSERTE(hr == E_OUTOFMEMORY);
+                    return hr;
+                }
+            }
+            continue;
+        }
+
+#ifdef _DEBUG
+        if (!pDomainContainingGenericDefinition->IsSharedDomain())
+        {
+            // Method is defined outside of the shared domain, so its instantiation must
+            // be defined in the AD we're iterating over (pAppDomainToSearch, which, as
+            // asserted above, must be the same domain as the generic's definition)
+            _ASSERTE(pLoadedMD->GetDomain() == pAppDomainToSearch);
+        }
+#endif // _DEBUG
+
+        MethodDesc ** ppMD = pClosedMethodDescs->Append();
+        if (ppMD == NULL)
+        {
+            return E_OUTOFMEMORY;
+        }
+        *ppMD = pLoadedMD;
+    }
+    return S_OK;
+}
+#endif // DACCESS_COMPILE
+#endif // DACCESS_COMPILE
+
+
+//---------------------------------------------------------------------------------------
+//
+// Given the default version code for a MethodDesc that is about to be published,
+// add a jumpstamp pointing back to the prestub if the currently active version
+// isn't the default one. This is called from the PublishMethodHolder.
+//
+// Arguments:
+// * pMD - MethodDesc to jmp-stamp
+// * pCode - Top of the code that was just jitted (using original IL).
+//
+//
+// Return value:
+// * S_OK: Either we successfully did the jmp-stamp, or we didn't have to
+// * Else, HRESULT indicating failure.
+
+// Assumptions:
+// The caller has not yet published pCode to the MethodDesc, so no threads can be
+// executing inside pMD's code yet. Thus, we don't need to suspend the runtime while
+// applying the jump-stamp like we usually do for rejit requests that are made after
+// a function has been JITted.
+//
+#ifndef DACCESS_COMPILE
+HRESULT CodeVersionManager::DoJumpStampIfNecessary(MethodDesc* pMD, PCODE pCode)
+{
+    CONTRACTL
+    {
+        NOTHROW;
+        GC_NOTRIGGER;
+        MODE_ANY;
+        CAN_TAKE_LOCK;
+        PRECONDITION(CheckPointer(pMD));
+        PRECONDITION(pCode != NULL);
+    }
+    CONTRACTL_END;
+
+    HRESULT hr;
+
+    _ASSERTE(LockOwnedByCurrentThread());
+
+    NativeCodeVersion activeCodeVersion = GetActiveILCodeVersion(pMD).GetActiveNativeCodeVersion(pMD);
+    if (activeCodeVersion.IsDefaultVersion())
+    {
+        //Method not requested to be rejitted, nothing to do
+        return S_OK;
+    }
+
+    if (!(pMD->IsVersionable() && pMD->IsVersionableWithJumpStamp()))
+    {
+        // Can't jump-stamp this method; surface the rejit-compatible error code.
+        return GetNonVersionableError(pMD);
+    }
+
+#ifndef FEATURE_JUMPSTAMP
+    _ASSERTE(!"How did we get here? IsVersionableWithJumpStamp() should have been FALSE above");
+    return S_OK;
+#else
+    MethodDescVersioningState* pVersioningState;
+    if (FAILED(hr = GetOrCreateMethodDescVersioningState(pMD, &pVersioningState)))
+    {
+        _ASSERTE(hr == E_OUTOFMEMORY);
+        return hr;
+    }
+    if (pVersioningState->GetJumpStampState() != MethodDescVersioningState::JumpStampNone)
+    {
+        //JumpStamp already in place
+        return S_OK;
+    }
+    // Safe without suspension: pCode hasn't been published, so no thread can be
+    // executing it yet (see the function-header comment above).
+    return pVersioningState->JumpStampNativeCode(pCode);
+#endif // FEATURE_JUMPSTAMP
+
+}
+#endif // DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+//static
+// Placeholder for app-domain shutdown cleanup. Intentionally unimplemented:
+// .NET Core doesn't unload app domains, so this should never need to run.
+void CodeVersionManager::OnAppDomainExit(AppDomain * pAppDomain)
+{
+    LIMITED_METHOD_CONTRACT;
+    // This would clean up all the allocations we have done and synchronize with any threads that might
+    // still be using the data
+    _ASSERTE(!".Net Core shouldn't be doing app domain shutdown - if we start doing so this needs to be implemented");
+}
+#endif
+
+//---------------------------------------------------------------------------------------
+//
+// Small helper to determine whether a given (possibly instantiated generic) MethodDesc
+// is safe to rejit.
+//
+// Arguments:
+// pMD - MethodDesc to test
+// Return Value:
+// S_OK iff pMD is safe to rejit
+// CORPROF_E_FUNCTION_IS_COLLECTIBLE - function can't be rejitted because it is collectible
+//
+
+// static
+#ifndef DACCESS_COMPILE
+HRESULT CodeVersionManager::GetNonVersionableError(MethodDesc* pMD)
+{
+    CONTRACTL
+    {
+        NOTHROW;
+        GC_NOTRIGGER;
+        CAN_TAKE_LOCK;
+        MODE_ANY;
+    }
+    CONTRACTL_END;
+
+    _ASSERTE(pMD != NULL);
+
+    // Weird, non-user functions were already weeded out in RequestReJIT(), and will
+    // also never be passed to us by the prestub worker (for the pre-rejit case).
+    _ASSERTE(pMD->IsIL());
+
+    // Any MethodDescs that could be collected are not currently supported.  Although we
+    // rule out all Ref.Emit modules in RequestReJIT(), there can still exist types defined
+    // in a non-reflection module and instantiated into a collectible assembly
+    // (e.g., List<MyCollectibleStruct>).  In the future we may lift this
+    // restriction by updating the ReJitManager when the collectible assemblies
+    // owning the instantiations get collected.
+    if (pMD->GetLoaderAllocator()->IsCollectible())
+    {
+        return CORPROF_E_FUNCTION_IS_COLLECTIBLE;
+    }
+
+    // Currently the only non-versionable condition checked here is collectibility.
+    return S_OK;
+}
+#endif
+
+//---------------------------------------------------------------------------------------
+//
+// Helper that inits a new CodePublishError and adds it to the pErrors array
+//
+// Arguments:
+// * pModule - The module in the module/MethodDef identifier pair for the method which
+// had an error during rejit
+// * methodDef - The MethodDef in the module/MethodDef identifier pair for the method which
+// had an error during rejit
+// * pMD - If available, the specific method instance which had an error during rejit
+// * hrStatus - HRESULT for the rejit error that occurred
+// * pErrors - the list of error records that this method will append to
+//
+// Return Value:
+// * S_OK: error was appended
+// * E_OUTOFMEMORY: Not enough memory to create the new error item. The array is unchanged.
+//
+
+//static
+#ifndef DACCESS_COMPILE
+HRESULT CodeVersionManager::AddCodePublishError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus, CDynArray<CodePublishError> * pErrors)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (pErrors == NULL)
+ {
+ return S_OK;
+ }
+
+ CodePublishError* pError = pErrors->Append();
+ if (pError == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+ pError->pModule = pModule;
+ pError->methodDef = methodDef;
+ pError->pMethodDesc = pMD;
+ pError->hrStatus = hrStatus;
+ return S_OK;
+}
+#endif
+
+#ifndef DACCESS_COMPILE
+void CodeVersionManager::ReportCodePublishError(CodePublishError* pErrorRecord)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ CAN_TAKE_LOCK;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ReportCodePublishError(pErrorRecord->pModule, pErrorRecord->methodDef, pErrorRecord->pMethodDesc, pErrorRecord->hrStatus);
+}
+
+void CodeVersionManager::ReportCodePublishError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus)
+{
+    CONTRACTL
+    {
+        NOTHROW;
+        GC_TRIGGERS;
+        CAN_TAKE_LOCK;
+        MODE_ANY;
+    }
+    CONTRACTL_END;
+
+#ifdef FEATURE_REJIT
+    BOOL isRejitted = FALSE;
+    {
+        // Name the holder: a bare `TableLockHolder(this);` creates an unnamed
+        // temporary that is destroyed at the end of the statement, releasing
+        // the lock before GetActiveILCodeVersion runs.
+        TableLockHolder lock(this);
+        isRejitted = !GetActiveILCodeVersion(pModule, methodDef).IsDefaultVersion();
+    }
+
+    // this isn't perfect, we might be activating a tiered jitting variation of a rejitted
+    // method for example. If it proves to be an issue we can revisit.
+    if (isRejitted)
+    {
+        ReJitManager::ReportReJITError(pModule, methodDef, pMD, hrStatus);
+    }
+#endif
+}
+#endif // DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+// PrepareCodeConfig::SetNativeCode() calls this to determine if there's a non-default code
+// version requested for a MethodDesc that has just been jitted for the first time.
+// This is also called when methods are being restored in NGEN images. The sequence looks like:
+// *Enter holder
+// Enter code version manager lock
+// DoJumpStampIfNecessary
+// *Runtime code publishes/restores method
+// *Exit holder
+// Leave code version manager lock
+// Send rejit error callbacks if needed
+//
+//
+// #PublishCode:
+// Note that the runtime needs to publish/restore the PCODE while this holder is
+// on the stack, so it can happen under the code version manager's lock.
+// This prevents a race with a profiler that calls
+// RequestReJIT just as the method finishes compiling. In particular, the locking ensures
+// atomicity between this set of steps (performed in DoJumpStampIfNecessary):
+// * (1) Checking whether there is a non-default version for this MD
+// * (2) If not, skip doing the jmp-stamp
+// * (3) Publishing the PCODE
+//
+// with respect to these steps performed in RequestReJIT:
+// * (a) Is PCODE published yet?
+// * (b) Create non-default ILCodeVersion which the prestub will
+// consult when it JITs the original IL
+//
+// Without this atomicity, we could get the ordering (1), (2), (a), (b), (3), resulting
+// in the rejit request getting completely ignored (i.e., we file away the new ILCodeVersion
+// AFTER the prestub checks for it).
+//
+// A similar race is possible for code being restored. In that case the restoring thread
+// does:
+// * (1) Check if there is a non-default ILCodeVersion for this MD
+// * (2) If not, no need to jmp-stamp
+// * (3) Restore the MD
+
+// And RequestRejit does:
+// * (a) [In LoadedMethodDescIterator] Is a potential MD restored yet?
+// * (b) [In EnumerateDomainClosedMethodDescs] If not, don't queue it for jump-stamping
+//
+// Same ordering (1), (2), (a), (b), (3) results in missing both opportunities to jump
+// stamp.
+
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+PublishMethodHolder::PublishMethodHolder(MethodDesc* pMethodDesc, PCODE pCode) :
+ m_pMD(NULL), m_hr(S_OK)
+{
+ // This method can't have a contract because entering the table lock
+ // below increments GCNoTrigger count. Contracts always revert these changes
+ // at the end of the method but we need the incremented count to flow out of the
+ // method. The balancing decrement occurs in the destructor.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+ STATIC_CONTRACT_MODE_ANY;
+
+ // We come here from the PreStub and from MethodDesc::CheckRestore
+ // The method should be effectively restored, but we haven't yet
+ // cleared the unrestored bit so we can't assert pMethodDesc->IsRestored()
+ // We can assert:
+ _ASSERTE(pMethodDesc->GetMethodTable()->IsRestored());
+
+ if (pCode != NULL)
+ {
+ m_pMD = pMethodDesc;
+ CodeVersionManager* pCodeVersionManager = pMethodDesc->GetCodeVersionManager();
+ pCodeVersionManager->EnterLock();
+ m_hr = pCodeVersionManager->DoJumpStampIfNecessary(pMethodDesc, pCode);
+ }
+}
+
+
+PublishMethodHolder::~PublishMethodHolder()
+{
+ // This method can't have a contract because leaving the table lock
+ // below decrements GCNoTrigger count. Contracts always revert these changes
+ // at the end of the method but we need the decremented count to flow out of the
+ // method. The balancing increment occurred in the constructor.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS; // NOTRIGGER until we leave the lock
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+ STATIC_CONTRACT_MODE_ANY;
+
+ if (m_pMD)
+ {
+ CodeVersionManager* pCodeVersionManager = m_pMD->GetCodeVersionManager();
+ pCodeVersionManager->LeaveLock();
+ if (FAILED(m_hr))
+ {
+ pCodeVersionManager->ReportCodePublishError(m_pMD->GetModule(), m_pMD->GetMemberDef(), m_pMD, m_hr);
+ }
+ }
+}
+
+PublishMethodTableHolder::PublishMethodTableHolder(MethodTable* pMethodTable) :
+ m_pMethodTable(NULL)
+{
+ // This method can't have a contract because entering the table lock
+ // below increments GCNoTrigger count. Contracts always revert these changes
+ // at the end of the method but we need the incremented count to flow out of the
+ // method. The balancing decrement occurs in the destructor.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+ STATIC_CONTRACT_MODE_ANY;
+
+ // We come here from MethodTable::SetIsRestored
+ // The method table should be effectively restored, but we haven't yet
+ // cleared the unrestored bit so we can't assert pMethodTable->IsRestored()
+
+ m_pMethodTable = pMethodTable;
+ CodeVersionManager* pCodeVersionManager = pMethodTable->GetModule()->GetCodeVersionManager();
+ pCodeVersionManager->EnterLock();
+ MethodTable::IntroducedMethodIterator itMethods(pMethodTable, FALSE);
+ for (; itMethods.IsValid(); itMethods.Next())
+ {
+ // Although the MethodTable is restored, the methods might not be.
+ // We need to be careful to only query portions of the MethodDesc
+ // that work in a partially restored state. The only methods that need
+ // further restoration are IL stubs (which aren't rejittable) and
+ // generic methods. The only generic methods directly accesible from
+ // the MethodTable are definitions. GetNativeCode() on generic defs
+ // will run succesfully and return NULL which short circuits the
+ // rest of the logic.
+ MethodDesc * pMD = itMethods.GetMethodDesc();
+ PCODE pCode = pMD->GetNativeCode();
+ if (pCode != NULL)
+ {
+ HRESULT hr = pCodeVersionManager->DoJumpStampIfNecessary(pMD, pCode);
+ if (FAILED(hr))
+ {
+ CodeVersionManager::AddCodePublishError(pMD->GetModule(), pMD->GetMemberDef(), pMD, hr, &m_errors);
+ }
+ }
+ }
+}
+
+
+PublishMethodTableHolder::~PublishMethodTableHolder()
+{
+ // This method can't have a contract because leaving the table lock
+ // below decrements GCNoTrigger count. Contracts always revert these changes
+ // at the end of the method but we need the decremented count to flow out of the
+ // method. The balancing increment occurred in the constructor.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS; // NOTRIGGER until we leave the lock
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+ STATIC_CONTRACT_MODE_ANY;
+
+ if (m_pMethodTable)
+ {
+ CodeVersionManager* pCodeVersionManager = m_pMethodTable->GetModule()->GetCodeVersionManager();
+ pCodeVersionManager->LeaveLock();
+ for (int i = 0; i < m_errors.Count(); i++)
+ {
+ pCodeVersionManager->ReportCodePublishError(&(m_errors[i]));
+ }
+ }
+}
+#endif // !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+
+#endif // FEATURE_CODE_VERSIONING
+
diff --git a/src/vm/codeversion.h b/src/vm/codeversion.h
new file mode 100644
index 0000000000..7bb2a24294
--- /dev/null
+++ b/src/vm/codeversion.h
@@ -0,0 +1,689 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+// ===========================================================================
+// File: CodeVersion.h
+//
+// ===========================================================================
+
+
+#ifndef CODE_VERSION_H
+#define CODE_VERSION_H
+
+class NativeCodeVersion;
+class ILCodeVersion;
+typedef DWORD NativeCodeVersionId;
+
+#ifdef FEATURE_CODE_VERSIONING
+class NativeCodeVersionNode;
+typedef DPTR(class NativeCodeVersionNode) PTR_NativeCodeVersionNode;
+class NativeCodeVersionCollection;
+class NativeCodeVersionIterator;
+class ILCodeVersionNode;
+typedef DPTR(class ILCodeVersionNode) PTR_ILCodeVersionNode;
+class ILCodeVersionCollection;
+class ILCodeVersionIterator;
+class MethodDescVersioningState;
+typedef DPTR(class MethodDescVersioningState) PTR_MethodDescVersioningState;
+
+class ILCodeVersioningState;
+typedef DPTR(class ILCodeVersioningState) PTR_ILCodeVersioningState;
+class CodeVersionManager;
+typedef DPTR(class CodeVersionManager) PTR_CodeVersionManager;
+
+// This HRESULT is only used as a private implementation detail. Corerror.xml has a comment in it
+// reserving this value for our use but it doesn't appear in the public headers.
+#define CORPROF_E_RUNTIME_SUSPEND_REQUIRED 0x80131381
+
+#endif
+
+
+
+
+class NativeCodeVersion
+{
+#ifdef FEATURE_CODE_VERSIONING
+ friend class MethodDescVersioningState;
+ friend class ILCodeVersion;
+#endif
+
+public:
+ NativeCodeVersion();
+ NativeCodeVersion(const NativeCodeVersion & rhs);
+#ifdef FEATURE_CODE_VERSIONING
+ NativeCodeVersion(PTR_NativeCodeVersionNode pVersionNode);
+#endif
+ NativeCodeVersion(PTR_MethodDesc pMethod);
+ BOOL IsNull() const;
+ PTR_MethodDesc GetMethodDesc() const;
+ NativeCodeVersionId GetVersionId() const;
+ BOOL IsDefaultVersion() const;
+ PCODE GetNativeCode() const;
+ ILCodeVersion GetILCodeVersion() const;
+ ReJITID GetILCodeVersionId() const;
+#ifndef DACCESS_COMPILE
+ BOOL SetNativeCodeInterlocked(PCODE pCode, PCODE pExpected = NULL);
+#endif
+#ifdef FEATURE_TIERED_COMPILATION
+ enum OptimizationTier
+ {
+ OptimizationTier0,
+ OptimizationTier1
+ };
+ OptimizationTier GetOptimizationTier() const;
+#ifndef DACCESS_COMPILE
+ void SetOptimizationTier(OptimizationTier tier);
+#endif
+#endif // FEATURE_TIERED_COMPILATION
+ bool operator==(const NativeCodeVersion & rhs) const;
+ bool operator!=(const NativeCodeVersion & rhs) const;
+#if defined(DACCESS_COMPILE) && defined(FEATURE_CODE_VERSIONING)
+ // The DAC is privy to the backing node abstraction
+ PTR_NativeCodeVersionNode AsNode() const;
+#endif
+
+private:
+
+#ifndef FEATURE_CODE_VERSIONING
+ MethodDesc* m_pMethodDesc;
+#else // FEATURE_CODE_VERSIONING
+
+#ifndef DACCESS_COMPILE
+ NativeCodeVersionNode* AsNode() const;
+ NativeCodeVersionNode* AsNode();
+ void SetActiveChildFlag(BOOL isActive);
+ MethodDescVersioningState* GetMethodDescVersioningState();
+#endif
+
+ BOOL IsActiveChildVersion() const;
+ PTR_MethodDescVersioningState GetMethodDescVersioningState() const;
+
+ enum StorageKind
+ {
+ Unknown,
+ Explicit,
+ Synthetic
+ };
+
+ StorageKind m_storageKind;
+ union
+ {
+ PTR_NativeCodeVersionNode m_pVersionNode;
+ struct SyntheticStorage
+ {
+ PTR_MethodDesc m_pMethodDesc;
+ } m_synthetic;
+ };
+#endif // FEATURE_CODE_VERSIONING
+};
+
+
+
+#ifdef FEATURE_CODE_VERSIONING
+
+
+
+class ILCodeVersion
+{
+ friend class NativeCodeVersionIterator;
+
+public:
+ ILCodeVersion();
+ ILCodeVersion(const ILCodeVersion & ilCodeVersion);
+ ILCodeVersion(PTR_ILCodeVersionNode pILCodeVersionNode);
+ ILCodeVersion(PTR_Module pModule, mdMethodDef methodDef);
+
+ bool operator==(const ILCodeVersion & rhs) const;
+ bool operator!=(const ILCodeVersion & rhs) const;
+ BOOL IsNull() const;
+ BOOL IsDefaultVersion() const;
+ PTR_Module GetModule() const;
+ mdMethodDef GetMethodDef() const;
+ ReJITID GetVersionId() const;
+ NativeCodeVersionCollection GetNativeCodeVersions(PTR_MethodDesc pClosedMethodDesc) const;
+ NativeCodeVersion GetActiveNativeCodeVersion(PTR_MethodDesc pClosedMethodDesc) const;
+ PTR_COR_ILMETHOD GetIL() const;
+ PTR_COR_ILMETHOD GetILNoThrow() const;
+ DWORD GetJitFlags() const;
+ const InstrumentedILOffsetMapping* GetInstrumentedILMap() const;
+
+#ifndef DACCESS_COMPILE
+ void SetIL(COR_ILMETHOD* pIL);
+ void SetJitFlags(DWORD flags);
+ void SetInstrumentedILMap(SIZE_T cMap, COR_IL_MAP * rgMap);
+ HRESULT AddNativeCodeVersion(MethodDesc* pClosedMethodDesc, NativeCodeVersion* pNativeCodeVersion);
+ HRESULT GetOrCreateActiveNativeCodeVersion(MethodDesc* pClosedMethodDesc, NativeCodeVersion* pNativeCodeVersion);
+ HRESULT SetActiveNativeCodeVersion(NativeCodeVersion activeNativeCodeVersion, BOOL fEESuspended);
+#endif //DACCESS_COMPILE
+
+ enum RejitFlags
+ {
+ // The profiler has requested a ReJit, so we've allocated stuff, but we haven't
+ // called back to the profiler to get any info or indicate that the ReJit has
+ // started. (This Info can be 'reused' for a new ReJit if the
+ // profiler calls RequestRejit again before we transition to the next state.)
+ kStateRequested = 0x00000000,
+
+ // The CLR has initiated the call to the profiler's GetReJITParameters() callback
+ // but it hasn't completed yet. At this point we have to assume the profiler has
+        // committed to a specific IL body, even if the CLR doesn't know what it is yet.
+ // If the profiler calls RequestRejit we need to allocate a new ILCodeVersion
+ // and call GetReJITParameters() again.
+ kStateGettingReJITParameters = 0x00000001,
+
+ // We have asked the profiler about this method via ICorProfilerFunctionControl,
+ // and have thus stored the IL and codegen flags the profiler specified.
+ kStateActive = 0x00000002,
+
+ kStateMask = 0x0000000F,
+ };
+
+ RejitFlags GetRejitState() const;
+#ifndef DACCESS_COMPILE
+ void SetRejitState(RejitFlags newState);
+#endif
+
+#ifdef DACCESS_COMPILE
+ // The DAC is privy to the backing node abstraction
+ PTR_ILCodeVersionNode AsNode() const;
+#endif
+
+private:
+
+#ifndef DACCESS_COMPILE
+ PTR_ILCodeVersionNode AsNode();
+ PTR_ILCodeVersionNode AsNode() const;
+#endif
+
+ enum StorageKind
+ {
+ Unknown,
+ Explicit,
+ Synthetic
+ };
+
+ StorageKind m_storageKind;
+ union
+ {
+ PTR_ILCodeVersionNode m_pVersionNode;
+ struct SyntheticStorage
+ {
+ PTR_Module m_pModule;
+ mdMethodDef m_methodDef;
+ } m_synthetic;
+ };
+};
+
+
+class NativeCodeVersionNode
+{
+ friend NativeCodeVersionIterator;
+ friend MethodDescVersioningState;
+ friend ILCodeVersionNode;
+public:
+#ifndef DACCESS_COMPILE
+ NativeCodeVersionNode(NativeCodeVersionId id, MethodDesc* pMethod, ReJITID parentId);
+#endif
+#ifdef DEBUG
+ BOOL LockOwnedByCurrentThread() const;
+#endif
+ PTR_MethodDesc GetMethodDesc() const;
+ NativeCodeVersionId GetVersionId() const;
+ PCODE GetNativeCode() const;
+ ReJITID GetILVersionId() const;
+ ILCodeVersion GetILCodeVersion() const;
+ BOOL IsActiveChildVersion() const;
+#ifndef DACCESS_COMPILE
+ BOOL SetNativeCodeInterlocked(PCODE pCode, PCODE pExpected);
+ void SetActiveChildFlag(BOOL isActive);
+#endif
+#ifdef FEATURE_TIERED_COMPILATION
+ NativeCodeVersion::OptimizationTier GetOptimizationTier() const;
+#ifndef DACCESS_COMPILE
+ void SetOptimizationTier(NativeCodeVersion::OptimizationTier tier);
+#endif
+#endif
+
+private:
+ //union - could save a little memory?
+ //{
+ PCODE m_pNativeCode;
+ PTR_MethodDesc m_pMethodDesc;
+ //};
+
+ ReJITID m_parentId;
+ PTR_NativeCodeVersionNode m_pNextMethodDescSibling;
+ NativeCodeVersionId m_id;
+#ifdef FEATURE_TIERED_COMPILATION
+ Volatile<NativeCodeVersion::OptimizationTier> m_optTier;
+#endif
+
+ enum NativeCodeVersionNodeFlags
+ {
+ IsActiveChildFlag = 1
+ };
+ DWORD m_flags;
+};
+
+class NativeCodeVersionCollection
+{
+ friend class NativeCodeVersionIterator;
+public:
+ NativeCodeVersionCollection(PTR_MethodDesc pMethodDescFilter, ILCodeVersion ilCodeFilter);
+ NativeCodeVersionIterator Begin();
+ NativeCodeVersionIterator End();
+
+private:
+ PTR_MethodDesc m_pMethodDescFilter;
+ ILCodeVersion m_ilCodeFilter;
+};
+
+class NativeCodeVersionIterator : public Enumerator<const NativeCodeVersion, NativeCodeVersionIterator>
+{
+ friend class Enumerator<const NativeCodeVersion, NativeCodeVersionIterator>;
+
+public:
+ NativeCodeVersionIterator(NativeCodeVersionCollection* pCollection);
+ CHECK Check() const { CHECK_OK; }
+
+protected:
+ const NativeCodeVersion & Get() const;
+ void First();
+ void Next();
+ bool Equal(const NativeCodeVersionIterator &i) const;
+
+ CHECK DoCheck() const { CHECK_OK; }
+
+private:
+ enum IterationStage
+ {
+ Initial,
+ ImplicitCodeVersion,
+ LinkedList,
+ End
+ };
+ IterationStage m_stage;
+ NativeCodeVersionCollection* m_pCollection;
+ PTR_NativeCodeVersionNode m_pLinkedListCur;
+ NativeCodeVersion m_cur;
+};
+
+class ILCodeVersionNode
+{
+public:
+ ILCodeVersionNode();
+#ifndef DACCESS_COMPILE
+ ILCodeVersionNode(Module* pModule, mdMethodDef methodDef, ReJITID id);
+#endif
+#ifdef DEBUG
+ BOOL LockOwnedByCurrentThread() const;
+#endif //DEBUG
+ PTR_Module GetModule() const;
+ mdMethodDef GetMethodDef() const;
+ ReJITID GetVersionId() const;
+ PTR_COR_ILMETHOD GetIL() const;
+ DWORD GetJitFlags() const;
+ const InstrumentedILOffsetMapping* GetInstrumentedILMap() const;
+ ILCodeVersion::RejitFlags GetRejitState() const;
+ PTR_ILCodeVersionNode GetNextILVersionNode() const;
+#ifndef DACCESS_COMPILE
+ void SetIL(COR_ILMETHOD* pIL);
+ void SetJitFlags(DWORD flags);
+ void SetInstrumentedILMap(SIZE_T cMap, COR_IL_MAP * rgMap);
+ void SetRejitState(ILCodeVersion::RejitFlags newState);
+ void SetNextILVersionNode(ILCodeVersionNode* pNextVersionNode);
+#endif
+
+private:
+ PTR_Module m_pModule;
+ mdMethodDef m_methodDef;
+ ReJITID m_rejitId;
+ PTR_ILCodeVersionNode m_pNextILVersionNode;
+ Volatile<ILCodeVersion::RejitFlags> m_rejitState;
+ VolatilePtr<COR_ILMETHOD> m_pIL;
+ Volatile<DWORD> m_jitFlags;
+ InstrumentedILOffsetMapping m_instrumentedILMap;
+};
+
+class ILCodeVersionCollection
+{
+ friend class ILCodeVersionIterator;
+
+public:
+ ILCodeVersionCollection(PTR_Module pModule, mdMethodDef methodDef);
+ ILCodeVersionIterator Begin();
+ ILCodeVersionIterator End();
+
+private:
+ PTR_Module m_pModule;
+ mdMethodDef m_methodDef;
+};
+
+class ILCodeVersionIterator : public Enumerator<const ILCodeVersion, ILCodeVersionIterator>
+{
+ friend class Enumerator<const ILCodeVersion, ILCodeVersionIterator>;
+
+public:
+ ILCodeVersionIterator();
+ ILCodeVersionIterator(const ILCodeVersionIterator & iter);
+ ILCodeVersionIterator(ILCodeVersionCollection* pCollection);
+ CHECK Check() const { CHECK_OK; }
+
+protected:
+ const ILCodeVersion & Get() const;
+ void First();
+ void Next();
+ bool Equal(const ILCodeVersionIterator &i) const;
+
+ CHECK DoCheck() const { CHECK_OK; }
+
+private:
+ enum IterationStage
+ {
+ Initial,
+ ImplicitCodeVersion,
+ LinkedList,
+ End
+ };
+ IterationStage m_stage;
+ ILCodeVersion m_cur;
+ PTR_ILCodeVersionNode m_pLinkedListCur;
+ ILCodeVersionCollection* m_pCollection;
+};
+
+class MethodDescVersioningState
+{
+public:
+ // The size of the code used to jump stamp the prolog
+#ifdef FEATURE_JUMPSTAMP
+ static const size_t JumpStubSize =
+#if defined(_X86_) || defined(_AMD64_)
+ 5;
+#else
+#error "Need to define size of jump-stamp for this platform"
+#endif
+#endif // FEATURE_JUMPSTAMP
+
+ MethodDescVersioningState(PTR_MethodDesc pMethodDesc);
+ PTR_MethodDesc GetMethodDesc() const;
+ NativeCodeVersionId AllocateVersionId();
+ PTR_NativeCodeVersionNode GetFirstVersionNode() const;
+
+#ifndef DACCESS_COMPILE
+#ifdef FEATURE_JUMPSTAMP
+ HRESULT SyncJumpStamp(NativeCodeVersion nativeCodeVersion, BOOL fEESuspended);
+ HRESULT UpdateJumpTarget(BOOL fEESuspended, PCODE pRejittedCode);
+ HRESULT UndoJumpStampNativeCode(BOOL fEESuspended);
+ HRESULT JumpStampNativeCode(PCODE pCode = NULL);
+#endif // FEATURE_JUMPSTAMP
+ void LinkNativeCodeVersionNode(NativeCodeVersionNode* pNativeCodeVersionNode);
+#endif // DACCESS_COMPILE
+
+#ifdef FEATURE_JUMPSTAMP
+ enum JumpStampFlags
+ {
+ // There is no jump stamp in place on this method (Either because
+ // there is no code at all, or there is code that hasn't been
+ // overwritten with a jump)
+ JumpStampNone = 0x0,
+
+ // The method code has the jump stamp written in, and it points to the Prestub
+ JumpStampToPrestub = 0x1,
+
+ // The method code has the jump stamp written in, and it points to the currently
+ // active code version
+ JumpStampToActiveVersion = 0x2,
+ };
+
+ JumpStampFlags GetJumpStampState();
+ void SetJumpStampState(JumpStampFlags newState);
+#endif // FEATURE_JUMPSTAMP
+
+ //read-write data for the default native code version
+ BOOL IsDefaultVersionActiveChild() const;
+#ifndef DACCESS_COMPILE
+ void SetDefaultVersionActiveChildFlag(BOOL isActive);
+#endif
+
+private:
+#if !defined(DACCESS_COMPILE) && defined(FEATURE_JUMPSTAMP)
+ INDEBUG(BOOL CodeIsSaved();)
+ HRESULT UpdateJumpStampHelper(BYTE* pbCode, INT64 i64OldValue, INT64 i64NewValue, BOOL fContentionPossible);
+#endif
+ PTR_MethodDesc m_pMethodDesc;
+
+ enum MethodDescVersioningStateFlags
+ {
+ JumpStampMask = 0x3,
+ IsDefaultVersionActiveChildFlag = 0x4
+ };
+ BYTE m_flags;
+ NativeCodeVersionId m_nextId;
+ PTR_NativeCodeVersionNode m_pFirstVersionNode;
+
+
+ // The originally JITted code that was overwritten with the jmp stamp.
+#ifdef FEATURE_JUMPSTAMP
+ BYTE m_rgSavedCode[JumpStubSize];
+#endif
+};
+
+class MethodDescVersioningStateHashTraits : public NoRemoveSHashTraits<DefaultSHashTraits<PTR_MethodDescVersioningState>>
+{
+public:
+ typedef typename DefaultSHashTraits<PTR_MethodDescVersioningState>::element_t element_t;
+ typedef typename DefaultSHashTraits<PTR_MethodDescVersioningState>::count_t count_t;
+
+ typedef const PTR_MethodDesc key_t;
+
+ static key_t GetKey(element_t e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return e->GetMethodDesc();
+ }
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return k1 == k2;
+ }
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (count_t)(size_t)dac_cast<TADDR>(k);
+ }
+
+ static const element_t Null() { LIMITED_METHOD_CONTRACT; return element_t(); }
+ static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return e == NULL; }
+};
+
+typedef SHash<MethodDescVersioningStateHashTraits> MethodDescVersioningStateHash;
+
+class ILCodeVersioningState
+{
+public:
+ ILCodeVersioningState(PTR_Module pModule, mdMethodDef methodDef);
+ ILCodeVersion GetActiveVersion() const;
+ PTR_ILCodeVersionNode GetFirstVersionNode() const;
+#ifndef DACCESS_COMPILE
+ void SetActiveVersion(ILCodeVersion ilActiveCodeVersion);
+ void LinkILCodeVersionNode(ILCodeVersionNode* pILCodeVersionNode);
+#endif
+
+ struct Key
+ {
+ public:
+ Key();
+ Key(PTR_Module pModule, mdMethodDef methodDef);
+ size_t Hash() const;
+ bool operator==(const Key & rhs) const;
+ private:
+ PTR_Module m_pModule;
+ mdMethodDef m_methodDef;
+ };
+
+ Key GetKey() const;
+
+private:
+ ILCodeVersion m_activeVersion;
+ PTR_ILCodeVersionNode m_pFirstVersionNode;
+ PTR_Module m_pModule;
+ mdMethodDef m_methodDef;
+};
+
+class ILCodeVersioningStateHashTraits : public NoRemoveSHashTraits<DefaultSHashTraits<PTR_ILCodeVersioningState>>
+{
+public:
+ typedef typename DefaultSHashTraits<PTR_ILCodeVersioningState>::element_t element_t;
+ typedef typename DefaultSHashTraits<PTR_ILCodeVersioningState>::count_t count_t;
+
+ typedef const ILCodeVersioningState::Key key_t;
+
+ static key_t GetKey(element_t e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return e->GetKey();
+ }
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return k1 == k2;
+ }
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (count_t)k.Hash();
+ }
+
+ static const element_t Null() { LIMITED_METHOD_CONTRACT; return element_t(); }
+ static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return e == NULL; }
+};
+
+typedef SHash<ILCodeVersioningStateHashTraits> ILCodeVersioningStateHash;
+
+
+class CodeVersionManager
+{
+ friend class ILCodeVersion;
+ friend class PublishMethodHolder;
+ friend class PublishMethodTableHolder;
+
+public:
+ CodeVersionManager();
+
+ void PreInit(BOOL fSharedDomain);
+
+ class TableLockHolder : public CrstHolder
+ {
+ public:
+ TableLockHolder(CodeVersionManager * pCodeVersionManager);
+ };
+ //Using the holder is preferable, but in some cases the holder can't be used
+#ifndef DACCESS_COMPILE
+ void EnterLock();
+ void LeaveLock();
+#endif
+
+#ifdef DEBUG
+ BOOL LockOwnedByCurrentThread() const;
+#endif
+
+ DWORD GetNonDefaultILVersionCount();
+ ILCodeVersionCollection GetILCodeVersions(PTR_MethodDesc pMethod);
+ ILCodeVersionCollection GetILCodeVersions(PTR_Module pModule, mdMethodDef methodDef);
+ ILCodeVersion GetActiveILCodeVersion(PTR_MethodDesc pMethod);
+ ILCodeVersion GetActiveILCodeVersion(PTR_Module pModule, mdMethodDef methodDef);
+ ILCodeVersion GetILCodeVersion(PTR_MethodDesc pMethod, ReJITID rejitId);
+ NativeCodeVersionCollection GetNativeCodeVersions(PTR_MethodDesc pMethod) const;
+ NativeCodeVersion GetNativeCodeVersion(PTR_MethodDesc pMethod, PCODE codeStartAddress) const;
+ PTR_ILCodeVersioningState GetILCodeVersioningState(PTR_Module pModule, mdMethodDef methodDef) const;
+ PTR_MethodDescVersioningState GetMethodDescVersioningState(PTR_MethodDesc pMethod) const;
+
+#ifndef DACCESS_COMPILE
+ struct CodePublishError
+ {
+ Module* pModule;
+ mdMethodDef methodDef;
+ MethodDesc* pMethodDesc;
+ HRESULT hrStatus;
+ };
+
+ HRESULT AddILCodeVersion(Module* pModule, mdMethodDef methodDef, ReJITID rejitId, ILCodeVersion* pILCodeVersion);
+ HRESULT AddNativeCodeVersion(ILCodeVersion ilCodeVersion, MethodDesc* pClosedMethodDesc, NativeCodeVersion* pNativeCodeVersion);
+ HRESULT DoJumpStampIfNecessary(MethodDesc* pMD, PCODE pCode);
+ PCODE PublishVersionableCodeIfNecessary(MethodDesc* pMethodDesc, BOOL fCanBackpatchPrestub);
+ HRESULT PublishNativeCodeVersion(MethodDesc* pMethodDesc, NativeCodeVersion nativeCodeVersion, BOOL fEESuspended);
+ HRESULT GetOrCreateMethodDescVersioningState(MethodDesc* pMethod, MethodDescVersioningState** ppMethodDescVersioningState);
+ HRESULT GetOrCreateILCodeVersioningState(Module* pModule, mdMethodDef methodDef, ILCodeVersioningState** ppILCodeVersioningState);
+ HRESULT SetActiveILCodeVersions(ILCodeVersion* pActiveVersions, DWORD cActiveVersions, BOOL fEESuspended, CDynArray<CodePublishError> * pPublishErrors);
+ static HRESULT AddCodePublishError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus, CDynArray<CodePublishError> * pErrors);
+ static HRESULT AddCodePublishError(NativeCodeVersion nativeCodeVersion, HRESULT hrStatus, CDynArray<CodePublishError> * pErrors);
+ static void OnAppDomainExit(AppDomain* pAppDomain);
+#endif
+
+private:
+
+#ifndef DACCESS_COMPILE
+ static HRESULT EnumerateClosedMethodDescs(MethodDesc* pMD, CDynArray<MethodDesc*> * pClosedMethodDescs, CDynArray<CodePublishError> * pUnsupportedMethodErrors);
+ static HRESULT EnumerateDomainClosedMethodDescs(
+ AppDomain * pAppDomainToSearch,
+ Module* pModuleContainingMethodDef,
+ mdMethodDef methodDef,
+ CDynArray<MethodDesc*> * pClosedMethodDescs,
+ CDynArray<CodePublishError> * pUnsupportedMethodErrors);
+ static HRESULT GetNonVersionableError(MethodDesc* pMD);
+ void ReportCodePublishError(CodePublishError* pErrorRecord);
+ void ReportCodePublishError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus);
+#endif
+
+ //Module,MethodDef -> ILCodeVersioningState
+ ILCodeVersioningStateHash m_ilCodeVersioningStateMap;
+
+ //closed MethodDesc -> MethodDescVersioningState
+ MethodDescVersioningStateHash m_methodDescVersioningStateMap;
+
+ CrstExplicitInit m_crstTable;
+};
+
+#endif // FEATURE_CODE_VERSIONING
+
+//
+// These holders are used by runtime code that is making new code
+// available for execution, either by publishing jitted code
+// or restoring NGEN code. It ensures the publishing is synchronized
+// with rejit requests
+//
+class PublishMethodHolder
+{
+public:
+#if !defined(FEATURE_CODE_VERSIONING) || defined(DACCESS_COMPILE) || defined(CROSSGEN_COMPILE)
+ PublishMethodHolder(MethodDesc* pMethod, PCODE pCode) { }
+#else
+ PublishMethodHolder(MethodDesc* pMethod, PCODE pCode);
+ ~PublishMethodHolder();
+#endif
+
+private:
+#if defined(FEATURE_CODE_VERSIONING)
+ MethodDesc * m_pMD;
+ HRESULT m_hr;
+#endif
+};
+
+class PublishMethodTableHolder
+{
+public:
+#if !defined(FEATURE_CODE_VERSIONING) || defined(DACCESS_COMPILE) || defined(CROSSGEN_COMPILE)
+ PublishMethodTableHolder(MethodTable* pMethodTable) { }
+#else
+ PublishMethodTableHolder(MethodTable* pMethodTable);
+ ~PublishMethodTableHolder();
+#endif
+
+private:
+#if defined(FEATURE_CODE_VERSIONING) && !defined(DACCESS_COMPILE)
+ MethodTable* m_pMethodTable;
+ CDynArray<CodeVersionManager::CodePublishError> m_errors;
+#endif
+};
+
+#endif // CODE_VERSION_H
diff --git a/src/vm/crossgen/CMakeLists.txt b/src/vm/crossgen/CMakeLists.txt
index 805e932dda..8c706885b8 100644
--- a/src/vm/crossgen/CMakeLists.txt
+++ b/src/vm/crossgen/CMakeLists.txt
@@ -36,6 +36,7 @@ set(VM_CROSSGEN_SOURCES
../generics.cpp
../genmeth.cpp
../hash.cpp
+ ../ilinstrumentation.cpp
../ilmarshalers.cpp
../ilstubcache.cpp
../ilstubresolver.cpp
@@ -46,7 +47,6 @@ set(VM_CROSSGEN_SOURCES
../contractimpl.cpp
../jitinterface.cpp
../loaderallocator.cpp
- ../listlock.cpp
../memberload.cpp
../method.cpp
../methodimpl.cpp
diff --git a/src/vm/crossgencompile.cpp b/src/vm/crossgencompile.cpp
index 367112e285..bcbf9d1636 100644
--- a/src/vm/crossgencompile.cpp
+++ b/src/vm/crossgencompile.cpp
@@ -436,7 +436,3 @@ BOOL AppDomain::BindingByManifestFile()
{
return FALSE;
}
-
-ReJitManager::ReJitManager()
-{
-}
diff --git a/src/vm/crst.h b/src/vm/crst.h
index a353c6ea44..fa8c307f3f 100644
--- a/src/vm/crst.h
+++ b/src/vm/crst.h
@@ -115,14 +115,15 @@ class CrstBase
friend class Thread;
friend class ThreadStore;
friend class ThreadSuspend;
-friend class ListLock;
-friend class ListLockEntry;
+template <typename ELEMENT>
+friend class ListLockBase;
+template <typename ELEMENT>
+friend class ListLockEntryBase;
//friend class CExecutionEngine;
friend struct SavedExceptionInfo;
friend void EEEnterCriticalSection(CRITSEC_COOKIE cookie);
friend void EELeaveCriticalSection(CRITSEC_COOKIE cookie);
-friend class ReJitPublishMethodHolder;
-friend class ReJitPublishMethodTableHolder;
+friend class CodeVersionManager;
friend class Debugger;
friend class Crst;
diff --git a/src/vm/dllimport.cpp b/src/vm/dllimport.cpp
index 49c7d7a8b8..a0631c3345 100644
--- a/src/vm/dllimport.cpp
+++ b/src/vm/dllimport.cpp
@@ -5413,8 +5413,7 @@ PCODE JitILStub(MethodDesc* pStubMD)
// A dynamically generated IL stub
//
- CORJIT_FLAGS jitFlags = pStubMD->AsDynamicMethodDesc()->GetILStubResolver()->GetJitFlags();
- pCode = pStubMD->MakeJitWorker(NULL, jitFlags);
+ pCode = pStubMD->PrepareInitialCode();
_ASSERTE(pCode == pStubMD->GetNativeCode());
}
diff --git a/src/vm/eventtrace.cpp b/src/vm/eventtrace.cpp
index 16f729d505..6325edb462 100644
--- a/src/vm/eventtrace.cpp
+++ b/src/vm/eventtrace.cpp
@@ -6834,7 +6834,7 @@ VOID ETW::MethodLog::SendEventsForJitMethodsHelper(BaseDomain *pDomainFilter,
// manager locks.
// see code:#TableLockHolder
ReJITID rejitID =
- fGetReJitIDs ? pMD->GetReJitManager()->GetReJitIdNoLock(pMD, codeStart) : 0;
+ fGetReJitIDs ? ReJitManager::GetReJitIdNoLock(pMD, codeStart) : 0;
// There are small windows of time where the heap iterator may come across a
// codeStart that is not yet published to the MethodDesc. This may happen if
@@ -6962,8 +6962,8 @@ VOID ETW::MethodLog::SendEventsForJitMethods(BaseDomain *pDomainFilter, LoaderAl
// We only support getting rejit IDs when filtering by domain.
if (pDomainFilter)
{
- ReJitManager::TableLockHolder lkRejitMgrSharedDomain(SharedDomain::GetDomain()->GetReJitManager());
- ReJitManager::TableLockHolder lkRejitMgrModule(pDomainFilter->GetReJitManager());
+ CodeVersionManager::TableLockHolder lkRejitMgrSharedDomain(SharedDomain::GetDomain()->GetCodeVersionManager());
+ CodeVersionManager::TableLockHolder lkRejitMgrModule(pDomainFilter->GetCodeVersionManager());
SendEventsForJitMethodsHelper(pDomainFilter,
pLoaderAllocatorFilter,
dwEventOptions,
diff --git a/src/vm/gccover.cpp b/src/vm/gccover.cpp
index 3e195796b4..15ce76164c 100644
--- a/src/vm/gccover.cpp
+++ b/src/vm/gccover.cpp
@@ -160,7 +160,7 @@ void SetupGcCoverage(MethodDesc* pMD, BYTE* methodStartPtr) {
{
BaseDomain* pDomain = pMD->GetDomain();
// Enter the global lock which protects the list of all functions being JITd
- ListLockHolder pJitLock(pDomain->GetJitLock());
+ JitListLock::LockHolder pJitLock(pDomain->GetJitLock());
// It is possible that another thread stepped in before we entered the global lock for the first time.
@@ -175,14 +175,14 @@ void SetupGcCoverage(MethodDesc* pMD, BYTE* methodStartPtr) {
#ifdef _DEBUG
description = pMD->m_pszDebugMethodName;
#endif
- ListLockEntryHolder pEntry(ListLockEntry::Find(pJitLock, pMD, description));
+ ReleaseHolder<JitListLockEntry> pEntry(JitListLockEntry::Find(pJitLock, pMD->GetInitialCodeVersion(), description));
// We have an entry now, we can release the global lock
pJitLock.Release();
// Take the entry lock
{
- ListLockEntryLockHolder pEntryLock(pEntry, FALSE);
+ JitListLockEntry::LockHolder pEntryLock(pEntry, FALSE);
if (pEntryLock.DeadlockAwareAcquire())
{
diff --git a/src/vm/i386/stublinkerx86.cpp b/src/vm/i386/stublinkerx86.cpp
index c1b0b19f0c..dbb4c028c1 100644
--- a/src/vm/i386/stublinkerx86.cpp
+++ b/src/vm/i386/stublinkerx86.cpp
@@ -6698,7 +6698,7 @@ BOOL FixupPrecode::SetTargetInterlocked(TADDR target, TADDR expected)
CONTRACTL
{
THROWS; // Creating a JumpStub could throw OutOfMemory
- GC_TRIGGERS;
+ GC_NOTRIGGER;
}
CONTRACTL_END;
@@ -6720,7 +6720,7 @@ BOOL FixupPrecode::SetTargetInterlocked(TADDR target, TADDR expected)
}
else if (pOldValue[OFFSETOF_PRECODE_TYPE_CALL_OR_JMP] == FixupPrecode::Type)
{
-#ifdef FEATURE_TIERED_COMPILATION
+#ifdef FEATURE_CODE_VERSIONING
// No change needed, jmp is already in place
#else
// Setting the target more than once is unexpected
diff --git a/src/vm/ilinstrumentation.cpp b/src/vm/ilinstrumentation.cpp
new file mode 100644
index 0000000000..a2bdbf1a60
--- /dev/null
+++ b/src/vm/ilinstrumentation.cpp
@@ -0,0 +1,90 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+// ===========================================================================
+// File: ILInstrumentation.cpp
+//
+// ===========================================================================
+
+
+#include "common.h"
+#include "ilinstrumentation.h"
+
+
+//---------------------------------------------------------------------------------------
+InstrumentedILOffsetMapping::InstrumentedILOffsetMapping()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ m_cMap = 0;
+ m_rgMap = NULL;
+ _ASSERTE(IsNull());
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Check whether there is any mapping information stored in this object.
+//
+// Notes:
+//    Returns TRUE when no mapping entries are stored. m_cMap and m_rgMap
+//    are kept in sync, so the count and the array pointer are null together.
+//
+
+BOOL InstrumentedILOffsetMapping::IsNull() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE((m_cMap == 0) == (m_rgMap == NULL));
+ return (m_cMap == 0);
+}
+
+#if !defined(DACCESS_COMPILE)
+//---------------------------------------------------------------------------------------
+//
+// Release the memory used by the array of COR_IL_MAPs.
+//
+// Notes:
+// * The memory should be alive throughout the process lifetime until the Module containing
+// the instrumented method is destructed.
+// * This struct should be read-only in DAC builds.
+//
+
+void InstrumentedILOffsetMapping::Clear()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_rgMap != NULL)
+ {
+ delete[] m_rgMap;
+ }
+
+ m_cMap = 0;
+ m_rgMap = NULL;
+}
+#endif // !DACCESS_COMPILE
+
+#if !defined(DACCESS_COMPILE)
+void InstrumentedILOffsetMapping::SetMappingInfo(SIZE_T cMap, COR_IL_MAP * rgMap)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE((cMap == 0) == (rgMap == NULL));
+ m_cMap = cMap;
+ m_rgMap = ARRAY_PTR_COR_IL_MAP(rgMap);
+}
+#endif // !DACCESS_COMPILE
+
+SIZE_T InstrumentedILOffsetMapping::GetCount() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE((m_cMap == 0) == (m_rgMap == NULL));
+ return m_cMap;
+}
+
+ARRAY_PTR_COR_IL_MAP InstrumentedILOffsetMapping::GetOffsets() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE((m_cMap == 0) == (m_rgMap == NULL));
+ return m_rgMap;
+}
diff --git a/src/vm/ilinstrumentation.h b/src/vm/ilinstrumentation.h
new file mode 100644
index 0000000000..cc486ede3f
--- /dev/null
+++ b/src/vm/ilinstrumentation.h
@@ -0,0 +1,116 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+// ===========================================================================
+// File: ILInstrumentation.h
+//
+// ===========================================================================
+
+
+
+#ifndef IL_INSTRUMENTATION_H
+#define IL_INSTRUMENTATION_H
+
+// declare an array type of COR_IL_MAP entries
+typedef ArrayDPTR(COR_IL_MAP) ARRAY_PTR_COR_IL_MAP;
+
+//---------------------------------------------------------------------------------------
+//
+// A profiler may instrument a method by changing the IL. This is typically done when the profiler receives
+// a JITCompilationStarted notification. The profiler also has the option to provide the runtime with
+// a mapping between original IL offsets and instrumented IL offsets. This struct is a simple container
+// for storing the mapping information. We store the mapping information on the Module class, where it can
+// be accessed by the debugger from out-of-process.
+//
+
+class InstrumentedILOffsetMapping
+{
+public:
+ InstrumentedILOffsetMapping();
+
+ // Check whether there is any mapping information stored in this object.
+ BOOL IsNull() const;
+
+#if !defined(DACCESS_COMPILE)
+ // Release the memory used by the array of COR_IL_MAPs.
+ void Clear();
+
+ void SetMappingInfo(SIZE_T cMap, COR_IL_MAP * rgMap);
+#endif // !DACCESS_COMPILE
+
+ SIZE_T GetCount() const;
+ ARRAY_PTR_COR_IL_MAP GetOffsets() const;
+
+private:
+ SIZE_T m_cMap; // the number of elements in m_rgMap
+ ARRAY_PTR_COR_IL_MAP m_rgMap; // an array of COR_IL_MAPs
+};
+
+//---------------------------------------------------------------------------------------
+//
+// Hash table entry for storing InstrumentedILOffsetMapping. This is keyed by the MethodDef token.
+//
+
+struct ILOffsetMappingEntry
+{
+ ILOffsetMappingEntry()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ m_methodToken = mdMethodDefNil;
+ // No need to initialize m_mapping. The default ctor of InstrumentedILOffsetMapping does the job.
+ }
+
+ ILOffsetMappingEntry(mdMethodDef token, InstrumentedILOffsetMapping mapping)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ m_methodToken = token;
+ m_mapping = mapping;
+ }
+
+ mdMethodDef m_methodToken;
+ InstrumentedILOffsetMapping m_mapping;
+};
+
+//---------------------------------------------------------------------------------------
+//
+// This class is used to create the hash table for the instrumented IL offset mapping.
+// It encapsulates the desired behaviour of the templated hash table and implements
+// the various functions needed by the hash table.
+//
+
+class ILOffsetMappingTraits : public NoRemoveSHashTraits<DefaultSHashTraits<ILOffsetMappingEntry> >
+{
+public:
+ typedef mdMethodDef key_t;
+
+ static key_t GetKey(element_t e)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return e.m_methodToken;
+ }
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (k1 == k2);
+ }
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (count_t)(size_t)k;
+ }
+ static const element_t Null()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ ILOffsetMappingEntry e;
+ return e;
+ }
+ static bool IsNull(const element_t &e) { LIMITED_METHOD_DAC_CONTRACT; return e.m_methodToken == mdMethodDefNil; }
+};
+
+// Hash table of profiler-provided instrumented IL offset mapping, keyed by the MethodDef token
+typedef SHash<ILOffsetMappingTraits> ILOffsetMappingTable;
+typedef DPTR(ILOffsetMappingTable) PTR_ILOffsetMappingTable;
+
+#endif // IL_INSTRUMENTATION_H
diff --git a/src/vm/interpreter.cpp b/src/vm/interpreter.cpp
index 6901c9c2cd..df1cc92a97 100644
--- a/src/vm/interpreter.cpp
+++ b/src/vm/interpreter.cpp
@@ -38,7 +38,6 @@ static CorInfoType asCorInfoType(CORINFO_CLASS_HANDLE clsHnd)
InterpreterMethodInfo::InterpreterMethodInfo(CEEInfo* comp, CORINFO_METHOD_INFO* methInfo)
: m_method(methInfo->ftn),
m_module(methInfo->scope),
- m_jittedCode(0),
m_ILCode(methInfo->ILCode),
m_ILCodeEnd(methInfo->ILCode + methInfo->ILCodeSize),
m_maxStack(methInfo->maxStack),
@@ -798,12 +797,6 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp,
// So the structure of the code will look like this (in the non-ILstub case):
//
#if defined(_X86_) || defined(_AMD64_)
- // First do "short-circuiting" if the method has JITted code, and we couldn't find/update the call site:
- // eax = &interpMethInfo
- // eax = [eax + offsetof(m_jittedCode)]
- // if (eax == zero) goto doInterpret:
- // /*else*/ jmp [eax]
- // doInterpret:
// push ebp
// mov ebp, esp
// [if there are register arguments in ecx or edx, push them]
@@ -817,41 +810,6 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp,
// TODO.
#endif
- // The IL stub case is hard. The portion of the interpreter stub that short-circuits
- // to JITted code requires an extra "scratch" volatile register, not an argument register;
- // in the IL stub case, it too is using such a register, as an extra argument, to hold the stub context.
- // On x86 and ARM, there is only one such extra volatile register, and we've got a conundrum.
- // The cases where this short-circuiting is important is when the address of an interpreter stub
- // becomes "embedded" in other code. The examples I know of are VSD stubs and delegates.
- // The first of these is not a problem for IL stubs -- methods invoked via p/Invoke (the ones that
- // [I think!] use IL stubs) are static, and cannot be invoked via VSD stubs. Delegates, on the other
- // remain a problem [I believe].
- // For the short term, we'll ignore this issue, and never do short-circuiting for IL stubs.
- // So interpreter stubs embedded in delegates will continue to interpret the IL stub, even after
- // the stub has been JITted.
- // The long-term intention is that when we JIT a method with an interpreter stub, we keep a mapping
- // from interpreter stub address to corresponding native code address. If this mapping is non-empty,
- // at GC time we would visit the locations in which interpreter stub addresses might be located, like
- // VSD stubs and delegate objects, and update them to point to new addresses. This would be a necessary
- // part of any scheme to GC interpreter stubs, and InterpreterMethodInfos.
-
- // If we *really* wanted to make short-circuiting work for the IL stub case, we would have to
- // (in the x86 case, which should be sufficiently illustrative):
- // push eax
- // <get the address of JITted code, if any, into eax>
- // if there is JITted code in eax, we'd have to
- // push 2 non-volatile registers, say esi and edi.
- // copy the JITted code address from eax into esi.
- // copy the method arguments (without the return address) down the stack, using edi
- // as a scratch register.
- // restore the original stub context value into eax from the stack
- // call (not jmp) to the JITted code address in esi
- // pop esi and edi from the stack.
- // now the stack has original args, followed by original return address. Do a "ret"
- // that returns to the return address, and also pops the original args from the stack.
- // If we did this, we'd have to give this portion of the stub proper unwind info.
- // Also, we'd have to adjust the rest of the stub to pop eax from the stack.
-
// TODO: much of the interpreter stub code should be is shareable. In the non-IL stub case,
// at least, we could have a small per-method stub that puts the address of the method-specific
// InterpreterMethodInfo into eax, and then branches to a shared part. Probably we would want to
@@ -868,24 +826,7 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp,
{
sl.Init();
#if defined(_X86_) || defined(_AMD64_)
- // First we do "short-circuiting" if the method has JITted code.
-#if INTERP_ILSTUBS
- if (!pMD->IsILStub()) // As discussed above, we don't do short-circuiting for IL stubs.
-#endif
- {
- // First read the m_jittedCode field.
- sl.X86EmitRegLoad(kEAX, UINT_PTR(interpMethInfo));
- sl.X86EmitOffsetModRM(0x8b, kEAX, kEAX, offsetof(InterpreterMethodInfo, m_jittedCode));
- // If it is still zero, then go on to do the interpretation.
- sl.X86EmitCmpRegImm32(kEAX, 0);
- CodeLabel* doInterpret = sl.NewCodeLabel();
- sl.X86EmitCondJump(doInterpret, X86CondCode::kJE);
- // Otherwise...
- sl.X86EmitJumpReg(kEAX); // tail call to JITted code.
- sl.EmitLabel(doInterpret);
- }
#if defined(_X86_)
- // Start regular interpretation
sl.X86EmitPushReg(kEBP);
sl.X86EmitMovRegReg(kEBP, static_cast<X86Reg>(kESP_Unsafe));
#endif
@@ -895,43 +836,10 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp,
ThumbReg r11 = ThumbReg(11);
ThumbReg r12 = ThumbReg(12);
-#if INTERP_ILSTUBS
- if (!pMD->IsILStub()) // As discussed above, we don't do short-circuiting for IL stubs.
-#endif
- {
- // But we also have to use r4, because ThumbEmitCondRegJump below requires a low register.
- sl.ThumbEmitMovConstant(r11, 0);
- sl.ThumbEmitMovConstant(r12, UINT_PTR(interpMethInfo));
- sl.ThumbEmitLoadRegIndirect(r12, r12, offsetof(InterpreterMethodInfo, m_jittedCode));
- sl.ThumbEmitCmpReg(r12, r11); // Set condition codes.
- // If r12 is zero, then go on to do the interpretation.
- CodeLabel* doInterpret = sl.NewCodeLabel();
- sl.ThumbEmitCondFlagJump(doInterpret, thumbCondEq.cond);
- sl.ThumbEmitJumpRegister(r12); // If non-zero, tail call to JITted code.
- sl.EmitLabel(doInterpret);
- }
-
- // Start regular interpretation
-
#elif defined(_ARM64_)
// x8 through x15 are scratch registers on ARM64.
IntReg x8 = IntReg(8);
IntReg x9 = IntReg(9);
-
-#if INTERP_ILSTUBS
- if (!pMD->IsILStub()) // As discussed above, we don't do short-circuiting for IL stubs.
-#endif
- {
- sl.EmitMovConstant(x8, UINT64(interpMethInfo));
- sl.EmitLoadStoreRegImm(StubLinkerCPU::eLOAD, x9, x8, offsetof(InterpreterMethodInfo, m_jittedCode));
- sl.EmitCmpImm(x9, 0);
- CodeLabel* doInterpret = sl.NewCodeLabel();
- sl.EmitCondFlagJump(doInterpret, CondEq.cond);
- sl.EmitJumpRegister(x9);
- sl.EmitLabel(doInterpret);
- }
-
- // Start regular interpretation
#else
#error unsupported platform
#endif
@@ -1749,8 +1657,16 @@ void Interpreter::JitMethodIfAppropriate(InterpreterMethodInfo* interpMethInfo,
md->GetMDImport(),
&status);
}
- PCODE res = md->MakeJitWorker(pDecoder, jitFlags);
- interpMethInfo->m_jittedCode = res;
+ // This used to be a synchronous jit and could be made so again if desired,
+ // but using ASP.Net MusicStore as an example scenario the performance is
+ // better doing the JIT asynchronously. Given the not-on-by-default nature of the
+ // interpreter I didn't wring my hands too much trying to determine the ideal
+ // policy.
+#ifdef FEATURE_TIERED_COMPILATION
+ GetAppDomain()->GetTieredCompilationManager()->AsyncPromoteMethodToTier1(md);
+#else
+#error FEATURE_INTERPRETER depends on FEATURE_TIERED_COMPILATION now
+#endif
}
}
}
diff --git a/src/vm/interpreter.h b/src/vm/interpreter.h
index dc7638ca7d..fd4a68bea3 100644
--- a/src/vm/interpreter.h
+++ b/src/vm/interpreter.h
@@ -552,9 +552,6 @@ struct InterpreterMethodInfo
// The module containing the method.
CORINFO_MODULE_HANDLE m_module;
- // If the method has been JITted, it's JITted code (for indirection).
- PCODE m_jittedCode;
-
// Code pointer, size, and max stack usage.
BYTE* m_ILCode;
BYTE* m_ILCodeEnd; // One byte past the last byte of IL. IL Code Size = m_ILCodeEnd - m_ILCode.
diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
index 2fc5e09391..57ea7125b6 100644
--- a/src/vm/jitinterface.cpp
+++ b/src/vm/jitinterface.cpp
@@ -7673,64 +7673,10 @@ CorInfoInline CEEInfo::canInline (CORINFO_METHOD_HANDLE hCaller,
{
// #rejit
//
- // See if rejit-specific flags for the caller disable inlining
- if ((ReJitManager::GetCurrentReJitFlags(pCaller) &
- COR_PRF_CODEGEN_DISABLE_INLINING) != 0)
- {
- result = INLINE_FAIL;
- szFailReason = "ReJIT request disabled inlining from caller";
- goto exit;
- }
-
- // If the profiler has set a mask preventing inlining, always return
- // false to the jit.
- if (CORProfilerDisableInlining())
- {
- result = INLINE_FAIL;
- szFailReason = "Profiler disabled inlining globally";
- goto exit;
- }
-
- // If the profiler wishes to be notified of JIT events and the result from
- // the above tests will cause a function to be inlined, we need to tell the
- // profiler that this inlining is going to take place, and give them a
- // chance to prevent it.
- {
- BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
- if (pCaller->IsILStub() || pCallee->IsILStub())
- {
- // do nothing
- }
- else
- {
- BOOL fShouldInline;
-
- HRESULT hr = g_profControlBlock.pProfInterface->JITInlining(
- (FunctionID)pCaller,
- (FunctionID)pCallee,
- &fShouldInline);
-
- if (SUCCEEDED(hr) && !fShouldInline)
- {
- result = INLINE_FAIL;
- szFailReason = "Profiler disabled inlining locally";
- goto exit;
- }
- }
- END_PIN_PROFILER();
- }
- }
-#endif // PROFILING_SUPPORTED
-
-
-#ifdef PROFILING_SUPPORTED
- if (CORProfilerPresent())
- {
- // #rejit
- //
- // See if rejit-specific flags for the caller disable inlining
- if ((ReJitManager::GetCurrentReJitFlags(pCaller) &
- COR_PRF_CODEGEN_DISABLE_INLINING) != 0)
+ // Currently the rejit path is the only path which sets this.
+ // If we get more reasons to set this then we may need to change
+ // the failure reason message or disambiguate them.
+ if (!m_allowInlining)
{
result = INLINE_FAIL;
szFailReason = "ReJIT request disabled inlining from caller";
@@ -12546,7 +12492,8 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader, CORJIT_
for (;;)
{
#ifndef CROSSGEN_COMPILE
- CEEJitInfo jitInfo(ftn, ILHeader, jitMgr, flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY));
+ CEEJitInfo jitInfo(ftn, ILHeader, jitMgr, flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY),
+ !flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_NO_INLINING));
#else
// This path should be only ever used for verification in crossgen and so we should not need EEJitManager
_ASSERTE(flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY));
diff --git a/src/vm/jitinterface.h b/src/vm/jitinterface.h
index e34b859c3f..8df6b72ea4 100644
--- a/src/vm/jitinterface.h
+++ b/src/vm/jitinterface.h
@@ -1052,16 +1052,17 @@ public:
DWORD getExpectedTargetArchitecture();
- CEEInfo(MethodDesc * fd = NULL, bool fVerifyOnly = false) :
+ CEEInfo(MethodDesc * fd = NULL, bool fVerifyOnly = false, bool fAllowInlining = true) :
m_pOverride(NULL),
m_pMethodBeingCompiled(fd),
m_fVerifyOnly(fVerifyOnly),
m_pThread(GetThread()),
m_hMethodForSecurity_Key(NULL),
- m_pMethodForSecurity_Value(NULL)
+ m_pMethodForSecurity_Value(NULL),
#if defined(FEATURE_GDBJIT)
- , m_pCalledMethods(NULL)
+ m_pCalledMethods(NULL),
#endif
+ m_allowInlining(fAllowInlining)
{
LIMITED_METHOD_CONTRACT;
}
@@ -1154,6 +1155,8 @@ protected:
CalledMethod * m_pCalledMethods;
#endif
+ bool m_allowInlining;
+
// Tracking of module activation dependencies. We have two flavors:
// - Fast one that gathers generic arguments from EE handles, but does not work inside generic context.
// - Slow one that operates on typespec and methodspecs from metadata.
@@ -1330,8 +1333,8 @@ public:
#endif
CEEJitInfo(MethodDesc* fd, COR_ILMETHOD_DECODER* header,
- EEJitManager* jm, bool fVerifyOnly)
- : CEEInfo(fd, fVerifyOnly),
+ EEJitManager* jm, bool fVerifyOnly, bool allowInlining = true)
+ : CEEInfo(fd, fVerifyOnly, allowInlining),
m_jitManager(jm),
m_CodeHeader(NULL),
m_ILHeader(header),
@@ -1464,7 +1467,6 @@ protected :
void* m_pvGphProfilerHandle;
} m_gphCache;
-
};
#endif // CROSSGEN_COMPILE
diff --git a/src/vm/listlock.cpp b/src/vm/listlock.cpp
deleted file mode 100644
index 450e85aef5..0000000000
--- a/src/vm/listlock.cpp
+++ /dev/null
@@ -1,96 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-// ===========================================================================
-// File: ListLock.cpp
-//
-
-//
-// ===========================================================================
-// This file decribes the list lock and deadlock aware list lock.
-// ===========================================================================
-
-
-#include "common.h"
-#include "listlock.h"
-#include "listlock.inl"
-
-ListLockEntry::ListLockEntry(ListLock *pList, void *pData, const char *description)
- : m_deadlock(description),
- m_pList(pList),
- m_pData(pData),
- m_Crst(CrstListLock,
- (CrstFlags)(CRST_REENTRANCY | (pList->IsHostBreakable()?CRST_HOST_BREAKABLE:0))),
- m_pszDescription(description),
- m_pNext(NULL),
- m_dwRefCount(1),
- m_hrResultCode(S_FALSE),
- m_hInitException(NULL),
- m_pLoaderAllocator(NULL)
-#ifdef FEATURE_CORRUPTING_EXCEPTIONS
- ,
- m_CorruptionSeverity(NotCorrupting)
-#endif // FEATURE_CORRUPTING_EXCEPTIONS
-{
- WRAPPER_NO_CONTRACT;
-}
-
-ListLockEntry *ListLockEntry::Find(ListLock* pLock, LPVOID pPointer, const char *description)
-{
- CONTRACTL
- {
- THROWS;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- _ASSERTE(pLock->HasLock());
-
- ListLockEntry *pEntry = pLock->Find(pPointer);
- if (pEntry==NULL)
- {
- pEntry = new ListLockEntry(pLock, pPointer, description);
- pLock->AddElement(pEntry);
- }
- else
- pEntry->AddRef();
-
- return pEntry;
-};
-
-void ListLockEntry::AddRef()
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- PRECONDITION(CheckPointer(this));
- }
- CONTRACTL_END;
-
- FastInterlockIncrement((LONG*)&m_dwRefCount);
-}
-
-void ListLockEntry::Release()
-{
- CONTRACTL
- {
- NOTHROW;
- GC_TRIGGERS;
- MODE_ANY;
- PRECONDITION(CheckPointer(this));
- }
- CONTRACTL_END;
-
- ListLockHolder lock(m_pList);
-
- if (FastInterlockDecrement((LONG*)&m_dwRefCount) == 0)
- {
- // Remove from list
- m_pList->Unlink(this);
- delete this;
- }
-};
-
diff --git a/src/vm/listlock.h b/src/vm/listlock.h
index e16741a7d7..db953c8b55 100644
--- a/src/vm/listlock.h
+++ b/src/vm/listlock.h
@@ -17,7 +17,8 @@
#include "threads.h"
#include "crst.h"
-class ListLock;
+template < typename ELEMENT >
+class ListLockBase;
// This structure is used for running class init methods or JITing methods
// (m_pData points to a FunctionDesc). This class cannot have a destructor since it is used
// in function that also have EX_TRY's and the VC compiler doesn't allow classes with destructors
@@ -25,9 +26,14 @@ class ListLock;
// <TODO>@FUTURE Keep a pool of these (e.g. an array), so we don't have to allocate on the fly</TODO>
// m_hInitException contains a handle to the exception thrown by the class init. This
// allows us to throw this information to the caller on subsequent class init attempts.
-class ListLockEntry
+template < typename ELEMENT >
+class ListLockEntryBase
{
- friend class ListLock;
+ friend class ListLockBase<ELEMENT>;
+ typedef ListLockEntryBase<ELEMENT> Entry_t;
+ typedef ListLockBase<ELEMENT> List_t;
+ typedef typename List_t::LockHolder ListLockHolder;
+
public:
#ifdef _DEBUG
@@ -40,11 +46,11 @@ public:
#endif // DEBUG
DeadlockAwareLock m_deadlock;
- ListLock * m_pList;
- void * m_pData;
+ List_t * m_pList;
+ ELEMENT m_data;
Crst m_Crst;
const char * m_pszDescription;
- ListLockEntry * m_pNext;
+ Entry_t * m_pNext;
DWORD m_dwRefCount;
HRESULT m_hrResultCode;
LOADERHANDLE m_hInitException;
@@ -54,9 +60,27 @@ public:
CorruptionSeverity m_CorruptionSeverity;
#endif // FEATURE_CORRUPTING_EXCEPTIONS
- ListLockEntry(ListLock *pList, void *pData, const char *description = NULL);
+ ListLockEntryBase(List_t *pList, ELEMENT data, const char *description = NULL)
+ : m_deadlock(description),
+ m_pList(pList),
+ m_data(data),
+ m_Crst(CrstListLock,
+ (CrstFlags)(CRST_REENTRANCY | (pList->IsHostBreakable() ? CRST_HOST_BREAKABLE : 0))),
+ m_pszDescription(description),
+ m_pNext(NULL),
+ m_dwRefCount(1),
+ m_hrResultCode(S_FALSE),
+ m_hInitException(NULL),
+ m_pLoaderAllocator(dac_cast<PTR_LoaderAllocator>(nullptr))
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ ,
+ m_CorruptionSeverity(NotCorrupting)
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ {
+ WRAPPER_NO_CONTRACT;
+ }
- virtual ~ListLockEntry()
+ virtual ~ListLockEntryBase()
{
}
@@ -102,10 +126,65 @@ public:
m_Crst.Leave();
}
- static ListLockEntry *Find(ListLock* pLock, LPVOID pPointer, const char *description = NULL) DAC_EMPTY_RET(NULL);
+ static Entry_t *Find(List_t* pLock, ELEMENT data, const char *description = NULL)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pLock->HasLock());
+
+ Entry_t *pEntry = pLock->Find(data);
+ if (pEntry == NULL)
+ {
+ pEntry = new Entry_t(pLock, data, description);
+ pLock->AddElement(pEntry);
+ }
+ else
+ pEntry->AddRef();
+
+ return pEntry;
+ };
+
+
+ void AddRef()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(this));
+ }
+ CONTRACTL_END;
+
+ FastInterlockIncrement((LONG*)&m_dwRefCount);
+ }
+
+ void Release()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(this));
+ }
+ CONTRACTL_END;
- void AddRef() DAC_EMPTY_ERR();
- void Release() DAC_EMPTY_ERR();
+ ListLockHolder lock(m_pList);
+
+ if (FastInterlockDecrement((LONG*)&m_dwRefCount) == 0)
+ {
+ // Remove from list
+ m_pList->Unlink(this);
+ delete this;
+ }
+ };
#ifdef _DEBUG
BOOL HasLock()
@@ -117,14 +196,14 @@ public:
// LockHolder holds the lock of the element, not the element itself
- DEBUG_NOINLINE static void LockHolderEnter(ListLockEntry *pThis) PUB
+ DEBUG_NOINLINE static void LockHolderEnter(Entry_t *pThis) PUB
{
WRAPPER_NO_CONTRACT;
ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
pThis->Enter();
}
- DEBUG_NOINLINE static void LockHolderLeave(ListLockEntry *pThis) PUB
+ DEBUG_NOINLINE static void LockHolderLeave(Entry_t *pThis) PUB
{
WRAPPER_NO_CONTRACT;
ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
@@ -139,7 +218,7 @@ public:
m_deadlock.EndEnterLock();
}
- typedef Wrapper<ListLockEntry *, ListLockEntry::LockHolderEnter, ListLockEntry::LockHolderLeave> LockHolderBase;
+ typedef Wrapper<Entry_t *, LockHolderEnter, LockHolderLeave> LockHolderBase;
class LockHolder : public LockHolderBase
{
@@ -150,32 +229,36 @@ public:
{
}
- LockHolder(ListLockEntry *value, BOOL take = TRUE)
+ LockHolder(Entry_t *value, BOOL take = TRUE)
: LockHolderBase(value, take)
{
}
BOOL DeadlockAwareAcquire()
{
- if (!m_acquired && m_value != NULL)
+ if (!this->m_acquired && this->m_value != NULL)
{
- if (!m_value->m_deadlock.TryBeginEnterLock())
+ if (!this->m_value->m_deadlock.TryBeginEnterLock())
return FALSE;
- m_value->FinishDeadlockAwareEnter();
- m_acquired = TRUE;
+ this->m_value->FinishDeadlockAwareEnter();
+ this->m_acquired = TRUE;
}
return TRUE;
}
};
};
-class ListLock
+template < typename ELEMENT >
+class ListLockBase
{
+ typedef ListLockBase<ELEMENT> List_t;
+ typedef ListLockEntryBase<ELEMENT> Entry_t;
+
protected:
CrstStatic m_Crst;
BOOL m_fInited;
BOOL m_fHostBreakable; // Lock can be broken by a host for deadlock detection
- ListLockEntry * m_pHead;
+ Entry_t * m_pHead;
public:
@@ -219,7 +302,7 @@ class ListLock
return m_fHostBreakable;
}
- void AddElement(ListLockEntry* pElement)
+ void AddElement(Entry_t* pElement)
{
WRAPPER_NO_CONTRACT;
pElement->m_pNext = m_pHead;
@@ -257,10 +340,39 @@ class ListLock
// Must own the lock before calling this or is ok if the debugger has
// all threads stopped
- ListLockEntry *Find(void *pData);
+ inline Entry_t *Find(ELEMENT data)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(this));
+#ifdef DEBUGGING_SUPPORTED
+ PRECONDITION(m_Crst.OwnedByCurrentThread() ||
+ CORDebuggerAttached()
+ // This condition should be true, but it is awkward to assert it because adding dbginterface.h creates lots of cycles in the includes
+ // It didn't seem valuable enough to refactor out a wrapper just to preserve it
+ /* && g_pDebugInterface->IsStopped() */);
+#else
+ PRECONDITION(m_Crst.OwnedByCurrentThread());
+#endif // DEBUGGING_SUPPORTED
+
+ }
+ CONTRACTL_END;
+
+ Entry_t *pSearch;
+
+ for (pSearch = m_pHead; pSearch != NULL; pSearch = pSearch->m_pNext)
+ {
+ if (pSearch->m_data == data)
+ return pSearch;
+ }
+
+ return NULL;
+ }
// Must own the lock before calling this!
- ListLockEntry* Pop(BOOL unloading = FALSE)
+ Entry_t* Pop(BOOL unloading = FALSE)
{
LIMITED_METHOD_CONTRACT;
#ifdef _DEBUG
@@ -269,13 +381,13 @@ class ListLock
#endif
if(m_pHead == NULL) return NULL;
- ListLockEntry* pEntry = m_pHead;
+ Entry_t* pEntry = m_pHead;
m_pHead = m_pHead->m_pNext;
return pEntry;
}
// Must own the lock before calling this!
- ListLockEntry* Peek()
+ Entry_t* Peek()
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(m_Crst.OwnedByCurrentThread());
@@ -283,12 +395,12 @@ class ListLock
}
// Must own the lock before calling this!
- BOOL Unlink(ListLockEntry *pItem)
+ BOOL Unlink(Entry_t *pItem)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(m_Crst.OwnedByCurrentThread());
- ListLockEntry *pSearch;
- ListLockEntry *pPrev;
+ Entry_t *pSearch;
+ Entry_t *pPrev;
pPrev = NULL;
@@ -320,21 +432,21 @@ class ListLock
}
#endif
- DEBUG_NOINLINE static void HolderEnter(ListLock *pThis)
+ DEBUG_NOINLINE static void HolderEnter(List_t *pThis)
{
WRAPPER_NO_CONTRACT;
ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
pThis->Enter();
}
- DEBUG_NOINLINE static void HolderLeave(ListLock *pThis)
+ DEBUG_NOINLINE static void HolderLeave(List_t *pThis)
{
WRAPPER_NO_CONTRACT;
ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
pThis->Leave();
}
- typedef Wrapper<ListLock*, ListLock::HolderEnter, ListLock::HolderLeave> LockHolder;
+ typedef Wrapper<List_t*, List_t::HolderEnter, List_t::HolderLeave> LockHolder;
};
class WaitingThreadListElement
@@ -344,6 +456,9 @@ public:
WaitingThreadListElement * m_pNext;
};
+typedef class ListLockBase<void*> ListLock;
+typedef class ListLockEntryBase<void*> ListLockEntry;
+
// Holds the lock of the ListLock
typedef ListLock::LockHolder ListLockHolder;
diff --git a/src/vm/listlock.inl b/src/vm/listlock.inl
deleted file mode 100644
index 17e383edd7..0000000000
--- a/src/vm/listlock.inl
+++ /dev/null
@@ -1,51 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-// ===========================================================================
-//
-
-//
-// File: ListLock.inl
-//
-// ===========================================================================
-// This file decribes the list lock and deadlock aware list lock functions
-// that are inlined but can't go in the header.
-// ===========================================================================
-#ifndef LISTLOCK_INL
-#define LISTLOCK_INL
-
-#include "listlock.h"
-#include "dbginterface.h"
-// Must own the lock before calling this or is ok if the debugger has
-// all threads stopped
-
-inline ListLockEntry *ListLock::Find(void *pData)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- PRECONDITION(CheckPointer(this));
-#ifdef DEBUGGING_SUPPORTED
- PRECONDITION(m_Crst.OwnedByCurrentThread() ||
- CORDebuggerAttached() && g_pDebugInterface->IsStopped());
-#else
- PRECONDITION(m_Crst.OwnedByCurrentThread());
-#endif // DEBUGGING_SUPPORTED
-
- }
- CONTRACTL_END;
-
- ListLockEntry *pSearch;
-
- for (pSearch = m_pHead; pSearch != NULL; pSearch = pSearch->m_pNext)
- {
- if (pSearch->m_pData == pData)
- return pSearch;
- }
-
- return NULL;
-}
-
-
-#endif // LISTLOCK_I
diff --git a/src/vm/loaderallocator.hpp b/src/vm/loaderallocator.hpp
index 72fa59857d..b057283136 100644
--- a/src/vm/loaderallocator.hpp
+++ b/src/vm/loaderallocator.hpp
@@ -62,7 +62,9 @@ public:
class StringLiteralMap;
class VirtualCallStubManager;
-class ListLockEntry;
+template <typename ELEMENT>
+class ListLockEntryBase;
+typedef ListLockEntryBase<void*> ListLockEntry;
class LoaderAllocator
{
diff --git a/src/vm/memberload.cpp b/src/vm/memberload.cpp
index aa5667dd21..6f0f2b6213 100644
--- a/src/vm/memberload.cpp
+++ b/src/vm/memberload.cpp
@@ -45,7 +45,6 @@
#include "virtualcallstub.h"
#include "eeconfig.h"
#include "contractimpl.h"
-#include "listlock.inl"
#include "generics.h"
#include "instmethhash.h"
#include "typestring.h"
diff --git a/src/vm/method.cpp b/src/vm/method.cpp
index 241a0ccee0..fc84298bc7 100644
--- a/src/vm/method.cpp
+++ b/src/vm/method.cpp
@@ -30,9 +30,6 @@
#include "interoputil.h"
#include "prettyprintsig.h"
#include "formattype.h"
-#ifdef FEATURE_INTERPRETER
-#include "interpreter.h"
-#endif
#ifdef FEATURE_PREJIT
#include "compile.h"
@@ -1176,16 +1173,6 @@ PCODE MethodDesc::GetNativeCode()
return pCode;
}
-#ifdef FEATURE_INTERPRETER
-#ifndef DACCESS_COMPILE // TODO: Need a solution that will work under DACCESS
- PCODE pEntryPoint = GetMethodEntryPoint();
- if (Interpreter::InterpretationStubToMethodInfo(pEntryPoint) == this)
- {
- return pEntryPoint;
- }
-#endif
-#endif
-
if (!HasStableEntryPoint() || HasPrecode())
return NULL;
@@ -2435,32 +2422,6 @@ BOOL MethodDesc::IsPointingToPrestub()
return GetPrecode()->IsPointingToPrestub();
}
-#ifdef FEATURE_INTERPRETER
-//*******************************************************************************
-BOOL MethodDesc::IsReallyPointingToPrestub()
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- SO_TOLERANT;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- if (!HasPrecode())
- {
- PCODE pCode = GetMethodEntryPoint();
- return HasTemporaryEntryPoint() && pCode == GetTemporaryEntryPoint();
- }
-
- if (!IsRestored())
- return TRUE;
-
- return GetPrecode()->IsPointingToPrestub();
-}
-#endif
-
//*******************************************************************************
void MethodDesc::Reset()
{
@@ -4180,7 +4141,7 @@ void MethodDesc::CheckRestore(ClassLoadLevel level)
// for details on the race.
//
{
- ReJitPublishMethodHolder publishWorker(this, GetNativeCode());
+ PublishMethodHolder publishWorker(this, GetNativeCode());
pIMD->m_wFlags2 = pIMD->m_wFlags2 & ~InstantiatedMethodDesc::Unrestored;
}
@@ -4963,11 +4924,7 @@ Precode* MethodDesc::GetOrCreatePrecode()
}
//*******************************************************************************
-BOOL MethodDesc::SetNativeCodeInterlocked(PCODE addr, PCODE pExpected /*=NULL*/
-#ifdef FEATURE_INTERPRETER
- , BOOL fStable
-#endif
- )
+BOOL MethodDesc::SetNativeCodeInterlocked(PCODE addr, PCODE pExpected /*=NULL*/)
{
CONTRACTL {
THROWS;
@@ -4993,41 +4950,12 @@ BOOL MethodDesc::SetNativeCodeInterlocked(PCODE addr, PCODE pExpected /*=NULL*/
value.SetValueMaybeNull(pSlot, addr | (*dac_cast<PTR_TADDR>(pSlot) & FIXUP_LIST_MASK));
expected.SetValueMaybeNull(pSlot, pExpected | (*dac_cast<PTR_TADDR>(pSlot) & FIXUP_LIST_MASK));
-#ifdef FEATURE_INTERPRETER
- BOOL fRet = FALSE;
-
- fRet = FastInterlockCompareExchangePointer(
- EnsureWritablePages(reinterpret_cast<TADDR*>(pSlot)),
- (TADDR&)value,
- (TADDR&)expected) == (TADDR&)expected;
-
- if (!fRet)
- {
- // Can always replace NULL.
- expected.SetValueMaybeNull(pSlot, (*dac_cast<PTR_TADDR>(pSlot) & FIXUP_LIST_MASK));
- fRet = FastInterlockCompareExchangePointer(
- EnsureWritablePages(reinterpret_cast<TADDR*>(pSlot)),
- (TADDR&)value,
- (TADDR&)expected) == (TADDR&)expected;
- }
- return fRet;
-#else // FEATURE_INTERPRETER
return FastInterlockCompareExchangePointer(EnsureWritablePages(reinterpret_cast<TADDR*>(pSlot)),
(TADDR&)value, (TADDR&)expected) == (TADDR&)expected;
-#endif // FEATURE_INTERPRETER
}
-#ifdef FEATURE_INTERPRETER
- PCODE pFound = FastInterlockCompareExchangePointer(GetAddrOfSlot(), addr, pExpected);
- if (fStable)
- {
- InterlockedUpdateFlags2(enum_flag2_HasStableEntryPoint, TRUE);
- }
- return (pFound == pExpected);
-#else
_ASSERTE(pExpected == NULL);
return SetStableEntryPointInterlocked(addr);
-#endif
}
//*******************************************************************************
@@ -5051,26 +4979,6 @@ BOOL MethodDesc::SetStableEntryPointInterlocked(PCODE addr)
return fResult;
}
-#ifdef FEATURE_INTERPRETER
-BOOL MethodDesc::SetEntryPointInterlocked(PCODE addr)
-{
- CONTRACTL {
- NOTHROW;
- GC_NOTRIGGER;
- } CONTRACTL_END;
-
- _ASSERTE(!HasPrecode());
-
- PCODE pExpected = GetTemporaryEntryPoint();
- PTR_PCODE pSlot = GetAddrOfSlot();
-
- BOOL fResult = FastInterlockCompareExchangePointer(pSlot, addr, pExpected) == pExpected;
-
- return fResult;
-}
-
-#endif // FEATURE_INTERPRETER
-
//*******************************************************************************
void NDirectMethodDesc::InterlockedSetNDirectFlags(WORD wFlags)
{
diff --git a/src/vm/method.hpp b/src/vm/method.hpp
index 36a23716b4..99c2384a2d 100644
--- a/src/vm/method.hpp
+++ b/src/vm/method.hpp
@@ -25,6 +25,7 @@
#include <stddef.h>
#include "eeconfig.h"
#include "precode.h"
+#include "codeversion.h"
#ifndef FEATURE_PREJIT
#include "fixuppointer.h"
@@ -42,6 +43,8 @@ class Dictionary;
class GCCoverageInfo;
class DynamicMethodDesc;
class ReJitManager;
+class CodeVersionManager;
+class PrepareCodeConfig;
typedef DPTR(FCallMethodDesc) PTR_FCallMethodDesc;
typedef DPTR(ArrayMethodDesc) PTR_ArrayMethodDesc;
@@ -268,10 +271,6 @@ public:
BOOL SetStableEntryPointInterlocked(PCODE addr);
-#ifdef FEATURE_INTERPRETER
- BOOL SetEntryPointInterlocked(PCODE addr);
-#endif // FEATURE_INTERPRETER
-
BOOL HasTemporaryEntryPoint();
PCODE GetTemporaryEntryPoint();
@@ -507,7 +506,12 @@ public:
BaseDomain *GetDomain();
- ReJitManager * GetReJitManager();
+#ifdef FEATURE_CODE_VERSIONING
+ CodeVersionManager* GetCodeVersionManager();
+#endif
+#ifdef FEATURE_TIERED_COMPILATION
+ CallCounter* GetCallCounter();
+#endif
PTR_LoaderAllocator GetLoaderAllocator();
@@ -1278,12 +1282,73 @@ public:
void SetChunkIndex(MethodDescChunk *pChunk);
BOOL IsPointingToPrestub();
-#ifdef FEATURE_INTERPRETER
- BOOL IsReallyPointingToPrestub();
-#endif // FEATURE_INTERPRETER
public:
+ // TRUE iff it is possible to change the code this method will run using
+ // the CodeVersionManager.
+ // Note: EnC currently returns FALSE here because it uses its own separate
+ // scheme to manage versionability. We will likely want to converge them
+ // at some point.
+ BOOL IsVersionable()
+ {
+#ifndef FEATURE_CODE_VERSIONING
+ return FALSE;
+#else
+ return IsVersionableWithPrecode() || IsVersionableWithJumpStamp();
+#endif
+ }
+
+ // If true, these methods version using the CodeVersionManager and
+ // switch between different code versions by updating the target of the precode.
+ // Note: EnC returns FALSE - even though it uses precode updates it does not
+ // use the CodeVersionManager right now
+ BOOL IsVersionableWithPrecode()
+ {
+#ifdef FEATURE_CODE_VERSIONING
+ return
+ // policy: which things do we want to version with a precode if possible
+ IsEligibleForTieredCompilation() &&
+
+ // functional requirements:
+ !IsZapped() && // NGEN directly invokes the pre-generated native code.
+ // without necessarily going through the prestub or
+ // precode
+ HasNativeCodeSlot(); // the stable entry point will need to point at our
+ // precode and not directly contain the native code.
+#else
+ return FALSE;
+#endif
+ }
+
+ // If true, these methods version using the CodeVersionManager and switch between
+ // different code versions by overwriting the first bytes of the method's initial
+ // native code with a jmp instruction.
+ BOOL IsVersionableWithJumpStamp()
+ {
+#if defined(FEATURE_CODE_VERSIONING) && defined(FEATURE_JUMPSTAMP)
+ return
+ // for native image code this is policy, but for jitted code it is a functional requirement
+ // to ensure the prolog is sufficiently large
+ ReJitManager::IsReJITEnabled() &&
+
+ // functional requirement - the runtime doesn't expect both options to be possible
+ !IsVersionableWithPrecode() &&
+
+ // functional requirement - we must be able to evacuate the prolog and the prolog must be big
+ // enough, both of which are only designed to work on jitted code
+ (IsIL() || IsNoMetadata()) &&
+ !IsUnboxingStub() &&
+ !IsInstantiatingStub() &&
+
+ // functional requirement - code version manager can't handle what would happen if the code
+ // was collected
+ !GetLoaderAllocator()->IsCollectible();
+#else
+ return FALSE;
+#endif
+ }
+
#ifdef FEATURE_TIERED_COMPILATION
// Is this method allowed to be recompiled and the entrypoint redirected so that we
// can optimize its performance? Eligibility is invariant for the lifetime of a method.
@@ -1293,20 +1358,31 @@ public:
// This policy will need to change some more before tiered compilation feature
// can be properly supported across a broad range of scenarios. For instance it
- // wouldn't interact correctly debugging or profiling at the moment because we
- // enable it too aggresively and it conflicts with the operations of those features.
+ // wouldn't interact correctly with debugging at the moment because we enable
+ // it too aggressively and it conflicts with the operations of those features.
- //Keep in-sync with MethodTableBuilder::NeedsNativeCodeSlot(bmtMDMethod * pMDMethod)
- //In the future we might want mutable vtable slots too, but that would require
- //more work around the runtime to prevent those mutable pointers from leaking
+ // Keep in-sync with MethodTableBuilder::NeedsNativeCodeSlot(bmtMDMethod * pMDMethod)
+ // to ensure native slots are available where needed.
return g_pConfig->TieredCompilation() &&
- !GetModule()->HasNativeOrReadyToRunImage() &&
+ !IsZapped() &&
!IsEnCMethod() &&
- HasNativeCodeSlot();
+ HasNativeCodeSlot() &&
+ !IsUnboxingStub() &&
+ !IsInstantiatingStub();
+
+ // We should add an exclusion for modules with debuggable code gen flags
}
#endif
+ // Returns a code version that represents the first (default)
+ // code body that this method would have.
+ NativeCodeVersion GetInitialCodeVersion()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return NativeCodeVersion(dac_cast<PTR_MethodDesc>(this));
+ }
+
// Does this method force the NativeCodeSlot to stay fixed after it
// is first initialized to native code? Consumers of the native code
// pointer need to be very careful about if and when they cache it
@@ -1369,11 +1445,7 @@ public:
return GetNativeCode() != NULL;
}
-#ifdef FEATURE_INTERPRETER
- BOOL SetNativeCodeInterlocked(PCODE addr, PCODE pExpected, BOOL fStable);
-#else // FEATURE_INTERPRETER
BOOL SetNativeCodeInterlocked(PCODE addr, PCODE pExpected = NULL);
-#endif // FEATURE_INTERPRETER
TADDR GetAddrOfNativeCodeSlot();
@@ -1688,8 +1760,6 @@ public:
PCODE DoPrestub(MethodTable *pDispatchingMT);
- PCODE MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, CORJIT_FLAGS flags);
-
VOID GetMethodInfo(SString &namespaceOrClassName, SString &methodName, SString &methodSignature);
VOID GetMethodInfoWithNewSig(SString &namespaceOrClassName, SString &methodName, SString &methodSignature);
VOID GetMethodInfoNoSig(SString &namespaceOrClassName, SString &methodName);
@@ -1952,8 +2022,72 @@ public:
REFLECTMETHODREF GetStubMethodInfo();
PrecodeType GetPrecodeType();
+
+
+ // ---------------------------------------------------------------------------------
+ // IL based Code generation pipeline
+ // ---------------------------------------------------------------------------------
+
+#ifndef DACCESS_COMPILE
+public:
+ PCODE PrepareInitialCode();
+ PCODE PrepareCode(NativeCodeVersion codeVersion);
+ PCODE PrepareCode(PrepareCodeConfig* pConfig);
+
+private:
+ PCODE PrepareILBasedCode(PrepareCodeConfig* pConfig);
+ PCODE GetPrecompiledCode(PrepareCodeConfig* pConfig);
+ PCODE GetPrecompiledNgenCode();
+ PCODE GetPrecompiledR2RCode();
+ PCODE GetMulticoreJitCode();
+ COR_ILMETHOD_DECODER* GetAndVerifyILHeader(PrepareCodeConfig* pConfig, COR_ILMETHOD_DECODER* pIlDecoderMemory);
+ COR_ILMETHOD_DECODER* GetAndVerifyMetadataILHeader(PrepareCodeConfig* pConfig, COR_ILMETHOD_DECODER* pIlDecoderMemory);
+ COR_ILMETHOD_DECODER* GetAndVerifyNoMetadataILHeader();
+ PCODE JitCompileCode(PrepareCodeConfig* pConfig);
+ PCODE JitCompileCodeLockedEventWrapper(PrepareCodeConfig* pConfig, JitListLockEntry* pEntry);
+ PCODE JitCompileCodeLocked(PrepareCodeConfig* pConfig, JitListLockEntry* pLockEntry, ULONG* pSizeOfCode, CORJIT_FLAGS* pFlags);
+#endif // DACCESS_COMPILE
};
+#ifndef DACCESS_COMPILE
+class PrepareCodeConfig
+{
+public:
+ PrepareCodeConfig();
+ PrepareCodeConfig(NativeCodeVersion nativeCodeVersion, BOOL needsMulticoreJitNotification, BOOL mayUsePrecompiledCode);
+ MethodDesc* GetMethodDesc();
+ NativeCodeVersion GetCodeVersion();
+ BOOL NeedsMulticoreJitNotification();
+ BOOL MayUsePrecompiledCode();
+ virtual PCODE IsJitCancellationRequested();
+ virtual BOOL SetNativeCode(PCODE pCode, PCODE * ppAlternateCodeToUse);
+ virtual COR_ILMETHOD* GetILHeader();
+ virtual CORJIT_FLAGS GetJitCompilationFlags();
+
+protected:
+ MethodDesc* m_pMethodDesc;
+ NativeCodeVersion m_nativeCodeVersion;
+ BOOL m_needsMulticoreJitNotification;
+ BOOL m_mayUsePrecompiledCode;
+};
+
+#ifdef FEATURE_CODE_VERSIONING
+class VersionedPrepareCodeConfig : public PrepareCodeConfig
+{
+public:
+ VersionedPrepareCodeConfig();
+ VersionedPrepareCodeConfig(NativeCodeVersion codeVersion);
+ HRESULT FinishConfiguration();
+ virtual PCODE IsJitCancellationRequested();
+ virtual BOOL SetNativeCode(PCODE pCode, PCODE * ppAlternateCodeToUse);
+ virtual COR_ILMETHOD* GetILHeader();
+ virtual CORJIT_FLAGS GetJitCompilationFlags();
+private:
+ ILCodeVersion m_ilCodeVersion;
+};
+#endif // FEATURE_CODE_VERSIONING
+#endif // DACCESS_COMPILE
+
/******************************************************************/
// A code:MethodDescChunk is a container that holds one or more code:MethodDesc. Logically it is just
diff --git a/src/vm/method.inl b/src/vm/method.inl
index cdd137b84b..dd14900c12 100644
--- a/src/vm/method.inl
+++ b/src/vm/method.inl
@@ -203,11 +203,21 @@ inline BOOL HasTypeEquivalentStructParameters()
}
#endif // FEATURE_TYPEEQUIVALENCE
-inline ReJitManager * MethodDesc::GetReJitManager()
+#ifdef FEATURE_CODE_VERSIONING
+inline CodeVersionManager * MethodDesc::GetCodeVersionManager()
{
LIMITED_METHOD_CONTRACT;
- return GetModule()->GetReJitManager();
+ return GetModule()->GetCodeVersionManager();
}
+#endif
+
+#ifdef FEATURE_TIERED_COMPILATION
+inline CallCounter * MethodDesc::GetCallCounter()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetModule()->GetCallCounter();
+}
+#endif
#endif // _METHOD_INL_
diff --git a/src/vm/methodtable.cpp b/src/vm/methodtable.cpp
index 66a6a2e29a..6e6987ebf2 100644
--- a/src/vm/methodtable.cpp
+++ b/src/vm/methodtable.cpp
@@ -67,7 +67,6 @@
#include "typeequivalencehash.hpp"
#endif
-#include "listlock.inl"
#include "generics.h"
#include "genericdict.h"
#include "typestring.h"
@@ -593,7 +592,7 @@ void MethodTable::SetIsRestored()
// for details on the race.
//
{
- ReJitPublishMethodTableHolder(this);
+ PublishMethodTableHolder(this);
FastInterlockAnd(EnsureWritablePages(&(GetWriteableDataForWrite()->m_dwFlags)), ~MethodTableWriteableData::enum_flag_Unrestored);
}
#ifndef DACCESS_COMPILE
@@ -9371,11 +9370,8 @@ void MethodTable::SetSlot(UINT32 slotNumber, PCODE slotCode)
if (fSharedVtableChunk)
{
MethodDesc* pMD = GetMethodDescForSlotAddress(slotCode);
-#ifndef FEATURE_INTERPRETER
- // TBD: Make this take a "stable" debug arg, determining whether to make these assertions.
_ASSERTE(pMD->HasStableEntryPoint());
_ASSERTE(pMD->GetStableEntryPoint() == slotCode);
-#endif // FEATURE_INTERPRETER
}
}
#endif
diff --git a/src/vm/multicorejit.cpp b/src/vm/multicorejit.cpp
index 4ad5447950..7e756864ed 100644
--- a/src/vm/multicorejit.cpp
+++ b/src/vm/multicorejit.cpp
@@ -17,7 +17,6 @@
#include "dllimport.h"
#include "comdelegate.h"
#include "dbginterface.h"
-#include "listlock.inl"
#include "stubgen.h"
#include "eventtrace.h"
#include "array.h"
@@ -993,7 +992,7 @@ PCODE MulticoreJitRecorder::RequestMethodCode(MethodDesc * pMethod, MulticoreJit
PCODE pCode = NULL;
- pCode = pManager->GetMulticoreJitCodeStorage().QueryMethodCode(pMethod);
+ pCode = pManager->GetMulticoreJitCodeStorage().QueryMethodCode(pMethod, TRUE);
if ((pCode != NULL) && pManager->IsRecorderActive()) // recorder may be off when player is on (e.g. for Appx)
{
diff --git a/src/vm/multicorejit.h b/src/vm/multicorejit.h
index b7a0951ee1..047ba01a5f 100644
--- a/src/vm/multicorejit.h
+++ b/src/vm/multicorejit.h
@@ -103,7 +103,7 @@ public:
void StoreMethodCode(MethodDesc * pMethod, PCODE pCode);
- PCODE QueryMethodCode(MethodDesc * pMethod);
+ PCODE QueryMethodCode(MethodDesc * pMethod, BOOL shouldRemoveCode);
inline unsigned GetRemainingMethodCount() const
{
diff --git a/src/vm/multicorejitplayer.cpp b/src/vm/multicorejitplayer.cpp
index d7c2cec8a1..8a9c8f8397 100644
--- a/src/vm/multicorejitplayer.cpp
+++ b/src/vm/multicorejitplayer.cpp
@@ -17,7 +17,6 @@
#include "dllimport.h"
#include "comdelegate.h"
#include "dbginterface.h"
-#include "listlock.inl"
#include "stubgen.h"
#include "eventtrace.h"
#include "array.h"
@@ -103,7 +102,7 @@ void MulticoreJitCodeStorage::StoreMethodCode(MethodDesc * pMD, PCODE pCode)
// Query from MakeJitWorker: Lookup stored JITted methods
-PCODE MulticoreJitCodeStorage::QueryMethodCode(MethodDesc * pMethod)
+PCODE MulticoreJitCodeStorage::QueryMethodCode(MethodDesc * pMethod, BOOL shouldRemoveCode)
{
STANDARD_VM_CONTRACT;
@@ -113,7 +112,7 @@ PCODE MulticoreJitCodeStorage::QueryMethodCode(MethodDesc * pMethod)
{
CrstHolder holder(& m_crstCodeMap);
- if (m_nativeCodeMap.Lookup(pMethod, & code))
+ if (m_nativeCodeMap.Lookup(pMethod, & code) && shouldRemoveCode)
{
m_nReturned ++;
@@ -507,6 +506,23 @@ HRESULT MulticoreJitProfilePlayer::HandleModuleRecord(const ModuleRecord * pMod)
}
+#ifndef DACCESS_COMPILE
+class MulticoreJitPrepareCodeConfig : public PrepareCodeConfig
+{
+public:
+ MulticoreJitPrepareCodeConfig(MethodDesc* pMethod) :
+ PrepareCodeConfig(NativeCodeVersion(pMethod), FALSE, FALSE)
+ {}
+
+ virtual BOOL SetNativeCode(PCODE pCode, PCODE * ppAlternateCodeToUse)
+ {
+ MulticoreJitManager & mcJitManager = GetAppDomain()->GetMulticoreJitManager();
+ mcJitManager.GetMulticoreJitCodeStorage().StoreMethodCode(GetMethodDesc(), pCode);
+ return TRUE;
+ }
+};
+#endif
+
// Call JIT to compile a method
bool MulticoreJitProfilePlayer::CompileMethodDesc(Module * pModule, MethodDesc * pMD)
@@ -529,8 +545,9 @@ bool MulticoreJitProfilePlayer::CompileMethodDesc(Module * pModule, MethodDesc *
// Reset the flag to allow managed code to be called in multicore JIT background thread from this routine
ThreadStateNCStackHolder holder(-1, Thread::TSNC_CallingManagedCodeDisabled);
- // MakeJitWorker calls back to MulticoreJitCodeStorage::StoreMethodCode under MethodDesc lock
- pMD->MakeJitWorker(& header, CORJIT_FLAGS(CORJIT_FLAGS::CORJIT_FLAG_MCJIT_BACKGROUND));
+ // PrepareCode calls back to MulticoreJitCodeStorage::StoreMethodCode under MethodDesc lock
+ MulticoreJitPrepareCodeConfig config(pMD);
+ pMD->PrepareCode(&config);
return true;
}
diff --git a/src/vm/prestub.cpp b/src/vm/prestub.cpp
index 0c01efd64e..d3639ab21b 100644
--- a/src/vm/prestub.cpp
+++ b/src/vm/prestub.cpp
@@ -18,7 +18,6 @@
#include "dllimport.h"
#include "comdelegate.h"
#include "dbginterface.h"
-#include "listlock.inl"
#include "stubgen.h"
#include "eventtrace.h"
#include "array.h"
@@ -79,17 +78,11 @@ PCODE MethodDesc::DoBackpatch(MethodTable * pMT, MethodTable *pDispatchingMT, BO
{
STANDARD_VM_CHECK;
PRECONDITION(!ContainsGenericVariables());
-#ifndef FEATURE_INTERPRETER
PRECONDITION(HasStableEntryPoint());
-#endif // FEATURE_INTERPRETER
PRECONDITION(pMT == GetMethodTable());
}
CONTRACTL_END;
-#ifdef FEATURE_INTERPRETER
- PCODE pTarget = GetMethodEntryPoint();
-#else
PCODE pTarget = GetStableEntryPoint();
-#endif
if (!HasTemporaryEntryPoint())
return pTarget;
@@ -257,93 +250,358 @@ void DACNotifyCompilationFinished(MethodDesc *methodDesc)
#endif
// </TODO>
+PCODE MethodDesc::PrepareInitialCode()
+{
+ STANDARD_VM_CONTRACT;
+ PrepareCodeConfig config(NativeCodeVersion(this), TRUE, TRUE);
+ return PrepareCode(&config);
+}
-// ********************************************************************
-// README!!
-// ********************************************************************
+PCODE MethodDesc::PrepareCode(NativeCodeVersion codeVersion)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef FEATURE_CODE_VERSIONING
+ if (codeVersion.IsDefaultVersion())
+ {
+#endif
+ // fast path
+ PrepareCodeConfig config(codeVersion, TRUE, TRUE);
+ return PrepareCode(&config);
+#ifdef FEATURE_CODE_VERSIONING
+ }
+ else
+ {
+ // a bit slower path (+1 usec?)
+ VersionedPrepareCodeConfig config;
+ {
+ CodeVersionManager::TableLockHolder lock(GetCodeVersionManager());
+ config = VersionedPrepareCodeConfig(codeVersion);
+ }
+ config.FinishConfiguration();
+ return PrepareCode(&config);
+ }
+#endif
+
+}
-// MakeJitWorker is the thread safe way to invoke the JIT compiler
-// If multiple threads get in here for the same pMD, ALL of them
-// MUST return the SAME value for pstub.
-//
-// This function creates a DeadlockAware list of methods being jitted
-// which prevents us from trying to JIT the same method more that once.
+PCODE MethodDesc::PrepareCode(PrepareCodeConfig* pConfig)
+{
+ STANDARD_VM_CONTRACT;
-PCODE MethodDesc::MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, CORJIT_FLAGS flags)
+ // If other kinds of code need multi-versioning we could add more cases here,
+ // but for now generation of all other code/stubs occurs in other code paths
+ _ASSERTE(IsIL() || IsNoMetadata());
+ return PrepareILBasedCode(pConfig);
+}
+
+PCODE MethodDesc::PrepareILBasedCode(PrepareCodeConfig* pConfig)
{
STANDARD_VM_CONTRACT;
+ PCODE pCode = NULL;
- BOOL fIsILStub = IsILStub(); // @TODO: understand the need for this special case
+ if (pConfig->MayUsePrecompiledCode())
+ {
+ pCode = GetPrecompiledCode(pConfig);
+ }
+ if (pCode == NULL)
+ {
+ LOG((LF_CLASSLOADER, LL_INFO1000000,
+ " In PrepareILBasedCode, calling JitCompileCode\n"));
+ // Mark the code as hot in case the method ends up in the native image
+ g_IBCLogger.LogMethodCodeAccess(this);
+ pCode = JitCompileCode(pConfig);
+ }
- LOG((LF_JIT, LL_INFO1000000,
- "MakeJitWorker(" FMT_ADDR ", %s) for %s:%s\n",
- DBG_ADDR(this),
- fIsILStub ? " TRUE" : "FALSE",
- GetMethodTable()->GetDebugClassName(),
- m_pszDebugMethodName));
+ return pCode;
+}
-#if defined(FEATURE_JIT_PITCHING)
- CheckStacksAndPitch();
+PCODE MethodDesc::GetPrecompiledCode(PrepareCodeConfig* pConfig)
+{
+ STANDARD_VM_CONTRACT;
+ PCODE pCode = NULL;
+
+#ifdef FEATURE_PREJIT
+ pCode = GetPrecompiledNgenCode();
#endif
+#ifdef FEATURE_READYTORUN
+ if (pCode == NULL)
+ {
+ pCode = GetPrecompiledR2RCode();
+ if (pCode != NULL)
+ {
+ pConfig->SetNativeCode(pCode, &pCode);
+ }
+ }
+#endif // FEATURE_READYTORUN
+
+ return pCode;
+}
+
+PCODE MethodDesc::GetPrecompiledNgenCode()
+{
+ STANDARD_VM_CONTRACT;
PCODE pCode = NULL;
- ULONG sizeOfCode = 0;
-#if defined(FEATURE_INTERPRETER) || defined(FEATURE_TIERED_COMPILATION)
- BOOL fStable = TRUE; // True iff the new code address (to be stored in pCode), is a stable entry point.
-#endif
-#ifdef FEATURE_INTERPRETER
- PCODE pPreviousInterpStub = NULL;
- BOOL fInterpreted = FALSE;
+
+#ifdef FEATURE_PREJIT
+ pCode = GetPreImplementedCode();
+
+#ifdef PROFILING_SUPPORTED
+
+ // The pre-existing cache search callbacks aren't implemented as you might expect.
+ // Instead of sending a cache search started for all methods, we only send the notification
+ // when we already know a pre-compiled version of the method exists. In the NGEN case we also
+ // don't send callbacks unless the method triggers the prestub which excludes a lot of methods.
+ // From the profiler's perspective this technique is only reliable/predictable when using profiler
+ // instrumented NGEN images (that virtually no profilers use). As-is the callback only
+ // gives an opportunity for the profiler to say whether or not it wants to use the ngen'ed
+ // code.
+ //
+ // Despite those oddities I am leaving this behavior as-is during refactoring because trying to
+ // improve it probably offers little value vs. the potential for compat issues and creating more
+ // complexity reasoning about how the API behavior changed across runtime releases.
+ if (pCode != NULL)
+ {
+ BOOL fShouldSearchCache = TRUE;
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackCacheSearches());
+ g_profControlBlock.pProfInterface->JITCachedFunctionSearchStarted((FunctionID)this, &fShouldSearchCache);
+ END_PIN_PROFILER();
+ }
+
+ if (!fShouldSearchCache)
+ {
+ SetNativeCodeInterlocked(NULL, pCode);
+ _ASSERTE(!IsPreImplemented());
+ pCode = NULL;
+ }
+ }
+#endif // PROFILING_SUPPORTED
+
+ if (pCode != NULL)
+ {
+ LOG((LF_ZAP, LL_INFO10000,
+ "ZAP: Using code" FMT_ADDR "for %s.%s sig=\"%s\" (token %x).\n",
+ DBG_ADDR(pCode),
+ m_pszDebugClassName,
+ m_pszDebugMethodName,
+ m_pszDebugMethodSignature,
+ GetMemberDef()));
+
+ TADDR pFixupList = GetFixupList();
+ if (pFixupList != NULL)
+ {
+ Module *pZapModule = GetZapModule();
+ _ASSERTE(pZapModule != NULL);
+ if (!pZapModule->FixupDelayList(pFixupList))
+ {
+ _ASSERTE(!"FixupDelayList failed");
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ }
+
+#ifdef HAVE_GCCOVER
+ if (GCStress<cfg_instr_ngen>::IsEnabled())
+ SetupGcCoverage(this, (BYTE*)pCode);
+#endif // HAVE_GCCOVER
+
+#ifdef PROFILING_SUPPORTED
+ /*
+ * This notifies the profiler that a search to find a
+ * cached jitted function has been made.
+ */
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackCacheSearches());
+ g_profControlBlock.pProfInterface->
+ JITCachedFunctionSearchFinished((FunctionID)this, COR_PRF_CACHED_FUNCTION_FOUND);
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ }
+#endif // FEATURE_PREJIT
+
+ return pCode;
+}
+
+
+PCODE MethodDesc::GetPrecompiledR2RCode()
+{
+ STANDARD_VM_CONTRACT;
+
+ PCODE pCode = NULL;
+#ifdef FEATURE_READYTORUN
+ Module * pModule = GetModule();
+ if (pModule->IsReadyToRun())
+ {
+ pCode = pModule->GetReadyToRunInfo()->GetEntryPoint(this);
+ }
#endif
+ return pCode;
+}
+PCODE MethodDesc::GetMulticoreJitCode()
+{
+ STANDARD_VM_CONTRACT;
+
+ PCODE pCode = NULL;
#ifdef FEATURE_MULTICOREJIT
+ // Quick check before calling expensive out of line function on this method's domain has code JITted by background thread
MulticoreJitManager & mcJitManager = GetAppDomain()->GetMulticoreJitManager();
-
- bool fBackgroundThread = flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_MCJIT_BACKGROUND);
+ if (mcJitManager.GetMulticoreJitCodeStorage().GetRemainingMethodCount() > 0)
+ {
+ if (MulticoreJitManager::IsMethodSupported(this))
+ {
+ pCode = mcJitManager.RequestMethodCode(this); // Query multi-core JIT manager for compiled code
+ }
+ }
#endif
+ return pCode;
+}
- // If this is the first stage of a tiered compilation progression, use tier0, otherwise
- // use default compilation options
-#ifdef FEATURE_TIERED_COMPILATION
- if (!IsEligibleForTieredCompilation())
+COR_ILMETHOD_DECODER* MethodDesc::GetAndVerifyMetadataILHeader(PrepareCodeConfig* pConfig, COR_ILMETHOD_DECODER* pDecoderMemory)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(!IsNoMetadata());
+
+ COR_ILMETHOD_DECODER* pHeader = NULL;
+ COR_ILMETHOD* ilHeader = pConfig->GetILHeader();
+ if (ilHeader == NULL)
+ {
+#ifdef FEATURE_COMINTEROP
+ // Abstract methods can be called through WinRT derivation if the deriving type
+ // is not implemented in managed code, and calls through the CCW to the abstract
+ // method. Throw a sensible exception in that case.
+ if (GetMethodTable()->IsExportedToWinRT() && IsAbstract())
+ {
+ COMPlusThrowHR(E_NOTIMPL);
+ }
+#endif // FEATURE_COMINTEROP
+
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL);
+ }
+
+ COR_ILMETHOD_DECODER::DecoderStatus status = COR_ILMETHOD_DECODER::FORMAT_ERROR;
{
- fStable = TRUE;
+ // Decoder ctor can AV on a malformed method header
+ AVInRuntimeImplOkayHolder AVOkay;
+ pHeader = new (pDecoderMemory) COR_ILMETHOD_DECODER(ilHeader, GetMDImport(), &status);
+ }
+
+ if (status == COR_ILMETHOD_DECODER::VERIFICATION_ERROR &&
+ Security::CanSkipVerification(GetModule()->GetDomainAssembly()))
+ {
+ status = COR_ILMETHOD_DECODER::SUCCESS;
+ }
+
+ if (status != COR_ILMETHOD_DECODER::SUCCESS)
+ {
+ if (status == COR_ILMETHOD_DECODER::VERIFICATION_ERROR)
+ {
+ // Throw a verification HR
+ COMPlusThrowHR(COR_E_VERIFICATION);
+ }
+ else
+ {
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL);
+ }
+ }
+
+#ifdef _VER_EE_VERIFICATION_ENABLED
+ static ConfigDWORD peVerify;
+
+ if (peVerify.val(CLRConfig::EXTERNAL_PEVerify))
+ m_pMethod->Verify(pHeader, TRUE, FALSE); // Throws a VerifierException if verification fails
+#endif // _VER_EE_VERIFICATION_ENABLED
+
+ return pHeader;
+}
+
+COR_ILMETHOD_DECODER* MethodDesc::GetAndVerifyNoMetadataILHeader()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (IsILStub())
+ {
+ ILStubResolver* pResolver = AsDynamicMethodDesc()->GetILStubResolver();
+ return pResolver->GetILHeader();
}
else
{
- fStable = FALSE;
- flags.Add(CORJIT_FLAGS(CORJIT_FLAGS::CORJIT_FLAG_TIER0));
+ return NULL;
}
+
+ // NoMetadata currently doesn't verify the IL. I'm not sure if that was
+ // a deliberate decision in the past or not, but I've left the behavior
+ // as-is during refactoring.
+}
+
+COR_ILMETHOD_DECODER* MethodDesc::GetAndVerifyILHeader(PrepareCodeConfig* pConfig, COR_ILMETHOD_DECODER* pIlDecoderMemory)
+{
+ STANDARD_VM_CONTRACT;
+ _ASSERTE(IsIL() || IsNoMetadata());
+
+ if (IsNoMetadata())
+ {
+ // The NoMetadata version already has a decoder to use, it doesn't need the stack allocated one
+ return GetAndVerifyNoMetadataILHeader();
+ }
+ else
+ {
+ return GetAndVerifyMetadataILHeader(pConfig, pIlDecoderMemory);
+ }
+}
+
+// ********************************************************************
+// README!!
+// ********************************************************************
+
+// JitCompileCode is the thread safe way to invoke the JIT compiler
+// If multiple threads get in here for the same config, ALL of them
+// MUST return the SAME value for pcode.
+//
+// This function creates a DeadlockAware list of methods being jitted
+// which prevents us from trying to JIT the same method more than once.
+
+PCODE MethodDesc::JitCompileCode(PrepareCodeConfig* pConfig)
+{
+ STANDARD_VM_CONTRACT;
+
+ LOG((LF_JIT, LL_INFO1000000,
+ "JitCompileCode(" FMT_ADDR ", %s) for %s:%s\n",
+ DBG_ADDR(this),
+ IsILStub() ? " TRUE" : "FALSE",
+ GetMethodTable()->GetDebugClassName(),
+ m_pszDebugMethodName));
+
+#if defined(FEATURE_JIT_PITCHING)
+ CheckStacksAndPitch();
#endif
+ PCODE pCode = NULL;
{
// Enter the global lock which protects the list of all functions being JITd
- ListLockHolder pJitLock (GetDomain()->GetJitLock());
+ JitListLock::LockHolder pJitLock(GetDomain()->GetJitLock());
// It is possible that another thread stepped in before we entered the global lock for the first time.
- pCode = GetNativeCode();
- if (pCode != NULL)
+ if ((pCode = pConfig->IsJitCancellationRequested()))
{
-#ifdef FEATURE_INTERPRETER
- if (Interpreter::InterpretationStubToMethodInfo(pCode) == this)
- {
- pPreviousInterpStub = pCode;
- }
- else
-#endif // FEATURE_INTERPRETER
- goto Done;
+ return pCode;
}
const char *description = "jit lock";
INDEBUG(description = m_pszDebugMethodName;)
- ListLockEntryHolder pEntry(ListLockEntry::Find(pJitLock, this, description));
+ ReleaseHolder<JitListLockEntry> pEntry(JitListLockEntry::Find(
+ pJitLock, pConfig->GetCodeVersion(), description));
// We have an entry now, we can release the global lock
pJitLock.Release();
// Take the entry lock
{
- ListLockEntryLockHolder pEntryLock(pEntry, FALSE);
+ JitListLockEntry::LockHolder pEntryLock(pEntry, FALSE);
if (pEntryLock.DeadlockAwareAcquire())
{
@@ -384,319 +642,446 @@ PCODE MethodDesc::MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, CORJIT_FLAGS fla
}
// It is possible that another thread stepped in before we entered the lock.
- pCode = GetNativeCode();
-#ifdef FEATURE_INTERPRETER
- if (pCode != NULL && (pCode != pPreviousInterpStub))
-#else
- if (pCode != NULL)
-#endif // FEATURE_INTERPRETER
+ if ((pCode = pConfig->IsJitCancellationRequested()))
{
- goto Done;
+ return pCode;
}
- SString namespaceOrClassName, methodName, methodSignature;
-
- PCODE pOtherCode = NULL; // Need to move here due to 'goto GotNewCode'
-
-#ifdef FEATURE_MULTICOREJIT
-
- bool fCompiledInBackground = false;
-
- // If not called from multi-core JIT thread,
- if (! fBackgroundThread)
+ pCode = GetMulticoreJitCode();
+ if (pCode != NULL)
{
- // Quick check before calling expensive out of line function on this method's domain has code JITted by background thread
- if (mcJitManager.GetMulticoreJitCodeStorage().GetRemainingMethodCount() > 0)
- {
- if (MulticoreJitManager::IsMethodSupported(this))
- {
- pCode = mcJitManager.RequestMethodCode(this); // Query multi-core JIT manager for compiled code
-
- // Multicore JIT manager starts background thread to pre-compile methods, but it does not back-patch it/notify profiler/notify DAC,
- // Jumtp to GotNewCode to do so
- if (pCode != NULL)
- {
- fCompiledInBackground = true;
-
-#ifdef DEBUGGING_SUPPORTED
- // Notify the debugger of the jitted function
- if (g_pDebugInterface != NULL)
- {
- g_pDebugInterface->JITComplete(this, pCode);
- }
-#endif
-
- goto GotNewCode;
- }
- }
- }
+ pConfig->SetNativeCode(pCode, &pCode);
+ pEntry->m_hrResultCode = S_OK;
+ return pCode;
}
-#endif
-
- if (fIsILStub)
+ else
{
- // we race with other threads to JIT the code for an IL stub and the
- // IL header is released once one of the threads completes. As a result
- // we must be inside the lock to reliably get the IL header for the
- // stub.
-
- ILStubResolver* pResolver = AsDynamicMethodDesc()->GetILStubResolver();
- ILHeader = pResolver->GetILHeader();
+ return JitCompileCodeLockedEventWrapper(pConfig, pEntryLock);
}
+ }
+ }
+}
+
+PCODE MethodDesc::JitCompileCodeLockedEventWrapper(PrepareCodeConfig* pConfig, JitListLockEntry* pEntry)
+{
+ STANDARD_VM_CONTRACT;
+
+ PCODE pCode = NULL;
+ ULONG sizeOfCode = 0;
+ CORJIT_FLAGS flags;
#ifdef MDA_SUPPORTED
- MdaJitCompilationStart* pProbe = MDA_GET_ASSISTANT(JitCompilationStart);
- if (pProbe)
- pProbe->NowCompiling(this);
+ MdaJitCompilationStart* pProbe = MDA_GET_ASSISTANT(JitCompilationStart);
+ if (pProbe)
+ pProbe->NowCompiling(this);
#endif // MDA_SUPPORTED
#ifdef PROFILING_SUPPORTED
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
+ // For methods with non-zero rejit id we send ReJITCompilationStarted, otherwise
+ // JITCompilationStarted. It isn't clear if this is the ideal policy for these
+ // notifications yet.
+ ReJITID rejitId = pConfig->GetCodeVersion().GetILCodeVersionId();
+ if (rejitId != 0)
+ {
+ g_profControlBlock.pProfInterface->ReJITCompilationStarted((FunctionID)this,
+ rejitId,
+ TRUE);
+ }
+ else
// If profiling, need to give a chance for a tool to examine and modify
// the IL before it gets to the JIT. This allows one to add probe calls for
// things like code coverage, performance, or whatever.
+ {
+ if (!IsNoMetadata())
{
- BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
-
-#ifdef FEATURE_MULTICOREJIT
- // Multicore JIT should be disabled when CORProfilerTrackJITInfo is on
- // But there could be corner case in which profiler is attached when multicore background thread is calling MakeJitWorker
- // Disable this block when calling from multicore JIT background thread
- if (!fBackgroundThread)
-#endif
- {
- if (!IsNoMetadata())
- {
- g_profControlBlock.pProfInterface->JITCompilationStarted((FunctionID) this, TRUE);
- // The profiler may have changed the code on the callback. Need to
- // pick up the new code. Note that you have to be fully trusted in
- // this mode and the code will not be verified.
- COR_ILMETHOD *pilHeader = GetILHeader(TRUE);
- new (ILHeader) COR_ILMETHOD_DECODER(pilHeader, GetMDImport(), NULL);
- }
- else
- {
- unsigned int ilSize, unused;
- CorInfoOptions corOptions;
- LPCBYTE ilHeaderPointer = this->AsDynamicMethodDesc()->GetResolver()->GetCodeInfo(&ilSize, &unused, &corOptions, &unused);
+ g_profControlBlock.pProfInterface->JITCompilationStarted((FunctionID)this, TRUE);
- g_profControlBlock.pProfInterface->DynamicMethodJITCompilationStarted((FunctionID) this, TRUE, ilHeaderPointer, ilSize);
- }
- }
- END_PIN_PROFILER();
}
-#endif // PROFILING_SUPPORTED
-#ifdef FEATURE_INTERPRETER
- // We move the ETW event for start of JITting inward, after we make the decision
- // to JIT rather than interpret.
-#else // FEATURE_INTERPRETER
- // Fire an ETW event to mark the beginning of JIT'ing
- ETW::MethodLog::MethodJitting(this, &namespaceOrClassName, &methodName, &methodSignature);
-#endif // FEATURE_INTERPRETER
-
-#ifdef FEATURE_STACK_SAMPLING
-#ifdef FEATURE_MULTICOREJIT
- if (!fBackgroundThread)
-#endif // FEATURE_MULTICOREJIT
+ else
{
- StackSampler::RecordJittingInfo(this, flags);
- }
-#endif // FEATURE_STACK_SAMPLING
+ unsigned int ilSize, unused;
+ CorInfoOptions corOptions;
+ LPCBYTE ilHeaderPointer = this->AsDynamicMethodDesc()->GetResolver()->GetCodeInfo(&ilSize, &unused, &corOptions, &unused);
- EX_TRY
- {
- pCode = UnsafeJitFunction(this, ILHeader, flags, &sizeOfCode);
- }
- EX_CATCH
- {
- // If the current thread threw an exception, but a competing thread
- // somehow succeeded at JITting the same function (e.g., out of memory
- // encountered on current thread but not competing thread), then go ahead
- // and swallow this current thread's exception, since we somehow managed
- // to successfully JIT the code on the other thread.
- //
- // Note that if a deadlock cycle is broken, that does not result in an
- // exception--the thread would just pass through the lock and JIT the
- // function in competition with the other thread (with the winner of the
- // race decided later on when we do SetNativeCodeInterlocked). This
- // try/catch is purely to deal with the (unusual) case where a competing
- // thread succeeded where we aborted.
-
- pOtherCode = GetNativeCode();
-
- if (pOtherCode == NULL)
- {
- pEntry->m_hrResultCode = E_FAIL;
- EX_RETHROW;
- }
+ g_profControlBlock.pProfInterface->DynamicMethodJITCompilationStarted((FunctionID)this, TRUE, ilHeaderPointer, ilSize);
}
- EX_END_CATCH(RethrowTerminalExceptions)
+ }
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
- if (pOtherCode != NULL)
- {
- // Somebody finished jitting recursively while we were jitting the method.
- // Just use their method & leak the one we finished. (Normally we hope
- // not to finish our JIT in this case, as we will abort early if we notice
- // a reentrant jit has occurred. But we may not catch every place so we
- // do a definitive final check here.
- pCode = pOtherCode;
- goto Done;
- }
+ if (!ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_VERBOSE,
+ CLR_JIT_KEYWORD))
+ {
+ pCode = JitCompileCodeLocked(pConfig, pEntry, &sizeOfCode, &flags);
+ }
+ else
+ {
+ SString namespaceOrClassName, methodName, methodSignature;
- _ASSERTE(pCode != NULL);
+ // Methods that may be interpreted defer this notification until it is certain
+ // we are jitting and not interpreting in CompileMethodWithEtwWrapper.
+ // Some further refactoring could consolidate the notification to always
+ // occur at the point the interpreter does it, but it might be even better
+ // to fix the issues that cause us to avoid generating jit notifications
+ // for interpreted methods in the first place. The interpreter does generate
+ // a small stub of native code but no native-IL mapping.
+#ifndef FEATURE_INTERPRETER
+ ETW::MethodLog::MethodJitting(this,
+ &namespaceOrClassName,
+ &methodName,
+ &methodSignature);
+#endif
-#ifdef HAVE_GCCOVER
- if (GCStress<cfg_instr_jit>::IsEnabled())
- {
- SetupGcCoverage(this, (BYTE*) pCode);
- }
-#endif // HAVE_GCCOVER
+ pCode = JitCompileCodeLocked(pConfig, pEntry, &sizeOfCode, &flags);
+ // Interpreted methods skip this notification
#ifdef FEATURE_INTERPRETER
- // Determine whether the new code address is "stable"...= is not an interpreter stub.
- fInterpreted = (Interpreter::InterpretationStubToMethodInfo(pCode) == this);
- fStable = !fInterpreted;
-#endif // FEATURE_INTERPRETER
-
-#ifdef FEATURE_MULTICOREJIT
-
- // If called from multi-core JIT background thread, store code under lock, delay patching until code is queried from application threads
- if (fBackgroundThread)
- {
- // Fire an ETW event to mark the end of JIT'ing
- ETW::MethodLog::MethodJitted(this, &namespaceOrClassName, &methodName, &methodSignature, pCode, 0 /* ReJITID */);
-
-#ifdef FEATURE_PERFMAP
- // Save the JIT'd method information so that perf can resolve JIT'd call frames.
- PerfMap::LogJITCompiledMethod(this, pCode, sizeOfCode);
+ if (Interpreter::InterpretationStubToMethodInfo(pCode) == NULL)
#endif
-
- mcJitManager.GetMulticoreJitCodeStorage().StoreMethodCode(this, pCode);
-
- goto Done;
- }
+ {
+ // Fire an ETW event to mark the end of JIT'ing
+ ETW::MethodLog::MethodJitted(this,
+ &namespaceOrClassName,
+ &methodName,
+ &methodSignature,
+ pCode,
+ pConfig->GetCodeVersion().GetVersionId());
+ }
-GotNewCode:
-#endif
- // If this function had already been requested for rejit (before its original
- // code was jitted), then give the rejit manager a chance to jump-stamp the
- // code we just compiled so the first thread entering the function will jump
- // to the prestub and trigger the rejit. Note that the PublishMethodHolder takes
- // a lock to avoid a particular kind of rejit race. See
- // code:ReJitManager::PublishMethodHolder::PublishMethodHolder#PublishCode for
- // details on the rejit race.
- //
- // Aside from rejit, performing a SetNativeCodeInterlocked at this point
- // generally ensures that there is only one winning version of the native
- // code. This also avoid races with profiler overriding ngened code (see
- // matching SetNativeCodeInterlocked done after
- // JITCachedFunctionSearchStarted)
-#ifdef FEATURE_INTERPRETER
- PCODE pExpected = pPreviousInterpStub;
- if (pExpected == NULL) pExpected = GetTemporaryEntryPoint();
-#endif
- {
- ReJitPublishMethodHolder publishWorker(this, pCode);
- if (!SetNativeCodeInterlocked(pCode
-#ifdef FEATURE_INTERPRETER
- , pExpected, fStable
-#endif
- ))
- {
- // Another thread beat us to publishing its copy of the JITted code.
- pCode = GetNativeCode();
- goto Done;
- }
-#if defined(FEATURE_JIT_PITCHING)
- else
- {
- SavePitchingCandidate(this, sizeOfCode);
- }
-#endif
- }
+ }
-#ifdef FEATURE_INTERPRETER
- // State for dynamic methods cannot be freed if the method was ever interpreted,
- // since there is no way to ensure that it is not in use at the moment.
- if (IsDynamicMethod() && !fInterpreted && (pPreviousInterpStub == NULL))
- {
- AsDynamicMethodDesc()->GetResolver()->FreeCompileTimeState();
- }
-#endif // FEATURE_INTERPRETER
+#ifdef FEATURE_STACK_SAMPLING
+ StackSampler::RecordJittingInfo(this, flags);
+#endif // FEATURE_STACK_SAMPLING
- // We succeeded in jitting the code, and our jitted code is the one that's going to run now.
- pEntry->m_hrResultCode = S_OK;
+#ifdef PROFILING_SUPPORTED
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
+ // For methods with non-zero rejit id we send ReJITCompilationFinished, otherwise
+ // JITCompilationFinished. It isn't clear if this is the ideal policy for these
+ // notifications yet.
+ ReJITID rejitId = pConfig->GetCodeVersion().GetILCodeVersionId();
+ if (rejitId != 0)
+ {
- #ifdef PROFILING_SUPPORTED
+ g_profControlBlock.pProfInterface->ReJITCompilationFinished((FunctionID)this,
+ rejitId,
+ S_OK,
+ TRUE);
+ }
+ else
// Notify the profiler that JIT completed.
// Must do this after the address has been set.
// @ToDo: Why must we set the address before notifying the profiler ??
// Note that if IsInterceptedForDeclSecurity is set no one should access the jitted code address anyway.
+ {
+ if (!IsNoMetadata())
{
- BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
- if (!IsNoMetadata())
- {
- g_profControlBlock.pProfInterface->
- JITCompilationFinished((FunctionID) this,
- pEntry->m_hrResultCode,
- TRUE);
- }
- else
- {
- g_profControlBlock.pProfInterface->DynamicMethodJITCompilationFinished((FunctionID) this, pEntry->m_hrResultCode, TRUE);
- }
- END_PIN_PROFILER();
+ g_profControlBlock.pProfInterface->
+ JITCompilationFinished((FunctionID)this,
+ pEntry->m_hrResultCode,
+ TRUE);
+ }
+ else
+ {
+ g_profControlBlock.pProfInterface->DynamicMethodJITCompilationFinished((FunctionID)this, pEntry->m_hrResultCode, TRUE);
}
+ }
+ END_PIN_PROFILER();
+ }
#endif // PROFILING_SUPPORTED
-#ifdef FEATURE_MULTICOREJIT
- if (! fCompiledInBackground)
-#endif
+ // Interpreted methods skip this notification
#ifdef FEATURE_INTERPRETER
- // If we didn't JIT, but rather, created an interpreter stub (i.e., fStable is false), don't tell ETW that we did.
- if (fStable)
-#endif // FEATURE_INTERPRETER
- {
- // Fire an ETW event to mark the end of JIT'ing
- ETW::MethodLog::MethodJitted(this, &namespaceOrClassName, &methodName, &methodSignature, pCode, 0 /* ReJITID */);
-
+ if (Interpreter::InterpretationStubToMethodInfo(pCode) == NULL)
+#endif
+ {
#ifdef FEATURE_PERFMAP
- // Save the JIT'd method information so that perf can resolve JIT'd call frames.
- PerfMap::LogJITCompiledMethod(this, pCode, sizeOfCode);
+ // Save the JIT'd method information so that perf can resolve JIT'd call frames.
+ PerfMap::LogJITCompiledMethod(this, pCode, sizeOfCode);
#endif
- }
-
+ }
-#ifdef FEATURE_MULTICOREJIT
- // If not called from multi-core JIT thread, not got code from storage, quick check before calling out of line function
- if (! fBackgroundThread && ! fCompiledInBackground && mcJitManager.IsRecorderActive())
+#ifdef FEATURE_MULTICOREJIT
+ // Non-initial code versions and multicore jit initial compilation all skip this
+ if (pConfig->NeedsMulticoreJitNotification())
+ {
+ MulticoreJitManager & mcJitManager = GetAppDomain()->GetMulticoreJitManager();
+ if (mcJitManager.IsRecorderActive())
+ {
+ if (MulticoreJitManager::IsMethodSupported(this))
{
- if (MulticoreJitManager::IsMethodSupported(this))
- {
- mcJitManager.RecordMethodJit(this); // Tell multi-core JIT manager to record method on successful JITting
- }
+ mcJitManager.RecordMethodJit(this); // Tell multi-core JIT manager to record method on successful JITting
}
+ }
+ }
#endif
- if (!fIsILStub)
- {
- // The notification will only occur if someone has registered for this method.
- DACNotifyCompilationFinished(this);
- }
+ // The notification will only occur if someone has registered for this method.
+ DACNotifyCompilationFinished(this);
+
+ return pCode;
+}
+
+PCODE MethodDesc::JitCompileCodeLocked(PrepareCodeConfig* pConfig, JitListLockEntry* pEntry, ULONG* pSizeOfCode, CORJIT_FLAGS* pFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+ PCODE pCode = NULL;
+
+ // The profiler may have changed the code on the callback. Need to
+ // pick up the new code.
+ COR_ILMETHOD_DECODER ilDecoderTemp;
+ COR_ILMETHOD_DECODER *pilHeader = GetAndVerifyILHeader(pConfig, &ilDecoderTemp);
+ *pFlags = pConfig->GetJitCompilationFlags();
+ PCODE pOtherCode = NULL;
+ EX_TRY
+ {
+ pCode = UnsafeJitFunction(this, pilHeader, *pFlags, pSizeOfCode);
+ }
+ EX_CATCH
+ {
+ // If the current thread threw an exception, but a competing thread
+ // somehow succeeded at JITting the same function (e.g., out of memory
+ // encountered on current thread but not competing thread), then go ahead
+ // and swallow this current thread's exception, since we somehow managed
+ // to successfully JIT the code on the other thread.
+ //
+ // Note that if a deadlock cycle is broken, that does not result in an
+ // exception--the thread would just pass through the lock and JIT the
+ // function in competition with the other thread (with the winner of the
+ // race decided later on when we do SetNativeCodeInterlocked). This
+ // try/catch is purely to deal with the (unusual) case where a competing
+ // thread succeeded where we aborted.
+
+ if (!(pOtherCode = pConfig->IsJitCancellationRequested()))
+ {
+ pEntry->m_hrResultCode = E_FAIL;
+ EX_RETHROW;
}
}
+ EX_END_CATCH(RethrowTerminalExceptions)
-Done:
+ if (pOtherCode != NULL)
+ {
+ // Somebody finished jitting recursively while we were jitting the method.
+ // Just use their method & leak the one we finished. (Normally we hope
+ // not to finish our JIT in this case, as we will abort early if we notice
+ // a reentrant jit has occurred. But we may not catch every place so we
+ // do a definitive final check here.)
+ return pOtherCode;
+ }
- // We must have a code by now.
_ASSERTE(pCode != NULL);
+
+ // Aside from rejit, performing a SetNativeCodeInterlocked at this point
+ // generally ensures that there is only one winning version of the native
+ // code. This also avoid races with profiler overriding ngened code (see
+ // matching SetNativeCodeInterlocked done after
+ // JITCachedFunctionSearchStarted)
+ {
+ if (!pConfig->SetNativeCode(pCode, &pOtherCode))
+ {
+ // Another thread beat us to publishing its copy of the JITted code.
+ return pOtherCode;
+ }
+#if defined(FEATURE_JIT_PITCHING)
+ else
+ {
+ SavePitchingCandidate(this, *pSizeOfCode);
+ }
+#endif
+ }
+
+#ifdef HAVE_GCCOVER
+ if (GCStress<cfg_instr_jit>::IsEnabled())
+ {
+ SetupGcCoverage(this, (BYTE*)pCode);
+ }
+#endif // HAVE_GCCOVER
- LOG((LF_CORDB, LL_EVERYTHING, "MethodDesc::MakeJitWorker finished. Stub is" FMT_ADDR "\n",
- DBG_ADDR(pCode)));
+ // We succeeded in jitting the code, and our jitted code is the one that's going to run now.
+ pEntry->m_hrResultCode = S_OK;
return pCode;
}
+
+
+PrepareCodeConfig::PrepareCodeConfig() {}
+
+PrepareCodeConfig::PrepareCodeConfig(NativeCodeVersion codeVersion, BOOL needsMulticoreJitNotification, BOOL mayUsePrecompiledCode) :
+ m_pMethodDesc(codeVersion.GetMethodDesc()),
+ m_nativeCodeVersion(codeVersion),
+ m_needsMulticoreJitNotification(needsMulticoreJitNotification),
+ m_mayUsePrecompiledCode(mayUsePrecompiledCode)
+{}
+
+MethodDesc* PrepareCodeConfig::GetMethodDesc()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pMethodDesc;
+}
+
+PCODE PrepareCodeConfig::IsJitCancellationRequested()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pMethodDesc->GetNativeCode();
+}
+
+BOOL PrepareCodeConfig::NeedsMulticoreJitNotification()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_needsMulticoreJitNotification;
+}
+
+NativeCodeVersion PrepareCodeConfig::GetCodeVersion()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_nativeCodeVersion;
+}
+
+BOOL PrepareCodeConfig::SetNativeCode(PCODE pCode, PCODE * ppAlternateCodeToUse)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // If this function had already been requested for rejit (before its original
+ // code was jitted), then give the CodeVersionManager a chance to jump-stamp the
+ // code we just compiled so the first thread entering the function will jump
+ // to the prestub and trigger the rejit. Note that the PublishMethodHolder takes
+ // a lock to avoid a particular kind of rejit race. See
+ // code:CodeVersionManager::PublishMethodHolder::PublishMethodHolder#PublishCode for
+ // details on the rejit race.
+ //
+ if (m_pMethodDesc->IsVersionableWithJumpStamp())
+ {
+ PublishMethodHolder publishWorker(GetMethodDesc(), pCode);
+ if (m_pMethodDesc->SetNativeCodeInterlocked(pCode, NULL))
+ {
+ return TRUE;
+ }
+ }
+ else
+ {
+ if (m_pMethodDesc->SetNativeCodeInterlocked(pCode, NULL))
+ {
+ return TRUE;
+ }
+ }
+
+ *ppAlternateCodeToUse = m_pMethodDesc->GetNativeCode();
+ return FALSE;
+}
+
+COR_ILMETHOD* PrepareCodeConfig::GetILHeader()
+{
+ STANDARD_VM_CONTRACT;
+ return m_pMethodDesc->GetILHeader(TRUE);
+}
+
+CORJIT_FLAGS PrepareCodeConfig::GetJitCompilationFlags()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (m_pMethodDesc->IsILStub())
+ {
+ ILStubResolver* pResolver = m_pMethodDesc->AsDynamicMethodDesc()->GetILStubResolver();
+ return pResolver->GetJitFlags();
+ }
+ return CORJIT_FLAGS();
+}
+
+BOOL PrepareCodeConfig::MayUsePrecompiledCode()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_mayUsePrecompiledCode;
+}
+
+#ifdef FEATURE_CODE_VERSIONING
+VersionedPrepareCodeConfig::VersionedPrepareCodeConfig() {}
+
+VersionedPrepareCodeConfig::VersionedPrepareCodeConfig(NativeCodeVersion codeVersion) :
+ PrepareCodeConfig(codeVersion, TRUE, FALSE)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!m_nativeCodeVersion.IsDefaultVersion());
+ _ASSERTE(m_pMethodDesc->GetCodeVersionManager()->LockOwnedByCurrentThread());
+ m_ilCodeVersion = m_nativeCodeVersion.GetILCodeVersion();
+}
+
+HRESULT VersionedPrepareCodeConfig::FinishConfiguration()
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(!GetMethodDesc()->GetCodeVersionManager()->LockOwnedByCurrentThread());
+
+ // Any code build stages that do just in time configuration should
+ // be configured now
+#ifdef FEATURE_REJIT
+ if (m_ilCodeVersion.GetRejitState() != ILCodeVersion::kStateActive)
+ {
+ ReJitManager::ConfigureILCodeVersion(m_ilCodeVersion);
+ }
+ _ASSERTE(m_ilCodeVersion.GetRejitState() == ILCodeVersion::kStateActive);
+#endif
+
+ return S_OK;
+}
+
+PCODE VersionedPrepareCodeConfig::IsJitCancellationRequested()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_nativeCodeVersion.GetNativeCode();
+}
+
+BOOL VersionedPrepareCodeConfig::SetNativeCode(PCODE pCode, PCODE * ppAlternateCodeToUse)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ //This isn't the default version so jumpstamp is never needed
+ _ASSERTE(!m_nativeCodeVersion.IsDefaultVersion());
+ if (m_nativeCodeVersion.SetNativeCodeInterlocked(pCode, NULL))
+ {
+ return TRUE;
+ }
+ else
+ {
+ *ppAlternateCodeToUse = m_nativeCodeVersion.GetNativeCode();
+ return FALSE;
+ }
+}
+
+COR_ILMETHOD* VersionedPrepareCodeConfig::GetILHeader()
+{
+ STANDARD_VM_CONTRACT;
+ return m_ilCodeVersion.GetIL();
+}
+
+CORJIT_FLAGS VersionedPrepareCodeConfig::GetJitCompilationFlags()
+{
+ STANDARD_VM_CONTRACT;
+ CORJIT_FLAGS flags;
+
+#ifdef FEATURE_REJIT
+ DWORD profilerFlags = m_ilCodeVersion.GetJitFlags();
+ flags.Add(ReJitManager::JitFlagsFromProfCodegenFlags(profilerFlags));
+#endif
+
+#ifdef FEATURE_TIERED_COMPILATION
+ flags.Add(TieredCompilationManager::GetJitFlags(m_nativeCodeVersion));
+#endif
+
+ return flags;
+}
+
+#endif //FEATURE_CODE_VERSIONING
+
#ifdef FEATURE_STUBS_AS_IL
// CreateInstantiatingILStubTargetSig:
@@ -1280,21 +1665,6 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
GCStress<cfg_any, EeconfigFastGcSPolicy, CoopGcModePolicy>::MaybeTrigger();
- // Are we in the prestub because of a rejit request? If so, let the ReJitManager
- // take it from here.
- pCode = ReJitManager::DoReJitIfNecessary(this);
- if (pCode != NULL)
- {
- // A ReJIT was performed, so nothing left for DoPrestub() to do. Return now.
- //
- // The stable entrypoint will either be a pointer to the original JITted code
- // (with a jmp at the top to jump to the newly-rejitted code) OR a pointer to any
- // stub code that must be executed first (e.g., a remoting stub), which in turn
- // will call the original JITted code (which then jmps to the newly-rejitted
- // code).
- RETURN GetStableEntryPoint();
- }
-
#ifdef FEATURE_COMINTEROP
/************************** INTEROP *************************/
@@ -1333,30 +1703,38 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
pMT->CheckRunClassInitThrowing();
}
- /************************** BACKPATCHING *************************/
- // See if the addr of code has changed from the pre-stub
-#ifdef FEATURE_INTERPRETER
- if (!IsReallyPointingToPrestub())
-#else
- if (!IsPointingToPrestub())
+
+ /*************************** CALL COUNTER ***********************/
+ // If we are counting calls for tiered compilation, leave the prestub
+ // in place so that we can continue intercepting method invocations.
+ // When the TieredCompilationManager has received enough call notifications
+ // for this method only then do we back-patch it.
+ BOOL fCanBackpatchPrestub = TRUE;
+#ifdef FEATURE_TIERED_COMPILATION
+ BOOL fEligibleForTieredCompilation = IsEligibleForTieredCompilation();
+ if (fEligibleForTieredCompilation)
+ {
+ CallCounter * pCallCounter = GetCallCounter();
+ fCanBackpatchPrestub = pCallCounter->OnMethodCalled(this);
+ }
#endif
+
+ /*************************** VERSIONABLE CODE *********************/
+
+ BOOL fIsPointingToPrestub = IsPointingToPrestub();
+#ifdef FEATURE_CODE_VERSIONING
+ if (IsVersionableWithPrecode() ||
+ (!fIsPointingToPrestub && IsVersionableWithJumpStamp()))
{
- // If we are counting calls for tiered compilation, leave the prestub
- // in place so that we can continue intercepting method invocations.
- // When the TieredCompilationManager has received enough call notifications
- // for this method only then do we back-patch it.
-#ifdef FEATURE_TIERED_COMPILATION
- PCODE pNativeCode = GetNativeCode();
- if (pNativeCode && IsEligibleForTieredCompilation())
- {
- CallCounter * pCallCounter = GetAppDomain()->GetCallCounter();
- BOOL doBackPatch = pCallCounter->OnMethodCalled(this);
- if (!doBackPatch)
- {
- return pNativeCode;
- }
- }
+ pCode = GetCodeVersionManager()->PublishVersionableCodeIfNecessary(this, fCanBackpatchPrestub);
+ fIsPointingToPrestub = IsPointingToPrestub();
+ }
#endif
+
+ /************************** BACKPATCHING *************************/
+ // See if the addr of code has changed from the pre-stub
+ if (!fIsPointingToPrestub)
+ {
LOG((LF_CLASSLOADER, LL_INFO10000,
" In PreStubWorker, method already jitted, backpatching call point\n"));
#if defined(FEATURE_JIT_PITCHING)
@@ -1364,11 +1742,15 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
#endif
RETURN DoBackpatch(pMT, pDispatchingMT, TRUE);
}
-
- // record if remoting needs to intercept this call
- BOOL fRemotingIntercepted = IsRemotingInterceptedViaPrestub();
-
- BOOL fReportCompilationFinished = FALSE;
+
+ if (pCode)
+ {
+ // The only reason we are still pointing to prestub is because the call counter
+ // prevented it. We should still short circuit and return the code without
+ // backpatching.
+ _ASSERTE(!fCanBackpatchPrestub);
+ RETURN pCode;
+ }
/************************** CODE CREATION *************************/
if (IsUnboxingStub())
@@ -1383,209 +1765,11 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
#endif // defined(FEATURE_SHARE_GENERIC_CODE)
else if (IsIL() || IsNoMetadata())
{
- // remember if we need to backpatch the MethodTable slot
- BOOL fBackpatch = !fRemotingIntercepted
- && IsNativeCodeStableAfterInit();
-
-#ifdef FEATURE_PREJIT
- //
- // See if we have any prejitted code to use.
- //
-
- pCode = GetPreImplementedCode();
-
-#ifdef PROFILING_SUPPORTED
- if (pCode != NULL)
- {
- BOOL fShouldSearchCache = TRUE;
-
- {
- BEGIN_PIN_PROFILER(CORProfilerTrackCacheSearches());
- g_profControlBlock.pProfInterface->
- JITCachedFunctionSearchStarted((FunctionID) this,
- &fShouldSearchCache);
- END_PIN_PROFILER();
- }
-
- if (!fShouldSearchCache)
- {
-#ifdef FEATURE_INTERPRETER
- SetNativeCodeInterlocked(NULL, pCode, FALSE);
-#else
- SetNativeCodeInterlocked(NULL, pCode);
-#endif
- _ASSERTE(!IsPreImplemented());
- pCode = NULL;
- }
- }
-#endif // PROFILING_SUPPORTED
-
- if (pCode != NULL)
- {
- LOG((LF_ZAP, LL_INFO10000,
- "ZAP: Using code" FMT_ADDR "for %s.%s sig=\"%s\" (token %x).\n",
- DBG_ADDR(pCode),
- m_pszDebugClassName,
- m_pszDebugMethodName,
- m_pszDebugMethodSignature,
- GetMemberDef()));
-
- TADDR pFixupList = GetFixupList();
- if (pFixupList != NULL)
- {
- Module *pZapModule = GetZapModule();
- _ASSERTE(pZapModule != NULL);
- if (!pZapModule->FixupDelayList(pFixupList))
- {
- _ASSERTE(!"FixupDelayList failed");
- ThrowHR(COR_E_BADIMAGEFORMAT);
- }
- }
-
-#ifdef HAVE_GCCOVER
- if (GCStress<cfg_instr_ngen>::IsEnabled())
- SetupGcCoverage(this, (BYTE*) pCode);
-#endif // HAVE_GCCOVER
-
-#ifdef PROFILING_SUPPORTED
- /*
- * This notifies the profiler that a search to find a
- * cached jitted function has been made.
- */
- {
- BEGIN_PIN_PROFILER(CORProfilerTrackCacheSearches());
- g_profControlBlock.pProfInterface->
- JITCachedFunctionSearchFinished((FunctionID) this, COR_PRF_CACHED_FUNCTION_FOUND);
- END_PIN_PROFILER();
- }
-#endif // PROFILING_SUPPORTED
- }
-
- //
- // If not, try to jit it
- //
-
-#endif // FEATURE_PREJIT
-
-#ifdef FEATURE_READYTORUN
- if (pCode == NULL)
+ if (!IsNativeCodeStableAfterInit())
{
- Module * pModule = GetModule();
- if (pModule->IsReadyToRun())
- {
- pCode = pModule->GetReadyToRunInfo()->GetEntryPoint(this);
- if (pCode != NULL)
- fReportCompilationFinished = TRUE;
- }
+ GetOrCreatePrecode();
}
-#endif // FEATURE_READYTORUN
-
- if (pCode == NULL)
- {
- NewHolder<COR_ILMETHOD_DECODER> pHeader(NULL);
- // Get the information on the method
- if (!IsNoMetadata())
- {
- COR_ILMETHOD* ilHeader = GetILHeader(TRUE);
- if(ilHeader == NULL)
- {
-#ifdef FEATURE_COMINTEROP
- // Abstract methods can be called through WinRT derivation if the deriving type
- // is not implemented in managed code, and calls through the CCW to the abstract
- // method. Throw a sensible exception in that case.
- if (pMT->IsExportedToWinRT() && IsAbstract())
- {
- COMPlusThrowHR(E_NOTIMPL);
- }
-#endif // FEATURE_COMINTEROP
-
- COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL);
- }
-
- COR_ILMETHOD_DECODER::DecoderStatus status = COR_ILMETHOD_DECODER::FORMAT_ERROR;
-
- {
- // Decoder ctor can AV on a malformed method header
- AVInRuntimeImplOkayHolder AVOkay;
- pHeader = new COR_ILMETHOD_DECODER(ilHeader, GetMDImport(), &status);
- if(pHeader == NULL)
- status = COR_ILMETHOD_DECODER::FORMAT_ERROR;
- }
-
- if (status == COR_ILMETHOD_DECODER::VERIFICATION_ERROR &&
- Security::CanSkipVerification(GetModule()->GetDomainAssembly()))
- {
- status = COR_ILMETHOD_DECODER::SUCCESS;
- }
-
- if (status != COR_ILMETHOD_DECODER::SUCCESS)
- {
- if (status == COR_ILMETHOD_DECODER::VERIFICATION_ERROR)
- {
- // Throw a verification HR
- COMPlusThrowHR(COR_E_VERIFICATION);
- }
- else
- {
- COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL);
- }
- }
-
-#ifdef _VER_EE_VERIFICATION_ENABLED
- static ConfigDWORD peVerify;
-
- if (peVerify.val(CLRConfig::EXTERNAL_PEVerify))
- Verify(pHeader, TRUE, FALSE); // Throws a VerifierException if verification fails
-#endif // _VER_EE_VERIFICATION_ENABLED
- } // end if (!IsNoMetadata())
-
- // JIT it
- LOG((LF_CLASSLOADER, LL_INFO1000000,
- " In PreStubWorker, calling MakeJitWorker\n"));
-
- // Create the precode eagerly if it is going to be needed later.
- if (!fBackpatch)
- {
- GetOrCreatePrecode();
- }
-
- // Mark the code as hot in case the method ends up in the native image
- g_IBCLogger.LogMethodCodeAccess(this);
-
- pCode = MakeJitWorker(pHeader, CORJIT_FLAGS());
-
-#ifdef FEATURE_INTERPRETER
- if ((pCode != NULL) && !HasStableEntryPoint())
- {
- // We don't yet have a stable entry point, so don't do backpatching yet.
- // But we do have to handle some extra cases that occur in backpatching.
- // (Perhaps I *should* get to the backpatching code, but in a mode where we know
- // we're not dealing with the stable entry point...)
- if (HasNativeCodeSlot())
- {
- // We called "SetNativeCodeInterlocked" in MakeJitWorker, which updated the native
- // code slot, but I think we also want to update the regular slot...
- PCODE tmpEntry = GetTemporaryEntryPoint();
- PCODE pFound = FastInterlockCompareExchangePointer(GetAddrOfSlot(), pCode, tmpEntry);
- // Doesn't matter if we failed -- if we did, it's because somebody else made progress.
- if (pFound != tmpEntry) pCode = pFound;
- }
-
- // Now we handle the case of a FuncPtrPrecode.
- FuncPtrStubs * pFuncPtrStubs = GetLoaderAllocator()->GetFuncPtrStubsNoCreate();
- if (pFuncPtrStubs != NULL)
- {
- Precode* pFuncPtrPrecode = pFuncPtrStubs->Lookup(this);
- if (pFuncPtrPrecode != NULL)
- {
- // If there is a funcptr precode to patch, attempt to patch it. If we lose, that's OK,
- // somebody else made progress.
- pFuncPtrPrecode->SetTargetInterlocked(pCode);
- }
- }
- }
-#endif // FEATURE_INTERPRETER
- } // end if (pCode == NULL)
+ pCode = PrepareInitialCode();
} // end else if (IsIL() || IsNoMetadata())
else if (IsNDirect())
{
@@ -1621,13 +1805,7 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
}
/************************** POSTJIT *************************/
-#ifndef FEATURE_INTERPRETER
_ASSERTE(pCode == NULL || GetNativeCode() == NULL || pCode == GetNativeCode());
-#else // FEATURE_INTERPRETER
- // Interpreter adds a new possiblity == someone else beat us to installing an intepreter stub.
- _ASSERTE(pCode == NULL || GetNativeCode() == NULL || pCode == GetNativeCode()
- || Interpreter::InterpretationStubToMethodInfo(pCode) == this);
-#endif // FEATURE_INTERPRETER
// At this point we must have either a pointer to managed code or to a stub. All of the above code
// should have thrown an exception if it couldn't make a stub.
@@ -1656,42 +1834,15 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
MemoryBarrier();
#endif
- // If we are counting calls for tiered compilation, leave the prestub
- // in place so that we can continue intercepting method invocations.
- // When the TieredCompilationManager has received enough call notifications
- // for this method only then do we back-patch it.
-#ifdef FEATURE_TIERED_COMPILATION
- if (pCode && IsEligibleForTieredCompilation())
- {
- CallCounter * pCallCounter = GetAppDomain()->GetCallCounter();
- BOOL doBackPatch = pCallCounter->OnMethodCalled(this);
- if (!doBackPatch)
- {
- return pCode;
- }
- }
-#endif
-
if (pCode != NULL)
{
if (HasPrecode())
GetPrecode()->SetTargetInterlocked(pCode);
else
- if (!HasStableEntryPoint())
- {
- // Is the result an interpreter stub?
-#ifdef FEATURE_INTERPRETER
- if (Interpreter::InterpretationStubToMethodInfo(pCode) == this)
+ if (!HasStableEntryPoint())
{
- SetEntryPointInterlocked(pCode);
- }
- else
-#endif // FEATURE_INTERPRETER
- {
- ReJitPublishMethodHolder publishWorker(this, pCode);
SetStableEntryPointInterlocked(pCode);
}
- }
}
else
{
@@ -1708,15 +1859,8 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
}
}
-#ifdef FEATURE_INTERPRETER
- _ASSERTE(!IsReallyPointingToPrestub());
-#else // FEATURE_INTERPRETER
_ASSERTE(!IsPointingToPrestub());
_ASSERTE(HasStableEntryPoint());
-#endif // FEATURE_INTERPRETER
-
- if (fReportCompilationFinished)
- DACNotifyCompilationFinished(this);
RETURN DoBackpatch(pMT, pDispatchingMT, FALSE);
}
diff --git a/src/vm/profilingenumerators.cpp b/src/vm/profilingenumerators.cpp
index 5044eb7c2b..2406f5aa42 100644
--- a/src/vm/profilingenumerators.cpp
+++ b/src/vm/profilingenumerators.cpp
@@ -79,7 +79,7 @@ BOOL ProfilerFunctionEnum::Init(BOOL fWithReJITIDs)
if (fWithReJITIDs)
{
// This guy causes triggering and locking, while the non-rejitid case does not.
- element->reJitId = pMD->GetReJitManager()->GetReJitId(pMD, heapIterator.GetMethodCode());
+ element->reJitId = ReJitManager::GetReJitId(pMD, heapIterator.GetMethodCode());
}
else
{
diff --git a/src/vm/proftoeeinterfaceimpl.cpp b/src/vm/proftoeeinterfaceimpl.cpp
index cfd99adf27..0493163287 100644
--- a/src/vm/proftoeeinterfaceimpl.cpp
+++ b/src/vm/proftoeeinterfaceimpl.cpp
@@ -2117,7 +2117,7 @@ HRESULT ProfToEEInterfaceImpl::GetFunctionFromIP2(LPCBYTE ip, FunctionID * pFunc
if (pReJitId != NULL)
{
MethodDesc * pMD = codeInfo.GetMethodDesc();
- *pReJitId = pMD->GetReJitManager()->GetReJitId(pMD, codeInfo.GetStartAddress());
+ *pReJitId = ReJitManager::GetReJitId(pMD, codeInfo.GetStartAddress());
}
return S_OK;
@@ -2592,13 +2592,24 @@ HRESULT ProfToEEInterfaceImpl::GetCodeInfo3(FunctionID functionId,
hr = ValidateParametersForGetCodeInfo(pMethodDesc, cCodeInfos, codeInfos);
if (SUCCEEDED(hr))
{
- hr = GetCodeInfoFromCodeStart(
- // Note here that we must consult the rejit manager to determine the code
- // start address
- pMethodDesc->GetReJitManager()->GetCodeStart(pMethodDesc, reJitId),
- cCodeInfos,
- pcCodeInfos,
- codeInfos);
+ CodeVersionManager* pCodeVersionManager = pMethodDesc->GetCodeVersionManager();
+ ILCodeVersion ilCodeVersion = pCodeVersionManager->GetILCodeVersion(pMethodDesc, reJitId);
+
+ // Now that tiered compilation can create more than one jitted code version for the same rejit id
+ // we are arbitrarily choosing the first one to return. To return all of them we'd presumably need
+ // a new profiler API.
+ NativeCodeVersionCollection nativeCodeVersions = ilCodeVersion.GetNativeCodeVersions(pMethodDesc);
+ for (NativeCodeVersionIterator iter = nativeCodeVersions.Begin(); iter != nativeCodeVersions.End(); iter++)
+ {
+ PCODE pCodeStart = iter->GetNativeCode();
+ hr = GetCodeInfoFromCodeStart(
+ pCodeStart,
+ cCodeInfos,
+ pcCodeInfos,
+ codeInfos);
+ break;
+ }
+
}
}
EX_CATCH_HRESULT(hr);
@@ -6425,7 +6436,7 @@ HRESULT ProfToEEInterfaceImpl::GetFunctionFromIP3(LPCBYTE ip, FunctionID * pFunc
if (pReJitId != NULL)
{
MethodDesc * pMD = codeInfo.GetMethodDesc();
- *pReJitId = pMD->GetReJitManager()->GetReJitId(pMD, codeInfo.GetStartAddress());
+ *pReJitId = ReJitManager::GetReJitId(pMD, codeInfo.GetStartAddress());
}
return S_OK;
@@ -8239,7 +8250,7 @@ HRESULT ProfToEEInterfaceImpl::GetReJITIDs(
MethodDesc * pMD = FunctionIdToMethodDesc(functionId);
- return pMD->GetReJitManager()->GetReJITIDs(pMD, cReJitIds, pcReJitIds, reJitIds);
+ return ReJitManager::GetReJITIDs(pMD, cReJitIds, pcReJitIds, reJitIds);
}
HRESULT ProfToEEInterfaceImpl::RequestReJIT(ULONG cFunctions, // in
diff --git a/src/vm/rejit.cpp b/src/vm/rejit.cpp
index 7bbd0e2f71..2a9c9e78a3 100644
--- a/src/vm/rejit.cpp
+++ b/src/vm/rejit.cpp
@@ -37,7 +37,7 @@
// appropriate IL and codegen flags, calling UnsafeJitFunction(), and redirecting the
// jump-stamp from the prestub to the newly-rejitted code.
//
-// * code:ReJitPublishMethodHolder::ReJitPublishMethodHolder
+// * code:PublishMethodHolder::PublishMethodHolder
// MethodDesc::MakeJitWorker() calls this to determine if there's an outstanding
// "pre-rejit" request for a MethodDesc that has just been jitted for the first time. We
// also call this from MethodDesc::CheckRestore when restoring generic methods.
@@ -48,8 +48,8 @@
// the PCODE, which is required to avoid races with a profiler that calls RequestReJIT
// just as the method finishes compiling/restoring.
//
-// * code:ReJitPublishMethodTableHolder::ReJitPublishMethodTableHolder
-// Does the same thing as ReJitPublishMethodHolder except iterating over every
+// * code:PublishMethodTableHolder::PublishMethodTableHolder
+// Does the same thing as PublishMethodHolder except iterating over every
// method in the MethodTable. This is called from MethodTable::SetIsRestored.
//
// * code:ReJitManager::GetCurrentReJitFlags:
@@ -156,20 +156,21 @@
#include "threadsuspend.h"
#ifdef FEATURE_REJIT
+#ifdef FEATURE_CODE_VERSIONING
#include "../debug/ee/debugger.h"
#include "../debug/ee/walker.h"
#include "../debug/ee/controller.h"
+#include "codeversion.h"
-// This HRESULT is only used as a private implementation detail. If it escapes functions
-// defined in this file it is a bug. Corerror.xml has a comment in it reserving this
-// value for our use but it doesn't appear in the public headers.
+// This HRESULT is only used as a private implementation detail. Corerror.xml has a comment in it
+// reserving this value for our use but it doesn't appear in the public headers.
#define CORPROF_E_RUNTIME_SUSPEND_REQUIRED 0x80131381
// This is just used as a unique id. Overflow is OK. If we happen to have more than 4+Billion rejits
// and somehow manage to not run out of memory, we'll just have to redefine ReJITID as size_t.
/* static */
-ReJITID SharedReJitInfo::s_GlobalReJitId = 1;
+static ReJITID s_GlobalReJitId = 1;
/* static */
CrstStatic ReJitManager::s_csGlobalRequest;
@@ -178,19 +179,20 @@ CrstStatic ReJitManager::s_csGlobalRequest;
//---------------------------------------------------------------------------------------
// Helpers
-inline CORJIT_FLAGS JitFlagsFromProfCodegenFlags(DWORD dwCodegenFlags)
+//static
+CORJIT_FLAGS ReJitManager::JitFlagsFromProfCodegenFlags(DWORD dwCodegenFlags)
{
LIMITED_METHOD_DAC_CONTRACT;
CORJIT_FLAGS jitFlags;
-
- // Note: COR_PRF_CODEGEN_DISABLE_INLINING is checked in
- // code:CEEInfo::canInline#rejit (it has no equivalent CORJIT flag).
-
if ((dwCodegenFlags & COR_PRF_CODEGEN_DISABLE_ALL_OPTIMIZATIONS) != 0)
{
jitFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_CODE);
}
+ if ((dwCodegenFlags & COR_PRF_CODEGEN_DISABLE_INLINING) != 0)
+ {
+ jitFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_NO_INLINING);
+ }
// In the future more flags may be added that need to be converted here (e.g.,
// COR_PRF_CODEGEN_ENTERLEAVE / CORJIT_FLAG_PROF_ENTERLEAVE)
@@ -199,94 +201,6 @@ inline CORJIT_FLAGS JitFlagsFromProfCodegenFlags(DWORD dwCodegenFlags)
}
//---------------------------------------------------------------------------------------
-// Allocation helpers used by ReJitInfo / SharedReJitInfo to ensure they
-// stick stuff on the appropriate loader heap.
-
-void * LoaderHeapAllocatedRejitStructure::operator new (size_t size, LoaderHeap * pHeap, const NoThrow&)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- INJECT_FAULT(return NULL;);
- PRECONDITION(CheckPointer(pHeap));
- }
- CONTRACTL_END;
-
-#ifdef DACCESS_COMPILE
- return ::operator new(size, nothrow);
-#else
- return pHeap->AllocMem_NoThrow(S_SIZE_T(size));
-#endif
-}
-
-void * LoaderHeapAllocatedRejitStructure::operator new (size_t size, LoaderHeap * pHeap)
-{
- CONTRACTL
- {
- THROWS;
- GC_NOTRIGGER;
- MODE_ANY;
- INJECT_FAULT(COMPlusThrowOM());
- PRECONDITION(CheckPointer(pHeap));
- }
- CONTRACTL_END;
-
-#ifdef DACCESS_COMPILE
- return ::operator new(size);
-#else
- return pHeap->AllocMem(S_SIZE_T(size));
-#endif
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Simple, thin abstraction of debugger breakpoint patching. Given an address and a
-// previously procured DebuggerControllerPatch governing the code address, this decides
-// whether the code address is patched. If so, it returns a pointer to the debugger's
-// buffer (of what's "underneath" the int 3 patch); otherwise, it returns the code
-// address itself.
-//
-// Arguments:
-// * pbCode - Code address to return if unpatched
-// * dbgpatch - DebuggerControllerPatch to test
-//
-// Return Value:
-// Either pbCode or the debugger's patch buffer, as per description above.
-//
-// Assumptions:
-// Caller must manually grab (and hold) the ControllerLockHolder and get the
-// DebuggerControllerPatch before calling this helper.
-//
-// Notes:
-// pbCode need not equal the code address governed by dbgpatch, but is always
-// "related" (and sometimes really is equal). For example, this helper may be used
-// when writing a code byte to an internal rejit buffer (e.g., in preparation for an
-// eventual 64-bit interlocked write into the code stream), and thus pbCode would
-// point into the internal rejit buffer whereas dbgpatch governs the corresponding
-// code byte in the live code stream. This function would then be used to determine
-// whether a byte should be written into the internal rejit buffer OR into the
-// debugger controller's breakpoint buffer.
-//
-
-LPBYTE FirstCodeByteAddr(LPBYTE pbCode, DebuggerControllerPatch * dbgpatch)
-{
- LIMITED_METHOD_CONTRACT;
-
- if (dbgpatch != NULL && dbgpatch->IsActivated())
- {
- // Debugger has patched the code, so return the address of the buffer
- return LPBYTE(&(dbgpatch->opcode));
- }
-
- // no active patch, just return the direct code address
- return pbCode;
-}
-
-
-//---------------------------------------------------------------------------------------
// ProfilerFunctionControl implementation
ProfilerFunctionControl::ProfilerFunctionControl(LoaderHeap * pHeap) :
@@ -532,30 +446,6 @@ COR_IL_MAP* ProfilerFunctionControl::GetInstrumentedMapEntries()
#ifndef DACCESS_COMPILE
//---------------------------------------------------------------------------------------
-// Called by the prestub worker, this function is a simple wrapper which determines the
-// appropriate ReJitManager, and then calls DoReJitIfNecessaryWorker() on it. See the
-// comment at the top of code:ReJitManager::DoReJitIfNecessaryWorker for more info,
-// including parameter & return value descriptions.
-
-// static
-PCODE ReJitManager::DoReJitIfNecessary(PTR_MethodDesc pMD)
-{
- STANDARD_VM_CONTRACT;
-
- if (!pMD->HasNativeCode())
- {
- // If method hasn't been jitted yet, the prestub worker should just continue as
- // usual.
- return NULL;
- }
-
- // We've already published the JITted code for this MethodDesc, and yet we're
- // back in the prestub (who called us). Ask the appropriate rejit manager if that's because of a rejit request. If so, the
- // ReJitManager will take care of the rejit now
- return pMD->GetReJitManager()->DoReJitIfNecessaryWorker(pMD);
-}
-
-//---------------------------------------------------------------------------------------
//
// ICorProfilerInfo4::RequestReJIT calls into this guy to do most of the
// work. Takes care of finding the appropriate ReJitManager instances to
@@ -579,6 +469,18 @@ HRESULT ReJitManager::RequestReJIT(
ModuleID rgModuleIDs[],
mdMethodDef rgMethodDefs[])
{
+ return ReJitManager::UpdateActiveILVersions(cFunctions, rgModuleIDs, rgMethodDefs, NULL, FALSE);
+}
+
+
+ // static
+HRESULT ReJitManager::UpdateActiveILVersions(
+ ULONG cFunctions,
+ ModuleID rgModuleIDs[],
+ mdMethodDef rgMethodDefs[],
+ HRESULT rgHrStatuses[],
+ BOOL fIsRevert)
+{
CONTRACTL
{
NOTHROW;
@@ -599,20 +501,12 @@ HRESULT ReJitManager::RequestReJIT(
// Temporary storage to batch up all the ReJitInfos that will get jump stamped
// later when the runtime is suspended.
//
- //BUGBUG: Its not clear to me why it is safe to hold ReJitInfo* lists
- // outside the table locks. If an AppDomain unload occurred I don't see anything
- // that prevents them from being deleted. If this is a bug it is a pre-existing
- // condition and nobody has reported it as an issue yet. AppDomainExit probably
- // needs to synchronize with something.
- // Jan also pointed out the ModuleIDs have the same issue, in order to use this
- // function safely the profiler needs prevent the AppDomain which contains the
- // modules from being unloaded. I doubt any profilers are doing this intentionally
- // but calling from within typical callbacks like ModuleLoadFinished or
- // JIT events would do it for the current domain I think. Of course RequestRejit
- // could always be called with ModuleIDs in some other AppDomain.
- //END BUGBUG
- SHash<ReJitManagerJumpStampBatchTraits> mgrToJumpStampBatch;
- CDynArray<ReJitReportErrorWorkItem> errorRecords;
+ //DESKTOP WARNING: On CoreCLR we are safe but if this code ever gets ported back
+ //there aren't any protections against domain unload. Any of these moduleIDs
+ //code version managers, or code versions would become invalid if the domain which
+ //contains them was unloaded.
+ SHash<CodeActivationBatchTraits> mgrToCodeActivationBatch;
+ CDynArray<CodeVersionManager::CodePublishError> errorRecords;
for (ULONG i = 0; i < cFunctions; i++)
{
Module * pModule = reinterpret_cast< Module * >(rgModuleIDs[i]);
@@ -660,13 +554,13 @@ HRESULT ReJitManager::RequestReJIT(
}
}
- ReJitManager * pReJitMgr = pModule->GetReJitManager();
- _ASSERTE(pReJitMgr != NULL);
- ReJitManagerJumpStampBatch * pJumpStampBatch = mgrToJumpStampBatch.Lookup(pReJitMgr);
- if (pJumpStampBatch == NULL)
+ CodeVersionManager * pCodeVersionManager = pModule->GetCodeVersionManager();
+ _ASSERTE(pCodeVersionManager != NULL);
+ CodeActivationBatch * pCodeActivationBatch = mgrToCodeActivationBatch.Lookup(pCodeVersionManager);
+ if (pCodeActivationBatch == NULL)
{
- pJumpStampBatch = new (nothrow)ReJitManagerJumpStampBatch(pReJitMgr);
- if (pJumpStampBatch == NULL)
+ pCodeActivationBatch = new (nothrow)CodeActivationBatch(pCodeVersionManager);
+ if (pCodeActivationBatch == NULL)
{
return E_OUTOFMEMORY;
}
@@ -676,7 +570,7 @@ HRESULT ReJitManager::RequestReJIT(
{
// This guy throws when out of memory, but remains internally
// consistent (without adding the new element)
- mgrToJumpStampBatch.Add(pJumpStampBatch);
+ mgrToCodeActivationBatch.Add(pCodeActivationBatch);
}
EX_CATCH_HRESULT(hr);
@@ -687,133 +581,24 @@ HRESULT ReJitManager::RequestReJIT(
}
}
-
- // At this stage, pMD may be NULL or non-NULL, and the specified function may or
- // may not be a generic (or a function on a generic class). The operations
- // below depend on these conditions as follows:
- //
- // (1) If pMD == NULL || PMD has no code || pMD is generic
- // Do a "PRE-REJIT" (add a placeholder ReJitInfo that points to module/token;
- // there's nothing to jump-stamp)
- //
- // (2) IF pMD != NULL, but not generic (or function on generic class)
- // Do a REAL REJIT (add a real ReJitInfo that points to pMD and jump-stamp)
- //
- // (3) IF pMD != NULL, and is a generic (or function on generic class)
- // Do a real rejit (including jump-stamp) for all already-jitted instantiations.
-
- BaseDomain * pBaseDomainFromModule = pModule->GetDomain();
- SharedReJitInfo * pSharedInfo = NULL;
{
- CrstHolder ch(&(pReJitMgr->m_crstTable));
-
- // Do a PRE-rejit
- if (pMD == NULL || !pMD->HasNativeCode() || pMD->HasClassOrMethodInstantiation())
- {
- hr = pReJitMgr->MarkForReJit(
- pModule,
- rgMethodDefs[i],
- pJumpStampBatch,
- &errorRecords,
- &pSharedInfo);
- if (FAILED(hr))
- {
- _ASSERTE(hr == E_OUTOFMEMORY);
- return hr;
- }
- }
-
- if (pMD == NULL)
- {
- // nothing is loaded yet so only the pre-rejit placeholder is needed. We're done for this method.
- continue;
- }
+ CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
- if (!pMD->HasClassOrMethodInstantiation() && pMD->HasNativeCode())
+ // Bind the il code version
+ ILCodeVersion* pILCodeVersion = pCodeActivationBatch->m_methodsToActivate.Append();
+ if (pILCodeVersion == NULL)
{
- // We have a JITted non-generic. Easy case. Just mark the JITted method
- // desc as needing to be rejitted
- hr = pReJitMgr->MarkForReJit(
- pMD,
- pSharedInfo,
- pJumpStampBatch,
- &errorRecords,
- NULL); // Don't need the SharedReJitInfo to be returned
-
- if (FAILED(hr))
- {
- _ASSERTE(hr == E_OUTOFMEMORY);
- return hr;
- }
+ return E_OUTOFMEMORY;
}
-
- if (!pMD->HasClassOrMethodInstantiation())
+ if (fIsRevert)
{
- // not generic, we're done for this method
- continue;
- }
-
- // Ok, now the case of a generic function (or function on generic class), which
- // is loaded, and may thus have compiled instantiations.
- // It's impossible to get to any other kind of domain from the profiling API
- _ASSERTE(pBaseDomainFromModule->IsAppDomain() ||
- pBaseDomainFromModule->IsSharedDomain());
-
- if (pBaseDomainFromModule->IsSharedDomain())
- {
- // Iterate through all modules loaded into the shared domain, to
- // find all instantiations living in the shared domain. This will
- // include orphaned code (i.e., shared code used by ADs that have
- // all unloaded), which is good, because orphaned code could get
- // re-adopted if a new AD is created that can use that shared code
- hr = pReJitMgr->MarkAllInstantiationsForReJit(
- pSharedInfo,
- NULL, // NULL means to search SharedDomain instead of an AD
- pModule,
- rgMethodDefs[i],
- pJumpStampBatch,
- &errorRecords);
+ // activate the original version
+ *pILCodeVersion = ILCodeVersion(pModule, rgMethodDefs[i]);
}
else
{
- // Module is unshared, so just use the module's domain to find instantiations.
- hr = pReJitMgr->MarkAllInstantiationsForReJit(
- pSharedInfo,
- pBaseDomainFromModule->AsAppDomain(),
- pModule,
- rgMethodDefs[i],
- pJumpStampBatch,
- &errorRecords);
- }
- if (FAILED(hr))
- {
- _ASSERTE(hr == E_OUTOFMEMORY);
- return hr;
- }
- }
-
- // We want to iterate through all compilations of existing instantiations to
- // ensure they get marked for rejit. Note: There may be zero instantiations,
- // but we won't know until we try.
- if (pBaseDomainFromModule->IsSharedDomain())
- {
- // Iterate through all real domains, to find shared instantiations.
- AppDomainIterator appDomainIterator(TRUE);
- while (appDomainIterator.Next())
- {
- AppDomain * pAppDomain = appDomainIterator.GetDomain();
- if (pAppDomain->IsUnloading())
- {
- continue;
- }
- CrstHolder ch(&(pReJitMgr->m_crstTable));
- hr = pReJitMgr->MarkAllInstantiationsForReJit(
- pSharedInfo,
- pAppDomain,
- pModule,
- rgMethodDefs[i],
- pJumpStampBatch,
- &errorRecords);
+ // activate an unused or new IL version
+ hr = ReJitManager::BindILVersion(pCodeVersionManager, pModule, rgMethodDefs[i], pILCodeVersion);
if (FAILED(hr))
{
_ASSERTE(hr == E_OUTOFMEMORY);
@@ -823,18 +608,18 @@ HRESULT ReJitManager::RequestReJIT(
}
} // for (ULONG i = 0; i < cFunctions; i++)
- // For each rejit mgr, if there's work to do, suspend EE if needed,
- // enter the rejit mgr's crst, and do the batched work.
+ // For each code versioning mgr, if there's work to do, suspend EE if needed,
+ // enter the code versioning mgr's crst, and do the batched work.
BOOL fEESuspended = FALSE;
- SHash<ReJitManagerJumpStampBatchTraits>::Iterator beginIter = mgrToJumpStampBatch.Begin();
- SHash<ReJitManagerJumpStampBatchTraits>::Iterator endIter = mgrToJumpStampBatch.End();
- for (SHash<ReJitManagerJumpStampBatchTraits>::Iterator iter = beginIter; iter != endIter; iter++)
+ SHash<CodeActivationBatchTraits>::Iterator beginIter = mgrToCodeActivationBatch.Begin();
+ SHash<CodeActivationBatchTraits>::Iterator endIter = mgrToCodeActivationBatch.End();
+ for (SHash<CodeActivationBatchTraits>::Iterator iter = beginIter; iter != endIter; iter++)
{
- ReJitManagerJumpStampBatch * pJumpStampBatch = *iter;
- ReJitManager * pMgr = pJumpStampBatch->pReJitManager;
+ CodeActivationBatch * pCodeActivationBatch = *iter;
+ CodeVersionManager * pCodeVersionManager = pCodeActivationBatch->m_pCodeVersionManager;
- int cBatchedPreStubMethods = pJumpStampBatch->preStubMethods.Count();
- if (cBatchedPreStubMethods == 0)
+ int cMethodsToActivate = pCodeActivationBatch->m_methodsToActivate.Count();
+ if (cMethodsToActivate == 0)
{
continue;
}
@@ -842,14 +627,12 @@ HRESULT ReJitManager::RequestReJIT(
{
// As a potential future optimization we could speculatively try to update the jump stamps without
// suspending the runtime. That needs to be plumbed through BatchUpdateJumpStamps though.
-
ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_REJIT);
fEESuspended = TRUE;
}
- CrstHolder ch(&(pMgr->m_crstTable));
_ASSERTE(ThreadStore::HoldingThreadStore());
- hr = pMgr->BatchUpdateJumpStamps(&(pJumpStampBatch->undoMethods), &(pJumpStampBatch->preStubMethods), &errorRecords);
+ hr = pCodeVersionManager->SetActiveILCodeVersions(pCodeActivationBatch->m_methodsToActivate.Ptr(), pCodeActivationBatch->m_methodsToActivate.Count(), fEESuspended, &errorRecords);
if (FAILED(hr))
break;
}
@@ -867,702 +650,72 @@ HRESULT ReJitManager::RequestReJIT(
// Report any errors that were batched up
for (int i = 0; i < errorRecords.Count(); i++)
{
- ReportReJITError(&(errorRecords[i]));
- }
-
- INDEBUG(SharedDomain::GetDomain()->GetReJitManager()->Dump(
- "Finished RequestReJIT(). Dumping Shared ReJitManager\n"));
-
- // We got through processing everything, but profiler will need to see the individual ReJITError
- // callbacks to know what, if anything, failed.
- return S_OK;
-}
-
-//---------------------------------------------------------------------------------------
-//
-// Helper used by ReJitManager::RequestReJIT to jump stamp all the methods that were
-// specified by the caller. Also used by RejitManager::DoJumpStampForAssemblyIfNecessary
-// when rejitting a batch of generic method instantiations in a newly loaded NGEN assembly.
-//
-// This method is responsible for calling ReJITError on the profiler if anything goes
-// wrong.
-//
-// Arguments:
-// * pUndoMethods - array containing the methods that need the jump stamp removed
-// * pPreStubMethods - array containing the methods that need to be jump stamped to prestub
-// * pErrors - any errors will be appended to this array
-//
-// Returns:
-// S_OK - all methods are updated or added an error to the pErrors array
-// E_OUTOFMEMORY - some methods neither updated nor added an error to pErrors array
-// ReJitInfo state remains consistent
-//
-// Assumptions:
-// 1) Caller prevents contention by either:
-// a) Suspending the runtime
-// b) Ensuring all methods being updated haven't been published
-//
-HRESULT ReJitManager::BatchUpdateJumpStamps(CDynArray<ReJitInfo *> * pUndoMethods, CDynArray<ReJitInfo *> * pPreStubMethods, CDynArray<ReJitReportErrorWorkItem> * pErrors)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_PREEMPTIVE;
- PRECONDITION(CheckPointer(pUndoMethods));
- PRECONDITION(CheckPointer(pPreStubMethods));
- PRECONDITION(CheckPointer(pErrors));
- }
- CONTRACTL_END;
-
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
- HRESULT hr = S_OK;
-
- ReJitInfo ** ppInfoEnd = pUndoMethods->Ptr() + pUndoMethods->Count();
- for (ReJitInfo ** ppInfoCur = pUndoMethods->Ptr(); ppInfoCur < ppInfoEnd; ppInfoCur++)
- {
- // If we are undoing jumpstamps they have been published already
- // and our caller is holding the EE suspended
- _ASSERTE(ThreadStore::HoldingThreadStore());
- if (FAILED(hr = (*ppInfoCur)->UndoJumpStampNativeCode(TRUE)))
- {
- if (FAILED(hr = AddReJITError(*ppInfoCur, hr, pErrors)))
- {
- _ASSERTE(hr == E_OUTOFMEMORY);
- return hr;
- }
- }
- }
-
- ppInfoEnd = pPreStubMethods->Ptr() + pPreStubMethods->Count();
- for (ReJitInfo ** ppInfoCur = pPreStubMethods->Ptr(); ppInfoCur < ppInfoEnd; ppInfoCur++)
- {
- if (FAILED(hr = (*ppInfoCur)->JumpStampNativeCode()))
- {
- if (FAILED(hr = AddReJITError(*ppInfoCur, hr, pErrors)))
- {
- _ASSERTE(hr == E_OUTOFMEMORY);
- return hr;
- }
- }
- }
- return S_OK;
-}
-
-//---------------------------------------------------------------------------------------
-//
-// Helper used by ReJitManager::RequestReJIT to iterate through any generic
-// instantiations of a function in a given AppDomain, and to create the corresponding
-// ReJitInfos for those MethodDescs. This also adds corresponding entries to a temporary
-// dynamic array created by our caller for batching up the jump-stamping we'll need to do
-// later.
-//
-// This method is responsible for calling ReJITError on the profiler if anything goes
-// wrong.
-//
-// Arguments:
-// * pSharedForAllGenericInstantiations - The SharedReJitInfo for this mdMethodDef's
-// rejit request. This is what we must associate any newly-created ReJitInfo with.
-// * pAppDomainToSearch - AppDomain in which to search for generic instantiations
-// matching the specified methodDef. If it is NULL, then we'll search for all
-// MethodDescs whose metadata definition appears in a Module loaded into the
-// SharedDomain (regardless of which ADs--if any--are using those MethodDescs).
-// This captures the case of domain-neutral code that was in use by an AD that
-// unloaded, and may come into use again once a new AD loads that can use the
-// shared code.
-// * pModuleContainingMethodDef - Module* containing the specified methodDef token.
-// * methodDef - Token for the method for which we're searching for MethodDescs.
-// * pJumpStampBatch - Batch we're responsible for placing ReJitInfo's into, on which
-// the caller will update the jump stamps.
-// * pRejitErrors - Dynamic array we're responsible for adding error records into.
-// The caller will report them to the profiler outside the table lock
-//
-// Returns:
-// S_OK - all methods were either marked for rejit OR have appropriate error records
-// in pRejitErrors
-// E_OUTOFMEMORY - some methods weren't marked for rejit AND we didn't have enough
-// memory to create the error records
-//
-// Assumptions:
-// * This function should only be called on the ReJitManager that owns the (generic)
-// definition of methodDef
-// * If pModuleContainingMethodDef is loaded into the SharedDomain, then
-// pAppDomainToSearch may be NULL (to search all instantiations loaded shared),
-// or may be non-NULL (to search all instantiations loaded into
-// pAppDomainToSearch)
-// * If pModuleContainingMethodDef is not loaded domain-neutral, then
-// pAppDomainToSearch must be non-NULL (and, indeed, must be the very AD that
-// pModuleContainingMethodDef is loaded into).
-//
-
-HRESULT ReJitManager::MarkAllInstantiationsForReJit(
- SharedReJitInfo * pSharedForAllGenericInstantiations,
- AppDomain * pAppDomainToSearch,
- PTR_Module pModuleContainingMethodDef,
- mdMethodDef methodDef,
- ReJitManagerJumpStampBatch* pJumpStampBatch,
- CDynArray<ReJitReportErrorWorkItem> * pRejitErrors)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_PREEMPTIVE;
- CAN_TAKE_LOCK;
- PRECONDITION(CheckPointer(pSharedForAllGenericInstantiations));
- PRECONDITION(CheckPointer(pAppDomainToSearch, NULL_OK));
- PRECONDITION(CheckPointer(pModuleContainingMethodDef));
- PRECONDITION(CheckPointer(pJumpStampBatch));
- }
- CONTRACTL_END;
-
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
- _ASSERTE(methodDef != mdTokenNil);
- _ASSERTE(pJumpStampBatch->pReJitManager == this);
-
- HRESULT hr;
-
- BaseDomain * pDomainContainingGenericDefinition = pModuleContainingMethodDef->GetDomain();
-
-#ifdef _DEBUG
- // This function should only be called on the ReJitManager that owns the (generic)
- // definition of methodDef
- _ASSERTE(this == pDomainContainingGenericDefinition->GetReJitManager());
-
- // If the generic definition is not loaded domain-neutral, then all its
- // instantiations will also be non-domain-neutral and loaded into the same
- // domain as the generic definition. So the caller may only pass the
- // domain containing the generic definition as pAppDomainToSearch
- if (!pDomainContainingGenericDefinition->IsSharedDomain())
- {
- _ASSERTE(pDomainContainingGenericDefinition == pAppDomainToSearch);
- }
-#endif //_DEBUG
-
- // If pAppDomainToSearch is NULL, iterate through all existing
- // instantiations loaded into the SharedDomain. If pAppDomainToSearch is non-NULL,
- // iterate through all existing instantiations in pAppDomainToSearch, and only consider
- // instantiations in non-domain-neutral assemblies (as we already covered domain
- // neutral assemblies when we searched the SharedDomain).
- LoadedMethodDescIterator::AssemblyIterationMode mode = LoadedMethodDescIterator::kModeSharedDomainAssemblies;
- // these are the default flags which won't actually be used in shared mode other than
- // asserting they were specified with their default values
- AssemblyIterationFlags assemFlags = (AssemblyIterationFlags) (kIncludeLoaded | kIncludeExecution);
- ModuleIterationOption moduleFlags = (ModuleIterationOption) kModIterIncludeLoaded;
- if (pAppDomainToSearch != NULL)
- {
- mode = LoadedMethodDescIterator::kModeUnsharedADAssemblies;
- assemFlags = (AssemblyIterationFlags)(kIncludeAvailableToProfilers | kIncludeExecution);
- moduleFlags = (ModuleIterationOption)kModIterIncludeAvailableToProfilers;
- }
- LoadedMethodDescIterator it(
- pAppDomainToSearch,
- pModuleContainingMethodDef,
- methodDef,
- mode,
- assemFlags,
- moduleFlags);
- CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
- while (it.Next(pDomainAssembly.This()))
- {
- MethodDesc * pLoadedMD = it.Current();
-
- if (!pLoadedMD->HasNativeCode())
- {
- // Skip uninstantiated MethodDescs. The placeholder added by our caller
- // is sufficient to ensure they'll eventually be rejitted when they get
- // compiled.
- continue;
- }
-
- if (FAILED(hr = IsMethodSafeForReJit(pLoadedMD)))
- {
- if (FAILED(hr = AddReJITError(pModuleContainingMethodDef, methodDef, pLoadedMD, hr, pRejitErrors)))
- {
- _ASSERTE(hr == E_OUTOFMEMORY);
- return hr;
- }
- continue;
- }
-
-#ifdef _DEBUG
- if (!pDomainContainingGenericDefinition->IsSharedDomain())
- {
- // Method is defined outside of the shared domain, so its instantiation must
- // be defined in the AD we're iterating over (pAppDomainToSearch, which, as
- // asserted above, must be the same domain as the generic's definition)
- _ASSERTE(pLoadedMD->GetDomain() == pAppDomainToSearch);
- }
-#endif // _DEBUG
-
- // This will queue up the MethodDesc for rejitting and create all the
- // look-aside tables needed.
- SharedReJitInfo * pSharedUsed = NULL;
- hr = MarkForReJit(
- pLoadedMD,
- pSharedForAllGenericInstantiations,
- pJumpStampBatch,
- pRejitErrors,
- &pSharedUsed);
- if (FAILED(hr))
- {
- _ASSERTE(hr == E_OUTOFMEMORY);
- return hr;
- }
- }
-
- return S_OK;
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Helper used by ReJitManager::MarkAllInstantiationsForReJit and
-// ReJitManager::RequestReJIT to do the actual ReJitInfo allocation and
-// placement inside m_table. Note that callers don't use MarkForReJitHelper
-// directly. Instead, callers actually use the inlined overloaded wrappers
-// ReJitManager::MarkForReJit (one for placeholder (i.e., methodDef pre-rejit)
-// ReJitInfos and one for regular (i.e., MethodDesc) ReJitInfos). When the
-// overloaded MarkForReJit wrappers call this, they ensure that either pMD is
-// valid XOR (pModule, methodDef) is valid.
-//
-// Arguments:
-// * pMD - MethodDesc for which to find / create ReJitInfo. Only used if
-// we're creating a regular ReJitInfo
-// * pModule - Module for which to find / create ReJitInfo. Only used if
-// we're creating a placeholder ReJitInfo
-// * methodDef - methodDef for which to find / create ReJitInfo. Only used
-// if we're creating a placeholder ReJitInfo
-// * pSharedToReuse - SharedReJitInfo to associate any newly created
-// ReJitInfo with. If NULL, we'll create a new one.
-// * pJumpStampBatch - a batch of methods that need to have jump stamps added
-// or removed. This method will add new ReJitInfos to the batch as needed.
-// * pRejitErrors - An array of rejit errors that this call will append to
-// if there is an error marking
-// * ppSharedUsed - [out]: SharedReJitInfo used for this request. If
-// pSharedToReuse is non-NULL, *ppSharedUsed == pSharedToReuse. Else,
-// *ppSharedUsed is the SharedReJitInfo newly-created to associate with
-// the ReJitInfo used for this request.
-//
-// Return Value:
-// * S_OK: Successfully created a new ReJitInfo to manage this request
-// * S_FALSE: An existing ReJitInfo was already available to manage this
-// request, so we didn't need to create a new one.
-// * E_OUTOFMEMORY
-// * Else, a failure HRESULT indicating what went wrong.
-//
-
-HRESULT ReJitManager::MarkForReJitHelper(
- PTR_MethodDesc pMD,
- PTR_Module pModule,
- mdMethodDef methodDef,
- SharedReJitInfo * pSharedToReuse,
- ReJitManagerJumpStampBatch* pJumpStampBatch,
- CDynArray<ReJitReportErrorWorkItem> * pRejitErrors,
- /* out */ SharedReJitInfo ** ppSharedUsed)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_PREEMPTIVE;
- CAN_TAKE_LOCK;
- PRECONDITION(CheckPointer(pMD, NULL_OK));
- PRECONDITION(CheckPointer(pModule, NULL_OK));
- PRECONDITION(CheckPointer(pJumpStampBatch));
- PRECONDITION(CheckPointer(pRejitErrors));
- PRECONDITION(CheckPointer(ppSharedUsed, NULL_OK));
- }
- CONTRACTL_END;
-
- CrstHolder ch(&m_crstTable);
-
- // Either pMD is valid, xor (pModule,methodDef) is valid
- _ASSERTE(
- ((pMD != NULL) && (pModule == NULL) && (methodDef == mdTokenNil)) ||
- ((pMD == NULL) && (pModule != NULL) && (methodDef != mdTokenNil)));
- _ASSERTE(pJumpStampBatch->pReJitManager == this);
-
- if (ppSharedUsed != NULL)
- *ppSharedUsed = NULL;
- HRESULT hr = S_OK;
-
- // Check if there was there a previous rejit request for pMD
-
- ReJitInfoHash::KeyIterator beginIter(&m_table, TRUE /* begin */);
- ReJitInfoHash::KeyIterator endIter(&m_table, FALSE /* begin */);
-
- if (pMD != NULL)
- {
- beginIter = GetBeginIterator(pMD);
- endIter = GetEndIterator(pMD);
- }
- else
- {
- beginIter = GetBeginIterator(pModule, methodDef);
- endIter = GetEndIterator(pModule, methodDef);
- }
-
- for (ReJitInfoHash::KeyIterator iter = beginIter;
- iter != endIter;
- iter++)
- {
- ReJitInfo * pInfo = *iter;
- _ASSERTE(pInfo->m_pShared != NULL);
-
-#ifdef _DEBUG
- if (pMD != NULL)
- {
- _ASSERTE(pInfo->GetMethodDesc() == pMD);
- }
- else
- {
- Module * pModuleTest = NULL;
- mdMethodDef methodDefTest = mdTokenNil;
- pInfo->GetModuleAndToken(&pModuleTest, &methodDefTest);
- _ASSERTE((pModule == pModuleTest) && (methodDef == methodDefTest));
- }
-#endif //_DEBUG
-
- SharedReJitInfo * pShared = pInfo->m_pShared;
-
- switch (pShared->GetState())
- {
- case SharedReJitInfo::kStateRequested:
- // We can 'reuse' this instance because the profiler doesn't know about
- // it yet. (This likely happened because a profiler called RequestReJIT
- // twice in a row, without us having a chance to jmp-stamp the code yet OR
- // while iterating through instantiations of a generic, the iterator found
- // duplicate entries for the same instantiation.)
- _ASSERTE(pShared->m_pbIL == NULL);
- _ASSERTE(pInfo->m_pCode == NULL);
-
- if (ppSharedUsed != NULL)
- *ppSharedUsed = pShared;
-
- INDEBUG(AssertRestOfEntriesAreReverted(iter, endIter));
- return S_FALSE;
-
- case SharedReJitInfo::kStateGettingReJITParameters:
- case SharedReJitInfo::kStateActive:
+ if (rgHrStatuses != NULL)
{
- // Profiler has already requested to rejit this guy, AND we've already
- // at least started getting the rejit parameters from the profiler. We need to revert this
- // instance (this will put back the original code)
-
- INDEBUG(AssertRestOfEntriesAreReverted(iter, endIter));
- hr = Revert(pShared, pJumpStampBatch);
- if (FAILED(hr))
+ for (DWORD j = 0; j < cFunctions; j++)
{
- _ASSERTE(hr == E_OUTOFMEMORY);
- return hr;
+ if (rgMethodDefs[j] == errorRecords[i].methodDef &&
+ reinterpret_cast<Module*>(rgModuleIDs[j]) == errorRecords[i].pModule)
+ {
+ rgHrStatuses[j] = errorRecords[i].hrStatus;
+ }
}
- _ASSERTE(pShared->GetState() == SharedReJitInfo::kStateReverted);
-
- // No need to continue looping. Break out of loop to create a new
- // ReJitInfo to service the request.
- goto EXIT_LOOP;
- }
- case SharedReJitInfo::kStateReverted:
- // just ignore this guy
- continue;
-
- default:
- UNREACHABLE();
- }
- }
-EXIT_LOOP:
-
- // Either there was no ReJitInfo yet for this MethodDesc OR whatever we've found
- // couldn't be reused (and needed to be reverted). Create a new ReJitInfo to return
- // to the caller.
- //
- // If the caller gave us a pMD that is a new generic instantiation, then the caller
- // may also have provided a pSharedToReuse for the generic. Use that instead of
- // creating a new one.
-
- SharedReJitInfo * pShared = NULL;
-
- if (pSharedToReuse != NULL)
- {
- pShared = pSharedToReuse;
- }
- else
- {
- PTR_LoaderHeap pHeap = NULL;
- if (pModule != NULL)
- {
- pHeap = pModule->GetLoaderAllocator()->GetLowFrequencyHeap();
}
else
{
- pHeap = pMD->GetLoaderAllocator()->GetLowFrequencyHeap();
- }
- pShared = new (pHeap, nothrow) SharedReJitInfo;
- if (pShared == NULL)
- {
- return E_OUTOFMEMORY;
- }
- }
-
- _ASSERTE(pShared != NULL);
-
- // ReJitInfos with MethodDesc's need to be jump-stamped,
- // ReJitInfos with Module/MethodDef are placeholders that don't need a stamp
- ReJitInfo * pInfo = NULL;
- ReJitInfo ** ppInfo = &pInfo;
- if (pMD != NULL)
- {
- ppInfo = pJumpStampBatch->preStubMethods.Append();
- if (ppInfo == NULL)
- {
- return E_OUTOFMEMORY;
+ ReportReJITError(&(errorRecords[i]));
}
+
}
- hr = AddNewReJitInfo(pMD, pModule, methodDef, pShared, ppInfo);
- if (FAILED(hr))
- {
- // NOTE: We could consider using an AllocMemTracker or AllocMemHolder
- // here to back out the allocation of pShared, but it probably
- // wouldn't make much of a difference. We'll only get here if we ran
- // out of memory allocating the pInfo, so our memory has already been
- // blown. We can't cause much leaking due to this error path.
- _ASSERTE(hr == E_OUTOFMEMORY);
- return hr;
- }
-
- _ASSERTE(*ppInfo != NULL);
-
- if (ppSharedUsed != NULL)
- *ppSharedUsed = pShared;
+    // We got through processing everything, but the profiler will need to see the individual ReJITError
+ // callbacks to know what, if anything, failed.
return S_OK;
}
-//---------------------------------------------------------------------------------------
-//
-// Helper used by the above helpers (and also during jump-stamping) to
-// allocate and store a new ReJitInfo.
-//
-// Arguments:
-// * pMD - MethodDesc for which to create ReJitInfo. Only used if we're
-// creating a regular ReJitInfo
-// * pModule - Module for which create ReJitInfo. Only used if we're
-// creating a placeholder ReJitInfo
-// * methodDef - methodDef for which to create ReJitInfo. Only used if
-// we're creating a placeholder ReJitInfo
-// * pShared - SharedReJitInfo to associate the newly created ReJitInfo
-// with.
-// * ppInfo - [out]: ReJitInfo created
-//
-// Return Value:
-// * S_OK: ReJitInfo successfully created & stored.
-// * Else, failure indicating the problem. Currently only E_OUTOFMEMORY.
-//
-// Assumptions:
-// * Caller should be holding this ReJitManager's table crst.
-//
-
-HRESULT ReJitManager::AddNewReJitInfo(
- PTR_MethodDesc pMD,
+// static
+HRESULT ReJitManager::BindILVersion(
+ CodeVersionManager* pCodeVersionManager,
PTR_Module pModule,
mdMethodDef methodDef,
- SharedReJitInfo * pShared,
- ReJitInfo ** ppInfo)
+ ILCodeVersion *pILCodeVersion)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
- MODE_ANY;
- CAN_TAKE_LOCK;
- PRECONDITION(CheckPointer(pMD, NULL_OK));
- PRECONDITION(CheckPointer(pModule, NULL_OK));
- PRECONDITION(CheckPointer(pShared));
- PRECONDITION(CheckPointer(ppInfo));
- }
- CONTRACTL_END;
-
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
- _ASSERTE(pShared->GetState() != SharedReJitInfo::kStateReverted);
-
- // Either pMD is valid, xor (pModule,methodDef) is valid
- _ASSERTE(
- ((pMD != NULL) && (pModule == NULL) && (methodDef == mdTokenNil)) ||
- ((pMD == NULL) && (pModule != NULL) && (methodDef != mdTokenNil)));
-
- HRESULT hr;
- ReJitInfo * pInfo = NULL;
-
- if (pMD != NULL)
- {
- PTR_LoaderHeap pHeap = pMD->GetLoaderAllocator()->GetLowFrequencyHeap();
- pInfo = new (pHeap, nothrow) ReJitInfo(pMD, pShared);
- }
- else
- {
- PTR_LoaderHeap pHeap = pModule->GetLoaderAllocator()->GetLowFrequencyHeap();
- pInfo = new (pHeap, nothrow) ReJitInfo(pModule, methodDef, pShared);
- }
- if (pInfo == NULL)
- {
- return E_OUTOFMEMORY;
- }
-
- hr = S_OK;
- EX_TRY
- {
- // This guy throws when out of memory, but remains internally
- // consistent (without adding the new element)
- m_table.Add(pInfo);
- }
- EX_CATCH_HRESULT(hr);
-
- _ASSERT(hr == S_OK || hr == E_OUTOFMEMORY);
- if (FAILED(hr))
- {
- pInfo = NULL;
- return hr;
- }
-
- *ppInfo = pInfo;
- return S_OK;
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Given a MethodDesc, call ReJitInfo::JumpStampNativeCode to stamp the top of its
-// originally-jitted-code with a jmp that goes to the prestub. This is called by the
-// prestub worker after jitting the original code of a function (i.e., the "pre-rejit"
-// scenario). In this case, the EE is not suspended. But that's ok, because the PCODE has
-// not yet been published to the MethodDesc, and no thread can be executing inside the
-// originally JITted function yet.
-//
-// Arguments:
-// * pMD - MethodDesc to jmp-stamp
-// * pCode - Top of the code that was just jitted (using original IL).
-//
-//
-// Return value:
-// * S_OK: Either we successfully did the jmp-stamp, or we didn't have to (e.g., there
-// was no outstanding pre-rejit request for this MethodDesc, or a racing thread
-// took care of it for us).
-// * Else, HRESULT indicating failure.
-
-// Assumptions:
-// The caller has not yet published pCode to the MethodDesc, so no threads can be
-// executing inside pMD's code yet. Thus, we don't need to suspend the runtime while
-// applying the jump-stamp like we usually do for rejit requests that are made after
-// a function has been JITted.
-//
-
-HRESULT ReJitManager::DoJumpStampIfNecessary(MethodDesc* pMD, PCODE pCode)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
+ MODE_PREEMPTIVE;
CAN_TAKE_LOCK;
- PRECONDITION(CheckPointer(pMD));
- PRECONDITION(pCode != NULL);
+ PRECONDITION(CheckPointer(pCodeVersionManager));
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(CheckPointer(pILCodeVersion));
}
CONTRACTL_END;
- HRESULT hr;
-
- _ASSERTE(IsTableCrstOwnedByCurrentThread());
-
- ReJitInfo * pInfoToJumpStamp = NULL;
-
- // First, try looking up ReJitInfo by MethodDesc. A "regular" MethodDesc-based
- // ReJitInfo already exists for "case 1" (see comment above
- // code:ReJitInfo::JumpStampNativeCode), and could even exist for "case 2"
- // (pre-rejit), if either:
- // * The pre-rejit was requested after the MD had already been loaded (though
- // before it had been jitted) OR
- // * there was a race to JIT the original code for the MD, and another thread got
- // here before us and already added the ReJitInfo for that MD.
-
- ReJitInfoHash::KeyIterator beginIter = GetBeginIterator(pMD);
- ReJitInfoHash::KeyIterator endIter = GetEndIterator(pMD);
-
- pInfoToJumpStamp = FindPreReJittedReJitInfo(beginIter, endIter);
- if (pInfoToJumpStamp != NULL)
- {
- _ASSERTE(pInfoToJumpStamp->GetMethodDesc() == pMD);
- // does it need to be jump-stamped?
- if (pInfoToJumpStamp->GetState() != ReJitInfo::kJumpNone)
- {
- return S_OK;
- }
- else
- {
- return pInfoToJumpStamp->JumpStampNativeCode(pCode);
- }
- }
-
- // In this case, try looking up by module / metadata token. This is the case where
- // the pre-rejit request occurred before the MD was loaded.
-
- Module * pModule = pMD->GetModule();
- _ASSERTE(pModule != NULL);
- mdMethodDef methodDef = pMD->GetMemberDef();
-
- beginIter = GetBeginIterator(pModule, methodDef);
- endIter = GetEndIterator(pModule, methodDef);
- ReJitInfo * pInfoPlaceholder = NULL;
-
- pInfoPlaceholder = FindPreReJittedReJitInfo(beginIter, endIter);
- if (pInfoPlaceholder == NULL)
- {
- // No jump stamping to do.
- return S_OK;
- }
+ _ASSERTE(pCodeVersionManager->LockOwnedByCurrentThread());
+ _ASSERTE((pModule != NULL) && (methodDef != mdTokenNil));
- // The placeholder may already have a rejit info for this MD, in which
- // case we don't need to do any additional work
- for (ReJitInfo * pInfo = pInfoPlaceholder->m_pShared->GetMethods(); pInfo != NULL; pInfo = pInfo->m_pNext)
- {
- if ((pInfo->GetKey().m_keyType == ReJitInfo::Key::kMethodDesc) &&
- (pInfo->GetMethodDesc() == pMD))
- {
- // Any rejit info we find should already be jumpstamped
- _ASSERTE(pInfo->GetState() != ReJitInfo::kJumpNone);
- return S_OK;
- }
- }
+    // Check if there was a previous rejit request for this method that hasn't been exposed back
+ // to the profiler yet
+ ILCodeVersion ilCodeVersion = pCodeVersionManager->GetActiveILCodeVersion(pModule, methodDef);
-#ifdef _DEBUG
+ if (ilCodeVersion.GetRejitState() == ILCodeVersion::kStateRequested)
{
- Module * pModuleTest = NULL;
- mdMethodDef methodDefTest = mdTokenNil;
- INDEBUG(pInfoPlaceholder->GetModuleAndToken(&pModuleTest, &methodDefTest));
- _ASSERTE((pModule == pModuleTest) && (methodDef == methodDefTest));
- }
-#endif //_DEBUG
+ // We can 'reuse' this instance because the profiler doesn't know about
+ // it yet. (This likely happened because a profiler called RequestReJIT
+ // twice in a row, without us having a chance to jmp-stamp the code yet OR
+ // while iterating through instantiations of a generic, the iterator found
+ // duplicate entries for the same instantiation.)
+ _ASSERTE(ilCodeVersion.GetILNoThrow() == NULL);
- // We have finished JITting the original code for a function that had been
- // "pre-rejitted" (i.e., requested to be rejitted before it was first compiled). So
- // now is the first time where we know the MethodDesc of the request.
- if (FAILED(hr = IsMethodSafeForReJit(pMD)))
- {
- // No jump stamping to do.
- return hr;
+ *pILCodeVersion = ilCodeVersion;
+ return S_FALSE;
}
- // Create the ReJitInfo associated with the MethodDesc now (pInfoToJumpStamp), and
- // jump-stamp the original code.
- pInfoToJumpStamp = NULL;
- hr = AddNewReJitInfo(pMD, NULL /*pModule*/, NULL /*methodDef*/, pInfoPlaceholder->m_pShared, &pInfoToJumpStamp);
- if (FAILED(hr))
- {
- return hr;
- }
-
- _ASSERTE(pInfoToJumpStamp != NULL);
- return pInfoToJumpStamp->JumpStampNativeCode(pCode);
+ // Either there was no ILCodeVersion yet for this MethodDesc OR whatever we've found
+ // couldn't be reused (and needed to be reverted). Create a new ILCodeVersion to return
+ // to the caller.
+ return pCodeVersionManager->AddILCodeVersion(pModule, methodDef, InterlockedIncrement(reinterpret_cast<LONG*>(&s_GlobalReJitId)), pILCodeVersion);
}
//---------------------------------------------------------------------------------------
@@ -1601,395 +754,41 @@ HRESULT ReJitManager::RequestRevert(
}
CONTRACTL_END;
- // Serialize all RequestReJIT() and Revert() calls against each other (even across AppDomains)
- CrstHolder ch(&(s_csGlobalRequest));
-
- // Request at least 1 method to revert!
- _ASSERTE ((cFunctions != 0) && (rgModuleIDs != NULL) && (rgMethodDefs != NULL));
-
- ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_REJIT);
- for (ULONG i = 0; i < cFunctions; i++)
- {
- HRESULT hr = E_UNEXPECTED;
- Module * pModule = reinterpret_cast< Module * >(rgModuleIDs[i]);
- if (pModule == NULL || TypeFromToken(rgMethodDefs[i]) != mdtMethodDef)
- {
- hr = E_INVALIDARG;
- }
- else if (pModule->IsBeingUnloaded())
- {
- hr = CORPROF_E_DATAINCOMPLETE;
- }
- else if (pModule->IsReflection())
- {
- hr = CORPROF_E_MODULE_IS_DYNAMIC;
- }
- else
- {
- hr = pModule->GetReJitManager()->RequestRevertByToken(pModule, rgMethodDefs[i]);
- }
-
- if (rgHrStatuses != NULL)
- {
- rgHrStatuses[i] = hr;
- }
- }
-
- ThreadSuspend::RestartEE(FALSE /* bFinishedGC */, TRUE /* SuspendSucceded */);
-
- return S_OK;
+ return UpdateActiveILVersions(cFunctions, rgModuleIDs, rgMethodDefs, rgHrStatuses, TRUE);
}
-//---------------------------------------------------------------------------------------
-//
-// Called by AppDomain::Exit() to notify the SharedDomain's ReJitManager that this
-// AppDomain is exiting. The SharedDomain's ReJitManager will then remove any
-// ReJitInfos relating to MDs owned by AppDomain. This is how we remove
-// non-domain-neutral instantiations of domain-neutral generics from the SharedDomain's
-// ReJitManager.
-//
-// Arguments:
-// pAppDomain - AppDomain that is exiting.
-//
-
// static
-void ReJitManager::OnAppDomainExit(AppDomain * pAppDomain)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- CAN_TAKE_LOCK;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- // All ReJitInfos and SharedReJitInfos for this AD's ReJitManager automatically get
- // cleaned up as they're allocated on the AD's loader heap.
-
- // We explicitly clean up the SHash here, as its entries get allocated using regular
- // "new"
- pAppDomain->GetReJitManager()->m_table.RemoveAll();
-
- // We need to ensure that any MethodDescs from pAppDomain that are stored on the
- // SharedDomain's ReJitManager get removed from the SharedDomain's ReJitManager's
- // hash table, and from the linked lists tied to their owning SharedReJitInfo. (This
- // covers the case of non-domain-neutral instantiations of domain-neutral generics.)
- SharedDomain::GetDomain()->GetReJitManager()->RemoveReJitInfosFromDomain(pAppDomain);
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Small helper to determine whether a given (possibly instantiated generic) MethodDesc
-// is safe to rejit. If not, this function is responsible for calling into the
-// profiler's ReJITError()
-//
-// Arguments:
-// pMD - MethodDesc to test
-// Return Value:
-// S_OK iff pMD is safe to rejit
-// CORPROF_E_FUNCTION_IS_COLLECTIBLE - function can't be rejitted because it is collectible
-//
-
-// static
-HRESULT ReJitManager::IsMethodSafeForReJit(PTR_MethodDesc pMD)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- CAN_TAKE_LOCK;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- _ASSERTE(pMD != NULL);
-
- // Weird, non-user functions were already weeded out in RequestReJIT(), and will
- // also never be passed to us by the prestub worker (for the pre-rejit case).
- _ASSERTE(pMD->IsIL());
-
- // Any MethodDescs that could be collected are not currently supported. Although we
- // rule out all Ref.Emit modules in RequestReJIT(), there can still exist types defined
- // in a non-reflection module and instantiated into a collectible assembly
- // (e.g., List<MyCollectibleStruct>). In the future we may lift this
- // restriction by updating the ReJitManager when the collectible assemblies
- // owning the instantiations get collected.
- if (pMD->GetLoaderAllocator()->IsCollectible())
- {
- return CORPROF_E_FUNCTION_IS_COLLECTIBLE;
- }
-
- return S_OK;
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Simple wrapper around GetCurrentReJitWorker. See
-// code:ReJitManager::GetCurrentReJitWorker for information about parameters, return
-// values, etc.
-
-// static
-DWORD ReJitManager::GetCurrentReJitFlags(PTR_MethodDesc pMD)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_PREEMPTIVE;
- PRECONDITION(CheckPointer(pMD));
- }
- CONTRACTL_END;
-
- return pMD->GetReJitManager()->GetCurrentReJitFlagsWorker(pMD);
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Given a methodDef token, finds the corresponding ReJitInfo, and asks the
-// ReJitInfo to perform a revert.
-//
-// Arguments:
-// * pModule - Module to revert
-// * methodDef - methodDef token to revert
-//
-// Return Value:
-// HRESULT indicating success or failure. If the method was never
-// rejitted in the first place, this method returns a special error code
-// (CORPROF_E_ACTIVE_REJIT_REQUEST_NOT_FOUND).
-// E_OUTOFMEMORY
-//
-
-#ifdef _MSC_VER
-#pragma warning(push)
-#pragma warning(disable:4702) // Disable bogus unreachable code warning
-#endif // _MSC_VER
-HRESULT ReJitManager::RequestRevertByToken(PTR_Module pModule, mdMethodDef methodDef)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- CAN_TAKE_LOCK;
- MODE_PREEMPTIVE;
- }
- CONTRACTL_END;
-
- _ASSERTE(ThreadStore::HoldingThreadStore());
- CrstHolder ch(&m_crstTable);
-
- _ASSERTE(pModule != NULL);
- _ASSERTE(methodDef != mdTokenNil);
-
- ReJitInfo * pInfo = NULL;
- MethodDesc * pMD = NULL;
-
- pInfo = FindNonRevertedReJitInfo(pModule, methodDef);
- if (pInfo == NULL)
- {
- pMD = pModule->LookupMethodDef(methodDef);
- pInfo = FindNonRevertedReJitInfo(pMD);
- if (pInfo == NULL)
- return CORPROF_E_ACTIVE_REJIT_REQUEST_NOT_FOUND;
- }
-
- _ASSERTE (pInfo != NULL);
- _ASSERTE (pInfo->m_pShared != NULL);
- _ASSERTE (pInfo->m_pShared->GetState() != SharedReJitInfo::kStateReverted);
- ReJitManagerJumpStampBatch batch(this);
- HRESULT hr = Revert(pInfo->m_pShared, &batch);
- if (FAILED(hr))
- {
- _ASSERTE(hr == E_OUTOFMEMORY);
- return hr;
- }
- CDynArray<ReJitReportErrorWorkItem> errorRecords;
- hr = BatchUpdateJumpStamps(&(batch.undoMethods), &(batch.preStubMethods), &errorRecords);
- if (FAILED(hr))
- {
- _ASSERTE(hr == E_OUTOFMEMORY);
- return hr;
- }
-
- // If there were any errors, return the first one. This matches previous error handling
- // behavior that only returned the first error encountered within Revert().
- for (int i = 0; i < errorRecords.Count(); i++)
- {
- _ASSERTE(FAILED(errorRecords[i].hrStatus));
- return errorRecords[i].hrStatus;
- }
- return S_OK;
-}
-#ifdef _MSC_VER
-#pragma warning(pop)
-#endif // _MSC_VER
-
-
-
-//---------------------------------------------------------------------------------------
-//
-// Called by the prestub worker, this function decides if the MethodDesc needs to be
-// rejitted, and if so, this will call the profiler to get the rejit parameters (if they
-// are not yet stored), and then perform the actual re-JIT (by calling, indirectly,
-// UnsafeJitFunction).
-//
-// In order to allow the re-JIT to occur outside of any locks, the following sequence is
-// performed:
-//
-// * Enter this ReJitManager's table crst
-// * Find the single ReJitInfo (if any) in the table matching the input pMD. This
-// represents the outstanding rejit request against thie pMD
-// * If necessary, ask profiler for IL & codegen flags (by calling
-// GetReJITParameters()), thus transitioning the corresponding SharedReJitInfo
-// state kStateRequested-->kStateActive
-// * Exit this ReJitManager's table crst
-// * (following steps occur when DoReJitIfNecessary() calls DoReJit())
-// * Call profiler's ReJitCompilationStarted()
-// * Call UnsafeJitFunction with the IL / codegen flags provided by profiler, as stored
-// on the SharedReJitInfo. Note that if another Rejit request came in, then we would
-// create new SharedReJitInfo & ReJitInfo structures to track it, rather than
-// modifying the ReJitInfo / SharedReJitInfo we found above. So the ReJitInfo we're
-// using here (outside the lock), is "fixed" in the sense that its IL / codegen flags
-// will not change.
-// * (below is where we handle any races that might have occurred between threads
-// simultaneously rejitting this function)
-// * Enter this ReJitManager's table crst
-// * Check to see if another thread has already published the rejitted PCODE to
-// ReJitInfo::m_pCode. If so, bail.
-// * If we're the winner, publish our rejitted PCODE to ReJitInfo::m_pCode...
-// * ...and update the jump-stamp at the top of the originally JITted code so that it
-// now points to our rejitted code (instead of the prestub)
-// * Exit this ReJitManager's table crst
-// * Call profiler's ReJitCompilationFinished()
-// * Fire relevant ETW events
-//
-// Arguments:
-// pMD - MethodDesc to decide whether to rejit
-//
-// Return Value:
-// * If a rejit was performed, the PCODE of the generated code.
-// * If the ReJitManager changed its mind and chose not to do a rejit (e.g., a
-// revert request raced with this rejit request, and the revert won), just
-// return the PCODE of the originally JITted code (pMD->GetNativeCode())
-// * Else, NULL (which means the ReJitManager doesn't know or care about this
-// MethodDesc)
-//
-
-PCODE ReJitManager::DoReJitIfNecessaryWorker(PTR_MethodDesc pMD)
+HRESULT ReJitManager::ConfigureILCodeVersion(ILCodeVersion ilCodeVersion)
{
STANDARD_VM_CONTRACT;
- _ASSERTE(!IsTableCrstOwnedByCurrentThread());
+ CodeVersionManager* pCodeVersionManager = ilCodeVersion.GetModule()->GetCodeVersionManager();
+ _ASSERTE(!pCodeVersionManager->LockOwnedByCurrentThread());
- // Fast-path: If the rejit map is empty, no need to look up anything. Do this outside
- // of a lock to impact our caller (the prestub worker) as little as possible. If the
- // map is nonempty, we'll acquire the lock at that point and do the lookup for real.
- if (m_table.GetCount() == 0)
- {
- return NULL;
- }
HRESULT hr = S_OK;
- ReJitInfo * pInfoToRejit = NULL;
- Module* pModule = NULL;
- mdMethodDef methodDef = mdTokenNil;
+ Module* pModule = ilCodeVersion.GetModule();
+ mdMethodDef methodDef = ilCodeVersion.GetMethodDef();
BOOL fNeedsParameters = FALSE;
BOOL fWaitForParameters = FALSE;
{
- // Serialize access to the rejit table. Though once we find the ReJitInfo we want,
- // exit the Crst so we can ReJIT the method without holding a lock.
- CrstHolder ch(&m_crstTable);
-
- ReJitInfoHash::KeyIterator iter = GetBeginIterator(pMD);
- ReJitInfoHash::KeyIterator end = GetEndIterator(pMD);
-
- if (iter == end)
- {
- // No rejit actions necessary
- return NULL;
- }
-
-
- for (; iter != end; iter++)
+ // Serialize access to the rejit state
+ CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
+ switch (ilCodeVersion.GetRejitState())
{
- ReJitInfo * pInfo = *iter;
- _ASSERTE(pInfo->GetMethodDesc() == pMD);
- _ASSERTE(pInfo->m_pShared != NULL);
- SharedReJitInfo * pShared = pInfo->m_pShared;
-
- switch (pShared->GetState())
- {
- case SharedReJitInfo::kStateRequested:
- if (pInfo->GetState() == ReJitInfo::kJumpNone)
- {
- // We haven't actually suspended threads and jump-stamped the
- // method's prolog so just ignore this guy
- INDEBUG(AssertRestOfEntriesAreReverted(iter, end));
- return NULL;
- }
- // When the SharedReJitInfo is still in the requested state, we haven't
- // gathered IL & codegen flags from the profiler yet. So, we can't be
- // pointing to rejitted code already. So we must be pointing to the prestub
- _ASSERTE(pInfo->GetState() == ReJitInfo::kJumpToPrestub);
-
- pInfo->GetModuleAndTokenRegardlessOfKeyType(&pModule, &methodDef);
- pShared->m_dwInternalFlags &= ~SharedReJitInfo::kStateMask;
- pShared->m_dwInternalFlags |= SharedReJitInfo::kStateGettingReJITParameters;
- pInfoToRejit = pInfo;
- fNeedsParameters = TRUE;
- break;
-
- case SharedReJitInfo::kStateGettingReJITParameters:
- if (pInfo->GetState() == ReJitInfo::kJumpNone)
- {
- // We haven't actually suspended threads and jump-stamped the
- // method's prolog so just ignore this guy
- INDEBUG(AssertRestOfEntriesAreReverted(iter, end));
- return NULL;
- }
- pInfoToRejit = pInfo;
- fWaitForParameters = TRUE;
- break;
-
- case SharedReJitInfo::kStateActive:
- INDEBUG(AssertRestOfEntriesAreReverted(iter, end));
- if (pInfo->GetState() == ReJitInfo::kJumpNone)
- {
- // We haven't actually suspended threads and jump-stamped the
- // method's prolog so just ignore this guy
- return NULL;
- }
- if (pInfo->GetState() == ReJitInfo::kJumpToRejittedCode)
- {
- // Looks like another thread has beat us in a race to rejit, so ignore.
- return NULL;
- }
-
- // Found a ReJitInfo to actually rejit.
- _ASSERTE(pInfo->GetState() == ReJitInfo::kJumpToPrestub);
- pInfoToRejit = pInfo;
- goto ExitLoop;
+ case ILCodeVersion::kStateRequested:
+ ilCodeVersion.SetRejitState(ILCodeVersion::kStateGettingReJITParameters);
+ fNeedsParameters = TRUE;
+ break;
- case SharedReJitInfo::kStateReverted:
- // just ignore this guy
- continue;
+ case ILCodeVersion::kStateGettingReJITParameters:
+ fWaitForParameters = TRUE;
+ break;
- default:
- UNREACHABLE();
- }
+ default:
+ return S_OK;
}
- ExitLoop:
- ;
- }
-
- if (pInfoToRejit == NULL)
- {
- // Didn't find the requested MD to rejit.
- return NULL;
}
if (fNeedsParameters)
@@ -2021,33 +820,39 @@ PCODE ReJitManager::DoReJitIfNecessaryWorker(PTR_MethodDesc pMD)
if (FAILED(hr))
{
{
- CrstHolder ch(&m_crstTable);
- if (pInfoToRejit->m_pShared->m_dwInternalFlags == SharedReJitInfo::kStateGettingReJITParameters)
+            // Historically on failure we would revert to the kRequested state and fall back
+ // to the initial code gen. The next time the method ran it would try again.
+ //
+ // Preserving that behavior is possible, but a bit awkward now that we have
+ // Precode swapping as well. Instead of doing that I am acting as if GetReJITParameters
+ // had succeeded, using the original IL, no jit flags, and no modified IL mapping.
+ // This is similar to a fallback except the profiler won't get any further attempts
+ // to provide the parameters correctly. If the profiler wants another attempt it would
+            // need to call RequestReJIT again.
+ CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
+ if (ilCodeVersion.GetRejitState() == ILCodeVersion::kStateGettingReJITParameters)
{
- pInfoToRejit->m_pShared->m_dwInternalFlags &= ~SharedReJitInfo::kStateMask;
- pInfoToRejit->m_pShared->m_dwInternalFlags |= SharedReJitInfo::kStateRequested;
+ ilCodeVersion.SetRejitState(ILCodeVersion::kStateActive);
+ ilCodeVersion.SetIL(ILCodeVersion(pModule, methodDef).GetIL());
}
}
- ReportReJITError(pModule, methodDef, pMD, hr);
- return NULL;
+ ReportReJITError(pModule, methodDef, pModule->LookupMethodDef(methodDef), hr);
+ return S_OK;
}
-
+ else
{
- CrstHolder ch(&m_crstTable);
- if (pInfoToRejit->m_pShared->m_dwInternalFlags == SharedReJitInfo::kStateGettingReJITParameters)
+ CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
+ if (ilCodeVersion.GetRejitState() == ILCodeVersion::kStateGettingReJITParameters)
{
// Inside the above call to ICorProfilerCallback4::GetReJITParameters, the profiler
// will have used the specified pFuncControl to provide its IL and codegen flags.
// So now we transfer it out to the SharedReJitInfo.
- pInfoToRejit->m_pShared->m_dwCodegenFlags = pFuncControl->GetCodegenFlags();
- pInfoToRejit->m_pShared->m_pbIL = pFuncControl->GetIL();
- // pShared is now the owner of the memory for the IL buffer
- pInfoToRejit->m_pShared->m_instrumentedILMap.SetMappingInfo(pFuncControl->GetInstrumentedMapEntryCount(),
+ ilCodeVersion.SetJitFlags(pFuncControl->GetCodegenFlags());
+ ilCodeVersion.SetIL((COR_ILMETHOD*)pFuncControl->GetIL());
+ // ilCodeVersion is now the owner of the memory for the IL buffer
+ ilCodeVersion.SetInstrumentedILMap(pFuncControl->GetInstrumentedMapEntryCount(),
pFuncControl->GetInstrumentedMapEntries());
- pInfoToRejit->m_pShared->m_dwInternalFlags &= ~SharedReJitInfo::kStateMask;
- pInfoToRejit->m_pShared->m_dwInternalFlags |= SharedReJitInfo::kStateActive;
- _ASSERTE(pInfoToRejit->m_pCode == NULL);
- _ASSERTE(pInfoToRejit->GetState() == ReJitInfo::kJumpToPrestub);
+ ilCodeVersion.SetRejitState(ILCodeVersion::kStateActive);
}
}
}
@@ -2077,568 +882,22 @@ PCODE ReJitManager::DoReJitIfNecessaryWorker(PTR_MethodDesc pMD)
while (true)
{
{
- CrstHolder ch(&m_crstTable);
- if (pInfoToRejit->m_pShared->GetState() == SharedReJitInfo::kStateActive)
+ CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
+ if (ilCodeVersion.GetRejitState() == ILCodeVersion::kStateActive)
{
break; // the other thread got the parameters succesfully, go race to rejit
}
- else if (pInfoToRejit->m_pShared->GetState() == SharedReJitInfo::kStateRequested)
- {
- return NULL; // the other thread had an error getting parameters and went
- // back to requested
- }
- else if (pInfoToRejit->m_pShared->GetState() == SharedReJitInfo::kStateReverted)
- {
- break; // we got reverted, enter DoReJit anyways and it will detect this and
- // bail out.
- }
}
ClrSleepEx(1, FALSE);
}
}
-
- // We've got the info from the profiler, so JIT the method. This is also
- // responsible for updating the jump target from the prestub to the newly
- // rejitted code AND for publishing the top of the newly rejitted code to
- // pInfoToRejit->m_pCode. If two threads race to rejit, DoReJit handles the
- // race, and ensures the winner publishes his result to pInfoToRejit->m_pCode.
- return DoReJit(pInfoToRejit);
-
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Called by DoReJitIfNecessaryWorker(), this function assumes the IL & codegen flags have
-// already been gathered from the profiler, and then calls UnsafeJitFunction to perform
-// the re-JIT (bracketing that with profiler callbacks to announce the start/finish of
-// the rejit).
-//
-// This is also responsible for handling any races between multiple threads
-// simultaneously rejitting a function. See the comment at the top of
-// code:ReJitManager::DoReJitIfNecessaryWorker for details.
-//
-// Arguments:
-// pInfo - ReJitInfo tracking this MethodDesc's rejit request
-//
-// Return Value:
-// * Generally, return the PCODE of the start of the rejitted code. However,
-// depending on the result of races determined by DoReJit(), the return value
-// can be different:
-// * If the current thread races with another thread to do the rejit, return the
-// PCODE generated by the winner.
-// * If the current thread races with another thread doing a revert, and the revert
-// wins, then return the PCODE of the start of the originally JITted code
-// (i.e., pInfo->GetMethodDesc()->GetNativeCode())
-//
-
-PCODE ReJitManager::DoReJit(ReJitInfo * pInfo)
-{
- STANDARD_VM_CONTRACT;
-
-#ifdef PROFILING_SUPPORTED
-
- INDEBUG(Dump("Inside DoRejit(). Dumping this ReJitManager\n"));
-
- _ASSERTE(!pInfo->GetMethodDesc()->IsNoMetadata());
- {
- BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
- g_profControlBlock.pProfInterface->ReJITCompilationStarted((FunctionID)pInfo->GetMethodDesc(),
- pInfo->m_pShared->GetId(),
- TRUE);
- END_PIN_PROFILER();
- }
-
- COR_ILMETHOD_DECODER ILHeader(pInfo->GetIL(), pInfo->GetMethodDesc()->GetMDImport(), NULL);
- PCODE pCodeOfRejittedCode = NULL;
-
- // Note that we're intentionally not enclosing UnsafeJitFunction in a try block
- // to swallow exceptions. It's expected that any exception thrown is fatal and
- // should pass through. This is in contrast to MethodDesc::MakeJitWorker, which
- // does enclose UnsafeJitFunction in a try block, and attempts to swallow an
- // exception that occurs on the current thread when another thread has
- // simultaneously attempted (and provably succeeded in) the JITting of the same
- // function. This is a very unusual case (likely due to an out of memory error
- // encountered on the current thread and not on the competing thread), which is
- // not worth attempting to cover.
- pCodeOfRejittedCode = UnsafeJitFunction(
- pInfo->GetMethodDesc(),
- &ILHeader,
- JitFlagsFromProfCodegenFlags(pInfo->m_pShared->m_dwCodegenFlags));
-
- _ASSERTE(pCodeOfRejittedCode != NULL);
-
- // This atomically updates the jmp target (from prestub to top of rejitted code) and publishes
- // the top of rejitted code into pInfo, all inside the same acquisition of this
- // ReJitManager's table Crst.
- HRESULT hr = S_OK;
- BOOL fEESuspended = FALSE;
- BOOL fNotify = FALSE;
- PCODE ret = NULL;
- while (true)
- {
- if (fEESuspended)
- {
- ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_REJIT);
- }
- CrstHolder ch(&m_crstTable);
-
- // Now that we're under the lock, recheck whether pInfo->m_pCode has been filled
- // in...
- if (pInfo->m_pCode != NULL)
- {
- // Yup, another thread rejitted this request at the same time as us, and beat
- // us to publishing the result. Intentionally skip the rest of this, and do
- // not issue a ReJITCompilationFinished from this thread.
- ret = pInfo->m_pCode;
- break;
- }
-
- // BUGBUG: This revert check below appears to introduce behavior we probably don't want.
- // This is a pre-existing issue and I don't have time to create a test for this right now,
- // but wanted to capture the issue in a comment for future work.
- // Imagine the profiler has one thread which is calling RequestReJIT periodically
- // updating the method's IL:
- // 1) RequestReJit (table lock keeps these atomic)
- // 1.1) Revert old shared rejit info
- // 1.2) Create new shared rejit info
- // 2) RequestReJit (table lock keeps these atomic)
- // 2.1) Revert old shared rejit info
- // 2.2) Create new shared rejit info
- // ...
- // On a second thread we keep calling the method which needs to periodically rejit
- // to update to the newest version:
- // a) [DoReJitIfNecessaryWorker] detects active rejit request
- // b) [DoReJit] if shared rejit info is reverted, execute original method code.
- //
- // Because (a) and (b) are not under the same lock acquisition this ordering is possible:
- // (1), (a), (2), (b)
- // The result is that (b) sees the shared rejit is reverted and the method executes its
- // original code. As a profiler using rejit I would expect either the IL specified in
- // (1) or the IL specified in (2) would be used, but never the original IL.
- //
- // I think the correct behavior is to bind a method execution to the current rejit
- // version at some point, and from then on we guarantee to execute that version of the
- // code, regardless of reverts or re-rejit request.
- //
- // There is also a related issue with GetCurrentReJitFlagsWorker which assumes jitting
- // always corresponds to the most recent version of the method. If we start pinning
- // method invocations to particular versions then that method can't be allowed to
- // float forward to the newest version, nor can it abort if the most recent version
- // is reverted.
- // END BUGBUG
- //
- // And recheck whether some other thread tried to revert this method in the
- // meantime (this check would also include an attempt to re-rejit the method
- // (i.e., calling RequestReJIT on the method multiple times), which would revert
- // this pInfo before creating a new one to track the latest rejit request).
- if (pInfo->m_pShared->GetState() == SharedReJitInfo::kStateReverted)
- {
- // Yes, we've been reverted, so the jmp-to-prestub has already been removed,
- // and we should certainly not attempt to redirect that nonexistent jmp to
- // the code we just rejitted
- _ASSERTE(pInfo->GetMethodDesc()->GetNativeCode() != NULL);
- ret = pInfo->GetMethodDesc()->GetNativeCode();
- break;
- }
-
-#ifdef DEBUGGING_SUPPORTED
- // Notify the debugger of the rejitted function, so it can generate
- // DebuggerMethodInfo / DebugJitInfo for it. Normally this is done inside
- // UnsafeJitFunction (via CallCompileMethodWithSEHWrapper), but it skips this
- // when it detects the MethodDesc was already jitted. Since we know here that
- // we're rejitting it (and this is not just some sort of multi-thread JIT race),
- // now is a good place to notify the debugger.
- if (g_pDebugInterface != NULL)
- {
- g_pDebugInterface->JITComplete(pInfo->GetMethodDesc(), pCodeOfRejittedCode);
- }
-
-#endif // DEBUGGING_SUPPORTED
-
- _ASSERTE(pInfo->m_pShared->GetState() == SharedReJitInfo::kStateActive);
- _ASSERTE(pInfo->GetState() == ReJitInfo::kJumpToPrestub);
-
- // Atomically publish the PCODE and update the jmp stamp (to go to the rejitted
- // code) under the lock
- hr = pInfo->UpdateJumpTarget(fEESuspended, pCodeOfRejittedCode);
- if (hr == CORPROF_E_RUNTIME_SUSPEND_REQUIRED)
- {
- _ASSERTE(!fEESuspended);
- fEESuspended = TRUE;
- continue;
- }
- if (FAILED(hr))
- {
- break;
- }
- pInfo->m_pCode = pCodeOfRejittedCode;
- fNotify = TRUE;
- ret = pCodeOfRejittedCode;
-
- _ASSERTE(pInfo->m_pShared->GetState() == SharedReJitInfo::kStateActive);
- _ASSERTE(pInfo->GetState() == ReJitInfo::kJumpToRejittedCode);
- break;
- }
-
- if (fEESuspended)
- {
- ThreadSuspend::RestartEE(FALSE /* bFinishedGC */, TRUE /* SuspendSucceded */);
- fEESuspended = FALSE;
- }
-
- if (FAILED(hr))
- {
- Module* pModule = NULL;
- mdMethodDef methodDef = mdTokenNil;
- pInfo->GetModuleAndTokenRegardlessOfKeyType(&pModule, &methodDef);
- ReportReJITError(pModule, methodDef, pInfo->GetMethodDesc(), hr);
- }
-
- // Notify the profiler that JIT completed.
- if (fNotify)
- {
- BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
- g_profControlBlock.pProfInterface->ReJITCompilationFinished((FunctionID)pInfo->GetMethodDesc(),
- pInfo->m_pShared->GetId(),
- S_OK,
- TRUE);
- END_PIN_PROFILER();
- }
-#endif // PROFILING_SUPPORTED
-
- // Fire relevant ETW events
- if (fNotify)
- {
- ETW::MethodLog::MethodJitted(
- pInfo->GetMethodDesc(),
- NULL, // namespaceOrClassName
- NULL, // methodName
- NULL, // methodSignature
- pCodeOfRejittedCode,
- pInfo->m_pShared->GetId());
- }
- return ret;
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Transition SharedReJitInfo to Reverted state and add all associated ReJitInfos to the
-// undo list in the method batch
-//
-// Arguments:
-// pShared - SharedReJitInfo to revert
-// pJumpStampBatch - a batch of methods that need their jump stamps reverted. This method
-// is responsible for adding additional ReJitInfos to the list.
-//
-// Return Value:
-// S_OK if all MDs are batched and the SharedReJitInfo is marked reverted
-// E_OUTOFMEMORY (MDs couldn't be added to batch, SharedReJitInfo is not reverted)
-//
-// Assumptions:
-// Caller must be holding this ReJitManager's table crst.
-//
-
-HRESULT ReJitManager::Revert(SharedReJitInfo * pShared, ReJitManagerJumpStampBatch* pJumpStampBatch)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
- _ASSERTE((pShared->GetState() == SharedReJitInfo::kStateRequested) ||
- (pShared->GetState() == SharedReJitInfo::kStateGettingReJITParameters) ||
- (pShared->GetState() == SharedReJitInfo::kStateActive));
- _ASSERTE(pShared->GetMethods() != NULL);
- _ASSERTE(pJumpStampBatch->pReJitManager == this);
-
- HRESULT hrReturn = S_OK;
- for (ReJitInfo * pInfo = pShared->GetMethods(); pInfo != NULL; pInfo = pInfo->m_pNext)
- {
- if (pInfo->GetState() == ReJitInfo::kJumpNone)
- {
- // Nothing to revert for this MethodDesc / instantiation.
- continue;
- }
-
- ReJitInfo** ppInfo = pJumpStampBatch->undoMethods.Append();
- if (ppInfo == NULL)
- {
- return E_OUTOFMEMORY;
- }
- *ppInfo = pInfo;
- }
-
- pShared->m_dwInternalFlags &= ~SharedReJitInfo::kStateMask;
- pShared->m_dwInternalFlags |= SharedReJitInfo::kStateReverted;
+
return S_OK;
}
-
-//---------------------------------------------------------------------------------------
-//
-// Removes any ReJitInfos relating to MDs for the specified AppDomain from this
-// ReJitManager. This is used to remove non-domain-neutral instantiations of
-// domain-neutral generics from the SharedDomain's ReJitManager, when the AppDomain
-// containing those non-domain-neutral instantiations is unloaded.
-//
-// Arguments:
-// * pAppDomain - AppDomain that is exiting, and is thus the one for which we should
-// find ReJitInfos to remove
-//
-//
-
-void ReJitManager::RemoveReJitInfosFromDomain(AppDomain * pAppDomain)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- CAN_TAKE_LOCK;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- CrstHolder ch(&m_crstTable);
-
- INDEBUG(Dump("Dumping SharedDomain rejit manager BEFORE AD Unload"));
-
- for (ReJitInfoHash::Iterator iterCur = m_table.Begin(), iterEnd = m_table.End();
- iterCur != iterEnd;
- iterCur++)
- {
- ReJitInfo * pInfo = *iterCur;
-
- if (pInfo->m_key.m_keyType != ReJitInfo::Key::kMethodDesc)
- {
- // Skip all "placeholder" ReJitInfos--they'll always be allocated on a
- // loader heap for the shared domain.
- _ASSERTE(pInfo->m_key.m_keyType == ReJitInfo::Key::kMetadataToken);
- _ASSERTE(PTR_Module(pInfo->m_key.m_pModule)->GetDomain()->IsSharedDomain());
- continue;
- }
-
- if (pInfo->GetMethodDesc()->GetDomain() != pAppDomain)
- {
- // We only care about non-domain-neutral instantiations that live in
- // pAppDomain.
- continue;
- }
-
- // Remove this ReJitInfo from the linked-list of ReJitInfos associated with its
- // SharedReJitInfo.
- pInfo->m_pShared->RemoveMethod(pInfo);
-
- // Remove this ReJitInfo from the ReJitManager's hash table.
- m_table.Remove(iterCur);
-
- // pInfo is not deallocated yet. That will happen when pAppDomain finishes
- // unloading and its loader heaps get freed.
- }
- INDEBUG(Dump("Dumping SharedDomain rejit manager AFTER AD Unload"));
-}
-
#endif // DACCESS_COMPILE
// The rest of the ReJitManager methods are safe to compile for DAC
-
-//---------------------------------------------------------------------------------------
-//
-// Helper to iterate through m_table, finding the single matching non-reverted ReJitInfo.
-// The caller may search either by MethodDesc * XOR by (Module *, methodDef) pair.
-//
-// Arguments:
-// * pMD - MethodDesc * to search for. (NULL if caller is searching by (Module *,
-// methodDef)
-// * pModule - Module * to search for. (NULL if caller is searching by MethodDesc *)
-// * methodDef - methodDef to search for. (NULL if caller is searching by MethodDesc
-// *)
-//
-// Return Value:
-// ReJitInfo * requested, or NULL if none is found
-//
-// Assumptions:
-// Caller should be holding this ReJitManager's table crst.
-//
-
-PTR_ReJitInfo ReJitManager::FindNonRevertedReJitInfoHelper(
- PTR_MethodDesc pMD,
- PTR_Module pModule,
- mdMethodDef methodDef)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- INSTANCE_CHECK;
- }
- CONTRACTL_END;
-
- // Either pMD is valid, xor (pModule,methodDef) is valid
- _ASSERTE(
- ((pMD != NULL) && (pModule == NULL) && (methodDef == mdTokenNil)) ||
- ((pMD == NULL) && (pModule != NULL) && (methodDef != mdTokenNil)));
-
- // Caller should hold the Crst around calling this function and using the ReJitInfo.
-#ifndef DACCESS_COMPILE
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
-#endif
-
- ReJitInfoHash::KeyIterator beginIter(&m_table, TRUE /* begin */);
- ReJitInfoHash::KeyIterator endIter(&m_table, FALSE /* begin */);
-
- if (pMD != NULL)
- {
- beginIter = GetBeginIterator(pMD);
- endIter = GetEndIterator(pMD);
- }
- else
- {
- beginIter = GetBeginIterator(pModule, methodDef);
- endIter = GetEndIterator(pModule, methodDef);
- }
-
- for (ReJitInfoHash::KeyIterator iter = beginIter;
- iter != endIter;
- iter++)
- {
- PTR_ReJitInfo pInfo = *iter;
- _ASSERTE(pInfo->m_pShared != NULL);
-
- if (pInfo->m_pShared->GetState() == SharedReJitInfo::kStateReverted)
- continue;
-
- INDEBUG(AssertRestOfEntriesAreReverted(iter, endIter));
- return pInfo;
- }
-
- return NULL;
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// ReJitManager instance constructor--for now, does nothing
-//
-
-ReJitManager::ReJitManager()
-{
- LIMITED_METHOD_DAC_CONTRACT;
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Called from BaseDomain::BaseDomain to do any constructor-time initialization.
-// Presently, this takes care of initializing the Crst, choosing the type based on
-// whether this ReJitManager belongs to the SharedDomain.
-//
-// Arguments:
-// * fSharedDomain - nonzero iff this ReJitManager belongs to the SharedDomain.
-//
-
-void ReJitManager::PreInit(BOOL fSharedDomain)
-{
- CONTRACTL
- {
- THROWS;
- GC_TRIGGERS;
- CAN_TAKE_LOCK;
- MODE_ANY;
- }
- CONTRACTL_END;
-
-#ifndef DACCESS_COMPILE
- m_crstTable.Init(
- fSharedDomain ? CrstReJITSharedDomainTable : CrstReJITDomainTable,
- CrstFlags(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD | CRST_REENTRANCY | CRST_TAKEN_DURING_SHUTDOWN));
-#endif // DACCESS_COMPILE
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Finds the ReJitInfo tracking a pre-rejit request.
-//
-// Arguments:
-// * beginIter - Iterator to start search
-// * endIter - Iterator to end search
-//
-// Return Value:
-// NULL if no such ReJitInfo exists. This can occur if two thread race
-// to JIT the original code and we're the loser. Else, the ReJitInfo * found.
-//
-// Assumptions:
-// Caller must be holding this ReJitManager's table lock.
-//
-
-ReJitInfo * ReJitManager::FindPreReJittedReJitInfo(
- ReJitInfoHash::KeyIterator beginIter,
- ReJitInfoHash::KeyIterator endIter)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- // Caller shouldn't be handing out iterators unless he's already locking the table.
-#ifndef DACCESS_COMPILE
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
-#endif
-
- for (ReJitInfoHash::KeyIterator iter = beginIter;
- iter != endIter;
- iter++)
- {
- ReJitInfo * pInfo = *iter;
- SharedReJitInfo * pShared = pInfo->m_pShared;
- _ASSERTE(pShared != NULL);
-
- switch (pShared->GetState())
- {
- case SharedReJitInfo::kStateRequested:
- case SharedReJitInfo::kStateGettingReJITParameters:
- case SharedReJitInfo::kStateActive:
- if (pInfo->GetState() == ReJitInfo::kJumpToRejittedCode)
- {
- // There was a race for the original JIT, and we're the loser. (The winner
- // has already published the original JIT's pcode, jump-stamped, and begun
- // the rejit!)
- return NULL;
- }
-
- // Otherwise, either we have a rejit request that has not yet been
- // jump-stamped, or there was a race for the original JIT, and another
- // thread jump-stamped its copy of the originally JITted code already. In
- // that case, we still don't know who the winner or loser will be (PCODE may
- // not yet be published), so we'll have to jump-stamp our copy just in case
- // we win.
- _ASSERTE((pInfo->GetState() == ReJitInfo::kJumpNone) ||
- (pInfo->GetState() == ReJitInfo::kJumpToPrestub));
- INDEBUG(AssertRestOfEntriesAreReverted(iter, endIter));
- return pInfo;
-
-
- case SharedReJitInfo::kStateReverted:
- // just ignore this guy
- continue;
-
- default:
- UNREACHABLE();
- }
- }
-
- return NULL;
-}
-
//---------------------------------------------------------------------------------------
//
// Used by profiler to get the ReJITID corrseponding to a (MethodDesc *, PCODE) pair.
@@ -2654,7 +913,7 @@ ReJitInfo * ReJitManager::FindPreReJittedReJitInfo(
// 0 if no such ReJITID found (e.g., PCODE is from a JIT and not a rejit), else the
// ReJITID requested.
//
-
+// static
ReJITID ReJitManager::GetReJitId(PTR_MethodDesc pMD, PCODE pCodeStart)
{
CONTRACTL
@@ -2662,7 +921,6 @@ ReJITID ReJitManager::GetReJitId(PTR_MethodDesc pMD, PCODE pCodeStart)
NOTHROW;
CAN_TAKE_LOCK;
GC_TRIGGERS;
- INSTANCE_CHECK;
PRECONDITION(CheckPointer(pMD));
PRECONDITION(pCodeStart != NULL);
}
@@ -2671,14 +929,14 @@ ReJITID ReJitManager::GetReJitId(PTR_MethodDesc pMD, PCODE pCodeStart)
// Fast-path: If the rejit map is empty, no need to look up anything. Do this outside
// of a lock to impact our caller (the prestub worker) as little as possible. If the
// map is nonempty, we'll acquire the lock at that point and do the lookup for real.
- if (m_table.GetCount() == 0)
+ CodeVersionManager* pCodeVersionManager = pMD->GetCodeVersionManager();
+ if (pCodeVersionManager->GetNonDefaultILVersionCount() == 0)
{
return 0;
}
- CrstHolder ch(&m_crstTable);
-
- return GetReJitIdNoLock(pMD, pCodeStart);
+ CodeVersionManager::TableLockHolder ch(pCodeVersionManager);
+ return ReJitManager::GetReJitIdNoLock(pMD, pCodeStart);
}
//---------------------------------------------------------------------------------------
@@ -2699,221 +957,21 @@ ReJITID ReJitManager::GetReJitIdNoLock(PTR_MethodDesc pMD, PCODE pCodeStart)
NOTHROW;
CANNOT_TAKE_LOCK;
GC_NOTRIGGER;
- INSTANCE_CHECK;
PRECONDITION(CheckPointer(pMD));
PRECONDITION(pCodeStart != NULL);
}
CONTRACTL_END;
// Caller must ensure this lock is taken!
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
+ CodeVersionManager* pCodeVersionManager = pMD->GetCodeVersionManager();
+ _ASSERTE(pCodeVersionManager->LockOwnedByCurrentThread());
- ReJitInfo * pInfo = FindReJitInfo(pMD, pCodeStart, 0);
- if (pInfo == NULL)
+ NativeCodeVersion nativeCodeVersion = pCodeVersionManager->GetNativeCodeVersion(pMD, pCodeStart);
+ if (nativeCodeVersion.IsNull())
{
return 0;
}
-
- _ASSERTE(pInfo->m_pShared->GetState() == SharedReJitInfo::kStateActive ||
- pInfo->m_pShared->GetState() == SharedReJitInfo::kStateReverted);
- return pInfo->m_pShared->GetId();
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Used by profilers to map a (MethodDesc *, ReJITID) pair to the corresponding PCODE for
-// that rejit attempt. This can also be used for reverted methods, as the PCODE may still
-// be available and in use even after a rejitted function has been reverted.
-//
-// Arguments:
-// * pMD - MethodDesc * of interest
-// * reJitId - ReJITID of interest
-//
-// Return Value:
-// Corresponding PCODE of the rejit attempt, or NULL if no such rejit attempt can be
-// found.
-//
-
-PCODE ReJitManager::GetCodeStart(PTR_MethodDesc pMD, ReJITID reJitId)
-{
- CONTRACTL
- {
- NOTHROW;
- CAN_TAKE_LOCK;
- GC_NOTRIGGER;
- INSTANCE_CHECK;
- PRECONDITION(CheckPointer(pMD));
- PRECONDITION(reJitId != 0);
- }
- CONTRACTL_END;
-
- // Fast-path: If the rejit map is empty, no need to look up anything. Do this outside
- // of a lock to impact our caller (the prestub worker) as little as possible. If the
- // map is nonempty, we'll acquire the lock at that point and do the lookup for real.
- if (m_table.GetCount() == 0)
- {
- return NULL;
- }
-
- CrstHolder ch(&m_crstTable);
-
- ReJitInfo * pInfo = FindReJitInfo(pMD, NULL, reJitId);
- if (pInfo == NULL)
- {
- return NULL;
- }
-
- _ASSERTE(pInfo->m_pShared->GetState() == SharedReJitInfo::kStateActive ||
- pInfo->m_pShared->GetState() == SharedReJitInfo::kStateReverted);
-
- return pInfo->m_pCode;
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// If a function has been requested to be rejitted, finds the one current
-// SharedReJitInfo (ignoring all that are in the reverted state) and returns the codegen
-// flags recorded on it (which were thus used to rejit the MD). CEEInfo::canInline() calls
-// this as part of its calculation of whether it may inline a given method. (Profilers
-// may specify on a per-rejit-request basis whether the rejit of a method may inline
-// callees.)
-//
-// Arguments:
-// * pMD - MethodDesc * of interest.
-//
-// Return Value:
-// Returns the requested codegen flags, or 0 (i.e., no flags set) if no rejit attempt
-// can be found for the MD.
-//
-
-DWORD ReJitManager::GetCurrentReJitFlagsWorker(PTR_MethodDesc pMD)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_PREEMPTIVE;
- PRECONDITION(CheckPointer(pMD));
- }
- CONTRACTL_END;
-
- // Fast-path: If the rejit map is empty, no need to look up anything. Do this outside
- // of a lock to impact our caller (e.g., the JIT asking if it can inline) as little as possible. If the
- // map is nonempty, we'll acquire the lock at that point and do the lookup for real.
- if (m_table.GetCount() == 0)
- {
- return 0;
- }
-
- CrstHolder ch(&m_crstTable);
-
- for (ReJitInfoHash::KeyIterator iter = GetBeginIterator(pMD), end = GetEndIterator(pMD);
- iter != end;
- iter++)
- {
- ReJitInfo * pInfo = *iter;
- _ASSERTE(pInfo->GetMethodDesc() == pMD);
- _ASSERTE(pInfo->m_pShared != NULL);
-
- DWORD dwState = pInfo->m_pShared->GetState();
-
- if (dwState != SharedReJitInfo::kStateActive)
- {
- // Not active means we never asked profiler for the codegen flags OR the
- // rejit request has been reverted. So this one is useless.
- continue;
- }
-
- // Found it!
-#ifdef _DEBUG
- // This must be the only such ReJitInfo for this MethodDesc. Check the rest and
- // assert otherwise.
- {
- ReJitInfoHash::KeyIterator iterTest = iter;
- iterTest++;
-
- while(iterTest != end)
- {
- ReJitInfo * pInfoTest = *iterTest;
- _ASSERTE(pInfoTest->GetMethodDesc() == pMD);
- _ASSERTE(pInfoTest->m_pShared != NULL);
-
- DWORD dwStateTest = pInfoTest->m_pShared->GetState();
-
- if (dwStateTest == SharedReJitInfo::kStateActive)
- {
- _ASSERTE(!"Multiple active ReJitInfos for same MethodDesc");
- break;
- }
- iterTest++;
- }
- }
-#endif //_DEBUG
- return pInfo->m_pShared->m_dwCodegenFlags;
- }
-
- return 0;
-}
-
-//---------------------------------------------------------------------------------------
-//
-// Helper to find the matching ReJitInfo by methoddesc paired with either pCodeStart or
-// reJitId (exactly one should be non-zero, and will be used as the key for the lookup)
-//
-// Arguments:
-// * pMD - MethodDesc * to look up
-// * pCodeStart - PCODE of the particular rejit attempt to look up. NULL if looking
-// up by ReJITID.
-// * reJitId - ReJITID of the particular rejit attempt to look up. NULL if looking
-// up by PCODE.
-//
-// Return Value:
-// ReJitInfo * matching input parameters, or NULL if no such ReJitInfo could be
-// found.
-//
-// Assumptions:
-// Caller must be holding this ReJitManager's table lock.
-//
-
-PTR_ReJitInfo ReJitManager::FindReJitInfo(PTR_MethodDesc pMD, PCODE pCodeStart, ReJITID reJitId)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- INSTANCE_CHECK;
- PRECONDITION(CheckPointer(pMD));
- }
- CONTRACTL_END;
-
- // Caller should hold the Crst around calling this function and using the ReJitInfo.
-#ifndef DACCESS_COMPILE
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
-#endif
-
- // One of these two keys should be used, but not both!
- _ASSERTE(
- ((pCodeStart != NULL) || (reJitId != 0)) &&
- !((pCodeStart != NULL) && (reJitId != 0)));
-
- for (ReJitInfoHash::KeyIterator iter = GetBeginIterator(pMD), end = GetEndIterator(pMD);
- iter != end;
- iter++)
- {
- PTR_ReJitInfo pInfo = *iter;
- _ASSERTE(pInfo->GetMethodDesc() == pMD);
- _ASSERTE(pInfo->m_pShared != NULL);
-
- if ((pCodeStart != NULL && pInfo->m_pCode == pCodeStart) || // pCodeStart is key
- (reJitId != 0 && pInfo->m_pShared->GetId() == reJitId)) // reJitId is key
- {
- return pInfo;
- }
- }
-
- return NULL;
+ return nativeCodeVersion.GetILCodeVersion().GetVersionId();
}
//---------------------------------------------------------------------------------------
@@ -2934,7 +992,7 @@ PTR_ReJitInfo ReJitManager::FindReJitInfo(PTR_MethodDesc pMD, PCODE pCodeStart,
// cReJitIds were returned and cReJitIds < *pcReJitId (latter being the total
// number of ReJITIDs available).
//
-
+// static
HRESULT ReJitManager::GetReJITIDs(PTR_MethodDesc pMD, ULONG cReJitIds, ULONG * pcReJitIds, ReJITID reJitIds[])
{
CONTRACTL
@@ -2942,31 +1000,29 @@ HRESULT ReJitManager::GetReJITIDs(PTR_MethodDesc pMD, ULONG cReJitIds, ULONG * p
NOTHROW;
CAN_TAKE_LOCK;
GC_NOTRIGGER;
- INSTANCE_CHECK;
PRECONDITION(CheckPointer(pMD));
PRECONDITION(pcReJitIds != NULL);
PRECONDITION(reJitIds != NULL);
}
CONTRACTL_END;
- CrstHolder ch(&m_crstTable);
+ CodeVersionManager* pCodeVersionManager = pMD->GetCodeVersionManager();
+ CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
ULONG cnt = 0;
- for (ReJitInfoHash::KeyIterator iter = GetBeginIterator(pMD), end = GetEndIterator(pMD);
+ ILCodeVersionCollection ilCodeVersions = pCodeVersionManager->GetILCodeVersions(pMD);
+ for (ILCodeVersionIterator iter = ilCodeVersions.Begin(), end = ilCodeVersions.End();
iter != end;
iter++)
{
- ReJitInfo * pInfo = *iter;
- _ASSERTE(pInfo->GetMethodDesc() == pMD);
- _ASSERTE(pInfo->m_pShared != NULL);
+ ILCodeVersion curILVersion = *iter;
- if (pInfo->m_pShared->GetState() == SharedReJitInfo::kStateActive ||
- pInfo->m_pShared->GetState() == SharedReJitInfo::kStateReverted)
+ if (curILVersion.GetRejitState() == ILCodeVersion::kStateActive)
{
if (cnt < cReJitIds)
{
- reJitIds[cnt] = pInfo->m_pShared->GetId();
+ reJitIds[cnt] = curILVersion.GetVersionId();
}
++cnt;
@@ -2979,975 +1035,7 @@ HRESULT ReJitManager::GetReJITIDs(PTR_MethodDesc pMD, ULONG cReJitIds, ULONG * p
return (cnt > cReJitIds) ? S_FALSE : S_OK;
}
-//---------------------------------------------------------------------------------------
-//
-// Helper that inits a new ReJitReportErrorWorkItem and adds it to the pErrors array
-//
-// Arguments:
-// * pModule - The module in the module/MethodDef identifier pair for the method which
-// had an error during rejit
-// * methodDef - The MethodDef in the module/MethodDef identifier pair for the method which
-// had an error during rejit
-// * pMD - If available, the specific method instance which had an error during rejit
-// * hrStatus - HRESULT for the rejit error that occurred
-// * pErrors - the list of error records that this method will append to
-//
-// Return Value:
-// * S_OK: error was appended
-// * E_OUTOFMEMORY: Not enough memory to create the new error item. The array is unchanged.
-//
-
-//static
-HRESULT ReJitManager::AddReJITError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus, CDynArray<ReJitReportErrorWorkItem> * pErrors)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- ReJitReportErrorWorkItem* pError = pErrors->Append();
- if (pError == NULL)
- {
- return E_OUTOFMEMORY;
- }
- pError->pModule = pModule;
- pError->methodDef = methodDef;
- pError->pMethodDesc = pMD;
- pError->hrStatus = hrStatus;
- return S_OK;
-}
-
-//---------------------------------------------------------------------------------------
-//
-// Helper that inits a new ReJitReportErrorWorkItem and adds it to the pErrors array
-//
-// Arguments:
-// * pReJitInfo - The method which had an error during rejit
-// * hrStatus - HRESULT for the rejit error that occurred
-// * pErrors - the list of error records that this method will append to
-//
-// Return Value:
-// * S_OK: error was appended
-// * E_OUTOFMEMORY: Not enough memory to create the new error item. The array is unchanged.
-//
-
-//static
-HRESULT ReJitManager::AddReJITError(ReJitInfo* pReJitInfo, HRESULT hrStatus, CDynArray<ReJitReportErrorWorkItem> * pErrors)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- Module * pModule = NULL;
- mdMethodDef methodDef = mdTokenNil;
- pReJitInfo->GetModuleAndTokenRegardlessOfKeyType(&pModule, &methodDef);
- return AddReJITError(pModule, methodDef, pReJitInfo->GetMethodDesc(), hrStatus, pErrors);
-}
-
-#ifdef _DEBUG
-//---------------------------------------------------------------------------------------
-//
-// Debug-only helper used while iterating through the hash table of
-// ReJitInfos to verify that all entries between the specified iterators are
-// reverted. Asserts if it finds any non-reverted entries.
-//
-// Arguments:
-// * iter - Iterator to start verifying at
-// * end - Iterator to stop verifying at
-//
-//
-
-void ReJitManager::AssertRestOfEntriesAreReverted(
- ReJitInfoHash::KeyIterator iter,
- ReJitInfoHash::KeyIterator end)
-{
- LIMITED_METHOD_CONTRACT;
-
- // All other rejits should be in the reverted state
- while (++iter != end)
- {
- _ASSERTE((*iter)->m_pShared->GetState() == SharedReJitInfo::kStateReverted);
- }
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Debug-only helper to dump ReJitManager contents to stdout. Only used if
-// COMPlus_ProfAPI_EnableRejitDiagnostics is set.
-//
-// Arguments:
-// * szIntroText - Intro text passed by caller to be output before this ReJitManager
-// is dumped.
-//
-//
-
-void ReJitManager::Dump(LPCSTR szIntroText)
-{
- if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ProfAPI_EnableRejitDiagnostics) == 0)
- return;
-
- printf(szIntroText);
- fflush(stdout);
-
- CrstHolder ch(&m_crstTable);
-
- printf("BEGIN ReJitManager::Dump: 0x%p\n", this);
-
- for (ReJitInfoHash::Iterator iterCur = m_table.Begin(), iterEnd = m_table.End();
- iterCur != iterEnd;
- iterCur++)
- {
- ReJitInfo * pInfo = *iterCur;
- printf(
- "\tInfo 0x%p: State=0x%x, Next=0x%p, Shared=%p, SharedState=0x%x\n",
- pInfo,
- pInfo->GetState(),
- (void*)pInfo->m_pNext,
- (void*)pInfo->m_pShared,
- pInfo->m_pShared->GetState());
-
- switch(pInfo->m_key.m_keyType)
- {
- case ReJitInfo::Key::kMethodDesc:
- printf(
- "\t\tMD=0x%p, %s.%s (%s)\n",
- (void*)pInfo->GetMethodDesc(),
- pInfo->GetMethodDesc()->m_pszDebugClassName,
- pInfo->GetMethodDesc()->m_pszDebugMethodName,
- pInfo->GetMethodDesc()->m_pszDebugMethodSignature);
- break;
-
- case ReJitInfo::Key::kMetadataToken:
- Module * pModule;
- mdMethodDef methodDef;
- pInfo->GetModuleAndToken(&pModule, &methodDef);
- printf(
- "\t\tModule=0x%p, Token=0x%x\n",
- pModule,
- methodDef);
- break;
-
- case ReJitInfo::Key::kUninitialized:
- printf("\t\tUNINITIALIZED\n");
- break;
-
- default:
- _ASSERTE(!"Unrecognized pInfo key type");
- }
- fflush(stdout);
- }
- printf("END ReJitManager::Dump: 0x%p\n", this);
- fflush(stdout);
-}
-
-#endif // _DEBUG
-
-//---------------------------------------------------------------------------------------
-// ReJitInfo implementation
-
-// All the state-changey stuff is kept up here in the !DACCESS_COMPILE block.
-// The more read-only inspection-y stuff follows the block.
-
-
-#ifndef DACCESS_COMPILE
-
-//---------------------------------------------------------------------------------------
-//
-// Do the actual work of stamping the top of originally-jitted-code with a jmp that goes
-// to the prestub. This can be called in one of three ways:
-// * Case 1: By RequestReJIT against an already-jitted function, in which case the
-// PCODE may be inferred by the MethodDesc, and our caller will have suspended
-// the EE for us, OR
-// * Case 2: By the prestub worker after jitting the original code of a function
-// (i.e., the "pre-rejit" scenario). In this case, the EE is not suspended. But
-// that's ok, because the PCODE has not yet been published to the MethodDesc, and
-// no thread can be executing inside the originally JITted function yet.
-// * Case 3: At type/method restore time for an NGEN'ed assembly. This is also the pre-rejit
-// scenario because we are guaranteed to do this before the code in the module
-// is executable. EE suspend is not required.
-//
-// Arguments:
-// * pCode - Case 1 (above): will be NULL, and we can infer the PCODE from the
-// MethodDesc; Case 2+3 (above, pre-rejit): will be non-NULL, and we'll need to use
-// this to find the code to stamp on top of.
-//
-// Return Value:
-// * S_OK: Either we successfully did the jmp-stamp, or a racing thread took care of
-// it for us.
-// * Else, HRESULT indicating failure.
-//
-// Assumptions:
-// The caller will have suspended the EE if necessary (case 1), before this is
-// called.
-//
-HRESULT ReJitInfo::JumpStampNativeCode(PCODE pCode /* = NULL */)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
-
- // It may seem dangerous to be stamping jumps over code while a GC is going on,
- // but we're actually safe. As we assert below, either we're holding the thread
- // store lock (and thus preventing a GC) OR we're stamping code that has not yet
- // been published (and will thus not be executed by managed therads or examined
- // by the GC).
- MODE_ANY;
- }
- CONTRACTL_END;
-
- PCODE pCodePublished = GetMethodDesc()->GetNativeCode();
-
- _ASSERTE((pCode != NULL) || (pCodePublished != NULL));
- _ASSERTE(GetMethodDesc()->GetReJitManager()->IsTableCrstOwnedByCurrentThread());
-
- HRESULT hr = S_OK;
-
- // We'll jump-stamp over pCode, or if pCode is NULL, jump-stamp over the published
- // code for this's MethodDesc.
- LPBYTE pbCode = (LPBYTE) pCode;
- if (pbCode == NULL)
- {
- // If caller didn't specify a pCode, just use the one that was published after
- // the original JIT. (A specific pCode would be passed in the pre-rejit case,
- // to jump-stamp the original code BEFORE the PCODE gets published.)
- pbCode = (LPBYTE) pCodePublished;
- }
- _ASSERTE (pbCode != NULL);
-
- // The debugging API may also try to write to the very top of this function (though
- // with an int 3 for breakpoint purposes). Coordinate with the debugger so we know
- // whether we can safely patch the actual code, or instead write to the debugger's
- // buffer.
- DebuggerController::ControllerLockHolder lockController;
-
- // We could be in a race. Either two threads simultaneously JITting the same
- // method for the first time or two threads restoring NGEN'ed code.
- // Another thread may (or may not) have jump-stamped its copy of the code already
- _ASSERTE((GetState() == kJumpNone) || (GetState() == kJumpToPrestub));
-
- if (GetState() == kJumpToPrestub)
- {
- // The method has already been jump stamped so nothing left to do
- _ASSERTE(CodeIsSaved());
- return S_OK;
- }
-
- // Remember what we're stamping our jump on top of, so we can replace it during a
- // revert.
- for (int i = 0; i < sizeof(m_rgSavedCode); i++)
- {
- m_rgSavedCode[i] = *FirstCodeByteAddr(pbCode+i, DebuggerController::GetPatchTable()->GetPatch((CORDB_ADDRESS_TYPE *)(pbCode+i)));
- }
-
- EX_TRY
- {
- AllocMemTracker amt;
-
- // This guy might throw on out-of-memory, so rely on the tracker to clean-up
- Precode * pPrecode = Precode::Allocate(PRECODE_STUB, GetMethodDesc(), GetMethodDesc()->GetLoaderAllocator(), &amt);
- PCODE target = pPrecode->GetEntryPoint();
-
-#if defined(_X86_) || defined(_AMD64_)
-
- // Normal unpatched code never starts with a jump
- // so make sure this code isn't already patched
- _ASSERTE(*FirstCodeByteAddr(pbCode, DebuggerController::GetPatchTable()->GetPatch((CORDB_ADDRESS_TYPE *)pbCode)) != X86_INSTR_JMP_REL32);
-
- INT64 i64OldCode = *(INT64*)pbCode;
- INT64 i64NewCode = i64OldCode;
- LPBYTE pbNewValue = (LPBYTE)&i64NewCode;
- *pbNewValue = X86_INSTR_JMP_REL32;
- INT32 UNALIGNED * pOffset = reinterpret_cast<INT32 UNALIGNED *>(&pbNewValue[1]);
- // This will throw for out-of-memory, so don't write anything until
- // after he succeeds
- // This guy will leak/cache/reuse the jumpstub
- *pOffset = rel32UsingJumpStub(reinterpret_cast<INT32 UNALIGNED *>(pbCode + 1), target, GetMethodDesc(), GetMethodDesc()->GetLoaderAllocator());
-
- // If we have the EE suspended or the code is unpublished there won't be contention on this code
- hr = UpdateJumpStampHelper(pbCode, i64OldCode, i64NewCode, FALSE);
- if (FAILED(hr))
- {
- ThrowHR(hr);
- }
-
- //
- // No failure point after this!
- //
- amt.SuppressRelease();
-
-#else // _X86_ || _AMD64_
-#error "Need to define a way to jump-stamp the prolog in a safe way for this platform"
-
-#endif // _X86_ || _AMD64_
-
- m_dwInternalFlags &= ~kStateMask;
- m_dwInternalFlags |= kJumpToPrestub;
- }
- EX_CATCH_HRESULT(hr);
- _ASSERT(hr == S_OK || hr == E_OUTOFMEMORY);
-
- if (SUCCEEDED(hr))
- {
- _ASSERTE(GetState() == kJumpToPrestub);
- _ASSERTE(m_rgSavedCode[0] != 0); // saved code should not start with 0
- }
-
- return hr;
-}
-
-//---------------------------------------------------------------------------------------
-//
-// Poke the JITted code to satsify a revert request (or to perform an implicit revert as
-// part of a second, third, etc. rejit request). Reinstates the originally JITted code
-// that had been jump-stamped over to perform a prior rejit.
-//
-// Arguments
-// fEESuspended - TRUE if the caller keeps the EE suspended during this call
-//
-//
-// Return Value:
-// S_OK to indicate the revert succeeded,
-// CORPROF_E_RUNTIME_SUSPEND_REQUIRED to indicate the jumpstamp hasn't been reverted
-// and EE suspension will be needed for success
-// other failure HRESULT indicating what went wrong.
-//
-// Assumptions:
-// Caller must be holding the owning ReJitManager's table crst.
-//
-
-HRESULT ReJitInfo::UndoJumpStampNativeCode(BOOL fEESuspended)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- _ASSERTE(GetMethodDesc()->GetReJitManager()->IsTableCrstOwnedByCurrentThread());
- _ASSERTE((m_pShared->GetState() == SharedReJitInfo::kStateReverted));
- _ASSERTE((GetState() == kJumpToPrestub) || (GetState() == kJumpToRejittedCode));
- _ASSERTE(m_rgSavedCode[0] != 0); // saved code should not start with 0 (see above test)
-
- BYTE * pbCode = (BYTE*)GetMethodDesc()->GetNativeCode();
- DebuggerController::ControllerLockHolder lockController;
-
-#if defined(_X86_) || defined(_AMD64_)
- _ASSERTE(m_rgSavedCode[0] != X86_INSTR_JMP_REL32);
- _ASSERTE(*FirstCodeByteAddr(pbCode, DebuggerController::GetPatchTable()->GetPatch((CORDB_ADDRESS_TYPE *)pbCode)) == X86_INSTR_JMP_REL32);
-#else
-#error "Need to define a way to jump-stamp the prolog in a safe way for this platform"
-#endif // _X86_ || _AMD64_
-
- // For the interlocked compare, remember what pbCode is right now
- INT64 i64OldValue = *(INT64 *)pbCode;
- // Assemble the INT64 of the new code bytes to write. Start with what's there now
- INT64 i64NewValue = i64OldValue;
- memcpy(LPBYTE(&i64NewValue), m_rgSavedCode, sizeof(m_rgSavedCode));
- HRESULT hr = UpdateJumpStampHelper(pbCode, i64OldValue, i64NewValue, !fEESuspended);
- _ASSERTE(hr == S_OK || (hr == CORPROF_E_RUNTIME_SUSPEND_REQUIRED && !fEESuspended));
- if (hr != S_OK)
- return hr;
-
- // Transition state of this ReJitInfo to indicate the MD no longer has any jump stamp
- m_dwInternalFlags &= ~kStateMask;
- m_dwInternalFlags |= kJumpNone;
- return S_OK;
-}
-
-//---------------------------------------------------------------------------------------
-//
-// After code has been rejitted, this is called to update the jump-stamp to go from
-// pointing to the prestub, to pointing to the newly rejitted code.
-//
-// Arguments:
-// fEESuspended - TRUE if the caller keeps the EE suspended during this call
-// pRejittedCode - jitted code for the updated IL this method should execute
-//
-// Assumptions:
-// This rejit manager's table crst should be held by the caller
-//
-// Returns - S_OK if the jump target is updated
-// CORPROF_E_RUNTIME_SUSPEND_REQUIRED if the ee isn't suspended and it
-// will need to be in order to do the update safely
-HRESULT ReJitInfo::UpdateJumpTarget(BOOL fEESuspended, PCODE pRejittedCode)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_PREEMPTIVE;
- }
- CONTRACTL_END;
-
- MethodDesc * pMD = GetMethodDesc();
- _ASSERTE(pMD->GetReJitManager()->IsTableCrstOwnedByCurrentThread());
- _ASSERTE(m_pShared->GetState() == SharedReJitInfo::kStateActive);
- _ASSERTE(GetState() == kJumpToPrestub);
- _ASSERTE(m_pCode == NULL);
-
- // Beginning of originally JITted code containing the jmp that we will redirect.
- BYTE * pbCode = (BYTE*)pMD->GetNativeCode();
-
-#if defined(_X86_) || defined(_AMD64_)
-
- HRESULT hr = S_OK;
- {
- DebuggerController::ControllerLockHolder lockController;
-
- // This will throw for out-of-memory, so don't write anything until
- // after he succeeds
- // This guy will leak/cache/reuse the jumpstub
- INT32 offset = 0;
- EX_TRY
- {
- offset = rel32UsingJumpStub(
- reinterpret_cast<INT32 UNALIGNED *>(&pbCode[1]), // base of offset
- pRejittedCode, // target of jump
- pMD,
- pMD->GetLoaderAllocator());
- }
- EX_CATCH_HRESULT(hr);
- _ASSERT(hr == S_OK || hr == E_OUTOFMEMORY);
- if (FAILED(hr))
- {
- return hr;
- }
- // For validation later, remember what pbCode is right now
- INT64 i64OldValue = *(INT64 *)pbCode;
-
- // Assemble the INT64 of the new code bytes to write. Start with what's there now
- INT64 i64NewValue = i64OldValue;
- LPBYTE pbNewValue = (LPBYTE)&i64NewValue;
-
- // First byte becomes a rel32 jmp instruction (should be a no-op as asserted
- // above, but can't hurt)
- *pbNewValue = X86_INSTR_JMP_REL32;
- // Next 4 bytes are the jmp target (offset to jmp stub)
- INT32 UNALIGNED * pnOffset = reinterpret_cast<INT32 UNALIGNED *>(&pbNewValue[1]);
- *pnOffset = offset;
-
- hr = UpdateJumpStampHelper(pbCode, i64OldValue, i64NewValue, !fEESuspended);
- _ASSERTE(hr == S_OK || (hr == CORPROF_E_RUNTIME_SUSPEND_REQUIRED && !fEESuspended));
- }
- if (FAILED(hr))
- {
- return hr;
- }
-
-#else // _X86_ || _AMD64_
-#error "Need to define a way to jump-stamp the prolog in a safe way for this platform"
-#endif // _X86_ || _AMD64_
-
- // State transition
- m_dwInternalFlags &= ~kStateMask;
- m_dwInternalFlags |= kJumpToRejittedCode;
- return S_OK;
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// This is called to modify the jump-stamp area, the first ReJitInfo::JumpStubSize bytes
-// in the method's code.
-//
-// Notes:
-// Callers use this method in a variety of circumstances:
-// a) when the code is unpublished (fContentionPossible == FALSE)
-// b) when the caller has taken the ThreadStoreLock and suspended the EE
-// (fContentionPossible == FALSE)
-// c) when the code is published, the EE isn't suspended, and the jumpstamp
-// area consists of a single 5 byte long jump instruction
-// (fContentionPossible == TRUE)
-// This method will attempt to alter the jump-stamp even if the caller has not prevented
-// contention, but there is no guarantee it will be succesful. When the caller has prevented
-// contention, then success is assured. Callers may oportunistically try without
-// EE suspension, and then upgrade to EE suspension if the first attempt fails.
-//
-// Assumptions:
-// This rejit manager's table crst should be held by the caller or fContentionPossible==FALSE
-// The debugger patch table lock should be held by the caller
-//
-// Arguments:
-// pbCode - pointer to the code where the jump stamp is placed
-// i64OldValue - the bytes which should currently be at the start of the method code
-// i64NewValue - the new bytes which should be written at the start of the method code
-// fContentionPossible - See the Notes section above.
-//
-// Returns:
-// S_OK => the jumpstamp has been succesfully updated.
-// CORPROF_E_RUNTIME_SUSPEND_REQUIRED => the jumpstamp remains unchanged (preventing contention will be necessary)
-// other failing HR => VirtualProtect failed, the jumpstamp remains unchanged
-//
-HRESULT ReJitInfo::UpdateJumpStampHelper(BYTE* pbCode, INT64 i64OldValue, INT64 i64NewValue, BOOL fContentionPossible)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- MethodDesc * pMD = GetMethodDesc();
- _ASSERTE(pMD->GetReJitManager()->IsTableCrstOwnedByCurrentThread() || !fContentionPossible);
-
- // When ReJIT is enabled, method entrypoints are always at least 8-byte aligned (see
- // code:EEJitManager::allocCode), so we can do a single 64-bit interlocked operation
- // to update the jump target. However, some code may have gotten compiled before
- // the profiler had a chance to enable ReJIT (e.g., NGENd code, or code JITted
- // before a profiler attaches). In such cases, we cannot rely on a simple
- // interlocked operation, and instead must suspend the runtime to ensure we can
- // safely update the jmp instruction.
- //
- // This method doesn't verify that the method is actually safe to rejit, we expect
- // callers to do that. At the moment NGEN'ed code is safe to rejit even if
- // it is unaligned, but code generated before the profiler attaches is not.
- if (fContentionPossible && !(IS_ALIGNED(pbCode, sizeof(INT64))))
- {
- return CORPROF_E_RUNTIME_SUSPEND_REQUIRED;
- }
-
- // The debugging API may also try to write to this function (though
- // with an int 3 for breakpoint purposes). Coordinate with the debugger so we know
- // whether we can safely patch the actual code, or instead write to the debugger's
- // buffer.
- if (fContentionPossible)
- {
- for (CORDB_ADDRESS_TYPE* pbProbeAddr = pbCode; pbProbeAddr < pbCode + ReJitInfo::JumpStubSize; pbProbeAddr++)
- {
- if (NULL != DebuggerController::GetPatchTable()->GetPatch(pbProbeAddr))
- {
- return CORPROF_E_RUNTIME_SUSPEND_REQUIRED;
- }
- }
- }
-
-#if defined(_X86_) || defined(_AMD64_)
-
- DWORD oldProt;
- if (!ClrVirtualProtect((LPVOID)pbCode, 8, PAGE_EXECUTE_READWRITE, &oldProt))
- {
- return HRESULT_FROM_WIN32(GetLastError());
- }
-
- if (fContentionPossible)
- {
- INT64 i64InterlockReportedOldValue = FastInterlockCompareExchangeLong((INT64 *)pbCode, i64NewValue, i64OldValue);
- // Since changes to these bytes are protected by this rejitmgr's m_crstTable, we
- // shouldn't have two writers conflicting.
- _ASSERTE(i64InterlockReportedOldValue == i64OldValue);
- }
- else
- {
- // In this path the caller ensures:
- // a) no thread will execute through the prologue area we are modifying
- // b) no thread is stopped in a prologue such that it resumes in the middle of code we are modifying
- // c) no thread is doing a debugger patch skip operation in which an unmodified copy of the method's
- // code could be executed from a patch skip buffer.
-
- // PERF: we might still want a faster path through here if we aren't debugging that doesn't do
- // all the patch checks
- for (int i = 0; i < ReJitInfo::JumpStubSize; i++)
- {
- *FirstCodeByteAddr(pbCode+i, DebuggerController::GetPatchTable()->GetPatch(pbCode+i)) = ((BYTE*)&i64NewValue)[i];
- }
- }
-
- if (oldProt != PAGE_EXECUTE_READWRITE)
- {
- // The CLR codebase in many locations simply ignores failures to restore the page protections
- // Its true that it isn't a problem functionally, but it seems a bit sketchy?
- // I am following the convention for now.
- ClrVirtualProtect((LPVOID)pbCode, 8, oldProt, &oldProt);
- }
-
- FlushInstructionCache(GetCurrentProcess(), pbCode, ReJitInfo::JumpStubSize);
- return S_OK;
-
-#else // _X86_ || _AMD64_
-#error "Need to define a way to jump-stamp the prolog in a safe way for this platform"
-#endif // _X86_ || _AMD64_
-}
-
-
-#endif // DACCESS_COMPILE
-// The rest of the ReJitInfo methods are safe to compile for DAC
-
-
-
-//---------------------------------------------------------------------------------------
-//
-// ReJitInfos can be constructed in two ways: As a "regular" ReJitInfo indexed by
-// MethodDesc *, or as a "placeholder" ReJitInfo (to satisfy pre-rejit requests) indexed
-// by (Module *, methodDef). Both constructors call this helper to do all the common
-// code for initializing the ReJitInfo.
-//
-
-void ReJitInfo::CommonInit()
-{
- LIMITED_METHOD_CONTRACT;
-
- m_pCode = NULL;
- m_pNext = NULL;
- m_dwInternalFlags = kJumpNone;
- m_pShared->AddMethod(this);
- ZeroMemory(m_rgSavedCode, sizeof(m_rgSavedCode));
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Regardless of which kind of ReJitInfo this is, this will always return its
-// corresponding Module * & methodDef
-//
-// Arguments:
-// * ppModule - [out] Module * related to this ReJitInfo (which contains the
-// returned methodDef)
-// * pMethodDef - [out] methodDef related to this ReJitInfo
-//
-
-void ReJitInfo::GetModuleAndTokenRegardlessOfKeyType(Module ** ppModule, mdMethodDef * pMethodDef)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- SO_NOT_MAINLINE;
- }
- CONTRACTL_END;
-
- _ASSERTE(ppModule != NULL);
- _ASSERTE(pMethodDef != NULL);
-
- if (m_key.m_keyType == Key::kMetadataToken)
- {
- GetModuleAndToken(ppModule, pMethodDef);
- }
- else
- {
- MethodDesc * pMD = GetMethodDesc();
- _ASSERTE(pMD != NULL);
- _ASSERTE(pMD->IsRestored());
-
- *ppModule = pMD->GetModule();
- *pMethodDef = pMD->GetMemberDef();
- }
-
- _ASSERTE(*ppModule != NULL);
- _ASSERTE(*pMethodDef != mdTokenNil);
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Used as part of the hash table implementation in the containing ReJitManager, this
-// hashes a ReJitInfo by MethodDesc * when available, else by (Module *, methodDef)
-//
-// Arguments:
-// key - Key representing the ReJitInfo to hash
-//
-// Return Value:
-// Hash value of the ReJitInfo represented by the specified key
-//
-
-// static
-COUNT_T ReJitInfo::Hash(Key key)
-{
- LIMITED_METHOD_CONTRACT;
-
- if (key.m_keyType == Key::kMethodDesc)
- {
- return HashPtr(0, PTR_MethodDesc(key.m_pMD));
- }
-
- _ASSERTE (key.m_keyType == Key::kMetadataToken);
-
- return HashPtr(key.m_methodDef, PTR_Module(key.m_pModule));
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Return the IL to compile for a given ReJitInfo
-//
-// Return Value:
-// Pointer to IL buffer to compile. If the profiler has specified IL to rejit,
-// this will be our copy of the IL buffer specified by the profiler. Else, this
-// points to the original IL for the method from its module's metadata.
-//
-// Notes:
-// IL memory is managed by us, not the caller. Caller must not free the buffer.
-//
-
-COR_ILMETHOD * ReJitInfo::GetIL()
-{
- CONTRACTL
- {
- THROWS; // Getting original IL via PEFile::GetIL can throw
- CAN_TAKE_LOCK; // Looking up dynamically overridden IL takes a lock
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- if (m_pShared->m_pbIL != NULL)
- {
- return reinterpret_cast<COR_ILMETHOD *>(m_pShared->m_pbIL);
- }
-
- // If the user hasn't overriden us, get whatever the original IL had
- return GetMethodDesc()->GetILHeader(TRUE);
-}
-
-
-//---------------------------------------------------------------------------------------
-// SharedReJitInfo implementation
-
-
-SharedReJitInfo::SharedReJitInfo()
- : m_dwInternalFlags(kStateRequested),
- m_pbIL(NULL),
- m_dwCodegenFlags(0),
- m_reJitId(InterlockedIncrement(reinterpret_cast<LONG*>(&s_GlobalReJitId))),
- m_pInfoList(NULL)
-{
- LIMITED_METHOD_CONTRACT;
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Link in the specified ReJitInfo to the list maintained by this SharedReJitInfo
-//
-// Arguments:
-// pInfo - ReJitInfo being added
-//
-
-void SharedReJitInfo::AddMethod(ReJitInfo * pInfo)
-{
- LIMITED_METHOD_CONTRACT;
-
- _ASSERTE(pInfo->m_pShared == this);
-
- // Push it on the head of our list
- _ASSERTE(pInfo->m_pNext == NULL);
- pInfo->m_pNext = PTR_ReJitInfo(m_pInfoList);
- m_pInfoList = pInfo;
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Unlink the specified ReJitInfo from the list maintained by this SharedReJitInfo.
-// Currently this is only used on AD unload to remove ReJitInfos of non-domain-neutral instantiations
-// of domain-neutral generics (which are tracked in the SharedDomain's ReJitManager).
-// This may be used in the future once we implement memory reclamation on revert().
-//
-// Arguments:
-// pInfo - ReJitInfo being removed
-//
-
-void SharedReJitInfo::RemoveMethod(ReJitInfo * pInfo)
-{
- LIMITED_METHOD_CONTRACT;
-
-#ifndef DACCESS_COMPILE
-
- // Find it
- ReJitInfo ** ppEntry = &m_pInfoList;
- while (*ppEntry != pInfo)
- {
- ppEntry = &(*ppEntry)->m_pNext;
- _ASSERTE(*ppEntry != NULL);
- }
-
- // Remove it
- _ASSERTE((*ppEntry)->m_pShared == this);
- *ppEntry = (*ppEntry)->m_pNext;
-
-#endif // DACCESS_COMPILE
-}
-
-//---------------------------------------------------------------------------------------
-//
-// MethodDesc::MakeJitWorker() calls this to determine if there's an outstanding
-// "pre-rejit" request for a MethodDesc that has just been jitted for the first time.
-// This is also called when methods are being restored in NGEN images. The sequence looks like:
-// *Enter holder
-// Enter Rejit table lock
-// DoJumpStampIfNecessary
-// *Runtime code publishes/restores method
-// *Exit holder
-// Leave rejit table lock
-// Send rejit error callbacks if needed
-//
-// This also has a non-locking early-out if ReJIT is not enabled.
-//
-// #PublishCode:
-// Note that the runtime needs to publish/restore the PCODE while this holder is
-// on the stack, so it can happen under the ReJitManager's lock.
-// This prevents a "lost pre-rejit" race with a profiler that calls
-// RequestReJIT just as the method finishes compiling. In particular, the locking ensures
-// atomicity between this set of steps (performed in DoJumpStampIfNecessary):
-// * (1) Checking whether there is a pre-rejit request for this MD
-// * (2) If not, skip doing the pre-rejit-jmp-stamp
-// * (3) Publishing the PCODE
-//
-// with respect to these steps performed in RequestReJIT:
-// * (a) Is PCODE published yet?
-// * (b) If not, create pre-rejit (placeholder) ReJitInfo which the prestub will
-// consult when it JITs the original IL
-//
-// Without this atomicity, we could get the ordering (1), (2), (a), (b), (3), resulting
-// in the rejit request getting completely ignored (i.e., we file away the pre-rejit
-// placeholder AFTER the prestub checks for it).
-//
-// A similar race is possible for code being restored. In that case the restoring thread
-// does:
-// * (1) Check if there is a pre-rejit request for this MD
-// * (2) If not, no need to jmp-stamp
-// * (3) Restore the MD
-
-// And RequestRejit does:
-// * (a) [In LoadedMethodDescIterator] Is a potential MD restored yet?
-// * (b) [In MarkInstantiationsForReJit] If not, don't queue it for jump-stamping
-//
-// Same ordering (1), (2), (a), (b), (3) results in missing both opportunities to jump
-// stamp.
-
-#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
-ReJitPublishMethodHolder::ReJitPublishMethodHolder(MethodDesc* pMethodDesc, PCODE pCode) :
-m_pMD(NULL), m_hr(S_OK)
-{
- // This method can't have a contract because entering the table lock
- // below increments GCNoTrigger count. Contracts always revert these changes
- // at the end of the method but we need the incremented count to flow out of the
- // method. The balancing decrement occurs in the destructor.
- STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_CAN_TAKE_LOCK;
- STATIC_CONTRACT_MODE_ANY;
-
- // We come here from the PreStub and from MethodDesc::CheckRestore
- // The method should be effectively restored, but we haven't yet
- // cleared the unrestored bit so we can't assert pMethodDesc->IsRestored()
- // We can assert:
- _ASSERTE(pMethodDesc->GetMethodTable()->IsRestored());
-
- if (ReJitManager::IsReJITEnabled() && (pCode != NULL))
- {
- m_pMD = pMethodDesc;
- ReJitManager* pReJitManager = pMethodDesc->GetReJitManager();
- pReJitManager->m_crstTable.Enter();
- m_hr = pReJitManager->DoJumpStampIfNecessary(pMethodDesc, pCode);
- }
-}
-
-
-ReJitPublishMethodHolder::~ReJitPublishMethodHolder()
-{
- // This method can't have a contract because leaving the table lock
- // below decrements GCNoTrigger count. Contracts always revert these changes
- // at the end of the method but we need the decremented count to flow out of the
- // method. The balancing increment occurred in the constructor.
- STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_GC_TRIGGERS; // NOTRIGGER until we leave the lock
- STATIC_CONTRACT_CAN_TAKE_LOCK;
- STATIC_CONTRACT_MODE_ANY;
-
- if (m_pMD)
- {
- ReJitManager* pReJitManager = m_pMD->GetReJitManager();
- pReJitManager->m_crstTable.Leave();
- if (FAILED(m_hr))
- {
- ReJitManager::ReportReJITError(m_pMD->GetModule(), m_pMD->GetMemberDef(), m_pMD, m_hr);
- }
- }
-}
-
-ReJitPublishMethodTableHolder::ReJitPublishMethodTableHolder(MethodTable* pMethodTable) :
-m_pMethodTable(NULL)
-{
- // This method can't have a contract because entering the table lock
- // below increments GCNoTrigger count. Contracts always revert these changes
- // at the end of the method but we need the incremented count to flow out of the
- // method. The balancing decrement occurs in the destructor.
- STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_CAN_TAKE_LOCK;
- STATIC_CONTRACT_MODE_ANY;
-
- // We come here from MethodTable::SetIsRestored
- // The method table should be effectively restored, but we haven't yet
- // cleared the unrestored bit so we can't assert pMethodTable->IsRestored()
-
- if (ReJitManager::IsReJITEnabled())
- {
- m_pMethodTable = pMethodTable;
- ReJitManager* pReJitManager = pMethodTable->GetModule()->GetReJitManager();
- pReJitManager->m_crstTable.Enter();
- MethodTable::IntroducedMethodIterator itMethods(pMethodTable, FALSE);
- for (; itMethods.IsValid(); itMethods.Next())
- {
- // Although the MethodTable is restored, the methods might not be.
- // We need to be careful to only query portions of the MethodDesc
- // that work in a partially restored state. The only methods that need
- // further restoration are IL stubs (which aren't rejittable) and
- // generic methods. The only generic methods directly accesible from
- // the MethodTable are definitions. GetNativeCode() on generic defs
- // will run succesfully and return NULL which short circuits the
- // rest of the logic.
- MethodDesc * pMD = itMethods.GetMethodDesc();
- PCODE pCode = pMD->GetNativeCode();
- if (pCode != NULL)
- {
- HRESULT hr = pReJitManager->DoJumpStampIfNecessary(pMD, pCode);
- if (FAILED(hr))
- {
- ReJitManager::AddReJITError(pMD->GetModule(), pMD->GetMemberDef(), pMD, hr, &m_errors);
- }
- }
- }
- }
-}
-
-
-ReJitPublishMethodTableHolder::~ReJitPublishMethodTableHolder()
-{
- // This method can't have a contract because leaving the table lock
- // below decrements GCNoTrigger count. Contracts always revert these changes
- // at the end of the method but we need the decremented count to flow out of the
- // method. The balancing increment occurred in the constructor.
- STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_GC_TRIGGERS; // NOTRIGGER until we leave the lock
- STATIC_CONTRACT_CAN_TAKE_LOCK;
- STATIC_CONTRACT_MODE_ANY;
-
- if (m_pMethodTable)
- {
- ReJitManager* pReJitManager = m_pMethodTable->GetModule()->GetReJitManager();
- pReJitManager->m_crstTable.Leave();
- for (int i = 0; i < m_errors.Count(); i++)
- {
- ReJitManager::ReportReJITError(&(m_errors[i]));
- }
- }
-}
-#endif // !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
-
+#endif // FEATURE_CODE_VERSIONING
#else // FEATURE_REJIT
// On architectures that don't support rejit, just keep around some do-nothing
@@ -3972,19 +1060,6 @@ HRESULT ReJitManager::RequestRevert(
return E_NOTIMPL;
}
-// static
-void ReJitManager::OnAppDomainExit(AppDomain * pAppDomain)
-{
-}
-
-ReJitManager::ReJitManager()
-{
-}
-
-void ReJitManager::PreInit(BOOL fSharedDomain)
-{
-}
-
ReJITID ReJitManager::GetReJitId(PTR_MethodDesc pMD, PCODE pCodeStart)
{
return 0;
@@ -3995,11 +1070,6 @@ ReJITID ReJitManager::GetReJitIdNoLock(PTR_MethodDesc pMD, PCODE pCodeStart)
return 0;
}
-PCODE ReJitManager::GetCodeStart(PTR_MethodDesc pMD, ReJITID reJitId)
-{
- return NULL;
-}
-
HRESULT ReJitManager::GetReJITIDs(PTR_MethodDesc pMD, ULONG cReJitIds, ULONG * pcReJitIds, ReJITID reJitIds[])
{
return E_NOTIMPL;
diff --git a/src/vm/rejit.h b/src/vm/rejit.h
index 3c8bfd66b2..8401ecb960 100644
--- a/src/vm/rejit.h
+++ b/src/vm/rejit.h
@@ -19,9 +19,8 @@
#include "contractimpl.h"
#include "shash.h"
#include "corprof.h"
+#include "codeversion.h"
-struct ReJitInfo;
-struct SharedReJitInfo;
class ReJitManager;
class MethodDesc;
class ClrDataAccess;
@@ -68,347 +67,9 @@ protected:
COR_IL_MAP * m_rgInstrumentedMapEntries;
};
-//---------------------------------------------------------------------------------------
-// Helper base class used by the structures below to enforce that their
-// pieces get allocated on the appropriate loader heaps
-//
-struct LoaderHeapAllocatedRejitStructure
-{
-public:
- void * operator new (size_t size, LoaderHeap * pHeap, const NoThrow&);
- void * operator new (size_t size, LoaderHeap * pHeap);
-};
-
-//---------------------------------------------------------------------------------------
-// One instance of this per rejit request for each mdMethodDef. Contains IL and
-// compilation flags. This is used primarily as a structure, so most of its
-// members are left public.
-//
-struct SharedReJitInfo : public LoaderHeapAllocatedRejitStructure
-{
-private:
- // This determines what to use next as the value of the profiling API's ReJITID.
- static ReJITID s_GlobalReJitId;
-
-public:
- // These represent the various states a SharedReJitInfo can be in.
- enum InternalFlags
- {
- // The profiler has requested a ReJit, so we've allocated stuff, but we haven't
- // called back to the profiler to get any info or indicate that the ReJit has
- // started. (This Info can be 'reused' for a new ReJit if the
- // profiler calls RequestRejit again before we transition to the next state.)
- kStateRequested = 0x00000000,
-
- // The CLR has initiated the call to the profiler's GetReJITParameters() callback
- // but it hasn't completed yet. At this point we have to assume the profiler has
- // commited to a specific IL body, even if the CLR doesn't know what it is yet.
- // If the profiler calls RequestRejit we need to allocate a new SharedReJitInfo
- // and call GetReJITParameters() again.
- kStateGettingReJITParameters = 0x00000001,
-
- // We have asked the profiler about this method via ICorProfilerFunctionControl,
- // and have thus stored the IL and codegen flags the profiler specified. Can only
- // transition to kStateReverted from this state.
- kStateActive = 0x00000002,
-
- // The methoddef has been reverted, but not freed yet. It (or its instantiations
- // for generics) *MAY* still be active on the stack someplace or have outstanding
- // memory references.
- kStateReverted = 0x00000003,
-
-
- kStateMask = 0x0000000F,
- };
-
- DWORD m_dwInternalFlags;
-
- // Data
- LPBYTE m_pbIL;
- DWORD m_dwCodegenFlags;
- InstrumentedILOffsetMapping m_instrumentedILMap;
-
-private:
- // This is the value of the profiling API's ReJITID for this particular
- // rejit request.
- const ReJITID m_reJitId;
-
- // Children
- ReJitInfo * m_pInfoList;
-
-public:
- // Constructor
- SharedReJitInfo();
-
- // Intentionally no destructor. SharedReJitInfo and its contents are
- // allocated on a loader heap, so SharedReJitInfo and its contents will be
- // freed when the AD is unloaded.
-
- // Read-Only Identifcation
- ReJITID GetId() { return m_reJitId; }
-
- void AddMethod(ReJitInfo * pInfo);
-
- void RemoveMethod(ReJitInfo * pInfo);
-
- ReJitInfo * GetMethods() { return m_pInfoList; }
-
- InternalFlags GetState();
-};
-
-//---------------------------------------------------------------------------------------
-// One instance of this per rejit request for each MethodDesc*. One SharedReJitInfo
-// corresponds to many ReJitInfos, as the SharedReJitInfo tracks the rejit request for
-// the methodDef token whereas the ReJitInfo tracks the rejit request for each correspond
-// MethodDesc* (instantiation). Points to actual generated code.
-//
-// In the case of "pre-rejit" (see comment at top of rejit.cpp), a special "placeholder"
-// instance of ReJitInfo is used to "remember" to jmp-stamp a not-yet-jitted-method once
-// it finally gets jitted the first time.
-//
-// Each ReJitManager contains a hash table of ReJitInfo instances, keyed by
-// ReJitManager::m_key.
-//
-// This is used primarily as a structure, so most of its members are left public.
-//
-struct ReJitInfo : public LoaderHeapAllocatedRejitStructure
-{
-public:
- // The size of the code used to jump stamp the prolog
- static const size_t JumpStubSize =
-#if defined(_X86_) || defined(_AMD64_)
- 5;
-#else
-#error "Need to define size of rejit jump-stamp for this platform"
- 1;
-#endif
-
- // Used by PtrSHash template as the key for this ReJitInfo. For regular
- // ReJitInfos, the key is the MethodDesc*. For placeholder ReJitInfos
- // (to facilitate pre-rejit), the key is (Module*, mdMethodDef).
- struct Key
- {
- public:
- enum
- {
- // The key has not yet had its values initialized
- kUninitialized = 0x0,
-
- // The key represents a loaded MethodDesc, and is identified by the m_pMD
- // field
- kMethodDesc = 0x1,
-
- // The key represents a "placeholder" ReJitInfo identified not by loaded
- // MethodDesc, but by the module and metadata token (m_pModule,
- // m_methodDef).
- kMetadataToken = 0x2,
- };
-
- // Storage consists of a discriminated union between MethodDesc* or
- // (Module*, mdMethodDef), with the key type as the discriminator.
- union
- {
- TADDR m_pMD;
- TADDR m_pModule;
- };
- ULONG32 m_methodDef : 28;
- ULONG32 m_keyType : 2;
-
- Key();
- Key(PTR_MethodDesc pMD);
- Key(PTR_Module pModule, mdMethodDef methodDef);
- };
-
- static COUNT_T Hash(Key key);
-
- enum InternalFlags
- {
- // This ReJitInfo is either a placeholder (identified by module and
- // metadata token, rather than loaded MethodDesc) OR this ReJitInfo is
- // identified by a loaded MethodDesc that has been reverted OR not yet
- // been jump-stamped. In the last case, the time window where this
- // ReJitInfo would stay in kJumpNone is rather small, as
- // RequestReJIT() will immediately cause the originally JITted code to
- // be jump-stamped.
- kJumpNone = 0x00000000,
-
- // This ReJitInfo is identified by a loaded MethodDesc that has been compiled and
- // jump-stamped, with the target being the prestub. The MethodDesc has not yet
- // been rejitted
- kJumpToPrestub = 0x00000001,
-
- // This ReJitInfo is identified by a loaded MethodDesc that has been compiled AND
- // rejitted. The top of the originally JITted code has been jump-stamped, with
- // the target being the latest version of the rejitted code.
- kJumpToRejittedCode = 0x00000002,
-
- kStateMask = 0x0000000F,
- };
-
- Key m_key;
- DWORD m_dwInternalFlags;
-
- // The beginning of the rejitted code
- PCODE m_pCode;
-
- // The parent SharedReJitInfo, which manages the rejit request for all
- // instantiations.
- PTR_SharedReJitInfo const m_pShared;
-
- // My next sibling ReJitInfo for this rejit request (e.g., another
- // generic instantiation of the same method)
- PTR_ReJitInfo m_pNext;
-
- // The originally JITted code that was overwritten with the jmp stamp.
- BYTE m_rgSavedCode[JumpStubSize];
-
-
- ReJitInfo(PTR_MethodDesc pMD, SharedReJitInfo * pShared);
- ReJitInfo(PTR_Module pModule, mdMethodDef methodDef, SharedReJitInfo * pShared);
-
- // Intentionally no destructor. ReJitInfo is allocated on a loader heap,
- // and will be freed (along with its associated SharedReJitInfo) when the
- // AD is unloaded.
-
- Key GetKey();
- PTR_MethodDesc GetMethodDesc();
- void GetModuleAndToken(Module ** ppModule, mdMethodDef * pMethodDef);
- void GetModuleAndTokenRegardlessOfKeyType(Module ** ppModule, mdMethodDef * pMethodDef);
- InternalFlags GetState();
-
- COR_ILMETHOD * GetIL();
-
- HRESULT JumpStampNativeCode(PCODE pCode = NULL);
- HRESULT UndoJumpStampNativeCode(BOOL fEESuspended);
- HRESULT UpdateJumpTarget(BOOL fEESuspended, PCODE pRejittedCode);
- HRESULT UpdateJumpStampHelper(BYTE* pbCode, INT64 i64OldValue, INT64 i64newValue, BOOL fContentionPossible);
-
-
-protected:
- void CommonInit();
- INDEBUG(BOOL CodeIsSaved();)
-};
-
-//---------------------------------------------------------------------------------------
-// Used by the SHash inside ReJitManager which maintains the set of ReJitInfo instances.
-//
-class ReJitInfoTraits : public DefaultSHashTraits<PTR_ReJitInfo>
-{
-public:
-
- // explicitly declare local typedefs for these traits types, otherwise
- // the compiler may get confused
- typedef DefaultSHashTraits<PTR_ReJitInfo> PARENT;
- typedef PARENT::element_t element_t;
- typedef PARENT::count_t count_t;
-
- typedef ReJitInfo::Key key_t;
-
- static key_t GetKey(const element_t &e);
- static BOOL Equals(key_t k1, key_t k2);
- static count_t Hash(key_t k);
- static bool IsNull(const element_t &e);
-};
-
-// RequestRejit and RequestRevert use these batches to accumulate ReJitInfos that need their
-// jump stamps updated
-class ReJitManager;
-struct ReJitManagerJumpStampBatch
-{
- ReJitManagerJumpStampBatch(ReJitManager * pReJitManager) : undoMethods(), preStubMethods()
- {
- LIMITED_METHOD_CONTRACT;
- this->pReJitManager = pReJitManager;
- }
-
- ReJitManager* pReJitManager;
- CDynArray<ReJitInfo *> undoMethods;
- CDynArray<ReJitInfo *> preStubMethods;
-};
-
-class ReJitManagerJumpStampBatchTraits : public DefaultSHashTraits<ReJitManagerJumpStampBatch *>
-{
-public:
-
- // explicitly declare local typedefs for these traits types, otherwise
- // the compiler may get confused
- typedef DefaultSHashTraits<ReJitManagerJumpStampBatch *> PARENT;
- typedef PARENT::element_t element_t;
- typedef PARENT::count_t count_t;
-
- typedef ReJitManager * key_t;
-
- static key_t GetKey(const element_t &e)
- {
- return e->pReJitManager;
- }
-
- static BOOL Equals(key_t k1, key_t k2)
- {
- return (k1 == k2);
- }
-
- static count_t Hash(key_t k)
- {
- return (count_t)k;
- }
-
- static bool IsNull(const element_t &e)
- {
- return (e == NULL);
- }
-};
-
-struct ReJitReportErrorWorkItem
-{
- Module* pModule;
- mdMethodDef methodDef;
- MethodDesc* pMethodDesc;
- HRESULT hrStatus;
-};
-
-
#endif // FEATURE_REJIT
-//
-// These holders are used by runtime code that is making new code
-// available for execution, either by publishing jitted code
-// or restoring NGEN code. It ensures the publishing is synchronized
-// with rejit requests
-//
-class ReJitPublishMethodHolder
-{
-public:
-#if !defined(FEATURE_REJIT) || defined(DACCESS_COMPILE) || defined(CROSSGEN_COMPILE)
- ReJitPublishMethodHolder(MethodDesc* pMethod, PCODE pCode) { }
-#else
- ReJitPublishMethodHolder(MethodDesc* pMethod, PCODE pCode);
- ~ReJitPublishMethodHolder();
-#endif
-
-private:
-#if defined(FEATURE_REJIT)
- MethodDesc * m_pMD;
- HRESULT m_hr;
-#endif
-};
-class ReJitPublishMethodTableHolder
-{
-public:
-#if !defined(FEATURE_REJIT) || defined(DACCESS_COMPILE) || defined(CROSSGEN_COMPILE)
- ReJitPublishMethodTableHolder(MethodTable* pMethodTable) { }
-#else
- ReJitPublishMethodTableHolder(MethodTable* pMethodTable);
- ~ReJitPublishMethodTableHolder();
-#endif
-
-private:
-#if defined(FEATURE_REJIT)
- MethodTable* m_pMethodTable;
- CDynArray<ReJitReportErrorWorkItem> m_errors;
-#endif
-};
//---------------------------------------------------------------------------------------
// The big honcho. One of these per AppDomain, plus one for the
@@ -420,55 +81,23 @@ class ReJitManager
friend class ClrDataAccess;
friend class DacDbiInterfaceImpl;
- //I would have prefered to make these inner classes, but
- //then I can't friend them from crst easily.
- friend class ReJitPublishMethodHolder;
- friend class ReJitPublishMethodTableHolder;
-
private:
#ifdef FEATURE_REJIT
- // Hash table mapping MethodDesc* (or (ModuleID, mdMethodDef)) to its
- // ReJitInfos. One key may map to multiple ReJitInfos if there have been
- // multiple rejit requests made for the same MD. See
- // code:ReJitManager::ReJitManager#Invariants for more information.
- typedef SHash<ReJitInfoTraits> ReJitInfoHash;
-
// One global crst (for the entire CLR instance) to synchronize
// cross-ReJitManager operations, such as batch calls to RequestRejit and
// RequestRevert (which modify multiple ReJitManager instances).
static CrstStatic s_csGlobalRequest;
- // All The ReJitInfos (and their linked SharedReJitInfos) for this domain.
- ReJitInfoHash m_table;
-
- // The crst that synchronizes the data in m_table, including
- // adding/removing to m_table, as well as state changes made to
- // individual ReJitInfos & SharedReJitInfos in m_table.
- CrstExplicitInit m_crstTable;
-
#endif //FEATURE_REJIT
public:
- // The ReJITManager takes care of grabbing its m_crstTable when necessary. However,
- // for clients who need to do this explicitly (like ETW rundown), this holder may be
- // used.
- class TableLockHolder
-#ifdef FEATURE_REJIT
- : public CrstHolder
-#endif
- {
- public:
- TableLockHolder(ReJitManager * pReJitManager);
- };
static void InitStatic();
static BOOL IsReJITEnabled();
- static void OnAppDomainExit(AppDomain * pAppDomain);
-
static HRESULT RequestReJIT(
ULONG cFunctions,
ModuleID rgModuleIDs[],
@@ -480,85 +109,56 @@ public:
mdMethodDef rgMethodDefs[],
HRESULT rgHrStatuses[]);
- static PCODE DoReJitIfNecessary(PTR_MethodDesc pMD); // Invokes the jit, or returns previously rejitted code
-
- static void DoJumpStampForAssemblyIfNecessary(Assembly* pAssemblyToSearch);
-
- static DWORD GetCurrentReJitFlags(PTR_MethodDesc pMD);
-
- ReJitManager();
-
- void PreInit(BOOL fSharedDomain);
-
- ReJITID GetReJitId(PTR_MethodDesc pMD, PCODE pCodeStart);
-
- ReJITID GetReJitIdNoLock(PTR_MethodDesc pMD, PCODE pCodeStart);
+ static HRESULT ConfigureILCodeVersion(ILCodeVersion ilCodeVersion);
+ static CORJIT_FLAGS JitFlagsFromProfCodegenFlags(DWORD dwCodegenFlags);
- PCODE GetCodeStart(PTR_MethodDesc pMD, ReJITID reJitId);
-
- HRESULT GetReJITIDs(PTR_MethodDesc pMD, ULONG cReJitIds, ULONG * pcReJitIds, ReJITID reJitIds[]);
+ static ReJITID GetReJitId(PTR_MethodDesc pMD, PCODE pCodeStart);
+ static ReJITID GetReJitIdNoLock(PTR_MethodDesc pMD, PCODE pCodeStart);
+ static HRESULT GetReJITIDs(PTR_MethodDesc pMD, ULONG cReJitIds, ULONG * pcReJitIds, ReJITID reJitIds[]);
#ifdef FEATURE_REJIT
-
- INDEBUG(BOOL IsTableCrstOwnedByCurrentThread());
+#ifndef DACCESS_COMPILE
+ static void ReportReJITError(CodeVersionManager::CodePublishError* pErrorRecord);
+ static void ReportReJITError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus);
+#endif
private:
- static HRESULT IsMethodSafeForReJit(PTR_MethodDesc pMD);
- static void ReportReJITError(ReJitReportErrorWorkItem* pErrorRecord);
- static void ReportReJITError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus);
- static HRESULT AddReJITError(ReJitInfo* pReJitInfo, HRESULT hrStatus, CDynArray<ReJitReportErrorWorkItem> * pErrors);
- static HRESULT AddReJITError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus, CDynArray<ReJitReportErrorWorkItem> * pErrors);
- HRESULT BatchUpdateJumpStamps(CDynArray<ReJitInfo *> * pUndoMethods, CDynArray<ReJitInfo *> * pPreStubMethods, CDynArray<ReJitReportErrorWorkItem> * pErrors);
- PCODE DoReJitIfNecessaryWorker(PTR_MethodDesc pMD); // Invokes the jit, or returns previously rejitted code
- DWORD GetCurrentReJitFlagsWorker(PTR_MethodDesc pMD);
+ static HRESULT UpdateActiveILVersions(
+ ULONG cFunctions,
+ ModuleID rgModuleIDs[],
+ mdMethodDef rgMethodDefs[],
+ HRESULT rgHrStatuses[],
+ BOOL fIsRevert);
- HRESULT MarkAllInstantiationsForReJit(
- SharedReJitInfo * pSharedForAllGenericInstantiations,
- AppDomain * pAppDomainToSearch,
- PTR_Module pModuleContainingGenericDefinition,
- mdMethodDef methodDef,
- ReJitManagerJumpStampBatch* pJumpStampBatch,
- CDynArray<ReJitReportErrorWorkItem> * pRejitErrors);
-
- INDEBUG(BaseDomain * m_pDomain;)
- INDEBUG(void Dump(LPCSTR szIntroText);)
- INDEBUG(void AssertRestOfEntriesAreReverted(
- ReJitInfoHash::KeyIterator iter,
- ReJitInfoHash::KeyIterator end);)
-
-
- HRESULT DoJumpStampIfNecessary(MethodDesc* pMD, PCODE pCode);
- HRESULT MarkForReJit(PTR_MethodDesc pMD, SharedReJitInfo * pSharedToReuse, ReJitManagerJumpStampBatch* pJumpStampBatch, CDynArray<ReJitReportErrorWorkItem> * pRejitErrors, SharedReJitInfo ** ppSharedUsed);
- HRESULT MarkForReJit(PTR_Module pModule, mdMethodDef methodDef, ReJitManagerJumpStampBatch* pJumpStampBatch, CDynArray<ReJitReportErrorWorkItem> * pRejitErrors, SharedReJitInfo ** ppSharedUsed);
- HRESULT MarkForReJitHelper(
- PTR_MethodDesc pMD,
- PTR_Module pModule,
- mdMethodDef methodDef,
- SharedReJitInfo * pSharedToReuse,
- ReJitManagerJumpStampBatch* pJumpStampBatch,
- CDynArray<ReJitReportErrorWorkItem> * pRejitErrors,
- /* out */ SharedReJitInfo ** ppSharedUsed);
- HRESULT AddNewReJitInfo(
- PTR_MethodDesc pMD,
+ struct CodeActivationBatch
+ {
+ CodeActivationBatch(CodeVersionManager * pCodeVersionManager) :
+ m_pCodeVersionManager(pCodeVersionManager)
+ {}
+ CodeVersionManager* m_pCodeVersionManager;
+ CDynArray<ILCodeVersion> m_methodsToActivate;
+ };
+
+ class CodeActivationBatchTraits : public DefaultSHashTraits<CodeActivationBatch *>
+ {
+ public:
+ typedef DefaultSHashTraits<CodeActivationBatch *> PARENT;
+ typedef PARENT::element_t element_t;
+ typedef PARENT::count_t count_t;
+ typedef CodeVersionManager * key_t;
+ static key_t GetKey(const element_t &e) { return e->m_pCodeVersionManager; }
+ static BOOL Equals(key_t k1, key_t k2) { return (k1 == k2); }
+ static count_t Hash(key_t k) { return (count_t)k; }
+ static bool IsNull(const element_t &e) { return (e == NULL); }
+ };
+
+ static HRESULT BindILVersion(
+ CodeVersionManager* pCodeVersionManager,
PTR_Module pModule,
mdMethodDef methodDef,
- SharedReJitInfo * pShared,
- ReJitInfo ** ppInfo);
- HRESULT RequestRevertByToken(PTR_Module pModule, mdMethodDef methodDef);
- PTR_ReJitInfo FindReJitInfo(PTR_MethodDesc pMD, PCODE pCodeStart, ReJITID reJitId);
- PTR_ReJitInfo FindNonRevertedReJitInfo(PTR_Module pModule, mdMethodDef methodDef);
- PTR_ReJitInfo FindNonRevertedReJitInfo(PTR_MethodDesc pMD);
- PTR_ReJitInfo FindNonRevertedReJitInfoHelper(PTR_MethodDesc pMD, PTR_Module pModule, mdMethodDef methodDef);
- ReJitInfo* FindPreReJittedReJitInfo(ReJitInfoHash::KeyIterator beginIter, ReJitInfoHash::KeyIterator endIter);
- HRESULT Revert(SharedReJitInfo * pShared, ReJitManagerJumpStampBatch* pJumpStampBatch);
- PCODE DoReJit(ReJitInfo * pInfo);
- ReJitInfoHash::KeyIterator GetBeginIterator(PTR_MethodDesc pMD);
- ReJitInfoHash::KeyIterator GetEndIterator(PTR_MethodDesc pMD);
- ReJitInfoHash::KeyIterator GetBeginIterator(PTR_Module pModule, mdMethodDef methodDef);
- ReJitInfoHash::KeyIterator GetEndIterator(PTR_Module pModule, mdMethodDef methodDef);
- void RemoveReJitInfosFromDomain(AppDomain * pAppDomain);
+ ILCodeVersion *pILCodeVersion);
#endif // FEATURE_REJIT
diff --git a/src/vm/rejit.inl b/src/vm/rejit.inl
index 8662eeaedf..3c42bcea00 100644
--- a/src/vm/rejit.inl
+++ b/src/vm/rejit.inl
@@ -13,149 +13,6 @@
#ifdef FEATURE_REJIT
-inline SharedReJitInfo::InternalFlags SharedReJitInfo::GetState()
-{
- LIMITED_METHOD_CONTRACT;
-
- return (InternalFlags)(m_dwInternalFlags & kStateMask);
-}
-
-inline ReJitInfo::ReJitInfo(PTR_MethodDesc pMD, SharedReJitInfo * pShared) :
- m_key(pMD),
- m_pShared(pShared)
-{
- LIMITED_METHOD_CONTRACT;
-
- CommonInit();
-}
-
-inline ReJitInfo::ReJitInfo(PTR_Module pModule, mdMethodDef methodDef, SharedReJitInfo * pShared) :
- m_key(pModule, methodDef),
- m_pShared(pShared)
-{
- LIMITED_METHOD_CONTRACT;
-
- CommonInit();
-}
-
-inline ReJitInfo::Key::Key() :
- m_pMD(NULL),
- m_methodDef(mdTokenNil),
- m_keyType(kUninitialized)
-{
- LIMITED_METHOD_CONTRACT;
-}
-
-inline ReJitInfo::Key::Key(PTR_MethodDesc pMD) :
- m_pMD(dac_cast<TADDR>(pMD)),
- m_methodDef(mdTokenNil),
- m_keyType(kMethodDesc)
-{
- LIMITED_METHOD_CONTRACT;
-}
-
-inline ReJitInfo::Key::Key(PTR_Module pModule, mdMethodDef methodDef) :
- m_pModule(dac_cast<TADDR>(pModule)),
- m_methodDef(methodDef),
- m_keyType(kMetadataToken)
-{
- LIMITED_METHOD_CONTRACT;
-}
-
-inline ReJitInfo::Key ReJitInfo::GetKey()
-{
- LIMITED_METHOD_CONTRACT;
-
- return m_key;
-}
-
-inline ReJitInfo::InternalFlags ReJitInfo::GetState()
-{
- LIMITED_METHOD_CONTRACT;
-
- return (InternalFlags)(m_dwInternalFlags & kStateMask);
-}
-
-inline PTR_MethodDesc ReJitInfo::GetMethodDesc()
-{
- LIMITED_METHOD_CONTRACT;
-
- _ASSERTE(m_key.m_keyType == Key::kMethodDesc);
- return PTR_MethodDesc(m_key.m_pMD);
-}
-
-inline void ReJitInfo::GetModuleAndToken(Module ** ppModule, mdMethodDef * pMethodDef)
-{
- LIMITED_METHOD_CONTRACT;
-
- _ASSERTE(ppModule != NULL);
- _ASSERTE(pMethodDef != NULL);
- _ASSERTE(m_key.m_keyType == Key::kMetadataToken);
-
- *ppModule = PTR_Module(m_key.m_pModule);
- *pMethodDef = (mdMethodDef) m_key.m_methodDef;
-}
-
-#ifdef _DEBUG
-inline BOOL ReJitInfo::CodeIsSaved()
-{
- LIMITED_METHOD_CONTRACT;
-
- for (size_t i=0; i < sizeof(m_rgSavedCode); i++)
- {
- if (m_rgSavedCode[i] != 0)
- return TRUE;
- }
- return FALSE;
-}
-#endif //_DEBUG
-
-// static
-inline ReJitInfoTraits::key_t ReJitInfoTraits::GetKey(const element_t &e)
-{
- LIMITED_METHOD_CONTRACT;
-
- return e->GetKey();
-}
-
-// static
-inline BOOL ReJitInfoTraits::Equals(key_t k1, key_t k2)
-{
- LIMITED_METHOD_CONTRACT;
-
- // Always use the values of the TADDRs of the MethodDesc * and Module * when treating
- // them as lookup keys into the SHash.
-
- if (k1.m_keyType == ReJitInfo::Key::kMethodDesc)
- {
- return ((k2.m_keyType == ReJitInfo::Key::kMethodDesc) &&
- (dac_cast<TADDR>(PTR_MethodDesc(k1.m_pMD)) ==
- dac_cast<TADDR>(PTR_MethodDesc(k2.m_pMD))));
- }
-
- _ASSERTE(k1.m_keyType == ReJitInfo::Key::kMetadataToken);
- return ((k2.m_keyType == ReJitInfo::Key::kMetadataToken) &&
- (dac_cast<TADDR>(PTR_Module(k1.m_pModule)) ==
- dac_cast<TADDR>(PTR_Module(k2.m_pModule))) &&
- (k1.m_methodDef == k2.m_methodDef));
-}
-
-// static
-inline ReJitInfoTraits::count_t ReJitInfoTraits::Hash(key_t k)
-{
- LIMITED_METHOD_CONTRACT;
-
- return ReJitInfo::Hash(k);
-}
-
-// static
-inline bool ReJitInfoTraits::IsNull(const element_t &e)
-{
- LIMITED_METHOD_CONTRACT;
-
- return e == NULL;
-}
-
// static
inline void ReJitManager::InitStatic()
{
@@ -172,92 +29,9 @@ inline BOOL ReJitManager::IsReJITEnabled()
return CORProfilerEnableRejit();
}
-inline ReJitManager::ReJitInfoHash::KeyIterator ReJitManager::GetBeginIterator(PTR_MethodDesc pMD)
-{
- LIMITED_METHOD_CONTRACT;
-#ifndef DACCESS_COMPILE
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
-#endif
- return m_table.Begin(ReJitInfo::Key(pMD));
-}
-
-inline ReJitManager::ReJitInfoHash::KeyIterator ReJitManager::GetEndIterator(PTR_MethodDesc pMD)
-{
- LIMITED_METHOD_CONTRACT;
-#ifndef DACCESS_COMPILE
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
-#endif
- return m_table.End(ReJitInfo::Key(pMD));
-}
-
-inline ReJitManager::ReJitInfoHash::KeyIterator ReJitManager::GetBeginIterator(PTR_Module pModule, mdMethodDef methodDef)
-{
- LIMITED_METHOD_CONTRACT;
#ifndef DACCESS_COMPILE
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
-#endif
- return m_table.Begin(ReJitInfo::Key(pModule, methodDef));
-}
-
-inline ReJitManager::ReJitInfoHash::KeyIterator ReJitManager::GetEndIterator(PTR_Module pModule, mdMethodDef methodDef)
-{
- LIMITED_METHOD_CONTRACT;
-#ifndef DACCESS_COMPILE
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
-#endif
- return m_table.End(ReJitInfo::Key(pModule, methodDef));
-}
-
-#ifdef _DEBUG
-inline BOOL ReJitManager::IsTableCrstOwnedByCurrentThread()
-{
- LIMITED_METHOD_CONTRACT;
-
- return m_crstTable.OwnedByCurrentThread();
-}
-#endif //_DEBUG
-
-
-inline HRESULT ReJitManager::MarkForReJit(
- PTR_MethodDesc pMD,
- SharedReJitInfo * pSharedToReuse,
- ReJitManagerJumpStampBatch* pJumpStampBatch,
- CDynArray<ReJitReportErrorWorkItem> * pRejitErrors,
- /* out */ SharedReJitInfo ** ppSharedUsed)
-{
- WRAPPER_NO_CONTRACT;
-
- return MarkForReJitHelper(pMD, NULL, mdTokenNil, pSharedToReuse, pJumpStampBatch, pRejitErrors, ppSharedUsed);
-}
-
-inline HRESULT ReJitManager::MarkForReJit(
- PTR_Module pModule,
- mdMethodDef methodDef,
- ReJitManagerJumpStampBatch* pJumpStampBatch,
- CDynArray<ReJitReportErrorWorkItem> * pRejitErrors,
- /* out */ SharedReJitInfo ** ppSharedUsed)
-{
- WRAPPER_NO_CONTRACT;
-
- return MarkForReJitHelper(NULL, pModule, methodDef, NULL, pJumpStampBatch, pRejitErrors, ppSharedUsed);
-}
-
-inline PTR_ReJitInfo ReJitManager::FindNonRevertedReJitInfo(PTR_Module pModule, mdMethodDef methodDef)
-{
- WRAPPER_NO_CONTRACT;
-
- return FindNonRevertedReJitInfoHelper(NULL, pModule, methodDef);
-}
-
-inline PTR_ReJitInfo ReJitManager::FindNonRevertedReJitInfo(PTR_MethodDesc pMD)
-{
- WRAPPER_NO_CONTRACT;
-
- return FindNonRevertedReJitInfoHelper(pMD, NULL, NULL);
-}
-
//static
-inline void ReJitManager::ReportReJITError(ReJitReportErrorWorkItem* pErrorRecord)
+inline void ReJitManager::ReportReJITError(CodeVersionManager::CodePublishError* pErrorRecord)
{
CONTRACTL
{
@@ -298,14 +72,7 @@ inline void ReJitManager::ReportReJITError(Module* pModule, mdMethodDef methodDe
}
#endif // PROFILING_SUPPORTED
}
-
-inline ReJitManager::TableLockHolder::TableLockHolder(ReJitManager * pReJitManager)
-#ifdef FEATURE_REJIT
- : CrstHolder(&pReJitManager->m_crstTable)
-#endif // FEATURE_REJIT
-{
- WRAPPER_NO_CONTRACT;
-}
+#endif // DACCESS_COMPILE
#else // FEATURE_REJIT
@@ -313,32 +80,16 @@ inline ReJitManager::TableLockHolder::TableLockHolder(ReJitManager * pReJitManag
// stubs so the rest of the VM doesn't have to be littered with #ifdef FEATURE_REJIT
// static
-inline PCODE ReJitManager::DoReJitIfNecessary(PTR_MethodDesc)
-{
- return NULL;
-}
-
-// static
inline BOOL ReJitManager::IsReJITEnabled()
{
return FALSE;
}
-// static
-inline DWORD ReJitManager::GetCurrentReJitFlags(PTR_MethodDesc)
-{
- return 0;
-}
-
// static
inline void ReJitManager::InitStatic()
{
}
-inline ReJitManager::TableLockHolder::TableLockHolder(ReJitManager *)
-{
-}
-
#endif // FEATURE_REJIT
diff --git a/src/vm/tieredcompilation.cpp b/src/vm/tieredcompilation.cpp
index acc26b90a5..8486bf2671 100644
--- a/src/vm/tieredcompilation.cpp
+++ b/src/vm/tieredcompilation.cpp
@@ -12,6 +12,7 @@
#include "excep.h"
#include "log.h"
#include "win32threadpool.h"
+#include "threadsuspend.h"
#include "tieredcompilation.h"
// TieredCompilationManager determines which methods should be recompiled and
@@ -127,6 +128,43 @@ BOOL TieredCompilationManager::OnMethodCalled(MethodDesc* pMethodDesc, DWORD cur
{
return TRUE; // stop notifications for this method
}
+ AsyncPromoteMethodToTier1(pMethodDesc);
+ return TRUE;
+}
+
+void TieredCompilationManager::AsyncPromoteMethodToTier1(MethodDesc* pMethodDesc)
+{
+ STANDARD_VM_CONTRACT;
+
+ NativeCodeVersion t1NativeCodeVersion;
+
+ // Add an inactive native code entry in the versioning table to track the tier1
+ // compilation we are going to create. This entry binds the compilation to a
+ // particular version of the IL code regardless of any changes that may
+ // occur between now and when jitting completes. If the IL does change in that
+ // interval the new code entry won't be activated.
+ {
+ CodeVersionManager* pCodeVersionManager = pMethodDesc->GetCodeVersionManager();
+ CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
+ ILCodeVersion ilVersion = pCodeVersionManager->GetActiveILCodeVersion(pMethodDesc);
+ NativeCodeVersionCollection nativeVersions = ilVersion.GetNativeCodeVersions(pMethodDesc);
+ for (NativeCodeVersionIterator cur = nativeVersions.Begin(), end = nativeVersions.End(); cur != end; cur++)
+ {
+ if (cur->GetOptimizationTier() == NativeCodeVersion::OptimizationTier1)
+ {
+ // we've already promoted
+ return;
+ }
+ }
+
+ if (FAILED(ilVersion.AddNativeCodeVersion(pMethodDesc, &t1NativeCodeVersion)))
+ {
+ // optimization didn't work for some reason (presumably OOM)
+ // just give up and continue on
+ return;
+ }
+ t1NativeCodeVersion.SetOptimizationTier(NativeCodeVersion::OptimizationTier1);
+ }
// Insert the method into the optimization queue and trigger a thread to service
// the queue if needed.
@@ -141,7 +179,7 @@ BOOL TieredCompilationManager::OnMethodCalled(MethodDesc* pMethodDesc, DWORD cur
// unserviced. Synchronous retries appear unlikely to offer any material improvement
// and complicating the code to narrow an already rare error case isn't desirable.
{
- SListElem<MethodDesc*>* pMethodListItem = new (nothrow) SListElem<MethodDesc*>(pMethodDesc);
+ SListElem<NativeCodeVersion>* pMethodListItem = new (nothrow) SListElem<NativeCodeVersion>(t1NativeCodeVersion);
SpinLockHolder holder(&m_lock);
if (pMethodListItem != NULL)
{
@@ -156,7 +194,7 @@ BOOL TieredCompilationManager::OnMethodCalled(MethodDesc* pMethodDesc, DWORD cur
}
else
{
- return TRUE; // stop notifications for this method
+ return;
}
}
@@ -181,11 +219,20 @@ BOOL TieredCompilationManager::OnMethodCalled(MethodDesc* pMethodDesc, DWORD cur
}
EX_END_CATCH(RethrowTerminalExceptions);
- return TRUE; // stop notifications for this method
+ return;
}
void TieredCompilationManager::OnAppDomainShutdown()
{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END
+
SpinLockHolder holder(&m_lock);
m_isAppDomainShuttingDown = TRUE;
}
@@ -227,17 +274,19 @@ void TieredCompilationManager::OptimizeMethodsCallback()
}
ULONGLONG startTickCount = CLRGetTickCount64();
- MethodDesc* pMethod = NULL;
+ NativeCodeVersion nativeCodeVersion;
EX_TRY
{
+ GCX_COOP();
ENTER_DOMAIN_ID(m_domainId);
{
+ GCX_PREEMP();
while (true)
{
{
SpinLockHolder holder(&m_lock);
- pMethod = GetNextMethodToOptimize();
- if (pMethod == NULL ||
+ nativeCodeVersion = GetNextMethodToOptimize();
+ if (nativeCodeVersion.IsNull() ||
m_isAppDomainShuttingDown)
{
m_countOptimizationThreadsRunning--;
@@ -245,7 +294,7 @@ void TieredCompilationManager::OptimizeMethodsCallback()
}
}
- OptimizeMethod(pMethod);
+ OptimizeMethod(nativeCodeVersion);
// If we have been running for too long return the thread to the threadpool and queue another event
// This gives the threadpool a chance to service other requests on this thread before returning to
@@ -270,51 +319,35 @@ void TieredCompilationManager::OptimizeMethodsCallback()
{
STRESS_LOG2(LF_TIEREDCOMPILATION, LL_ERROR, "TieredCompilationManager::OptimizeMethodsCallback: "
"Unhandled exception during method optimization, hr=0x%x, last method=%pM\n",
- GET_EXCEPTION()->GetHR(), pMethod);
+ GET_EXCEPTION()->GetHR(), nativeCodeVersion.GetMethodDesc());
}
EX_END_CATCH(RethrowTerminalExceptions);
}
// Jit compiles and installs new optimized code for a method.
// Called on a background thread.
-void TieredCompilationManager::OptimizeMethod(MethodDesc* pMethod)
+void TieredCompilationManager::OptimizeMethod(NativeCodeVersion nativeCodeVersion)
{
STANDARD_VM_CONTRACT;
- _ASSERTE(pMethod->IsEligibleForTieredCompilation());
- PCODE pJittedCode = CompileMethod(pMethod);
- if (pJittedCode != NULL)
+ _ASSERTE(nativeCodeVersion.GetMethodDesc()->IsEligibleForTieredCompilation());
+ if (CompileCodeVersion(nativeCodeVersion))
{
- InstallMethodCode(pMethod, pJittedCode);
+ ActivateCodeVersion(nativeCodeVersion);
}
}
// Compiles new optimized code for a method.
// Called on a background thread.
-PCODE TieredCompilationManager::CompileMethod(MethodDesc* pMethod)
+BOOL TieredCompilationManager::CompileCodeVersion(NativeCodeVersion nativeCodeVersion)
{
STANDARD_VM_CONTRACT;
PCODE pCode = NULL;
- ULONG sizeOfCode = 0;
+ MethodDesc* pMethod = nativeCodeVersion.GetMethodDesc();
EX_TRY
{
- CORJIT_FLAGS flags = CORJIT_FLAGS(CORJIT_FLAGS::CORJIT_FLAG_MCJIT_BACKGROUND);
- flags.Add(CORJIT_FLAGS(CORJIT_FLAGS::CORJIT_FLAG_TIER1));
-
- if (pMethod->IsDynamicMethod())
- {
- ILStubResolver* pResolver = pMethod->AsDynamicMethodDesc()->GetILStubResolver();
- flags.Add(pResolver->GetJitFlags());
- COR_ILMETHOD_DECODER* pILheader = pResolver->GetILHeader();
- pCode = UnsafeJitFunction(pMethod, pILheader, flags, &sizeOfCode);
- }
- else
- {
- COR_ILMETHOD_DECODER::DecoderStatus status;
- COR_ILMETHOD_DECODER header(pMethod->GetILHeader(), pMethod->GetModule()->GetMDImport(), &status);
- pCode = UnsafeJitFunction(pMethod, &header, flags, &sizeOfCode);
- }
+ pCode = pMethod->PrepareCode(nativeCodeVersion);
}
EX_CATCH
{
@@ -324,58 +357,96 @@ PCODE TieredCompilationManager::CompileMethod(MethodDesc* pMethod)
}
EX_END_CATCH(RethrowTerminalExceptions)
- return pCode;
+ return pCode != NULL;
}
// Updates the MethodDesc and precode so that future invocations of a method will
// execute the native code pointed to by pCode.
// Called on a background thread.
-void TieredCompilationManager::InstallMethodCode(MethodDesc* pMethod, PCODE pCode)
+void TieredCompilationManager::ActivateCodeVersion(NativeCodeVersion nativeCodeVersion)
{
STANDARD_VM_CONTRACT;
- _ASSERTE(!pMethod->IsNativeCodeStableAfterInit());
+ MethodDesc* pMethod = nativeCodeVersion.GetMethodDesc();
+ CodeVersionManager* pCodeVersionManager = pMethod->GetCodeVersionManager();
- PCODE pExistingCode = pMethod->GetNativeCode();
-#ifdef FEATURE_INTERPRETER
- if (!pMethod->SetNativeCodeInterlocked(pCode, pExistingCode, TRUE))
-#else
- if (!pMethod->SetNativeCodeInterlocked(pCode, pExistingCode))
-#endif
+ // If the ilParent version is active this will activate the native code version now.
+ // Otherwise if the ilParent version becomes active again in the future the native
+ // code version will activate then.
+ ILCodeVersion ilParent;
+ HRESULT hr = S_OK;
{
- //We aren't there yet, but when the feature is finished we shouldn't be racing against any other code mutator and there would be no
- //reason for this to fail
- STRESS_LOG2(LF_TIEREDCOMPILATION, LL_INFO10, "TieredCompilationManager::InstallMethodCode: Method %pM failed to update native code slot. Code=%pK\n",
- pMethod, pCode);
+ // As long as we are exclusively using precode publishing for tiered compilation
+ // methods this first attempt should succeed
+ CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
+ ilParent = nativeCodeVersion.GetILCodeVersion();
+ hr = ilParent.SetActiveNativeCodeVersion(nativeCodeVersion, FALSE);
}
- else
+ if (hr == CORPROF_E_RUNTIME_SUSPEND_REQUIRED)
{
- Precode* pPrecode = pMethod->GetPrecode();
- if (!pPrecode->SetTargetInterlocked(pCode, FALSE))
+ // if we start using jump-stamp publishing for tiered compilation, the first attempt
+ // without the runtime suspended will fail and then this second attempt will
+ // succeed.
+ // Even though this works performance is likely to be quite bad. Realistically
+ // we are going to need batched updates to makes tiered-compilation + jump-stamp
+ // viable. This fallback path is just here as a proof-of-concept.
+ ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_REJIT);
{
- //We aren't there yet, but when the feature is finished we shouldn't be racing against any other code mutator and there would be no
- //reason for this to fail
- STRESS_LOG2(LF_TIEREDCOMPILATION, LL_INFO10, "TieredCompilationManager::InstallMethodCode: Method %pM failed to update precode. Code=%pK\n",
- pMethod, pCode);
+ CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
+ hr = ilParent.SetActiveNativeCodeVersion(nativeCodeVersion, TRUE);
}
+ ThreadSuspend::RestartEE(FALSE, TRUE);
+ }
+ if (FAILED(hr))
+ {
+ STRESS_LOG2(LF_TIEREDCOMPILATION, LL_INFO10, "TieredCompilationManager::ActivateCodeVersion: Method %pM failed to publish native code for native code version %d\n",
+ pMethod, nativeCodeVersion.GetVersionId());
}
}
// Dequeues the next method in the optmization queue.
// This should be called with m_lock already held and runs
// on the background thread.
-MethodDesc* TieredCompilationManager::GetNextMethodToOptimize()
+NativeCodeVersion TieredCompilationManager::GetNextMethodToOptimize()
{
STANDARD_VM_CONTRACT;
- SListElem<MethodDesc*>* pElem = m_methodsToOptimize.RemoveHead();
+ SListElem<NativeCodeVersion>* pElem = m_methodsToOptimize.RemoveHead();
if (pElem != NULL)
{
- MethodDesc* pMD = pElem->GetValue();
+ NativeCodeVersion nativeCodeVersion = pElem->GetValue();
delete pElem;
- return pMD;
+ return nativeCodeVersion;
+ }
+ return NativeCodeVersion();
+}
+
+//static
+CORJIT_FLAGS TieredCompilationManager::GetJitFlags(NativeCodeVersion nativeCodeVersion)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ CORJIT_FLAGS flags;
+ if (!nativeCodeVersion.GetMethodDesc()->IsEligibleForTieredCompilation())
+ {
+#ifdef FEATURE_INTERPRETER
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE);
+#endif
+ return flags;
+ }
+
+ if (nativeCodeVersion.GetOptimizationTier() == NativeCodeVersion::OptimizationTier0)
+ {
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_TIER0);
+ }
+ else
+ {
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_TIER1);
+#ifdef FEATURE_INTERPRETER
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE);
+#endif
}
- return NULL;
+ return flags;
}
#endif // FEATURE_TIERED_COMPILATION
diff --git a/src/vm/tieredcompilation.h b/src/vm/tieredcompilation.h
index 71236c5374..6e155fef14 100644
--- a/src/vm/tieredcompilation.h
+++ b/src/vm/tieredcompilation.h
@@ -26,19 +26,21 @@ public:
void Init(ADID appDomainId);
BOOL OnMethodCalled(MethodDesc* pMethodDesc, DWORD currentCallCount);
+ void AsyncPromoteMethodToTier1(MethodDesc* pMethodDesc);
void OnAppDomainShutdown();
+ static CORJIT_FLAGS GetJitFlags(NativeCodeVersion nativeCodeVersion);
private:
static DWORD StaticOptimizeMethodsCallback(void* args);
void OptimizeMethodsCallback();
- void OptimizeMethod(MethodDesc* pMethod);
- MethodDesc* GetNextMethodToOptimize();
- PCODE CompileMethod(MethodDesc* pMethod);
- void InstallMethodCode(MethodDesc* pMethod, PCODE pCode);
+ void OptimizeMethod(NativeCodeVersion nativeCodeVersion);
+ NativeCodeVersion GetNextMethodToOptimize();
+ BOOL CompileCodeVersion(NativeCodeVersion nativeCodeVersion);
+ void ActivateCodeVersion(NativeCodeVersion nativeCodeVersion);
SpinLock m_lock;
- SList<SListElem<MethodDesc*>> m_methodsToOptimize;
+ SList<SListElem<NativeCodeVersion>> m_methodsToOptimize;
ADID m_domainId;
BOOL m_isAppDomainShuttingDown;
DWORD m_countOptimizationThreadsRunning;