-rw-r--r--  Documentation/design-docs/code-versioning.md | 9
-rw-r--r--  src/inc/CrstTypes.def | 6
-rw-r--r--  src/inc/clrconfigvalues.h | 7
-rw-r--r--  src/inc/crsttypes.h | 449
-rw-r--r--  src/inc/shash.inl | 8
-rw-r--r--  src/vm/CMakeLists.txt | 2
-rw-r--r--  src/vm/amd64/cgenamd64.cpp | 18
-rw-r--r--  src/vm/amd64/virtualcallstubcpu.hpp | 90
-rw-r--r--  src/vm/appdomain.cpp | 6
-rw-r--r--  src/vm/appdomain.hpp | 11
-rw-r--r--  src/vm/arm/stubs.cpp | 14
-rw-r--r--  src/vm/arm/virtualcallstubcpu.hpp | 22
-rw-r--r--  src/vm/arm64/virtualcallstubcpu.hpp | 18
-rw-r--r--  src/vm/array.cpp | 5
-rw-r--r--  src/vm/ceeload.h | 4
-rw-r--r--  src/vm/ceeload.inl | 8
-rw-r--r--  src/vm/ceemain.cpp | 2
-rw-r--r--  src/vm/codeversion.cpp | 57
-rw-r--r--  src/vm/codeversion.h | 3
-rw-r--r--  src/vm/eeconfig.cpp | 10
-rw-r--r--  src/vm/eeconfig.h | 8
-rw-r--r--  src/vm/fptrstubs.cpp | 35
-rw-r--r--  src/vm/frames.cpp | 34
-rw-r--r--  src/vm/generics.cpp | 3
-rw-r--r--  src/vm/i386/cgenx86.cpp | 19
-rw-r--r--  src/vm/i386/virtualcallstubcpu.hpp | 24
-rw-r--r--  src/vm/loaderallocator.cpp | 11
-rw-r--r--  src/vm/loaderallocator.hpp | 29
-rw-r--r--  src/vm/method.cpp | 273
-rw-r--r--  src/vm/method.hpp | 339
-rw-r--r--  src/vm/method.inl | 10
-rw-r--r--  src/vm/methoddescbackpatchinfo.cpp | 238
-rw-r--r--  src/vm/methoddescbackpatchinfo.h | 416
-rw-r--r--  src/vm/methodtable.cpp | 3
-rw-r--r--  src/vm/methodtable.h | 2
-rw-r--r--  src/vm/methodtable.inl | 11
-rw-r--r--  src/vm/methodtablebuilder.cpp | 48
-rw-r--r--  src/vm/precode.cpp | 28
-rw-r--r--  src/vm/prestub.cpp | 191
-rw-r--r--  src/vm/tieredcompilation.cpp | 12
-rw-r--r--  src/vm/virtualcallstub.cpp | 45
-rw-r--r--  src/vm/virtualcallstub.h | 7
-rw-r--r--  tests/src/baseservices/TieredCompilation/TieredVtableMethodTests.cs | 274
-rw-r--r--  tests/src/baseservices/TieredCompilation/TieredVtableMethodTests.csproj | 22
44 files changed, 2299 insertions(+), 532 deletions(-)
diff --git a/Documentation/design-docs/code-versioning.md b/Documentation/design-docs/code-versioning.md
index fdf8413871..928f4a5512 100644
--- a/Documentation/design-docs/code-versioning.md
+++ b/Documentation/design-docs/code-versioning.md
@@ -327,14 +327,7 @@ to update the active child at either of those levels (ReJIT uses SetActiveILCode
2. Recalculate the active code version for each entrypoint
3. Update the published code version for each entrypoint to match the active code version
-In order to do step 3 the CodeVersionManager relies on one of two different mechanisms, either a FixupPrecode or a JumpStamp. Both techniques roughly involve using a jmp instruction as the method entrypoint and then updating that jmp to point at whatever code version should be published. In the FixupPrecode case this is memory that was allocated dynamically for the explicit purpose of being the method entrypoint. In the JumpStamp this is memory that was initially used as the prolog of the default code version and then repurposed. JumpStamp is required for AOT compiled images that use direct calls from method to method, however changing between prolog instructions and a jmp instruction requires EE suspension to ensure that threads have been evacuated from the region. FixupPrecode can be updated with only an Interlocked operation which offers lower overhead updates when it can be used.
-
-All methods have been classified to use at most one of the techniques, based on:
-
-```
-MethodDesc::IsVersionableWithPrecode()
-MethodDesc::IsVersionableWithJumpStamp()
-```
+In order to do step 3 the `CodeVersionManager` relies on one of three different mechanisms: a `FixupPrecode`, a `JumpStamp`, or backpatching entry point slots. In [method.hpp](https://github.com/dotnet/coreclr/blob/master/src/vm/method.hpp) these mechanisms correspond to the `MethodDesc::IsVersionableWith*()` functions, and every method has been classified to use at most one of the techniques based on those functions.
### Thread-safety ###
CodeVersionManager is designed for use in a free-threaded environment, in many cases by requiring the caller to acquire a lock before calling. This lock can be acquired by constructing an instance of the
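The three publishing mechanisms added to the documentation above can be summarized with a short sketch. This is illustrative only and mirrors the `CodeVersionManager::PublishNativeCodeVersion()` hunk in src/vm/codeversion.cpp later in this change; the function name `PublishSketch` is hypothetical, while `IsVersionableWithoutJumpStamp()`, `IsVersionableWithJumpStamp()`, `SetCodeEntryPoint()`, and `ResetCodeEntryPoint()` are the `MethodDesc` members this change relies on.

```
// Illustrative sketch, not the actual publishing code.
void PublishSketch(MethodDesc *pMD, PCODE pCode)
{
    if (pMD->IsVersionableWithoutJumpStamp())
    {
        // Covers both FixupPrecode and entry point slot backpatching:
        // SetCodeEntryPoint() either retargets the precode or backpatches every
        // recorded slot (vtable slots, dispatch stubs, FuncPtr stubs).
        if (pCode != NULL)
            pMD->SetCodeEntryPoint(pCode);
        else
            pMD->ResetCodeEntryPoint(); // fall back to the prestub
    }
    else
    {
        // JumpStamp path: rewrites the prolog of the default native code and
        // therefore requires EE suspension (see SyncJumpStamp()).
        _ASSERTE(pMD->IsVersionableWithJumpStamp());
    }
}
```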
diff --git a/src/inc/CrstTypes.def b/src/inc/CrstTypes.def
index bb996a7af5..5900a33945 100644
--- a/src/inc/CrstTypes.def
+++ b/src/inc/CrstTypes.def
@@ -491,7 +491,7 @@ End
// Used to synchronize all rejit information stored in a given AppDomain.
Crst ReJITDomainTable
- AcquiredBefore LoaderHeap SingleUseLock DeadlockDetection JumpStubCache DebuggerController
+ AcquiredBefore LoaderHeap SingleUseLock DeadlockDetection JumpStubCache DebuggerController MethodDescBackpatchInfoTracker
AcquiredAfter ReJITGlobalRequest ThreadStore GlobalStrLiteralMap SystemDomain DebuggerMutex
End
@@ -699,3 +699,7 @@ End
Crst COMCallWrapper
End
+
+Crst MethodDescBackpatchInfoTracker
+ AcquiredBefore FuncPtrStubs
+End
diff --git a/src/inc/clrconfigvalues.h b/src/inc/clrconfigvalues.h
index 20e24dce9c..843f7e4574 100644
--- a/src/inc/clrconfigvalues.h
+++ b/src/inc/clrconfigvalues.h
@@ -643,7 +643,6 @@ RETAIL_CONFIG_DWORD_INFO(INTERNAL_HillClimbing_SampleIntervalLow,
RETAIL_CONFIG_DWORD_INFO(INTERNAL_HillClimbing_SampleIntervalHigh, W("HillClimbing_SampleIntervalHigh"), 200, "");
RETAIL_CONFIG_DWORD_INFO(INTERNAL_HillClimbing_GainExponent, W("HillClimbing_GainExponent"), 200, "The exponent to apply to the gain, times 100. 100 means to use linear gain, higher values will enhance large moves and damp small ones.");
-
///
/// Tiered Compilation
///
@@ -657,6 +656,12 @@ RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_TieredCompilation_Test_CallCounting, W("Tie
RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_TieredCompilation_Test_OptimizeTier0, W("TieredCompilation_Test_OptimizeTier0"), 0, "Use optimized codegen (normally used by tier1) in tier0")
#endif
+///
+/// Entry point slot backpatch
+///
+#ifndef CROSSGEN_COMPILE
+RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_BackpatchEntryPointSlots, W("BackpatchEntryPointSlots"), 1, "Indicates whether to enable entry point slot backpatching, for instance to avoid making virtual calls through a precode and instead to patch virtual slots for a method when its entry point changes.")
+#endif
///
/// TypeLoader
diff --git a/src/inc/crsttypes.h b/src/inc/crsttypes.h
index 7c867f7fb8..e8413dde55 100644
--- a/src/inc/crsttypes.h
+++ b/src/inc/crsttypes.h
@@ -96,78 +96,79 @@ enum CrstType
CrstLoaderHeap = 77,
CrstMda = 78,
CrstMetadataTracker = 79,
- CrstModIntPairList = 80,
- CrstModule = 81,
- CrstModuleFixup = 82,
- CrstModuleLookupTable = 83,
- CrstMulticoreJitHash = 84,
- CrstMulticoreJitManager = 85,
- CrstMUThunkHash = 86,
- CrstNativeBinderInit = 87,
- CrstNativeImageCache = 88,
- CrstNls = 89,
- CrstNotifyGdb = 90,
- CrstObjectList = 91,
- CrstOnEventManager = 92,
- CrstPatchEntryPoint = 93,
- CrstPEImage = 94,
- CrstPEImagePDBStream = 95,
- CrstPendingTypeLoadEntry = 96,
- CrstPinHandle = 97,
- CrstPinnedByrefValidation = 98,
- CrstProfilerGCRefDataFreeList = 99,
- CrstProfilingAPIStatus = 100,
- CrstPublisherCertificate = 101,
- CrstRCWCache = 102,
- CrstRCWCleanupList = 103,
- CrstRCWRefCache = 104,
- CrstReadyToRunEntryPointToMethodDescMap = 105,
- CrstReDacl = 106,
- CrstReflection = 107,
- CrstReJITDomainTable = 108,
- CrstReJITGlobalRequest = 109,
- CrstRemoting = 110,
- CrstRetThunkCache = 111,
- CrstRWLock = 112,
- CrstSavedExceptionInfo = 113,
- CrstSaveModuleProfileData = 114,
- CrstSecurityStackwalkCache = 115,
- CrstSharedAssemblyCreate = 116,
- CrstSigConvert = 117,
- CrstSingleUseLock = 118,
- CrstSpecialStatics = 119,
- CrstSqmManager = 120,
- CrstStackSampler = 121,
- CrstStressLog = 122,
- CrstStrongName = 123,
- CrstStubCache = 124,
- CrstStubDispatchCache = 125,
- CrstStubUnwindInfoHeapSegments = 126,
- CrstSyncBlockCache = 127,
- CrstSyncHashLock = 128,
- CrstSystemBaseDomain = 129,
- CrstSystemDomain = 130,
- CrstSystemDomainDelayedUnloadList = 131,
- CrstThreadIdDispenser = 132,
- CrstThreadpoolEventCache = 133,
- CrstThreadpoolTimerQueue = 134,
- CrstThreadpoolWaitThreads = 135,
- CrstThreadpoolWorker = 136,
- CrstThreadStaticDataHashTable = 137,
- CrstThreadStore = 138,
- CrstTieredCompilation = 139,
- CrstTPMethodTable = 140,
- CrstTypeEquivalenceMap = 141,
- CrstTypeIDMap = 142,
- CrstUMEntryThunkCache = 143,
- CrstUMThunkHash = 144,
- CrstUniqueStack = 145,
- CrstUnresolvedClassLock = 146,
- CrstUnwindInfoTableLock = 147,
- CrstVSDIndirectionCellLock = 148,
- CrstWinRTFactoryCache = 149,
- CrstWrapperTemplate = 150,
- kNumberOfCrstTypes = 151
+ CrstMethodDescBackpatchInfoTracker = 80,
+ CrstModIntPairList = 81,
+ CrstModule = 82,
+ CrstModuleFixup = 83,
+ CrstModuleLookupTable = 84,
+ CrstMulticoreJitHash = 85,
+ CrstMulticoreJitManager = 86,
+ CrstMUThunkHash = 87,
+ CrstNativeBinderInit = 88,
+ CrstNativeImageCache = 89,
+ CrstNls = 90,
+ CrstNotifyGdb = 91,
+ CrstObjectList = 92,
+ CrstOnEventManager = 93,
+ CrstPatchEntryPoint = 94,
+ CrstPEImage = 95,
+ CrstPEImagePDBStream = 96,
+ CrstPendingTypeLoadEntry = 97,
+ CrstPinHandle = 98,
+ CrstPinnedByrefValidation = 99,
+ CrstProfilerGCRefDataFreeList = 100,
+ CrstProfilingAPIStatus = 101,
+ CrstPublisherCertificate = 102,
+ CrstRCWCache = 103,
+ CrstRCWCleanupList = 104,
+ CrstRCWRefCache = 105,
+ CrstReadyToRunEntryPointToMethodDescMap = 106,
+ CrstReDacl = 107,
+ CrstReflection = 108,
+ CrstReJITDomainTable = 109,
+ CrstReJITGlobalRequest = 110,
+ CrstRemoting = 111,
+ CrstRetThunkCache = 112,
+ CrstRWLock = 113,
+ CrstSavedExceptionInfo = 114,
+ CrstSaveModuleProfileData = 115,
+ CrstSecurityStackwalkCache = 116,
+ CrstSharedAssemblyCreate = 117,
+ CrstSigConvert = 118,
+ CrstSingleUseLock = 119,
+ CrstSpecialStatics = 120,
+ CrstSqmManager = 121,
+ CrstStackSampler = 122,
+ CrstStressLog = 123,
+ CrstStrongName = 124,
+ CrstStubCache = 125,
+ CrstStubDispatchCache = 126,
+ CrstStubUnwindInfoHeapSegments = 127,
+ CrstSyncBlockCache = 128,
+ CrstSyncHashLock = 129,
+ CrstSystemBaseDomain = 130,
+ CrstSystemDomain = 131,
+ CrstSystemDomainDelayedUnloadList = 132,
+ CrstThreadIdDispenser = 133,
+ CrstThreadpoolEventCache = 134,
+ CrstThreadpoolTimerQueue = 135,
+ CrstThreadpoolWaitThreads = 136,
+ CrstThreadpoolWorker = 137,
+ CrstThreadStaticDataHashTable = 138,
+ CrstThreadStore = 139,
+ CrstTieredCompilation = 140,
+ CrstTPMethodTable = 141,
+ CrstTypeEquivalenceMap = 142,
+ CrstTypeIDMap = 143,
+ CrstUMEntryThunkCache = 144,
+ CrstUMThunkHash = 145,
+ CrstUniqueStack = 146,
+ CrstUnresolvedClassLock = 147,
+ CrstUnwindInfoTableLock = 148,
+ CrstVSDIndirectionCellLock = 149,
+ CrstWinRTFactoryCache = 150,
+ CrstWrapperTemplate = 151,
+ kNumberOfCrstTypes = 152
};
#endif // __CRST_TYPES_INCLUDED
@@ -178,157 +179,158 @@ enum CrstType
// An array mapping CrstType to level.
int g_rgCrstLevelMap[] =
{
- 9, // CrstAllowedFiles
- 9, // CrstAppDomainCache
- 12, // CrstAppDomainHandleTable
- 0, // CrstArgBasedStubCache
- 0, // CrstAssemblyDependencyGraph
- 0, // CrstAssemblyIdentityCache
- 0, // CrstAssemblyList
- 7, // CrstAssemblyLoader
- 3, // CrstAvailableClass
- 3, // CrstAvailableParamTypes
- 7, // CrstBaseDomain
- -1, // CrstCCompRC
- 9, // CrstCer
- 11, // CrstClassFactInfoHash
- 8, // CrstClassInit
- -1, // CrstClrNotification
- 0, // CrstCLRPrivBinderMaps
- 3, // CrstCLRPrivBinderMapsAdd
- 6, // CrstCodeFragmentHeap
- 0, // CrstCOMCallWrapper
- 4, // CrstCOMWrapperCache
- 0, // CrstConnectionNameTable
- 15, // CrstContexts
- -1, // CrstCoreCLRBinderLog
- 0, // CrstCrstCLRPrivBinderLocalWinMDPath
- 7, // CrstCSPCache
- 3, // CrstDataTest1
- 0, // CrstDataTest2
- 0, // CrstDbgTransport
- 0, // CrstDeadlockDetection
- -1, // CrstDebuggerController
- 3, // CrstDebuggerFavorLock
- 0, // CrstDebuggerHeapExecMemLock
- 0, // CrstDebuggerHeapLock
- 4, // CrstDebuggerJitInfo
- 9, // CrstDebuggerMutex
- 0, // CrstDelegateToFPtrHash
- 14, // CrstDomainLocalBlock
- 0, // CrstDynamicIL
- 3, // CrstDynamicMT
- 3, // CrstDynLinkZapItems
- 7, // CrstEtwTypeLogHash
- 17, // CrstEventPipe
- 0, // CrstEventStore
- 0, // CrstException
- 7, // CrstExecuteManLock
- 0, // CrstExecuteManRangeLock
- 3, // CrstFCall
- 7, // CrstFriendAccessCache
- 7, // CrstFuncPtrStubs
- 5, // CrstFusionAppCtx
- 9, // CrstGCCover
- 0, // CrstGCMemoryPressure
- 11, // CrstGlobalStrLiteralMap
- 1, // CrstHandleTable
- 0, // CrstHostAssemblyMap
- 3, // CrstHostAssemblyMapAdd
- 0, // CrstIbcProfile
- 9, // CrstIJWFixupData
- 0, // CrstIJWHash
- 7, // CrstILStubGen
- 3, // CrstInlineTrackingMap
- 15, // CrstInstMethodHashTable
- 0, // CrstInterfaceVTableMap
- 17, // CrstInterop
- 4, // CrstInteropData
- 11, // CrstIOThreadpoolWorker
- 0, // CrstIsJMCMethod
- 7, // CrstISymUnmanagedReader
- 8, // CrstJit
- 0, // CrstJitGenericHandleCache
- -1, // CrstJitPerf
- 6, // CrstJumpStubCache
- 0, // CrstLeafLock
- -1, // CrstListLock
- 13, // CrstLoaderAllocator
- 14, // CrstLoaderAllocatorReferences
- 0, // CrstLoaderHeap
- 0, // CrstMda
- -1, // CrstMetadataTracker
- 0, // CrstModIntPairList
- 4, // CrstModule
- 13, // CrstModuleFixup
- 3, // CrstModuleLookupTable
- 0, // CrstMulticoreJitHash
- 11, // CrstMulticoreJitManager
- 0, // CrstMUThunkHash
- -1, // CrstNativeBinderInit
- -1, // CrstNativeImageCache
- 0, // CrstNls
- 0, // CrstNotifyGdb
- 2, // CrstObjectList
- 0, // CrstOnEventManager
- 0, // CrstPatchEntryPoint
- 4, // CrstPEImage
- 0, // CrstPEImagePDBStream
- 16, // CrstPendingTypeLoadEntry
- 0, // CrstPinHandle
- 0, // CrstPinnedByrefValidation
- 0, // CrstProfilerGCRefDataFreeList
- 0, // CrstProfilingAPIStatus
- 0, // CrstPublisherCertificate
- 3, // CrstRCWCache
- 0, // CrstRCWCleanupList
- 3, // CrstRCWRefCache
- 4, // CrstReadyToRunEntryPointToMethodDescMap
- 0, // CrstReDacl
- 9, // CrstReflection
- 7, // CrstReJITDomainTable
- 12, // CrstReJITGlobalRequest
- 17, // CrstRemoting
- 3, // CrstRetThunkCache
- 0, // CrstRWLock
- 3, // CrstSavedExceptionInfo
- 0, // CrstSaveModuleProfileData
- 0, // CrstSecurityStackwalkCache
- 4, // CrstSharedAssemblyCreate
- 3, // CrstSigConvert
- 5, // CrstSingleUseLock
- 0, // CrstSpecialStatics
- 0, // CrstSqmManager
- 0, // CrstStackSampler
- -1, // CrstStressLog
- 0, // CrstStrongName
- 5, // CrstStubCache
- 0, // CrstStubDispatchCache
- 4, // CrstStubUnwindInfoHeapSegments
- 3, // CrstSyncBlockCache
- 0, // CrstSyncHashLock
- 4, // CrstSystemBaseDomain
- 11, // CrstSystemDomain
- 0, // CrstSystemDomainDelayedUnloadList
- 0, // CrstThreadIdDispenser
- 0, // CrstThreadpoolEventCache
- 7, // CrstThreadpoolTimerQueue
- 7, // CrstThreadpoolWaitThreads
- 11, // CrstThreadpoolWorker
- 4, // CrstThreadStaticDataHashTable
- 10, // CrstThreadStore
- 9, // CrstTieredCompilation
- 9, // CrstTPMethodTable
- 3, // CrstTypeEquivalenceMap
- 7, // CrstTypeIDMap
- 3, // CrstUMEntryThunkCache
- 0, // CrstUMThunkHash
- 3, // CrstUniqueStack
- 7, // CrstUnresolvedClassLock
- 3, // CrstUnwindInfoTableLock
- 3, // CrstVSDIndirectionCellLock
- 3, // CrstWinRTFactoryCache
- 3, // CrstWrapperTemplate
+ 9, // CrstAllowedFiles
+ 9, // CrstAppDomainCache
+ 14, // CrstAppDomainHandleTable
+ 0, // CrstArgBasedStubCache
+ 0, // CrstAssemblyDependencyGraph
+ 0, // CrstAssemblyIdentityCache
+ 0, // CrstAssemblyList
+ 7, // CrstAssemblyLoader
+ 3, // CrstAvailableClass
+ 3, // CrstAvailableParamTypes
+ 7, // CrstBaseDomain
+ -1, // CrstCCompRC
+ 9, // CrstCer
+ 13, // CrstClassFactInfoHash
+ 8, // CrstClassInit
+ -1, // CrstClrNotification
+ 0, // CrstCLRPrivBinderMaps
+ 3, // CrstCLRPrivBinderMapsAdd
+ 6, // CrstCodeFragmentHeap
+ 0, // CrstCOMCallWrapper
+ 4, // CrstCOMWrapperCache
+ 0, // CrstConnectionNameTable
+ 17, // CrstContexts
+ -1, // CrstCoreCLRBinderLog
+ 0, // CrstCrstCLRPrivBinderLocalWinMDPath
+ 7, // CrstCSPCache
+ 3, // CrstDataTest1
+ 0, // CrstDataTest2
+ 0, // CrstDbgTransport
+ 0, // CrstDeadlockDetection
+ -1, // CrstDebuggerController
+ 3, // CrstDebuggerFavorLock
+ 0, // CrstDebuggerHeapExecMemLock
+ 0, // CrstDebuggerHeapLock
+ 4, // CrstDebuggerJitInfo
+ 11, // CrstDebuggerMutex
+ 0, // CrstDelegateToFPtrHash
+ 16, // CrstDomainLocalBlock
+ 0, // CrstDynamicIL
+ 3, // CrstDynamicMT
+ 3, // CrstDynLinkZapItems
+ 7, // CrstEtwTypeLogHash
+ 19, // CrstEventPipe
+ 0, // CrstEventStore
+ 0, // CrstException
+ 7, // CrstExecuteManLock
+ 0, // CrstExecuteManRangeLock
+ 3, // CrstFCall
+ 7, // CrstFriendAccessCache
+ 7, // CrstFuncPtrStubs
+ 5, // CrstFusionAppCtx
+ 11, // CrstGCCover
+ 0, // CrstGCMemoryPressure
+ 13, // CrstGlobalStrLiteralMap
+ 1, // CrstHandleTable
+ 0, // CrstHostAssemblyMap
+ 3, // CrstHostAssemblyMapAdd
+ 0, // CrstIbcProfile
+ 9, // CrstIJWFixupData
+ 0, // CrstIJWHash
+ 7, // CrstILStubGen
+ 3, // CrstInlineTrackingMap
+ 17, // CrstInstMethodHashTable
+ 0, // CrstInterfaceVTableMap
+ 19, // CrstInterop
+ 4, // CrstInteropData
+ 13, // CrstIOThreadpoolWorker
+ 0, // CrstIsJMCMethod
+ 7, // CrstISymUnmanagedReader
+ 8, // CrstJit
+ 0, // CrstJitGenericHandleCache
+ -1, // CrstJitPerf
+ 6, // CrstJumpStubCache
+ 0, // CrstLeafLock
+ -1, // CrstListLock
+ 15, // CrstLoaderAllocator
+ 16, // CrstLoaderAllocatorReferences
+ 0, // CrstLoaderHeap
+ 0, // CrstMda
+ -1, // CrstMetadataTracker
+ 9, // CrstMethodDescBackpatchInfoTracker
+ 0, // CrstModIntPairList
+ 4, // CrstModule
+ 15, // CrstModuleFixup
+ 3, // CrstModuleLookupTable
+ 0, // CrstMulticoreJitHash
+ 13, // CrstMulticoreJitManager
+ 0, // CrstMUThunkHash
+ -1, // CrstNativeBinderInit
+ -1, // CrstNativeImageCache
+ 0, // CrstNls
+ 0, // CrstNotifyGdb
+ 2, // CrstObjectList
+ 0, // CrstOnEventManager
+ 0, // CrstPatchEntryPoint
+ 4, // CrstPEImage
+ 0, // CrstPEImagePDBStream
+ 18, // CrstPendingTypeLoadEntry
+ 0, // CrstPinHandle
+ 0, // CrstPinnedByrefValidation
+ 0, // CrstProfilerGCRefDataFreeList
+ 0, // CrstProfilingAPIStatus
+ 0, // CrstPublisherCertificate
+ 3, // CrstRCWCache
+ 0, // CrstRCWCleanupList
+ 3, // CrstRCWRefCache
+ 4, // CrstReadyToRunEntryPointToMethodDescMap
+ 0, // CrstReDacl
+ 9, // CrstReflection
+ 10, // CrstReJITDomainTable
+ 14, // CrstReJITGlobalRequest
+ 19, // CrstRemoting
+ 3, // CrstRetThunkCache
+ 0, // CrstRWLock
+ 3, // CrstSavedExceptionInfo
+ 0, // CrstSaveModuleProfileData
+ 0, // CrstSecurityStackwalkCache
+ 4, // CrstSharedAssemblyCreate
+ 3, // CrstSigConvert
+ 5, // CrstSingleUseLock
+ 0, // CrstSpecialStatics
+ 0, // CrstSqmManager
+ 0, // CrstStackSampler
+ -1, // CrstStressLog
+ 0, // CrstStrongName
+ 5, // CrstStubCache
+ 0, // CrstStubDispatchCache
+ 4, // CrstStubUnwindInfoHeapSegments
+ 3, // CrstSyncBlockCache
+ 0, // CrstSyncHashLock
+ 4, // CrstSystemBaseDomain
+ 13, // CrstSystemDomain
+ 0, // CrstSystemDomainDelayedUnloadList
+ 0, // CrstThreadIdDispenser
+ 0, // CrstThreadpoolEventCache
+ 7, // CrstThreadpoolTimerQueue
+ 7, // CrstThreadpoolWaitThreads
+ 13, // CrstThreadpoolWorker
+ 4, // CrstThreadStaticDataHashTable
+ 12, // CrstThreadStore
+ 9, // CrstTieredCompilation
+ 9, // CrstTPMethodTable
+ 3, // CrstTypeEquivalenceMap
+ 7, // CrstTypeIDMap
+ 3, // CrstUMEntryThunkCache
+ 0, // CrstUMThunkHash
+ 3, // CrstUniqueStack
+ 7, // CrstUnresolvedClassLock
+ 3, // CrstUnwindInfoTableLock
+ 3, // CrstVSDIndirectionCellLock
+ 3, // CrstWinRTFactoryCache
+ 3, // CrstWrapperTemplate
};
// An array mapping CrstType to a stringized name.
@@ -414,6 +416,7 @@ LPCSTR g_rgCrstNameMap[] =
"CrstLoaderHeap",
"CrstMda",
"CrstMetadataTracker",
+ "CrstMethodDescBackpatchInfoTracker",
"CrstModIntPairList",
"CrstModule",
"CrstModuleFixup",
diff --git a/src/inc/shash.inl b/src/inc/shash.inl
index 76a5b8c9a6..c43e91f0ed 100644
--- a/src/inc/shash.inl
+++ b/src/inc/shash.inl
@@ -227,6 +227,14 @@ void SHash<TRAITS>::RemoveAll()
}
CONTRACT_END;
+ if (TRAITS::s_DestructPerEntryCleanupAction)
+ {
+ for (Iterator i = Begin(); i != End(); i++)
+ {
+ TRAITS::OnDestructPerEntryCleanupAction(*i);
+ }
+ }
+
delete [] m_table;
m_table = NULL;
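The `RemoveAll()` change above invokes a per-entry cleanup hook when the hash traits opt in via `s_DestructPerEntryCleanupAction`. The following standalone sketch shows the shape of such an opt-in; `TinySet`, `IntPtrCleanupTraits`, and `element_t` are illustrative stand-ins, not CoreCLR's actual `SHash` traits.

```
#include <vector>

// Standalone illustration of a traits-controlled per-entry cleanup action.
template <typename TRAITS>
class TinySet
{
public:
    void Add(typename TRAITS::element_t e) { m_table.push_back(e); }

    void RemoveAll()
    {
        if (TRAITS::s_DestructPerEntryCleanupAction)
        {
            // Give each live entry a chance to clean up before the table is dropped
            for (typename TRAITS::element_t &e : m_table)
                TRAITS::OnDestructPerEntryCleanupAction(e);
        }
        m_table.clear();
    }

private:
    std::vector<typename TRAITS::element_t> m_table;
};

struct IntPtrCleanupTraits
{
    typedef int *element_t;
    static const bool s_DestructPerEntryCleanupAction = true;
    static void OnDestructPerEntryCleanupAction(int *e) { delete e; }
};

// Usage: each entry is deleted exactly once when RemoveAll() runs.
//   TinySet<IntPtrCleanupTraits> set;
//   set.Add(new int(42));
//   set.RemoveAll();
```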
diff --git a/src/vm/CMakeLists.txt b/src/vm/CMakeLists.txt
index 8b066589e3..b3137d3e45 100644
--- a/src/vm/CMakeLists.txt
+++ b/src/vm/CMakeLists.txt
@@ -89,6 +89,7 @@ set(VM_SOURCES_DAC_AND_WKS_COMMON
loaderallocator.cpp
memberload.cpp
method.cpp
+ methoddescbackpatchinfo.cpp
methodimpl.cpp
methoditer.cpp
methodtable.cpp
@@ -188,6 +189,7 @@ set(VM_HEADERS_DAC_AND_WKS_COMMON
memberload.h
method.hpp
method.inl
+ methoddescbackpatchinfo.h
methodimpl.h
methoditer.h
methodtable.h
diff --git a/src/vm/amd64/cgenamd64.cpp b/src/vm/amd64/cgenamd64.cpp
index 25e0ba4aaa..79f05d24cd 100644
--- a/src/vm/amd64/cgenamd64.cpp
+++ b/src/vm/amd64/cgenamd64.cpp
@@ -880,10 +880,20 @@ EXTERN_C PCODE VirtualMethodFixupWorker(TransitionBlock * pTransitionBlock, CORC
INSTALL_MANAGED_EXCEPTION_DISPATCHER;
INSTALL_UNWIND_AND_CONTINUE_HANDLER_NO_PROBE;
- // Skip fixup precode jump for better perf
- PCODE pDirectTarget = Precode::TryToSkipFixupPrecode(pCode);
- if (pDirectTarget != NULL)
- pCode = pDirectTarget;
+ if (pMD->IsVersionableWithVtableSlotBackpatch())
+ {
+ // The entry point for this method needs to be versionable, so use a FuncPtrStub similarly to what is done in
+ // MethodDesc::GetMultiCallableAddrOfCode()
+ GCX_COOP();
+ pCode = pMD->GetLoaderAllocator()->GetFuncPtrStubs()->GetFuncPtrStub(pMD);
+ }
+ else
+ {
+ // Skip fixup precode jump for better perf
+ PCODE pDirectTarget = Precode::TryToSkipFixupPrecode(pCode);
+ if (pDirectTarget != NULL)
+ pCode = pDirectTarget;
+ }
INT64 oldValue = *(INT64*)pThunk;
BYTE* pOldValue = (BYTE*)&oldValue;
diff --git a/src/vm/amd64/virtualcallstubcpu.hpp b/src/vm/amd64/virtualcallstubcpu.hpp
index 7547559df0..c163736fac 100644
--- a/src/vm/amd64/virtualcallstubcpu.hpp
+++ b/src/vm/amd64/virtualcallstubcpu.hpp
@@ -125,23 +125,29 @@ struct DispatchStubShort
static BOOL isShortStub(LPCBYTE pCode);
inline PCODE implTarget() const { LIMITED_METHOD_CONTRACT; return (PCODE) _implTarget; }
+
+ inline TADDR implTargetSlot() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (TADDR)&_implTarget;
+ }
+
inline PCODE failTarget() const { LIMITED_METHOD_CONTRACT; return (PCODE) &_failDispl + sizeof(DISPL) + _failDispl; }
private:
- BYTE part1 [2]; // 0f 85 jne
- DISPL _failDispl; // xx xx xx xx failEntry ;must be forward jmp for perf reasons
- BYTE part2 [2]; // 48 B8 mov rax,
+ BYTE part1 [2]; // 48 B8 mov rax,
size_t _implTarget; // xx xx xx xx xx xx xx xx 64-bit address
+ BYTE part2[2]; // 0f 85 jne
+ DISPL _failDispl; // xx xx xx xx failEntry ;must be forward jmp for perf reasons
BYTE part3 [2]; // FF E0 jmp rax
-
- // 31 bytes long, need 1 byte of padding to 8-byte align.
- BYTE alignPad [1]; // cc
};
+#define DispatchStubShort_offsetof_failDisplBase (offsetof(DispatchStubShort, _failDispl) + sizeof(DISPL))
+
inline BOOL DispatchStubShort::isShortStub(LPCBYTE pCode)
{
LIMITED_METHOD_CONTRACT;
- return reinterpret_cast<DispatchStubShort const *>(pCode)->part1[0] == 0x0f;
+ return reinterpret_cast<DispatchStubShort const *>(pCode)->part2[0] == 0x0f;
}
@@ -155,27 +161,34 @@ struct DispatchStubLong
static inline BOOL isLongStub(LPCBYTE pCode);
inline PCODE implTarget() const { LIMITED_METHOD_CONTRACT; return (PCODE) _implTarget; }
+
+ inline TADDR implTargetSlot() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (TADDR)&_implTarget;
+ }
+
inline PCODE failTarget() const { LIMITED_METHOD_CONTRACT; return (PCODE) _failTarget; }
private:
- BYTE part1 [1]; // 75 jne
- BYTE _failDispl; // xx failLabel
- BYTE part2 [2]; // 48 B8 mov rax,
+ BYTE part1[2]; // 48 B8 mov rax,
size_t _implTarget; // xx xx xx xx xx xx xx xx 64-bit address
+ BYTE part2 [1]; // 75 jne
+ BYTE _failDispl; // xx failLabel
BYTE part3 [2]; // FF E0 jmp rax
// failLabel:
BYTE part4 [2]; // 48 B8 mov rax,
size_t _failTarget; // xx xx xx xx xx xx xx xx 64-bit address
BYTE part5 [2]; // FF E0 jmp rax
-
- // 39 bytes long, need 1 byte of padding to 8-byte align.
- BYTE alignPad [1]; // cc
};
+#define DispatchStubLong_offsetof_failDisplBase (offsetof(DispatchStubLong, _failDispl) + sizeof(BYTE))
+#define DispatchStubLong_offsetof_failLabel (offsetof(DispatchStubLong, part4[0]))
+
inline BOOL DispatchStubLong::isLongStub(LPCBYTE pCode)
{
LIMITED_METHOD_CONTRACT;
- return reinterpret_cast<DispatchStubLong const *>(pCode)->part1[0] == 0x75;
+ return reinterpret_cast<DispatchStubLong const *>(pCode)->part2[0] == 0x75;
}
/*DispatchStub**************************************************************************************
@@ -234,6 +247,18 @@ struct DispatchStub
return getLongStub()->implTarget();
}
+ inline TADDR implTargetSlot(EntryPointSlots::SlotType *slotTypeRef) const
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(slotTypeRef != nullptr);
+
+ *slotTypeRef = EntryPointSlots::SlotType_Executable;
+ if (type() == e_TYPE_SHORT)
+ return getShortStub()->implTargetSlot();
+ else
+ return getLongStub()->implTargetSlot();
+ }
+
inline PCODE failTarget() const
{
if (type() == e_TYPE_SHORT)
@@ -249,9 +274,10 @@ private:
inline DispatchStubLong const *getLongStub() const
{ LIMITED_METHOD_CONTRACT; return reinterpret_cast<DispatchStubLong const *>(this + 1); }
- BYTE _entryPoint [2]; // 48 B8 mov rax,
+ BYTE _entryPoint [2]; // 48 B8 mov rax,
size_t _expectedMT; // xx xx xx xx xx xx xx xx 64-bit address
BYTE part1 [3]; // 48 39 XX cmp [THIS_REG], rax
+ BYTE nopOp; // 90 nop ; 1-byte nop to align _implTarget
// Followed by either DispatchStubShort or DispatchStubLong, depending
// on whether we were able to make a rel32 or had to make an abs64 jump
@@ -291,7 +317,7 @@ struct DispatchHolder
static BOOL CanShortJumpDispatchStubReachFailTarget(PCODE failTarget, LPCBYTE stubMemory)
{
STATIC_CONTRACT_WRAPPER;
- LPCBYTE pFrom = stubMemory + sizeof(DispatchStub) + offsetof(DispatchStubShort, part2[0]);
+ LPCBYTE pFrom = stubMemory + sizeof(DispatchStub) + DispatchStubShort_offsetof_failDisplBase;
size_t cbRelJump = failTarget - (PCODE)pFrom;
return FitsInI4(cbRelJump);
}
@@ -554,10 +580,13 @@ void LookupHolder::Initialize(PCODE resolveWorkerTarget, size_t dispatchToken)
void DispatchHolder::InitializeStatic()
{
- // Check that _expectedMT is aligned in the DispatchHolder
- static_assert_no_msg(((sizeof(DispatchStub)+sizeof(DispatchStubShort)) % sizeof(void*)) == 0);
- static_assert_no_msg(((sizeof(DispatchStub)+sizeof(DispatchStubLong)) % sizeof(void*)) == 0);
- CONSISTENCY_CHECK((offsetof(DispatchStubLong, part4[0]) - offsetof(DispatchStubLong, part2[0])) < INT8_MAX);
+ // Check that _implTarget is aligned in the DispatchStub for backpatching
+ static_assert_no_msg(((sizeof(DispatchStub) + offsetof(DispatchStubShort, _implTarget)) % sizeof(void *)) == 0);
+ static_assert_no_msg(((sizeof(DispatchStub) + offsetof(DispatchStubLong, _implTarget)) % sizeof(void *)) == 0);
+
+ static_assert_no_msg(((sizeof(DispatchStub) + sizeof(DispatchStubShort)) % sizeof(void*)) == 0);
+ static_assert_no_msg(((sizeof(DispatchStub) + sizeof(DispatchStubLong)) % sizeof(void*)) == 0);
+ static_assert_no_msg((DispatchStubLong_offsetof_failLabel - DispatchStubLong_offsetof_failDisplBase) < INT8_MAX);
// Common dispatch stub initialization
dispatchInit._entryPoint [0] = 0x48;
@@ -570,24 +599,24 @@ void DispatchHolder::InitializeStatic()
#else
dispatchInit.part1 [2] = 0x01; // RCX
#endif
+ dispatchInit.nopOp = 0x90;
// Short dispatch stub initialization
- dispatchShortInit.part1 [0] = 0x0F;
- dispatchShortInit.part1 [1] = 0x85;
- dispatchShortInit._failDispl = 0xcccccccc;
- dispatchShortInit.part2 [0] = 0x48;
- dispatchShortInit.part2 [1] = 0xb8;
+ dispatchShortInit.part1 [0] = 0x48;
+ dispatchShortInit.part1 [1] = 0xb8;
dispatchShortInit._implTarget = 0xcccccccccccccccc;
+ dispatchShortInit.part2 [0] = 0x0F;
+ dispatchShortInit.part2 [1] = 0x85;
+ dispatchShortInit._failDispl = 0xcccccccc;
dispatchShortInit.part3 [0] = 0xFF;
dispatchShortInit.part3 [1] = 0xE0;
- dispatchShortInit.alignPad [0] = INSTR_INT3;
// Long dispatch stub initialization
- dispatchLongInit.part1 [0] = 0x75;
- dispatchLongInit._failDispl = BYTE(&dispatchLongInit.part4[0] - &dispatchLongInit.part2[0]);
- dispatchLongInit.part2 [0] = 0x48;
- dispatchLongInit.part2 [1] = 0xb8;
+ dispatchLongInit.part1 [0] = 0x48;
+ dispatchLongInit.part1 [1] = 0xb8;
dispatchLongInit._implTarget = 0xcccccccccccccccc;
+ dispatchLongInit.part2 [0] = 0x75;
+ dispatchLongInit._failDispl = BYTE(DispatchStubLong_offsetof_failLabel - DispatchStubLong_offsetof_failDisplBase);
dispatchLongInit.part3 [0] = 0xFF;
dispatchLongInit.part3 [1] = 0xE0;
// failLabel:
@@ -596,7 +625,6 @@ void DispatchHolder::InitializeStatic()
dispatchLongInit._failTarget = 0xcccccccccccccccc;
dispatchLongInit.part5 [0] = 0xFF;
dispatchLongInit.part5 [1] = 0xE0;
- dispatchLongInit.alignPad [0] = INSTR_INT3;
};
void DispatchHolder::Initialize(PCODE implTarget, PCODE failTarget, size_t expectedMT,
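The new `implTargetSlot()` accessors and the `_implTarget` alignment checks above exist so that the target slot inside a dispatch stub can be handed to the entry point backpatching machinery. Below is a hedged sketch of how such a slot might be recorded; the helper name `RecordDispatchStubSlotSketch` is hypothetical, and the real call site is in src/vm/virtualcallstub.cpp (part of this change but not shown in full here).

```
// Illustrative only: record a dispatch stub's target slot so tier changes repatch it.
void RecordDispatchStubSlotSketch(MethodDesc *pMD, DispatchStub *pStub, LoaderAllocator *pStubLoaderAllocator)
{
    EntryPointSlots::SlotType slotType;
    TADDR slot = pStub->implTargetSlot(&slotType); // SlotType_Executable on amd64

    // Records the slot and immediately backpatches it to the method's current
    // entry point; subsequent entry point changes repatch it as needed.
    pMD->RecordAndBackpatchEntryPointSlot(pStubLoaderAllocator, slot, slotType);
}
```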
diff --git a/src/vm/appdomain.cpp b/src/vm/appdomain.cpp
index f0933afda5..37d9c847d5 100644
--- a/src/vm/appdomain.cpp
+++ b/src/vm/appdomain.cpp
@@ -3904,6 +3904,12 @@ void AppDomain::Terminate()
}
#endif // FEATURE_COMINTEROP
+#ifndef CROSSGEN_COMPILE
+ // Recorded entry point slots may point into the virtual call stub manager's heaps, so clear it first
+ GetLoaderAllocator()
+ ->GetMethodDescBackpatchInfoTracker()
+ ->ClearDependencyMethodDescEntryPointSlots(GetLoaderAllocator());
+#endif
if (!IsAtProcessExit())
{
diff --git a/src/vm/appdomain.hpp b/src/vm/appdomain.hpp
index c67f8af00c..9b5d01ae05 100644
--- a/src/vm/appdomain.hpp
+++ b/src/vm/appdomain.hpp
@@ -43,10 +43,7 @@
#include "appxutil.h"
-#ifdef FEATURE_TIERED_COMPILATION
#include "tieredcompilation.h"
-#include "callcounter.h"
-#endif
#include "codeversion.h"
@@ -1465,14 +1462,6 @@ public:
CodeVersionManager* GetCodeVersionManager() { return &m_codeVersionManager; }
#endif //FEATURE_CODE_VERSIONING
-#ifdef FEATURE_TIERED_COMPILATION
-private:
- CallCounter m_callCounter;
-
-public:
- CallCounter* GetCallCounter() { return &m_callCounter; }
-#endif
-
#ifdef DACCESS_COMPILE
public:
virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags,
diff --git a/src/vm/arm/stubs.cpp b/src/vm/arm/stubs.cpp
index d863900eec..8b59e4eff6 100644
--- a/src/vm/arm/stubs.cpp
+++ b/src/vm/arm/stubs.cpp
@@ -914,11 +914,6 @@ Note that ResolveWorkerChainLookupAsmStub currently points directly
to ResolveWorkerAsmStub; in the future, this could be separate.
*/
-void LookupHolder::InitializeStatic()
-{
- // Nothing to initialize
-}
-
void LookupHolder::Initialize(PCODE resolveWorkerTarget, size_t dispatchToken)
{
// Called directly by JITTED code
@@ -936,11 +931,6 @@ void LookupHolder::Initialize(PCODE resolveWorkerTarget, size_t dispatchToken)
_ASSERTE(4 == LookupStub::entryPointLen);
}
-void DispatchHolder::InitializeStatic()
-{
- // Nothing to initialize
-};
-
void DispatchHolder::Initialize(PCODE implTarget, PCODE failTarget, size_t expectedMT)
{
// Called directly by JITTED code
@@ -1013,10 +1003,6 @@ void DispatchHolder::Initialize(PCODE implTarget, PCODE failTarget, size_t expe
_stub._implTarget = implTarget;
}
-void ResolveHolder::InitializeStatic()
-{
-}
-
void ResolveHolder::Initialize(PCODE resolveWorkerTarget, PCODE patcherTarget,
size_t dispatchToken, UINT32 hashedToken,
void * cacheAddr, INT32 * counterAddr)
diff --git a/src/vm/arm/virtualcallstubcpu.hpp b/src/vm/arm/virtualcallstubcpu.hpp
index 6dc99e5093..461e91bf1c 100644
--- a/src/vm/arm/virtualcallstubcpu.hpp
+++ b/src/vm/arm/virtualcallstubcpu.hpp
@@ -73,7 +73,7 @@ stubs as necessary. In the case of LookupStubs, alignment is necessary since
LookupStubs are placed in a hash table keyed by token. */
struct LookupHolder
{
- static void InitializeStatic();
+ static void InitializeStatic() { LIMITED_METHOD_CONTRACT; }
void Initialize(PCODE resolveWorkerTarget, size_t dispatchToken);
@@ -117,6 +117,16 @@ struct DispatchStub
inline size_t expectedMT() { LIMITED_METHOD_CONTRACT; return _expectedMT; }
inline PCODE implTarget() { LIMITED_METHOD_CONTRACT; return _implTarget; }
+
+ inline TADDR implTargetSlot(EntryPointSlots::SlotType *slotTypeRef) const
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(slotTypeRef != nullptr);
+
+ *slotTypeRef = EntryPointSlots::SlotType_Normal;
+ return (TADDR)&_implTarget;
+ }
+
inline PCODE failTarget() { LIMITED_METHOD_CONTRACT; return _failTarget; }
inline size_t size() { LIMITED_METHOD_CONTRACT; return sizeof(DispatchStub); }
@@ -151,7 +161,13 @@ atomically update it. When we get a resolver function that does what we want, w
and live with just the inlineTarget field in the stub itself, since immutability will hold.*/
struct DispatchHolder
{
- static void InitializeStatic();
+ static void InitializeStatic()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Check that _implTarget is aligned in the DispatchHolder for backpatching
+ static_assert_no_msg(((offsetof(DispatchHolder, _stub) + offsetof(DispatchStub, _implTarget)) % sizeof(void *)) == 0);
+ }
void Initialize(PCODE implTarget, PCODE failTarget, size_t expectedMT);
@@ -245,7 +261,7 @@ any of its inlined tokens (non-prehashed) is aligned, then the token field in th
is not needed. */
struct ResolveHolder
{
- static void InitializeStatic();
+ static void InitializeStatic() { LIMITED_METHOD_CONTRACT; }
void Initialize(PCODE resolveWorkerTarget, PCODE patcherTarget,
size_t dispatchToken, UINT32 hashedToken,
diff --git a/src/vm/arm64/virtualcallstubcpu.hpp b/src/vm/arm64/virtualcallstubcpu.hpp
index c7b3f75e68..96069c7abf 100644
--- a/src/vm/arm64/virtualcallstubcpu.hpp
+++ b/src/vm/arm64/virtualcallstubcpu.hpp
@@ -75,6 +75,16 @@ struct DispatchStub
inline size_t expectedMT() { LIMITED_METHOD_CONTRACT; return _expectedMT; }
inline PCODE implTarget() { LIMITED_METHOD_CONTRACT; return _implTarget; }
+
+ inline TADDR implTargetSlot(EntryPointSlots::SlotType *slotTypeRef) const
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(slotTypeRef != nullptr);
+
+ *slotTypeRef = EntryPointSlots::SlotType_Normal;
+ return (TADDR)&_implTarget;
+ }
+
inline PCODE failTarget() { LIMITED_METHOD_CONTRACT; return _failTarget; }
inline size_t size() { LIMITED_METHOD_CONTRACT; return sizeof(DispatchStub); }
@@ -89,7 +99,13 @@ private:
struct DispatchHolder
{
- static void InitializeStatic() { }
+ static void InitializeStatic()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Check that _implTarget is aligned in the DispatchHolder for backpatching
+ static_assert_no_msg(((offsetof(DispatchHolder, _stub) + offsetof(DispatchStub, _implTarget)) % sizeof(void *)) == 0);
+ }
void Initialize(PCODE implTarget, PCODE failTarget, size_t expectedMT)
{
diff --git a/src/vm/array.cpp b/src/vm/array.cpp
index cde360c562..b93657c432 100644
--- a/src/vm/array.cpp
+++ b/src/vm/array.cpp
@@ -543,8 +543,11 @@ MethodTable* Module::CreateArrayMethodTable(TypeHandle elemTypeHnd, CorElementTy
if (!canShareVtableChunks)
{
// Copy top level class's vtable - note, vtable is contained within the MethodTable
+ MethodTable::MethodDataWrapper hParentMTData(MethodTable::GetMethodData(pParentClass, FALSE));
for (UINT32 i = 0; i < numVirtuals; i++)
- pMT->SetSlot(i, pParentClass->GetSlot(i));
+ {
+ pMT->CopySlotFrom(i, hParentMTData, pParentClass);
+ }
}
if (pClass != NULL)
diff --git a/src/vm/ceeload.h b/src/vm/ceeload.h
index 46537da687..81f7acd62e 100644
--- a/src/vm/ceeload.h
+++ b/src/vm/ceeload.h
@@ -74,7 +74,6 @@ class MethodTable;
class AppDomain;
class DynamicMethodTable;
class CodeVersionManager;
-class CallCounter;
class TieredCompilationManager;
#ifdef FEATURE_PREJIT
class CerNgenRootTable;
@@ -1796,9 +1795,6 @@ protected:
#ifdef FEATURE_CODE_VERSIONING
CodeVersionManager * GetCodeVersionManager();
#endif
-#ifdef FEATURE_TIERED_COMPILATION
- CallCounter * GetCallCounter();
-#endif
mdFile GetModuleRef()
{
diff --git a/src/vm/ceeload.inl b/src/vm/ceeload.inl
index 07d657bcaa..20c8d8cbdf 100644
--- a/src/vm/ceeload.inl
+++ b/src/vm/ceeload.inl
@@ -653,12 +653,4 @@ inline CodeVersionManager * Module::GetCodeVersionManager()
}
#endif // FEATURE_CODE_VERSIONING
-#ifdef FEATURE_TIERED_COMPILATION
-inline CallCounter * Module::GetCallCounter()
-{
- LIMITED_METHOD_CONTRACT;
- return GetDomain()->GetCallCounter();
-}
-#endif // FEATURE_TIERED_COMPILATION
-
#endif // CEELOAD_INL_
diff --git a/src/vm/ceemain.cpp b/src/vm/ceemain.cpp
index fc7e6393cd..defa4ac09b 100644
--- a/src/vm/ceemain.cpp
+++ b/src/vm/ceemain.cpp
@@ -668,6 +668,8 @@ void EEStartupHelper(COINITIEE fFlags)
// This needs to be done before the EE has started
InitializeStartupFlags();
+ MethodDescBackpatchInfoTracker::StaticInitialize();
+
InitThreadManager();
STRESS_LOG0(LF_STARTUP, LL_ALWAYS, "Returned successfully from InitThreadManager");
diff --git a/src/vm/codeversion.cpp b/src/vm/codeversion.cpp
index 00d6aff431..70072e190c 100644
--- a/src/vm/codeversion.cpp
+++ b/src/vm/codeversion.cpp
@@ -1103,7 +1103,7 @@ HRESULT MethodDescVersioningState::SyncJumpStamp(NativeCodeVersion nativeCodeVer
HRESULT hr = S_OK;
PCODE pCode = nativeCodeVersion.IsNull() ? NULL : nativeCodeVersion.GetNativeCode();
MethodDesc* pMethod = GetMethodDesc();
- _ASSERTE(pMethod->IsVersionable() && pMethod->IsVersionableWithJumpStamp());
+ _ASSERTE(pMethod->IsVersionableWithJumpStamp());
if (!pMethod->HasNativeCode())
{
@@ -2135,7 +2135,6 @@ PCODE CodeVersionManager::PublishVersionableCodeIfNecessary(MethodDesc* pMethodD
HRESULT hr = S_OK;
PCODE pCode = NULL;
- BOOL fIsJumpStampMethod = pMethodDesc->IsVersionableWithJumpStamp();
NativeCodeVersion activeVersion;
{
@@ -2239,37 +2238,26 @@ HRESULT CodeVersionManager::PublishNativeCodeVersion(MethodDesc* pMethod, Native
{
// TODO: This function needs to make sure it does not change the precode's target if call counting is in progress. Track
// whether call counting is currently being done for the method, and use a lock to ensure the expected precode target.
- LIMITED_METHOD_CONTRACT;
+ WRAPPER_NO_CONTRACT;
_ASSERTE(LockOwnedByCurrentThread());
_ASSERTE(pMethod->IsVersionable());
HRESULT hr = S_OK;
PCODE pCode = nativeCodeVersion.IsNull() ? NULL : nativeCodeVersion.GetNativeCode();
- if (pMethod->IsVersionableWithPrecode())
+ if (pMethod->IsVersionableWithoutJumpStamp())
{
- Precode* pPrecode = pMethod->GetOrCreatePrecode();
- if (pCode == NULL)
+ EX_TRY
{
- EX_TRY
+ if (pCode == NULL)
{
- pPrecode->Reset();
+ pMethod->ResetCodeEntryPoint();
}
- EX_CATCH_HRESULT(hr);
- return hr;
- }
- else
- {
- EX_TRY
+ else
{
- pPrecode->SetTargetInterlocked(pCode, FALSE);
-
- // SetTargetInterlocked() would return false if it lost the race with another thread. That is fine, this thread
- // can continue assuming it was successful, similarly to it successfully updating the target and another thread
- // updating the target again shortly afterwards.
- hr = S_OK;
+ pMethod->SetCodeEntryPoint(pCode);
}
- EX_CATCH_HRESULT(hr);
- return hr;
}
+ EX_CATCH_HRESULT(hr);
+ return hr;
}
else
{
@@ -2535,13 +2523,13 @@ HRESULT CodeVersionManager::DoJumpStampIfNecessary(MethodDesc* pMD, PCODE pCode)
return S_OK;
}
- if (!(pMD->IsVersionable() && pMD->IsVersionableWithJumpStamp()))
+ if (!pMD->IsVersionableWithJumpStamp())
{
return GetNonVersionableError(pMD);
}
#ifndef FEATURE_JUMPSTAMP
- _ASSERTE(!"How did we get here? IsVersionableWithJumpStamp() should have been FALSE above");
+ _ASSERTE(!"How did we get here? IsVersionableWithJumpStamp() should have been false above");
return S_OK;
#else
HRESULT hr;
@@ -2573,6 +2561,27 @@ void CodeVersionManager::OnAppDomainExit(AppDomain * pAppDomain)
}
#endif
+// Returns true if CodeVersionManager is capable of versioning this method. There may be other reasons that the runtime elects
+// not to version a method even if CodeVersionManager could support it. Use the MethodDesc::IsVersionableWith*() accessors to
+// get the final determination of versioning support for a given method.
+//
+//static
+bool CodeVersionManager::IsMethodSupported(PTR_MethodDesc pMethodDesc)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(pMethodDesc != NULL);
+
+ return
+ // CodeVersionManager data structures don't properly handle the lifetime semantics of dynamic code at this point
+ !pMethodDesc->IsDynamicMethod() &&
+
+ // CodeVersionManager data structures don't properly handle the lifetime semantics of collectible code at this point
+ !pMethodDesc->GetLoaderAllocator()->IsCollectible() &&
+
+ // EnC has its own way of versioning
+ !pMethodDesc->IsEnCMethod();
+}
+
//---------------------------------------------------------------------------------------
//
// Small helper to determine whether a given (possibly instantiated generic) MethodDesc
diff --git a/src/vm/codeversion.h b/src/vm/codeversion.h
index d2c14c69a8..6554fc57cb 100644
--- a/src/vm/codeversion.h
+++ b/src/vm/codeversion.h
@@ -555,7 +555,6 @@ public:
typedef SHash<ILCodeVersioningStateHashTraits> ILCodeVersioningStateHash;
-
class CodeVersionManager
{
friend class ILCodeVersion;
@@ -615,6 +614,8 @@ public:
static void OnAppDomainExit(AppDomain* pAppDomain);
#endif
+ static bool IsMethodSupported(PTR_MethodDesc pMethodDesc);
+
private:
#ifndef DACCESS_COMPILE
diff --git a/src/vm/eeconfig.cpp b/src/vm/eeconfig.cpp
index a2c1b1d6b5..a11dacd01d 100644
--- a/src/vm/eeconfig.cpp
+++ b/src/vm/eeconfig.cpp
@@ -360,7 +360,11 @@ HRESULT EEConfig::Init()
tieredCompilation_tier1CallCountThreshold = 1;
tieredCompilation_tier1CallCountingDelayMs = 0;
#endif
-
+
+#ifndef CROSSGEN_COMPILE
+ backpatchEntryPointSlots = false;
+#endif
+
#if defined(FEATURE_GDBJIT) && defined(_DEBUG)
pszGDBJitElfDump = NULL;
#endif // FEATURE_GDBJIT && _DEBUG
@@ -1245,6 +1249,10 @@ HRESULT EEConfig::sync()
}
#endif
+#ifndef CROSSGEN_COMPILE
+ backpatchEntryPointSlots = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_BackpatchEntryPointSlots) != 0;
+#endif
+
#if defined(FEATURE_GDBJIT) && defined(_DEBUG)
{
LPWSTR pszGDBJitElfDumpW = NULL;
diff --git a/src/vm/eeconfig.h b/src/vm/eeconfig.h
index cc53d49dce..9df371515a 100644
--- a/src/vm/eeconfig.h
+++ b/src/vm/eeconfig.h
@@ -291,6 +291,10 @@ public:
DWORD TieredCompilation_Tier1CallCountingDelayMs() const { LIMITED_METHOD_CONTRACT; return tieredCompilation_tier1CallCountingDelayMs; }
#endif
+#ifndef CROSSGEN_COMPILE
+ bool BackpatchEntryPointSlots() const { LIMITED_METHOD_CONTRACT; return backpatchEntryPointSlots; }
+#endif
+
#if defined(FEATURE_GDBJIT) && defined(_DEBUG)
inline bool ShouldDumpElfOnMethod(LPCUTF8 methodName) const
{
@@ -1028,6 +1032,10 @@ private: //----------------------------------------------------------------
DWORD tieredCompilation_tier1CallCountingDelayMs;
#endif
+#ifndef CROSSGEN_COMPILE
+ bool backpatchEntryPointSlots;
+#endif
+
#if defined(FEATURE_GDBJIT) && defined(_DEBUG)
LPCUTF8 pszGDBJitElfDump;
#endif // FEATURE_GDBJIT && _DEBUG
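The new config value surfaces in the VM through `g_pConfig->BackpatchEntryPointSlots()` (populated in the eeconfig.cpp hunk above) and, like other retail CLRConfig settings, can be turned off externally via the `COMPlus_BackpatchEntryPointSlots=0` environment variable. A minimal sketch of a guard on the switch; the function name is hypothetical, and the actual eligibility policy lives in `MethodDesc` (see the method.cpp/method.hpp hunks below).

```
// Illustrative guard only; the real policy checks are in MethodDesc.
bool BackpatchingEnabledSketch()
{
    // Cached value of the BackpatchEntryPointSlots config (defaults to 1).
    return g_pConfig->BackpatchEntryPointSlots();
}
```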
diff --git a/src/vm/fptrstubs.cpp b/src/vm/fptrstubs.cpp
index b100f1abaf..1e326c4b90 100644
--- a/src/vm/fptrstubs.cpp
+++ b/src/vm/fptrstubs.cpp
@@ -17,7 +17,7 @@ Precode* FuncPtrStubs::Lookup(MethodDesc * pMD, PrecodeType type)
CONTRACTL
{
NOTHROW;
- GC_TRIGGERS;
+ GC_NOTRIGGER;
}
CONTRACTL_END;
@@ -82,6 +82,7 @@ PCODE FuncPtrStubs::GetFuncPtrStub(MethodDesc * pMD, PrecodeType type)
}
PCODE target = NULL;
+ bool setTargetAfterAddingToHashTable = false;
if (type != GetDefaultType(pMD) &&
// Always use stable entrypoint for LCG. If the cached precode pointed directly to JITed code,
@@ -91,12 +92,18 @@ PCODE FuncPtrStubs::GetFuncPtrStub(MethodDesc * pMD, PrecodeType type)
// Set the target if precode is not of the default type. We are patching the precodes of the default type only.
target = pMD->GetMultiCallableAddrOfCode();
}
- else
- if (pMD->HasStableEntryPoint())
+ else if (pMD->HasStableEntryPoint())
{
// Set target
target = pMD->GetStableEntryPoint();
}
+ else if (pMD->IsVersionableWithVtableSlotBackpatch())
+ {
+ // The funcptr stub must point to the current entry point after it is created and exposed. Keep the target as null for
+ // now. The precode will initially point to the prestub and its target will be updated after the precode is exposed.
+ _ASSERTE(target == NULL);
+ setTargetAfterAddingToHashTable = true;
+ }
else
{
// Set the target if method is methodimpled. We would not get to patch it otherwise.
@@ -137,6 +144,28 @@ PCODE FuncPtrStubs::GetFuncPtrStub(MethodDesc * pMD, PrecodeType type)
m_hashTable.Add(pPrecode);
amt.SuppressRelease();
}
+ else
+ {
+ setTargetAfterAddingToHashTable = false;
+ }
+ }
+ }
+
+ if (setTargetAfterAddingToHashTable)
+ {
+ _ASSERTE(pMD->IsVersionableWithVtableSlotBackpatch());
+
+ PCODE temporaryEntryPoint = pMD->GetTemporaryEntryPoint();
+ MethodDescBackpatchInfoTracker::ConditionalLockHolder lockHolder;
+
+ // Set the funcptr stub's entry point to the current entry point inside the lock and after the funcptr stub is exposed,
+ // to synchronize with backpatching in MethodDesc::BackpatchEntryPointSlots()
+ PCODE entryPoint = pMD->GetMethodEntryPoint();
+ if (entryPoint != temporaryEntryPoint)
+ {
+ // Need only patch the precode from the prestub, since if someone else managed to patch the precode already then its
+ // target would already be up-to-date
+ pPrecode->SetTargetInterlocked(entryPoint, TRUE /* fOnlyRedirectFromPrestub */);
}
}
diff --git a/src/vm/frames.cpp b/src/vm/frames.cpp
index cea5241ef5..b387161761 100644
--- a/src/vm/frames.cpp
+++ b/src/vm/frames.cpp
@@ -568,13 +568,19 @@ BOOL PrestubMethodFrame::TraceFrame(Thread *thread, BOOL fromPatch,
//
// We want to set a frame patch, unless we're already at the
- // frame patch, in which case we'll trace stable entrypoint which
- // should be set by now.
+ // frame patch, in which case we'll trace the method entrypoint.
//
if (fromPatch)
{
- trace->InitForStub(GetFunction()->GetStableEntryPoint());
+ // In between the time where the Prestub read the method entry point from the slot and the time it reached
+ // ThePrestubPatchLabel, GetMethodEntryPoint() could have been updated due to code versioning. This will result in the
+ // debugger getting some version of the code or the prestub, but not necessarily the exact code pointer that winds up
+ // getting executed. The debugger has code that handles this ambiguity by placing a breakpoint at the start of all
+ // native code versions, even if they aren't the one that was reported by this trace, see
+ // DebuggerController::PatchTrace() under case TRACE_MANAGED. This alleviates the StubManager from having to prevent the
+ // race that occurs here.
+ trace->InitForStub(GetFunction()->GetMethodEntryPoint());
}
else
{
@@ -733,25 +739,11 @@ BOOL StubDispatchFrame::TraceFrame(Thread *thread, BOOL fromPatch,
{
WRAPPER_NO_CONTRACT;
- //
- // We want to set a frame patch, unless we're already at the
- // frame patch, in which case we'll trace stable entrypoint which
- // should be set by now.
- //
+ // StubDispatchFixupWorker and VSD_ResolveWorker never directly call managed code. Returning false instructs the debugger to
+ // step out of the call that erected this frame and continue trying to trace execution from there.
+ LOG((LF_CORDB, LL_INFO1000, "StubDispatchFrame::TraceFrame: return FALSE\n"));
- if (fromPatch)
- {
- trace->InitForStub(GetFunction()->GetStableEntryPoint());
- }
- else
- {
- trace->InitForStub(GetPreStubEntryPoint());
- }
-
- LOG((LF_CORDB, LL_INFO10000,
- "StubDispatchFrame::TraceFrame: ip=" FMT_ADDR "\n", DBG_ADDR(trace->GetAddress()) ));
-
- return TRUE;
+ return FALSE;
}
Frame::Interception StubDispatchFrame::GetInterception()
diff --git a/src/vm/generics.cpp b/src/vm/generics.cpp
index 1d98d98d0e..773d863244 100644
--- a/src/vm/generics.cpp
+++ b/src/vm/generics.cpp
@@ -445,9 +445,10 @@ ClassLoader::CreateTypeHandleForNonCanonicalGenericInstantiation(
if (!canShareVtableChunks)
{
// Need to assign the slots one by one to filter out jump thunks
+ MethodTable::MethodDataWrapper hOldMTData(MethodTable::GetMethodData(pOldMT, FALSE));
for (DWORD i = 0; i < cSlots; i++)
{
- pMT->SetSlot(i, pOldMT->GetRestoredSlot(i));
+ pMT->CopySlotFrom(i, hOldMTData, pOldMT);
}
}
diff --git a/src/vm/i386/cgenx86.cpp b/src/vm/i386/cgenx86.cpp
index 43604ebffb..f9e7c2bc16 100644
--- a/src/vm/i386/cgenx86.cpp
+++ b/src/vm/i386/cgenx86.cpp
@@ -1562,10 +1562,21 @@ EXTERN_C PVOID STDCALL VirtualMethodFixupWorker(Object * pThisPtr, CORCOMPILE_V
if (!DoesSlotCallPrestub(pCode))
{
- // Skip fixup precode jump for better perf
- PCODE pDirectTarget = Precode::TryToSkipFixupPrecode(pCode);
- if (pDirectTarget != NULL)
- pCode = pDirectTarget;
+ MethodDesc *pMD = MethodTable::GetMethodDescForSlotAddress(pCode);
+ if (pMD->IsVersionableWithVtableSlotBackpatch())
+ {
+ // The entry point for this method needs to be versionable, so use a FuncPtrStub similarly to what is done in
+ // MethodDesc::GetMultiCallableAddrOfCode()
+ GCX_COOP();
+ pCode = pMD->GetLoaderAllocator()->GetFuncPtrStubs()->GetFuncPtrStub(pMD);
+ }
+ else
+ {
+ // Skip fixup precode jump for better perf
+ PCODE pDirectTarget = Precode::TryToSkipFixupPrecode(pCode);
+ if (pDirectTarget != NULL)
+ pCode = pDirectTarget;
+ }
INT64 oldValue = *(INT64*)pThunk;
BYTE* pOldValue = (BYTE*)&oldValue;
diff --git a/src/vm/i386/virtualcallstubcpu.hpp b/src/vm/i386/virtualcallstubcpu.hpp
index 3bdae8c3ec..558b31e396 100644
--- a/src/vm/i386/virtualcallstubcpu.hpp
+++ b/src/vm/i386/virtualcallstubcpu.hpp
@@ -136,6 +136,16 @@ struct DispatchStub
inline size_t expectedMT() { LIMITED_METHOD_CONTRACT; return _expectedMT; }
inline PCODE implTarget() { LIMITED_METHOD_CONTRACT; return (PCODE) &_implDispl + sizeof(DISPL) + _implDispl; }
+
+ inline TADDR implTargetSlot(EntryPointSlots::SlotType *slotTypeRef) const
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(slotTypeRef != nullptr);
+
+ *slotTypeRef = EntryPointSlots::SlotType_ExecutableRel32;
+ return (TADDR)&_implDispl;
+ }
+
inline PCODE failTarget() { LIMITED_METHOD_CONTRACT; return (PCODE) &_failDispl + sizeof(DISPL) + _failDispl; }
inline size_t size() { LIMITED_METHOD_CONTRACT; return sizeof(DispatchStub); }
@@ -150,7 +160,7 @@ private:
size_t _expectedMT; // xx xx xx xx expectedMT ; If you change it, change also AdjustContextForVirtualStub in excep.cpp!!!
BYTE jmpOp1[2]; // 0f 85 jne
DISPL _failDispl; // xx xx xx xx failEntry ;must be forward jmp for perf reasons
- BYTE jmpOp2; // e9 jmp
+ BYTE jmpOp2; // e9 jmp
DISPL _implDispl; // xx xx xx xx implTarget
#else //STUB_LOGGING
BYTE _entryPoint [2]; // ff 05 inc
@@ -196,12 +206,10 @@ struct DispatchHolder
static DispatchHolder* FromDispatchEntry(PCODE dispatchEntry);
private:
- //force expectedMT to be aligned since used as key in hash tables.
-#ifndef STUB_LOGGING
- BYTE align[(sizeof(void*)-(offsetof(DispatchStub,_expectedMT)%sizeof(void*)))%sizeof(void*)];
-#endif
+ // Force _implDispl to be aligned so that it is backpatchable for tiering
+ BYTE align[(sizeof(void*) - (offsetof(DispatchStub, _implDispl) % sizeof(void*))) % sizeof(void*)];
DispatchStub _stub;
- BYTE pad[(sizeof(void*)-(sizeof(DispatchStub)%sizeof(void*))+offsetof(DispatchStub,_expectedMT))%sizeof(void*)]; //complete DWORD
+ BYTE pad[(sizeof(void*) - (sizeof(DispatchStub) % sizeof(void*)) + offsetof(DispatchStub, _implDispl)) % sizeof(void*)]; //complete DWORD
};
struct ResolveStub;
@@ -745,8 +753,8 @@ DispatchStub dispatchInit;
void DispatchHolder::InitializeStatic()
{
- // Check that _expectedMT is aligned in the DispatchHolder
- static_assert_no_msg(((offsetof(DispatchHolder, _stub) + offsetof(DispatchStub,_expectedMT)) % sizeof(void*)) == 0);
+ // Check that _implDispl is aligned in the DispatchHolder for backpatching
+ static_assert_no_msg(((offsetof(DispatchHolder, _stub) + offsetof(DispatchStub, _implDispl)) % sizeof(void*)) == 0);
static_assert_no_msg((sizeof(DispatchHolder) % sizeof(void*)) == 0);
#ifndef STUB_LOGGING
diff --git a/src/vm/loaderallocator.cpp b/src/vm/loaderallocator.cpp
index 994ae95f97..7b2ef1ee11 100644
--- a/src/vm/loaderallocator.cpp
+++ b/src/vm/loaderallocator.cpp
@@ -55,7 +55,7 @@ LoaderAllocator::LoaderAllocator()
m_pFatTokenSetLock = NULL;
m_pFatTokenSet = NULL;
#endif
-
+
#ifndef CROSSGEN_COMPILE
m_pVirtualCallStubManager = NULL;
#endif
@@ -92,6 +92,9 @@ LoaderAllocator::~LoaderAllocator()
#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
Terminate();
+ // This info is cleaned up before the virtual call stub manager is uninitialized
+ _ASSERTE(!GetMethodDescBackpatchInfoTracker()->HasDependencyMethodDescEntryPointSlots());
+
// Assert that VSD is not still active when the destructor is called.
_ASSERTE(m_pVirtualCallStubManager == NULL);
@@ -595,6 +598,11 @@ void LoaderAllocator::GCLoaderAllocators(LoaderAllocator* pOriginalLoaderAllocat
pDomainLoaderAllocatorDestroyIterator->ReleaseManagedAssemblyLoadContext();
+ // Recorded entry point slots may point into the virtual call stub manager's heaps, so clear it first
+ pDomainLoaderAllocatorDestroyIterator
+ ->GetMethodDescBackpatchInfoTracker()
+ ->ClearDependencyMethodDescEntryPointSlots(pDomainLoaderAllocatorDestroyIterator);
+
// The following code was previously happening on delete ~DomainAssembly->Terminate
// We are moving this part here in order to make sure that we can unload a LoaderAllocator
// that didn't have a DomainAssembly
@@ -1621,6 +1629,7 @@ void LoaderAllocator::UninitVirtualCallStubManager()
m_pVirtualCallStubManager = NULL;
}
}
+
#endif // !CROSSGEN_COMPILE
#endif // !DACCESS_COMPILE
diff --git a/src/vm/loaderallocator.hpp b/src/vm/loaderallocator.hpp
index 7237359666..e2c5dc5e1a 100644
--- a/src/vm/loaderallocator.hpp
+++ b/src/vm/loaderallocator.hpp
@@ -20,6 +20,9 @@ class FuncPtrStubs;
#include "qcall.h"
#include "ilstubcache.h"
+#include "callcounter.h"
+#include "methoddescbackpatchinfo.h"
+
#define VPTRU_LoaderAllocator 0x3200
enum LoaderAllocatorType
@@ -225,8 +228,6 @@ protected:
#endif
private:
- typedef SHash<PtrSetSHashTraits<LoaderAllocator * > > LoaderAllocatorSet;
-
LoaderAllocatorSet m_LoaderAllocatorReferences;
Volatile<UINT32> m_cReferences;
// This will be set by code:LoaderAllocator::Destroy (from managed scout finalizer) and signalizes that
@@ -265,6 +266,14 @@ private:
CrstExplicitInit m_InteropDataCrst;
#endif
+#ifdef FEATURE_TIERED_COMPILATION
+ CallCounter m_callCounter;
+#endif
+
+#ifndef CROSSGEN_COMPILE
+ MethodDescBackpatchInfoTracker m_methodDescBackpatchInfoTracker;
+#endif
+
#ifndef DACCESS_COMPILE
public:
@@ -570,6 +579,22 @@ public:
#endif // FEATURE_COMINTEROP
+#ifdef FEATURE_TIERED_COMPILATION
+public:
+ CallCounter* GetCallCounter()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_callCounter;
+ }
+#endif // FEATURE_TIERED_COMPILATION
+
+#ifndef CROSSGEN_COMPILE
+ MethodDescBackpatchInfoTracker *GetMethodDescBackpatchInfoTracker()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_methodDescBackpatchInfoTracker;
+ }
+#endif
}; // class LoaderAllocator
typedef VPTR(LoaderAllocator) PTR_LoaderAllocator;
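With the appdomain.hpp/ceeload changes above removing the per-AppDomain `CallCounter`, tiering and backpatch state is now reached through the `LoaderAllocator`. A small sketch of the new lookup path (the helper names are hypothetical; the accessors are the ones added in this hunk):

```
// Illustrative only: per-method tiering/backpatch state now hangs off the LoaderAllocator.
CallCounter *GetCallCounterSketch(MethodDesc *pMD)
{
    return pMD->GetLoaderAllocator()->GetCallCounter();
}

MethodDescBackpatchInfoTracker *GetBackpatchTrackerSketch(MethodDesc *pMD)
{
    return pMD->GetLoaderAllocator()->GetMethodDescBackpatchInfoTracker();
}
```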
diff --git a/src/vm/method.cpp b/src/vm/method.cpp
index 09bc2cbf9f..967d1560c9 100644
--- a/src/vm/method.cpp
+++ b/src/vm/method.cpp
@@ -542,6 +542,9 @@ PCODE MethodDesc::GetMethodEntryPoint()
}
CONTRACTL_END;
+ // Similarly to SetMethodEntryPoint(), it is up to the caller to ensure that calls to this function are appropriately
+ // synchronized
+
// Keep implementations of MethodDesc::GetMethodEntryPoint and MethodDesc::GetAddrOfSlot in sync!
g_IBCLogger.LogMethodDescAccess(this);
@@ -2181,6 +2184,12 @@ PCODE MethodDesc::TryGetMultiCallableAddrOfCode(CORINFO_ACCESS_FLAGS accessFlags
if (HasStableEntryPoint())
return GetStableEntryPoint();
+ if (IsVersionableWithVtableSlotBackpatch())
+ {
+ // Caller has to call via slot or allocate funcptr stub
+ return NULL;
+ }
+
// Force the creation of the precode if we would eventually got one anyway
if (MayHavePrecode())
return GetOrCreatePrecode()->GetEntryPoint();
@@ -2299,7 +2308,13 @@ BOOL MethodDesc::IsPointingToPrestub()
CONTRACTL_END;
if (!HasStableEntryPoint())
+ {
+ if (IsVersionableWithVtableSlotBackpatch())
+ {
+ return !IsRestored() || GetMethodEntryPoint() == GetTemporaryEntryPoint();
+ }
return TRUE;
+ }
if (!HasPrecode())
return FALSE;
@@ -4729,6 +4744,7 @@ void MethodDesc::InterlockedUpdateFlags2(BYTE bMask, BOOL fSet)
Precode* MethodDesc::GetOrCreatePrecode()
{
WRAPPER_NO_CONTRACT;
+ _ASSERTE(!IsVersionableWithVtableSlotBackpatch());
if (HasPrecode())
{
@@ -4791,6 +4807,229 @@ Precode* MethodDesc::GetOrCreatePrecode()
return Precode::GetPrecodeFromEntryPoint(addr);
}
+bool MethodDesc::DetermineAndSetIsEligibleForTieredCompilation()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef FEATURE_TIERED_COMPILATION
+#ifndef FEATURE_CODE_VERSIONING
+ #error Tiered compilation requires code versioning
+#endif
+
+ // Keep in-sync with MethodTableBuilder::NeedsNativeCodeSlot(bmtMDMethod * pMDMethod)
+ // to ensure native slots are available where needed.
+ if (
+ // Policy
+ g_pConfig->TieredCompilation() &&
+
+ // Functional requirement - NGEN images embed direct calls that we would be unable to detect and redirect
+ !IsZapped() &&
+
+ // Functional requirement - The NativeCodeSlot is required to hold the code pointer for the default code version because
+ // the method's entry point slot will point to a precode or to the current code entry point
+ HasNativeCodeSlot() &&
+
+ // Functional requirement - These methods have no IL that could be optimized
+ !IsWrapperStub() &&
+
+ // Functional requirement
+ CodeVersionManager::IsMethodSupported(this) &&
+
+ // Policy - Debugging works much better with unoptimized code
+ !CORDisableJITOptimizations(GetModule()->GetDebuggerInfoBits()) &&
+
+ // Policy - Tiered compilation is not disabled by the profiler
+ !CORProfilerDisableTieredCompilation())
+ {
+ m_bFlags2 |= enum_flag2_IsEligibleForTieredCompilation;
+ _ASSERTE(IsVersionableWithoutJumpStamp());
+ return true;
+ }
+#endif
+
+ return false;
+}
+
+#ifndef CROSSGEN_COMPILE
+
+void MethodDesc::RecordAndBackpatchEntryPointSlot(
+ LoaderAllocator *slotLoaderAllocator, // the loader allocator from which the slot's memory is allocated
+ TADDR slot,
+ EntryPointSlots::SlotType slotType)
+{
+ WRAPPER_NO_CONTRACT;
+
+ LoaderAllocator *mdLoaderAllocator = GetLoaderAllocator();
+ MethodDescBackpatchInfoTracker::ConditionalLockHolder lockHolder;
+
+ RecordAndBackpatchEntryPointSlot_Locked(
+ mdLoaderAllocator,
+ slotLoaderAllocator,
+ slot,
+ slotType,
+ GetEntryPointToBackpatch_Locked());
+}
+
+// This function tries to record a slot that would contain an entry point for the method, and backpatches the slot to contain
+// the method's current entry point. Once recorded, changes to the entry point due to tiering will cause the slot to be
+// backpatched as necessary.
+void MethodDesc::RecordAndBackpatchEntryPointSlot_Locked(
+ LoaderAllocator *mdLoaderAllocator,
+ LoaderAllocator *slotLoaderAllocator, // the loader allocator from which the slot's memory is allocated
+ TADDR slot,
+ EntryPointSlots::SlotType slotType,
+ PCODE currentEntryPoint)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(MethodDescBackpatchInfoTracker::IsLockedByCurrentThread());
+ _ASSERTE(mdLoaderAllocator != nullptr);
+ _ASSERTE(mdLoaderAllocator == GetLoaderAllocator());
+ _ASSERTE(slotLoaderAllocator != nullptr);
+ _ASSERTE(slot != NULL);
+ _ASSERTE(slotType < EntryPointSlots::SlotType_Count);
+ _ASSERTE(MayHaveEntryPointSlotsToBackpatch());
+
+ // The specified current entry point must actually be *current* in the sense that it must have been retrieved inside the
+ // lock, such that a recorded slot is guaranteed to point to the entry point at the time at which it was recorded, in order
+ // to synchronize with backpatching in MethodDesc::BackpatchEntryPointSlots(). If a slot pointing to an older entry point
+ // were to be recorded due to concurrency issues, it would not get backpatched to point to the more recent, actually
+ // current, entry point until another entry point change, which may never happen.
+ _ASSERTE(currentEntryPoint == GetEntryPointToBackpatch_Locked());
+
+ MethodDescBackpatchInfo *backpatchInfo =
+ mdLoaderAllocator->GetMethodDescBackpatchInfoTracker()->GetOrAddBackpatchInfo_Locked(this);
+ if (slotLoaderAllocator == mdLoaderAllocator)
+ {
+ // Entry point slots to backpatch are recorded in the backpatch info
+ backpatchInfo->GetSlots()->AddSlot_Locked(slot, slotType);
+ }
+ else
+ {
+ // Register the slot's loader allocator with the MethodDesc's backpatch info. Entry point slots to backpatch are
+ // recorded in the slot's LoaderAllocator.
+ backpatchInfo->AddDependentLoaderAllocator_Locked(slotLoaderAllocator);
+ slotLoaderAllocator
+ ->GetMethodDescBackpatchInfoTracker()
+ ->GetOrAddDependencyMethodDescEntryPointSlots_Locked(this)
+ ->AddSlot_Locked(slot, slotType);
+ }
+
+ EntryPointSlots::Backpatch_Locked(slot, slotType, currentEntryPoint);
+}
+
+void MethodDesc::BackpatchEntryPointSlots(PCODE entryPoint, bool isPrestubEntryPoint)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(entryPoint != NULL);
+ _ASSERTE(MayHaveEntryPointSlotsToBackpatch());
+ _ASSERTE(isPrestubEntryPoint == (entryPoint == GetPrestubEntryPointToBackpatch()));
+
+ LoaderAllocator *mdLoaderAllocator = GetLoaderAllocator();
+ MethodDescBackpatchInfoTracker *backpatchInfoTracker = mdLoaderAllocator->GetMethodDescBackpatchInfoTracker();
+ MethodDescBackpatchInfoTracker::ConditionalLockHolder lockHolder;
+
+ // Get the entry point to backpatch inside the lock to synchronize with backpatching in MethodDesc::DoBackpatch()
+ if (GetEntryPointToBackpatch_Locked() == entryPoint)
+ {
+ return;
+ }
+
+ if (IsVersionableWithVtableSlotBackpatch())
+ {
+ // Backpatch the func ptr stub if it was created
+ FuncPtrStubs *funcPtrStubs = mdLoaderAllocator->GetFuncPtrStubsNoCreate();
+ if (funcPtrStubs != nullptr)
+ {
+ Precode *funcPtrPrecode = funcPtrStubs->Lookup(this);
+ if (funcPtrPrecode != nullptr)
+ {
+ if (isPrestubEntryPoint)
+ {
+ funcPtrPrecode->ResetTargetInterlocked();
+ }
+ else
+ {
+ funcPtrPrecode->SetTargetInterlocked(entryPoint, FALSE /* fOnlyRedirectFromPrestub */);
+ }
+ }
+ }
+ }
+
+ MethodDescBackpatchInfo *backpatchInfo = backpatchInfoTracker->GetBackpatchInfo_Locked(this);
+ if (backpatchInfo != nullptr)
+ {
+ // Backpatch slots from the same loader allocator
+ backpatchInfo->GetSlots()->Backpatch_Locked(entryPoint);
+
+ // Backpatch slots from dependent loader allocators
+ backpatchInfo->ForEachDependentLoaderAllocator_Locked(
+ [&](LoaderAllocator *slotLoaderAllocator) // the loader allocator from which the slot's memory is allocated
+ {
+ _ASSERTE(slotLoaderAllocator != nullptr);
+ _ASSERTE(slotLoaderAllocator != mdLoaderAllocator);
+
+ EntryPointSlots *slotsToBackpatch =
+ slotLoaderAllocator
+ ->GetMethodDescBackpatchInfoTracker()
+ ->GetDependencyMethodDescEntryPointSlots_Locked(this);
+ if (slotsToBackpatch != nullptr)
+ {
+ slotsToBackpatch->Backpatch_Locked(entryPoint);
+ }
+ });
+ }
+
+ // Set the entry point to backpatch inside the lock to synchronize with backpatching in MethodDesc::DoBackpatch(), and set
+ // it last in case there are exceptions above, as setting the entry point indicates that all recorded slots have been
+ // backpatched
+ SetEntryPointToBackpatch_Locked(entryPoint);
+}
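A minimal standalone model of the record-then-backpatch protocol implemented by RecordAndBackpatchEntryPointSlot_Locked() and BackpatchEntryPointSlots() above may help: both operations happen under one lock so that every recorded slot always holds the method's current entry point. The sketch below is illustrative only and assumes a plain std::mutex and raw uintptr_t slots in place of the Crst-based tracker lock and EntryPointSlots.

#include <cstdint>
#include <mutex>
#include <vector>

struct MethodPublishState
{
    std::mutex lock;                         // stands in for MethodDescBackpatchInfoTracker's lock
    uintptr_t currentEntryPoint = 0;         // analogue of GetEntryPointToBackpatch_Locked()
    std::vector<uintptr_t *> recordedSlots;  // analogue of the recorded EntryPointSlots

    void RecordAndBackpatchSlot(uintptr_t *slot)
    {
        std::lock_guard<std::mutex> hold(lock);
        recordedSlots.push_back(slot);       // record the slot for future entry point changes
        *slot = currentEntryPoint;           // and immediately publish the current entry point
    }

    void BackpatchAll(uintptr_t newEntryPoint)
    {
        std::lock_guard<std::mutex> hold(lock);
        if (newEntryPoint == currentEntryPoint)
            return;                          // nothing changed, as in the early return above
        for (uintptr_t *slot : recordedSlots)
            *slot = newEntryPoint;           // every recorded slot moves to the new entry point
        currentEntryPoint = newEntryPoint;   // published value is updated last, mirroring the code above
    }
};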
+
+void MethodDesc::SetCodeEntryPoint(PCODE entryPoint)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(entryPoint != NULL);
+
+ if (MayHaveEntryPointSlotsToBackpatch())
+ {
+ BackpatchEntryPointSlots(entryPoint);
+ }
+ else if (IsVersionableWithoutJumpStamp())
+ {
+ _ASSERTE(IsVersionableWithPrecode());
+ GetOrCreatePrecode()->SetTargetInterlocked(entryPoint, FALSE /* fOnlyRedirectFromPrestub */);
+
+        // SetTargetInterlocked() would return false if it lost the race with another thread. That is fine; this thread can
+        // proceed as though it had succeeded, just as if it had updated the target and another thread had then updated the
+        // target again shortly afterwards.
+ }
+ else if (HasPrecode())
+ {
+ GetPrecode()->SetTargetInterlocked(entryPoint);
+ }
+ else if (!HasStableEntryPoint())
+ {
+ SetStableEntryPointInterlocked(entryPoint);
+ }
+}
+
+void MethodDesc::ResetCodeEntryPoint()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(IsVersionableWithoutJumpStamp());
+
+ if (MayHaveEntryPointSlotsToBackpatch())
+ {
+ BackpatchToResetEntryPointSlots();
+ return;
+ }
+
+ _ASSERTE(IsVersionableWithPrecode());
+ GetPrecode()->ResetTargetInterlocked();
+}
+
+#endif // !CROSSGEN_COMPILE
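As a usage sketch, a caller that activates or deactivates a code version for a method versionable without a jump stamp would funnel through the two functions above. The PublishCodeVersion helper below is hypothetical and not part of this change; it only illustrates which of the two functions applies in each case.

// Hypothetical helper, for illustration only
void PublishCodeVersion(MethodDesc *pMD, PCODE pNativeCode)
{
    _ASSERTE(pMD->IsVersionableWithoutJumpStamp());

    if (pNativeCode != NULL)
    {
        // Backpatches recorded entry point slots, or retargets the method's precode
        pMD->SetCodeEntryPoint(pNativeCode);
    }
    else
    {
        // Sends callers back to the prestub so the active code version is re-resolved
        pMD->ResetCodeEntryPoint();
    }
}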
+
//*******************************************************************************
BOOL MethodDesc::SetNativeCodeInterlocked(PCODE addr, PCODE pExpected /*=NULL*/)
{
@@ -4831,6 +5070,39 @@ BOOL MethodDesc::SetNativeCodeInterlocked(PCODE addr, PCODE pExpected /*=NULL*/)
return SetStableEntryPointInterlocked(addr);
}
+#ifndef CROSSGEN_COMPILE
+
+//*******************************************************************************
+void MethodDesc::SetMethodEntryPoint(PCODE addr)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(addr != NULL);
+
+ // Similarly to GetMethodEntryPoint(), it is up to the caller to ensure that calls to this function are appropriately
+ // synchronized. Currently, the only caller synchronizes with the following lock.
+ _ASSERTE(MethodDescBackpatchInfoTracker::IsLockedByCurrentThread());
+
+ TADDR pSlot = GetAddrOfSlot();
+
+ TADDR *slotAddr;
+ PCODE newVal;
+
+ if (IsVtableSlot())
+ {
+ newVal = MethodTable::VTableIndir2_t::GetRelative(pSlot, addr);
+ slotAddr = (TADDR *) EnsureWritablePages((MethodTable::VTableIndir2_t *) pSlot);
+ }
+ else
+ {
+ newVal = addr;
+ slotAddr = (TADDR *) EnsureWritablePages((PCODE *) pSlot);
+ }
+
+ *(TADDR *)slotAddr = newVal;
+}
+
+#endif // !CROSSGEN_COMPILE
+
//*******************************************************************************
BOOL MethodDesc::SetStableEntryPointInterlocked(PCODE addr)
{
@@ -4840,6 +5112,7 @@ BOOL MethodDesc::SetStableEntryPointInterlocked(PCODE addr)
} CONTRACTL_END;
_ASSERTE(!HasPrecode());
+ _ASSERTE(!IsVersionableWithoutJumpStamp());
PCODE pExpected = GetTemporaryEntryPoint();
TADDR pSlot = GetAddrOfSlot();
diff --git a/src/vm/method.hpp b/src/vm/method.hpp
index 68ed28f0bd..ac61d3391e 100644
--- a/src/vm/method.hpp
+++ b/src/vm/method.hpp
@@ -246,11 +246,13 @@ public:
inline PCODE GetStableEntryPoint()
{
LIMITED_METHOD_DAC_CONTRACT;
-
_ASSERTE(HasStableEntryPoint());
+ _ASSERTE(!IsVersionableWithVtableSlotBackpatch());
+
return GetMethodEntryPoint();
}
+ void SetMethodEntryPoint(PCODE addr);
BOOL SetStableEntryPointInterlocked(PCODE addr);
BOOL HasTemporaryEntryPoint();
@@ -258,6 +260,17 @@ public:
void SetTemporaryEntryPoint(LoaderAllocator *pLoaderAllocator, AllocMemTracker *pamTracker);
+ PCODE GetInitialEntryPointForCopiedSlot()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (IsVersionableWithVtableSlotBackpatch())
+ {
+ return GetTemporaryEntryPoint();
+ }
+ return GetMethodEntryPoint();
+ }
+
inline BOOL HasPrecode()
{
LIMITED_METHOD_DAC_CONTRACT;
@@ -275,7 +288,7 @@ public:
return pPrecode;
}
- inline BOOL MayHavePrecode()
+ inline bool MayHavePrecode()
{
CONTRACTL
{
@@ -285,7 +298,17 @@ public:
}
CONTRACTL_END
- return !MayHaveNativeCode() || IsVersionableWithPrecode();
+        // Ideally, methods that will not have native code (!MayHaveNativeCode() == true) should not be versionable. Currently
+        // that is not the case; in some situations 1/4 to 1/3 of versionable methods were seen to have no native code, though
+        // this does not add significant overhead. MayHaveNativeCode() appears to be an expensive check to do for each
+        // MethodDesc, even if it's done only once, and when it was attempted it showed up noticeably in startup performance
+        // profiles.
+ //
+ // In particular, methods versionable with vtable slot backpatch should not have a precode (in the sense HasPrecode()
+ // must return false) even if they will not have native code.
+ bool result = IsVersionableWithoutJumpStamp() ? IsVersionableWithPrecode() : !MayHaveNativeCode();
+ _ASSERTE(!result || !IsVersionableWithVtableSlotBackpatch());
+ return result;
}
void InterlockedUpdateFlags2(BYTE bMask, BOOL fSet);
@@ -495,6 +518,10 @@ public:
CallCounter* GetCallCounter();
#endif
+#ifndef CROSSGEN_COMPILE
+ MethodDescBackpatchInfoTracker* GetBackpatchInfoTracker();
+#endif
+
PTR_LoaderAllocator GetLoaderAllocator();
// GetLoaderAllocatorForCode returns the allocator with the responsibility for allocation.
@@ -1138,92 +1165,273 @@ public:
public:
- // TRUE iff it is possible to change the code this method will run using
- // the CodeVersionManager.
- // Note: EnC currently returns FALSE here because it uses its own seperate
- // scheme to manage versionability. We will likely want to converge them
- // at some point.
- BOOL IsVersionable()
+ // True iff it is possible to change the code this method will run using the CodeVersionManager. Note: EnC currently returns
+    // false here because it uses its own separate scheme to manage versionability. We will likely want to converge them at some
+ // point.
+ bool IsVersionable()
{
-#ifndef FEATURE_CODE_VERSIONING
- return FALSE;
-#else
- return IsVersionableWithPrecode() || IsVersionableWithJumpStamp();
-#endif
+ WRAPPER_NO_CONTRACT;
+ return IsVersionableWithoutJumpStamp() || IsVersionableWithJumpStamp();
}
- // If true, these methods version using the CodeVersionManager and
- // switch between different code versions by updating the target of the precode.
- // Note: EnC returns FALSE - even though it uses precode updates it does not
- // use the CodeVersionManager right now
- BOOL IsVersionableWithPrecode()
+ // True iff this method's code may be versioned using a technique other than JumpStamp
+ bool IsVersionableWithoutJumpStamp()
{
+ WRAPPER_NO_CONTRACT;
+
#ifdef FEATURE_CODE_VERSIONING
- return
- // policy: which things do we want to version with a precode if possible
- IsEligibleForTieredCompilation() &&
-
- // functional requirements:
- !IsZapped() && // NGEN directly invokes the pre-generated native code.
- // without necessarily going through the prestub or
- // precode
- HasNativeCodeSlot(); // the stable entry point will need to point at our
- // precode and not directly contain the native code.
+ return IsEligibleForTieredCompilation();
#else
- return FALSE;
+ return false;
#endif
}
- // If true, these methods version using the CodeVersionManager and switch between
- // different code versions by overwriting the first bytes of the method's initial
- // native code with a jmp instruction.
- BOOL IsVersionableWithJumpStamp()
+ // True iff all calls to the method should funnel through a Precode which can be updated to point to the current method
+ // body. This versioning technique can introduce more indirections than optimal but it has low memory overhead when a
+ // FixupPrecode may be shared with the temporary entry point that is created anyway.
+ bool IsVersionableWithPrecode()
{
+ WRAPPER_NO_CONTRACT;
+ return IsVersionableWithoutJumpStamp() && !Helper_IsEligibleForVersioningWithVtableSlotBackpatch();
+ }
+
+    // True iff all calls to the method should go through a backpatchable vtable slot or through a FuncPtrStub. This versioning
+    // technique eliminates the extra indirection of a precode but requires more memory to track all the appropriate slots.
+    // See Helper_IsEligibleForVersioningWithVtableSlotBackpatch() for more details.
+ bool IsVersionableWithVtableSlotBackpatch()
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsVersionableWithoutJumpStamp() && Helper_IsEligibleForVersioningWithVtableSlotBackpatch();
+ }
+
+    // True iff all calls to the method go to the default code and the prologue of that code will be overwritten with a jmp to
+ // other code if necessary. This is the only technique that can handle NGEN'ed code that embeds untracked direct calls
+ // between methods. It has much higher update overhead than other approaches because it needs runtime suspension to evacuate
+ // all threads from method prologues before a prologue can be patched. The patching is also not compatible with a debugger
+ // that may be trying to rewrite the same code bytes to add/remove a breakpoint.
+ bool IsVersionableWithJumpStamp()
+ {
+ WRAPPER_NO_CONTRACT;
+
#if defined(FEATURE_CODE_VERSIONING) && defined(FEATURE_JUMPSTAMP)
return
- // for native image code this is policy, but for jitted code it is a functional requirement
- // to ensure the prolog is sufficiently large
+ // Functional requirement / policy - Only one versioning technique may be used for a method, and versioning without
+ // a jump stamp is preferred
+ !IsVersionableWithoutJumpStamp() &&
+
+ // Functional requirement - If we aren't doing tiered compilation, ReJIT is currently the only other reason to make
+ // methods versionable. ReJIT is required to work even in NGEN images where the other versioning techniques aren't
+ // supported. If both ReJIT and tiered compilation are enabled then we prefer using the Precode or
+ // EntryPointSlotBackpatch techniques because they offer lower overhead method update performance and don't
+ // interfere with the debugger.
ReJitManager::IsReJITEnabled() &&
- // functional requirement - the runtime doesn't expect both options to be possible
- !IsVersionableWithPrecode() &&
-
- // functional requirement - we must be able to evacuate the prolog and the prolog must be big
- // enough, both of which are only designed to work on jitted code
+ // Functional requirement - We must be able to evacuate the prolog and the prolog must be big enough, both of which
+ // are only designed to work on jitted code
(IsIL() || IsNoMetadata()) &&
- !IsUnboxingStub() &&
- !IsInstantiatingStub() &&
+ !IsWrapperStub() &&
- // functional requirement - code version manager can't handle what would happen if the code
- // was collected
- !GetLoaderAllocator()->IsCollectible();
+ // Functional requirement
+ CodeVersionManager::IsMethodSupported(PTR_MethodDesc(this));
#else
- return FALSE;
+ return false;
#endif
}
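Taken together, the predicates above classify each method into at most one versioning technique. The standalone sketch below is a simplified restatement of that classification, not CoreCLR code; the real checks live on MethodDesc and also consult IsIL()/IsNoMetadata(), wrapper-stub status, and CodeVersionManager support.

enum class VersioningTechnique { None, Precode, VtableSlotBackpatch, JumpStamp };

// Mirrors IsVersionableWithPrecode(), IsVersionableWithVtableSlotBackpatch() and
// IsVersionableWithJumpStamp(): at most one technique applies to a given method.
VersioningTechnique Classify(bool versionableWithoutJumpStamp,
                             bool eligibleForVtableSlotBackpatch,
                             bool reJitEnabledAndMethodSupported)
{
    if (versionableWithoutJumpStamp)
    {
        return eligibleForVtableSlotBackpatch ? VersioningTechnique::VtableSlotBackpatch
                                              : VersioningTechnique::Precode;
    }
    return reJitEnabledAndMethodSupported ? VersioningTechnique::JumpStamp
                                          : VersioningTechnique::None;
}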
+public:
+
+ bool IsEligibleForTieredCompilation()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
#ifdef FEATURE_TIERED_COMPILATION
+ return (m_bFlags2 & enum_flag2_IsEligibleForTieredCompilation) != 0;
+#else
+ return false;
+#endif
+ }
+
// Is this method allowed to be recompiled and the entrypoint redirected so that we
// can optimize its performance? Eligibility is invariant for the lifetime of a method.
- BOOL IsEligibleForTieredCompilation()
+ bool DetermineAndSetIsEligibleForTieredCompilation();
+
+private:
+ // This function is not intended to be called in most places, and is named as such to discourage calling it accidentally
+ bool Helper_IsEligibleForVersioningWithVtableSlotBackpatch()
{
- LIMITED_METHOD_DAC_CONTRACT;
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(IsVersionableWithoutJumpStamp());
+ _ASSERTE(IsIL() || IsDynamicMethod());
+
+#if defined(FEATURE_CODE_VERSIONING) && !defined(CROSSGEN_COMPILE)
+ _ASSERTE(CodeVersionManager::IsMethodSupported(PTR_MethodDesc(this)));
+
+ // For a method eligible for code versioning and vtable slot backpatch:
+ // - It does not have a precode (HasPrecode() returns false)
+ // - It does not have a stable entry point (HasStableEntryPoint() returns false)
+ // - A call to the method may be:
+ // - An indirect call through the MethodTable's backpatchable vtable slot
+ // - A direct call to a backpatchable FuncPtrStub, perhaps through a JumpStub
+ // - For interface methods, an indirect call through the virtual stub dispatch (VSD) indirection cell to a
+ // backpatchable DispatchStub or a ResolveStub that refers to a backpatchable ResolveCacheEntry
+ // - The purpose is that typical calls to the method have no additional overhead when code versioning is enabled
+ //
+ // Recording and backpatching slots:
+ // - In order for all vtable slots for the method to be backpatchable:
+ // - A vtable slot initially points to the MethodDesc's temporary entry point, even when the method is inherited by
+ // a derived type (the slot's value is not copied from the parent)
+ // - The temporary entry point always points to the prestub and is never backpatched, in order to be able to
+ // discover new vtable slots through which the method may be called
+ // - The prestub, as part of DoBackpatch(), records any slots that are transitioned from the temporary entry point
+ // to the method's at-the-time current, non-prestub entry point
+ // - Any further changes to the method's entry point cause recorded slots to be backpatched in
+ // BackpatchEntryPointSlots()
+ // - In order for the FuncPtrStub to be backpatchable:
+ // - After the FuncPtrStub is created and exposed, it is patched to point to the method's at-the-time current entry
+ // point if necessary
+ // - Any further changes to the method's entry point cause the FuncPtrStub to be backpatched in
+ // BackpatchEntryPointSlots()
+ // - In order for VSD entities to be backpatchable:
+ // - A DispatchStub's entry point target is aligned and recorded for backpatching in BackpatchEntryPointSlots()
+ // - A ResolveCacheEntry's entry point target is recorded for backpatching in BackpatchEntryPointSlots()
+ //
+ // Slot lifetime and management of recorded slots:
+ // - A slot is recorded in the LoaderAllocator in which the slot is allocated, see
+ // RecordAndBackpatchEntryPointSlot()
+ // - An inherited slot that has a shorter lifetime than the MethodDesc, when recorded, needs to be accessible by the
+ // MethodDesc for backpatching, so the dependent LoaderAllocator with the slot to backpatch is also recorded in the
+ // MethodDesc's LoaderAllocator, see
+ // MethodDescBackpatchInfo::AddDependentLoaderAllocator_Locked()
+ // - At the end of a LoaderAllocator's lifetime, the LoaderAllocator is unregistered from dependency LoaderAllocators,
+ // see MethodDescBackpatchInfoTracker::ClearDependencyMethodDescEntryPointSlots()
+ // - When a MethodDesc's entry point changes, backpatching also includes iterating over recorded dependent
+ // LoaderAllocators to backpatch the relevant slots recorded there, see BackpatchEntryPointSlots()
+ //
+ // Synchronization between entry point changes and backpatching slots
+ // - A global lock is used to ensure that all recorded backpatchable slots corresponding to a MethodDesc point to the
+ // same entry point, see DoBackpatch() and BackpatchEntryPointSlots() for examples
+ //
+ // Typical slot value transitions when tiered compilation is enabled:
+ // - Initially, the slot contains the method's temporary entry point, which always points to the prestub (see above)
+ // - After the tier 0 JIT completes, the slot is transitioned to the tier 0 entry point, and the slot is recorded for
+ // backpatching
+ // - When tiered compilation decides to begin counting calls for the method, the slot is transitioned to the temporary
+ // entry point (call counting currently happens in the prestub)
+ // - When the call count reaches the tier 1 threshold, the slot is transitioned to the tier 0 entry point and a tier 1
+ // JIT is scheduled
+ // - After the tier 1 JIT completes, the slot is transitioned to the tier 1 entry point
+
+ return
+ // Policy
+ g_pConfig->BackpatchEntryPointSlots() &&
- // Keep in-sync with MethodTableBuilder::NeedsNativeCodeSlot(bmtMDMethod * pMDMethod)
- // to ensure native slots are available where needed.
- return g_pConfig->TieredCompilation() &&
- !IsZapped() &&
- !IsEnCMethod() &&
- HasNativeCodeSlot() &&
- !IsUnboxingStub() &&
- !IsInstantiatingStub() &&
- !IsDynamicMethod() &&
- !GetLoaderAllocator()->IsCollectible() &&
- !CORDisableJITOptimizations(GetModule()->GetDebuggerInfoBits()) &&
- !CORProfilerDisableTieredCompilation();
+ // Functional requirement - The entry point must be through a vtable slot in the MethodTable that may be recorded
+ // and backpatched
+ IsVtableSlot() &&
+
+ // Functional requirement - True interface methods are not backpatched, see DoBackpatch()
+ !(IsInterface() && !IsStatic());
+#else
+ // Entry point slot backpatch is disabled for CrossGen
+ return false;
+#endif
}
+
+public:
+ bool MayHaveEntryPointSlotsToBackpatch()
+ {
+ WRAPPER_NO_CONTRACT;
+
+#ifndef CROSSGEN_COMPILE
+ // This is the only case currently. In the future, a method that does not have a vtable slot may still record entry
+ // point slots that need to be backpatched on entry point change, and in such cases the conditions here may be changed.
+ bool result = IsVersionableWithVtableSlotBackpatch();
+
+ // Cases where this function returns true are not expected to need to handle JumpStamp versioning in the future
+ _ASSERTE(!result || !IsVersionableWithJumpStamp());
+ return result;
+#else
+ // Entry point slot backpatch is disabled for CrossGen
+ return false;
#endif
+ }
+
+#ifndef CROSSGEN_COMPILE
+
+private:
+ // Gets the prestub entry point to use for backpatching. Entry point slot backpatch uses this entry point as an oracle to
+ // determine if the entry point actually changed and warrants backpatching.
+ PCODE GetPrestubEntryPointToBackpatch()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(MayHaveEntryPointSlotsToBackpatch());
+
+ // At the moment this is the only case, see MayHaveEntryPointSlotsToBackpatch()
+ _ASSERTE(IsVersionableWithVtableSlotBackpatch());
+ return GetTemporaryEntryPoint();
+ }
+
+ // Gets the entry point stored in the primary storage location for backpatching. Entry point slot backpatch uses this entry
+ // point as an oracle to determine if the entry point actually changed and warrants backpatching.
+ PCODE GetEntryPointToBackpatch_Locked()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(MethodDescBackpatchInfoTracker::IsLockedByCurrentThread());
+ _ASSERTE(MayHaveEntryPointSlotsToBackpatch());
+
+ // At the moment this is the only case, see MayHaveEntryPointSlotsToBackpatch()
+ _ASSERTE(IsVersionableWithVtableSlotBackpatch());
+ return GetMethodEntryPoint();
+ }
+
+ // Sets the entry point stored in the primary storage location for backpatching. Entry point slot backpatch uses this entry
+ // point as an oracle to determine if the entry point actually changed and warrants backpatching.
+ void SetEntryPointToBackpatch_Locked(PCODE entryPoint)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(MethodDescBackpatchInfoTracker::IsLockedByCurrentThread());
+ _ASSERTE(entryPoint != NULL);
+ _ASSERTE(MayHaveEntryPointSlotsToBackpatch());
+
+ // At the moment this is the only case, see MayHaveEntryPointSlotsToBackpatch(). If that changes in the future, this
+ // function may have to handle other cases in SetCodeEntryPoint().
+ _ASSERTE(IsVersionableWithVtableSlotBackpatch());
+ SetMethodEntryPoint(entryPoint);
+ }
+
+public:
+ void RecordAndBackpatchEntryPointSlot(LoaderAllocator *slotLoaderAllocator, TADDR slot, EntryPointSlots::SlotType slotType);
+private:
+ void RecordAndBackpatchEntryPointSlot_Locked(LoaderAllocator *mdLoaderAllocator, LoaderAllocator *slotLoaderAllocator, TADDR slot, EntryPointSlots::SlotType slotType, PCODE currentEntryPoint);
+public:
+    void BackpatchEntryPointSlots(PCODE entryPoint)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(entryPoint != GetPrestubEntryPointToBackpatch());
+ _ASSERTE(MayHaveEntryPointSlotsToBackpatch());
+
+ BackpatchEntryPointSlots(entryPoint, false /* isPrestubEntryPoint */);
+ }
+
+    void BackpatchToResetEntryPointSlots()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(MayHaveEntryPointSlotsToBackpatch());
+
+ BackpatchEntryPointSlots(GetPrestubEntryPointToBackpatch(), true /* isPrestubEntryPoint */);
+ }
+
+private:
+ void BackpatchEntryPointSlots(PCODE entryPoint, bool isPrestubEntryPoint);
+
+public:
+ void SetCodeEntryPoint(PCODE entryPoint);
+ void ResetCodeEntryPoint();
+
+#endif // !CROSSGEN_COMPILE
+
+public:
bool RequestedAggressiveOptimization()
{
WRAPPER_NO_CONTRACT;
@@ -1250,11 +1458,7 @@ public:
return false;
#endif
- return
-#ifdef FEATURE_TIERED_COMPILATION
- !IsEligibleForTieredCompilation() &&
-#endif
- !IsEnCMethod();
+ return !IsVersionableWithoutJumpStamp() && !IsEnCMethod();
}
//Is this method currently pointing to native code that will never change?
@@ -1657,7 +1861,8 @@ protected:
enum_flag2_IsJitIntrinsic = 0x10, // Jit may expand method as an intrinsic
- // unused = 0x20,
+ enum_flag2_IsEligibleForTieredCompilation = 0x20,
+
// unused = 0x40,
// unused = 0x80,
};
diff --git a/src/vm/method.inl b/src/vm/method.inl
index 9d55ae9260..f72d42f4c8 100644
--- a/src/vm/method.inl
+++ b/src/vm/method.inl
@@ -182,7 +182,15 @@ inline CodeVersionManager * MethodDesc::GetCodeVersionManager()
inline CallCounter * MethodDesc::GetCallCounter()
{
LIMITED_METHOD_CONTRACT;
- return GetModule()->GetCallCounter();
+ return GetLoaderAllocator()->GetCallCounter();
+}
+#endif
+
+#ifndef CROSSGEN_COMPILE
+inline MethodDescBackpatchInfoTracker * MethodDesc::GetBackpatchInfoTracker()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetLoaderAllocator()->GetMethodDescBackpatchInfoTracker();
}
#endif
diff --git a/src/vm/methoddescbackpatchinfo.cpp b/src/vm/methoddescbackpatchinfo.cpp
new file mode 100644
index 0000000000..386786c6ba
--- /dev/null
+++ b/src/vm/methoddescbackpatchinfo.cpp
@@ -0,0 +1,238 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include "common.h"
+
+#include "excep.h"
+#include "log.h"
+#include "methoddescbackpatchinfo.h"
+
+#ifdef CROSSGEN_COMPILE
+ #error This file is not expected to be included into CrossGen
+#endif
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// EntryPointSlots
+
+#ifndef DACCESS_COMPILE
+
+void EntryPointSlots::Backpatch_Locked(PCODE entryPoint)
+{
+ WRAPPER_NO_CONTRACT;
+ static_assert_no_msg(SlotType_Count <= sizeof(INT32));
+ _ASSERTE(MethodDescBackpatchInfoTracker::IsLockedByCurrentThread());
+ _ASSERTE(entryPoint != NULL);
+
+ TADDR *slots = m_slots.GetElements();
+ COUNT_T slotCount = m_slots.GetCount();
+ for (COUNT_T i = 0; i < slotCount; ++i)
+ {
+ TADDR slot = slots[i];
+ SlotType slotType = (SlotType)(slot & SlotType_Mask);
+ slot ^= slotType;
+ Backpatch_Locked(slot, slotType, entryPoint);
+ }
+}
+
+void EntryPointSlots::Backpatch_Locked(TADDR slot, SlotType slotType, PCODE entryPoint)
+{
+ WRAPPER_NO_CONTRACT;
+ static_assert_no_msg(SlotType_Count <= sizeof(INT32));
+ _ASSERTE(MethodDescBackpatchInfoTracker::IsLockedByCurrentThread());
+ _ASSERTE(slot != NULL);
+ _ASSERTE(!(slot & SlotType_Mask));
+ _ASSERTE(slotType >= SlotType_Normal);
+ _ASSERTE(slotType < SlotType_Count);
+ _ASSERTE(entryPoint != NULL);
+ _ASSERTE(IS_ALIGNED((SIZE_T)slot, GetRequiredSlotAlignment(slotType)));
+
+ switch (slotType)
+ {
+ case SlotType_Normal:
+ *(PCODE *)slot = entryPoint;
+ break;
+
+ case SlotType_Vtable:
+ ((MethodTable::VTableIndir2_t *)slot)->SetValue(entryPoint);
+ break;
+
+ case SlotType_Executable:
+ *(PCODE *)slot = entryPoint;
+ goto Flush;
+
+ case SlotType_ExecutableRel32:
+ // A rel32 may require a jump stub on some architectures, and is currently not supported
+ _ASSERTE(sizeof(void *) <= 4);
+
+ *(PCODE *)slot = entryPoint - ((PCODE)slot + sizeof(PCODE));
+ // fall through
+
+ Flush:
+ ClrFlushInstructionCache((LPCVOID)slot, sizeof(PCODE));
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+#endif // !DACCESS_COMPILE
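AddSlot_Locked() stores the SlotType in the low bits of the slot address (slots are at least 4-byte aligned), and Backpatch_Locked() above unpacks it again. The standalone sketch below illustrates that packing with plain integer types instead of TADDR; it is not CoreCLR code.

#include <cassert>
#include <cstdint>

enum SlotType : uint8_t
{
    SlotType_Normal, SlotType_Vtable, SlotType_Executable, SlotType_ExecutableRel32,
    SlotType_Count,
    SlotType_Mask = 0x3   // the two low bits of an aligned slot address
};

uintptr_t EncodeSlot(uintptr_t slot, SlotType type)
{
    assert((slot & SlotType_Mask) == 0);   // alignment guarantees the tag bits are clear
    return slot | type;                    // matches m_slots.Append(slot | slotType)
}

void DecodeSlot(uintptr_t tagged, uintptr_t &slot, SlotType &type)
{
    type = (SlotType)(tagged & SlotType_Mask);   // matches (SlotType)(slot & SlotType_Mask)
    slot = tagged ^ type;                        // matches slot ^= slotType
}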
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// MethodDescBackpatchInfo
+
+#ifndef DACCESS_COMPILE
+
+void MethodDescBackpatchInfo::AddDependentLoaderAllocator_Locked(LoaderAllocator *dependentLoaderAllocator)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(MethodDescBackpatchInfoTracker::IsLockedByCurrentThread());
+ _ASSERTE(m_methodDesc != nullptr);
+ _ASSERTE(dependentLoaderAllocator != nullptr);
+ _ASSERTE(dependentLoaderAllocator != m_methodDesc->GetLoaderAllocator());
+
+ LoaderAllocatorSet *set = m_dependentLoaderAllocators;
+ if (set != nullptr)
+ {
+ if (set->Lookup(dependentLoaderAllocator) != nullptr)
+ {
+ return;
+ }
+ set->Add(dependentLoaderAllocator);
+ return;
+ }
+
+ NewHolder<LoaderAllocatorSet> setHolder = new LoaderAllocatorSet();
+ setHolder->Add(dependentLoaderAllocator);
+ m_dependentLoaderAllocators = setHolder.Extract();
+}
+
+void MethodDescBackpatchInfo::RemoveDependentLoaderAllocator_Locked(LoaderAllocator *dependentLoaderAllocator)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(MethodDescBackpatchInfoTracker::IsLockedByCurrentThread());
+ _ASSERTE(m_methodDesc != nullptr);
+ _ASSERTE(dependentLoaderAllocator != nullptr);
+ _ASSERTE(dependentLoaderAllocator != m_methodDesc->GetLoaderAllocator());
+ _ASSERTE(m_dependentLoaderAllocators != nullptr);
+ _ASSERTE(m_dependentLoaderAllocators->Lookup(dependentLoaderAllocator) == dependentLoaderAllocator);
+
+ m_dependentLoaderAllocators->Remove(dependentLoaderAllocator);
+}
+
+#endif // !DACCESS_COMPILE
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// MethodDescBackpatchInfoTracker
+
+CrstStatic MethodDescBackpatchInfoTracker::s_lock;
+
+#ifndef DACCESS_COMPILE
+
+void MethodDescBackpatchInfoTracker::StaticInitialize()
+{
+ WRAPPER_NO_CONTRACT;
+ s_lock.Init(CrstMethodDescBackpatchInfoTracker);
+}
+
+#endif // !DACCESS_COMPILE
+
+#ifdef _DEBUG
+
+bool MethodDescBackpatchInfoTracker::IsLockedByCurrentThread()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifndef DACCESS_COMPILE
+ return !!s_lock.OwnedByCurrentThread();
+#else
+ return true;
+#endif
+}
+
+bool MethodDescBackpatchInfoTracker::MayHaveEntryPointSlotsToBackpatch(PTR_MethodDesc methodDesc)
+{
+    // The only purpose of this method is to allow asserts in inline functions defined in the .h file, where MethodDesc is not
+    // yet fully defined
+
+ WRAPPER_NO_CONTRACT;
+ return methodDesc->MayHaveEntryPointSlotsToBackpatch();
+}
+
+#endif // _DEBUG
+
+#ifndef DACCESS_COMPILE
+
+MethodDescBackpatchInfo *MethodDescBackpatchInfoTracker::AddBackpatchInfo_Locked(MethodDesc *methodDesc)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(IsLockedByCurrentThread());
+ _ASSERTE(methodDesc != nullptr);
+ _ASSERTE(methodDesc->MayHaveEntryPointSlotsToBackpatch());
+ _ASSERTE(m_backpatchInfoHash.Lookup(methodDesc) == nullptr);
+
+ NewHolder<MethodDescBackpatchInfo> backpatchInfoHolder = new MethodDescBackpatchInfo(methodDesc);
+ m_backpatchInfoHash.Add(backpatchInfoHolder);
+ return backpatchInfoHolder.Extract();
+}
+
+EntryPointSlots *MethodDescBackpatchInfoTracker::GetDependencyMethodDescEntryPointSlots_Locked(MethodDesc *methodDesc)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(IsLockedByCurrentThread());
+ _ASSERTE(methodDesc != nullptr);
+ _ASSERTE(methodDesc->MayHaveEntryPointSlotsToBackpatch());
+
+ MethodDescEntryPointSlots *methodDescSlots =
+ m_dependencyMethodDescEntryPointSlotsHash.Lookup(methodDesc);
+ return methodDescSlots == nullptr ? nullptr : methodDescSlots->GetSlots();
+}
+
+EntryPointSlots *MethodDescBackpatchInfoTracker::GetOrAddDependencyMethodDescEntryPointSlots_Locked(MethodDesc *methodDesc)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(IsLockedByCurrentThread());
+ _ASSERTE(methodDesc != nullptr);
+ _ASSERTE(methodDesc->MayHaveEntryPointSlotsToBackpatch());
+
+ MethodDescEntryPointSlots *methodDescSlots = m_dependencyMethodDescEntryPointSlotsHash.Lookup(methodDesc);
+ if (methodDescSlots != nullptr)
+ {
+ return methodDescSlots->GetSlots();
+ }
+
+ NewHolder<MethodDescEntryPointSlots> methodDescSlotsHolder = new MethodDescEntryPointSlots(methodDesc);
+ m_dependencyMethodDescEntryPointSlotsHash.Add(methodDescSlotsHolder);
+ return methodDescSlotsHolder.Extract()->GetSlots();
+}
+
+void MethodDescBackpatchInfoTracker::ClearDependencyMethodDescEntryPointSlots(LoaderAllocator *loaderAllocator)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(loaderAllocator != nullptr);
+ _ASSERTE(loaderAllocator->GetMethodDescBackpatchInfoTracker() == this);
+
+ ConditionalLockHolder lockHolder;
+
+ for (MethodDescEntryPointSlotsHash::Iterator
+ it = m_dependencyMethodDescEntryPointSlotsHash.Begin(),
+ itEnd = m_dependencyMethodDescEntryPointSlotsHash.End();
+ it != itEnd;
+ ++it)
+ {
+ MethodDesc *methodDesc = (*it)->GetMethodDesc();
+ MethodDescBackpatchInfo *backpatchInfo = methodDesc->GetBackpatchInfoTracker()->GetBackpatchInfo_Locked(methodDesc);
+ if (backpatchInfo != nullptr)
+ {
+ backpatchInfo->RemoveDependentLoaderAllocator_Locked(loaderAllocator);
+ }
+ }
+
+ m_dependencyMethodDescEntryPointSlotsHash.RemoveAll();
+}
+
+#endif // !DACCESS_COMPILE
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
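The cross-LoaderAllocator bookkeeping above can be summarized with a small standalone model: a slot allocated in a dependent, shorter-lived allocator is recorded where the slot lives, while the method's own allocator remembers which dependent allocators to visit when backpatching, and a dying allocator unhooks itself as in ClearDependencyMethodDescEntryPointSlots(). The types and containers below are simplifications, not CoreCLR code, and locking is omitted.

#include <cstdint>
#include <map>
#include <set>
#include <vector>

struct Method;

struct Allocator   // stands in for LoaderAllocator
{
    // Slots allocated here that belong to methods living in other allocators
    // (analogue of m_dependencyMethodDescEntryPointSlotsHash)
    std::map<Method *, std::vector<uintptr_t *>> dependencySlots;
};

struct Method      // stands in for MethodDesc
{
    Allocator *home;                            // the method's own LoaderAllocator
    std::set<Allocator *> dependentAllocators;  // analogue of MethodDescBackpatchInfo's set
    std::vector<uintptr_t *> homeSlots;         // slots allocated in the method's own allocator

    void RecordSlot(Allocator *slotAllocator, uintptr_t *slot)
    {
        if (slotAllocator == home)
        {
            homeSlots.push_back(slot);          // recorded directly in the method's backpatch info
            return;
        }
        dependentAllocators.insert(slotAllocator);             // remembered for backpatching
        slotAllocator->dependencySlots[this].push_back(slot);  // recorded where the slot lives
    }

    void OnAllocatorDestroyed(Allocator *dying)
    {
        dependentAllocators.erase(dying);       // analogue of RemoveDependentLoaderAllocator_Locked
    }
};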
diff --git a/src/vm/methoddescbackpatchinfo.h b/src/vm/methoddescbackpatchinfo.h
new file mode 100644
index 0000000000..3aa2b13255
--- /dev/null
+++ b/src/vm/methoddescbackpatchinfo.h
@@ -0,0 +1,416 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#pragma once
+
+#include "debugmacrosext.h"
+
+// MethodDescBackpatchInfoTracker:
+// - Root container for all other types in this file
+// - There is one instance per LoaderAllocator
+// - Contains a collection of MethodDescBackpatchInfo objects
+// - Contains a collection of MethodDescEntryPointSlots objects
+//
+// MethodDescBackpatchInfo:
+// - Container for backpatch information for a MethodDesc allocated in the same LoaderAllocator
+// - Contains an EntryPointSlots collection that contains slots allocated in the same LoaderAllocator. These are slots
+// recorded for backpatching when the MethodDesc's code entry point changes.
+// - Contains a LoaderAllocatorSet collection that contains dependent LoaderAllocators that in turn have slots recorded for
+// backpatching when the MethodDesc's entry point changes. These are slots associated with the MethodDesc but allocated and
+//   recorded in a LoaderAllocator other than the MethodDesc's LoaderAllocator.
+//
+// EntryPointSlots and MethodDescEntryPointSlots
+// - Collection of slots recorded for backpatching
+// - There is one instance per MethodDescBackpatchInfo for slots allocated in the MethodDesc's LoaderAllocator
+// - There is one instance per MethodDesc in MethodDescBackpatchInfoTracker, for slots allocated in LoaderAllocators that are
+// dependent on the MethodDesc's LoaderAllocator. The dependent LoaderAllocators are also recorded in the
+//   MethodDescBackpatchInfo associated with the MethodDesc's LoaderAllocator.
+
+typedef SHash<PtrSetSHashTraits<LoaderAllocator *>> LoaderAllocatorSet;
+
+#ifndef CROSSGEN_COMPILE
+
+#define DISABLE_COPY(T) \
+ T(const T &) = delete; \
+ T &operator =(const T &) = delete
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// EntryPointSlots
+
+// See comment at the top of methoddescbackpatchinfo.h for a description of this and related data structures
+class EntryPointSlots
+{
+public:
+ enum SlotType : UINT8
+ {
+ SlotType_Normal, // pointer-sized value not in executable code
+ SlotType_Vtable, // pointer-sized value not in executable code, may be relative based on MethodTable::VTableIndir2_t
+ SlotType_Executable, // pointer-sized value in executable code
+ SlotType_ExecutableRel32, // 32-bit value relative to the end of the slot, in executable code
+
+ SlotType_Count,
+ SlotType_Mask = SlotType_Vtable | SlotType_Executable | SlotType_ExecutableRel32
+ };
+
+private:
+ typedef SArray<TADDR> SlotArray;
+
+private:
+ SlotArray m_slots;
+
+public:
+ EntryPointSlots()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+#ifndef DACCESS_COMPILE
+private:
+ static SIZE_T GetRequiredSlotAlignment(SlotType slotType)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(slotType >= SlotType_Normal);
+ _ASSERTE(slotType < SlotType_Count);
+
+ return slotType == SlotType_ExecutableRel32 ? sizeof(INT32) : sizeof(void *);
+ }
+
+public:
+ void AddSlot_Locked(TADDR slot, SlotType slotType);
+ void Backpatch_Locked(PCODE entryPoint);
+ static void Backpatch_Locked(TADDR slot, SlotType slotType, PCODE entryPoint);
+#endif
+
+ DISABLE_COPY(EntryPointSlots);
+};
+
+// See comment at the top of methoddescbackpatchinfo.h for a description of this and related data structures
+class MethodDescEntryPointSlots
+{
+private:
+ MethodDesc *m_methodDesc;
+
+    // This field and its data are protected by MethodDescBackpatchInfoTracker's lock
+ EntryPointSlots m_slots;
+
+public:
+ MethodDescEntryPointSlots(MethodDesc *methodDesc) : m_methodDesc(methodDesc)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(methodDesc != nullptr);
+ }
+
+public:
+ MethodDesc *GetMethodDesc() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_methodDesc;
+ }
+
+#ifndef DACCESS_COMPILE
+ EntryPointSlots *GetSlots()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(m_methodDesc != nullptr);
+
+ return &m_slots;
+ }
+#endif
+
+ DISABLE_COPY(MethodDescEntryPointSlots);
+};
+
+class MethodDescEntryPointSlotsHashTraits
+ : public DeleteElementsOnDestructSHashTraits<NoRemoveSHashTraits<DefaultSHashTraits<MethodDescEntryPointSlots *>>>
+{
+public:
+ typedef DeleteElementsOnDestructSHashTraits<NoRemoveSHashTraits<DefaultSHashTraits<MethodDescEntryPointSlots *>>> Base;
+ typedef Base::element_t element_t;
+ typedef Base::count_t count_t;
+
+ typedef MethodDesc *key_t;
+
+ static key_t GetKey(element_t e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return e->GetMethodDesc();
+ }
+
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return k1 == k2;
+ }
+
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (count_t)((size_t)dac_cast<TADDR>(k) >> 2);
+ }
+
+ static const element_t Null() { LIMITED_METHOD_CONTRACT; return nullptr; }
+ static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return e == nullptr; }
+};
+
+typedef SHash<MethodDescEntryPointSlotsHashTraits> MethodDescEntryPointSlotsHash;
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// MethodDescBackpatchInfo
+
+// See comment at the top of methoddescbackpatchinfo.h for a description of this and related data structures
+class MethodDescBackpatchInfo
+{
+private:
+ MethodDesc *m_methodDesc;
+
+ // Entry point slots that need to be backpatched when the method's entry point changes. This may include vtable slots, slots
+ // from virtual stub dispatch for interface methods (slots from dispatch stubs and resolve cache entries), etc. This
+    // collection only contains slots allocated in this MethodDesc's LoaderAllocator. This field and its data are protected by
+    // MethodDescBackpatchInfoTracker's lock.
+ EntryPointSlots m_slots;
+
+    // A set of LoaderAllocators from which slots have been allocated that are associated with this MethodDesc and have been
+    // recorded for backpatching. For example, a derived type in a shorter-lifetime LoaderAllocator that inherits a MethodDesc
+    // from a longer-lifetime base type would have its slot recorded in the slot's LoaderAllocator, and that LoaderAllocator
+    // would be recorded here in the MethodDesc's LoaderAllocator. This field and its data are protected by
+    // MethodDescBackpatchInfoTracker's lock.
+ LoaderAllocatorSet *m_dependentLoaderAllocators;
+
+public:
+ MethodDescBackpatchInfo(MethodDesc *methodDesc = nullptr);
+
+#ifndef DACCESS_COMPILE
+public:
+ ~MethodDescBackpatchInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ LoaderAllocatorSet *set = m_dependentLoaderAllocators;
+ if (set != nullptr)
+ {
+ delete set;
+ }
+ }
+#endif
+
+public:
+ MethodDesc *GetMethodDesc() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_methodDesc;
+ }
+
+#ifndef DACCESS_COMPILE
+public:
+ EntryPointSlots *GetSlots()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(m_methodDesc != nullptr);
+
+ return &m_slots;
+ }
+
+public:
+ template<class Visit> void ForEachDependentLoaderAllocator_Locked(Visit visit);
+ void AddDependentLoaderAllocator_Locked(LoaderAllocator *dependentLoaderAllocator);
+ void RemoveDependentLoaderAllocator_Locked(LoaderAllocator *dependentLoaderAllocator);
+#endif
+
+ DISABLE_COPY(MethodDescBackpatchInfo);
+};
+
+class MethodDescBackpatchInfoHashTraits
+ : public DeleteElementsOnDestructSHashTraits<NoRemoveSHashTraits<DefaultSHashTraits<MethodDescBackpatchInfo *>>>
+{
+public:
+ typedef DeleteElementsOnDestructSHashTraits<NoRemoveSHashTraits<DefaultSHashTraits<MethodDescBackpatchInfo *>>> Base;
+ typedef Base::element_t element_t;
+ typedef Base::count_t count_t;
+
+ typedef MethodDesc *key_t;
+
+ static key_t GetKey(element_t e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return e->GetMethodDesc();
+ }
+
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return k1 == k2;
+ }
+
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (count_t)((size_t)dac_cast<TADDR>(k) >> 2);
+ }
+
+ static const element_t Null() { LIMITED_METHOD_CONTRACT; return nullptr; }
+ static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return e == nullptr; }
+};
+
+typedef SHash<MethodDescBackpatchInfoHashTraits> MethodDescBackpatchInfoHash;
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// MethodDescBackpatchInfoTracker
+
+// See comment at the top of methoddescbackpatchinfo.h for a description of this and related data structures
+class MethodDescBackpatchInfoTracker
+{
+private:
+ static CrstStatic s_lock;
+
+    // Contains information about slots associated with a MethodDesc that were recorded for backpatching. This field and its
+    // data are protected by s_lock.
+ MethodDescBackpatchInfoHash m_backpatchInfoHash;
+
+ // Contains slots associated with a MethodDesc from a dependency LoaderAllocator, which are recorded for backpatching when
+    // the MethodDesc's entry point changes. This field and its data are protected by s_lock.
+ MethodDescEntryPointSlotsHash m_dependencyMethodDescEntryPointSlotsHash;
+
+#ifndef DACCESS_COMPILE
+public:
+ static void StaticInitialize();
+#endif
+
+#ifdef _DEBUG
+public:
+ static bool IsLockedByCurrentThread();
+#endif
+
+public:
+ class ConditionalLockHolder : CrstHolderWithState
+ {
+ public:
+ ConditionalLockHolder(bool acquireLock = true)
+ : CrstHolderWithState(
+#ifndef DACCESS_COMPILE
+ acquireLock ? &MethodDescBackpatchInfoTracker::s_lock : nullptr
+#else
+ nullptr
+#endif
+ )
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+ };
+
+public:
+ MethodDescBackpatchInfoTracker()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+#ifdef _DEBUG
+public:
+ static bool MayHaveEntryPointSlotsToBackpatch(PTR_MethodDesc methodDesc);
+#endif
+
+#ifndef DACCESS_COMPILE
+public:
+ MethodDescBackpatchInfo *GetBackpatchInfo_Locked(MethodDesc *methodDesc) const
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(IsLockedByCurrentThread());
+ _ASSERTE(methodDesc != nullptr);
+ _ASSERTE(MayHaveEntryPointSlotsToBackpatch(methodDesc));
+
+ return m_backpatchInfoHash.Lookup(methodDesc);
+ }
+
+ MethodDescBackpatchInfo *GetOrAddBackpatchInfo_Locked(MethodDesc *methodDesc)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(IsLockedByCurrentThread());
+ _ASSERTE(methodDesc != nullptr);
+ _ASSERTE(MayHaveEntryPointSlotsToBackpatch(methodDesc));
+
+ MethodDescBackpatchInfo *backpatchInfo = m_backpatchInfoHash.Lookup(methodDesc);
+ if (backpatchInfo != nullptr)
+ {
+ return backpatchInfo;
+ }
+ return AddBackpatchInfo_Locked(methodDesc);
+ }
+
+private:
+ MethodDescBackpatchInfo *AddBackpatchInfo_Locked(MethodDesc *methodDesc);
+
+public:
+ bool HasDependencyMethodDescEntryPointSlots() const
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_dependencyMethodDescEntryPointSlotsHash.GetCount() != 0;
+ }
+
+ EntryPointSlots *GetDependencyMethodDescEntryPointSlots_Locked(MethodDesc *methodDesc);
+ EntryPointSlots *GetOrAddDependencyMethodDescEntryPointSlots_Locked(MethodDesc *methodDesc);
+ void ClearDependencyMethodDescEntryPointSlots(LoaderAllocator *loaderAllocator);
+#endif
+
+ friend class ConditionalLockHolder;
+
+ DISABLE_COPY(MethodDescBackpatchInfoTracker);
+};
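ConditionalLockHolder above acquires the tracker's lock only when asked, which lets MethodDesc::DoBackpatch() pay for synchronization only for methods versionable with vtable slot backpatch. The sketch below shows the same idea with a std::mutex; it is an assumed simplification, since the real holder is built on CrstHolderWithState.

#include <mutex>

static std::mutex g_trackerLock;   // stands in for MethodDescBackpatchInfoTracker::s_lock

class ConditionalLockHolder
{
    std::unique_lock<std::mutex> m_lock;   // default-constructed: owns nothing

public:
    explicit ConditionalLockHolder(bool acquireLock = true)
    {
        if (acquireLock)
        {
            m_lock = std::unique_lock<std::mutex>(g_trackerLock);   // released on destruction
        }
    }
};

void DoBackpatchLikeWork(bool isVersionableWithVtableSlotBackpatch)
{
    // Lock only when slots may need to be recorded or backpatched, mirroring DoBackpatch()
    ConditionalLockHolder lockHolder(isVersionableWithVtableSlotBackpatch);
    // ... read the entry point, record slots, backpatch ...
}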
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Inline and template definitions
+
+#ifndef DACCESS_COMPILE
+
+inline void EntryPointSlots::AddSlot_Locked(TADDR slot, SlotType slotType)
+{
+ WRAPPER_NO_CONTRACT;
+ static_assert_no_msg(SlotType_Count <= sizeof(INT32));
+ _ASSERTE(MethodDescBackpatchInfoTracker::IsLockedByCurrentThread());
+ _ASSERTE(slot != NULL);
+ _ASSERTE(!(slot & SlotType_Mask));
+ _ASSERTE(slotType >= SlotType_Normal);
+ _ASSERTE(slotType < SlotType_Count);
+ _ASSERTE(IS_ALIGNED((SIZE_T)slot, GetRequiredSlotAlignment(slotType)));
+
+ m_slots.Append(slot | slotType);
+}
+
+#endif // !DACCESS_COMPILE
+
+inline MethodDescBackpatchInfo::MethodDescBackpatchInfo(MethodDesc *methodDesc)
+ : m_methodDesc(methodDesc), m_dependentLoaderAllocators(nullptr)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(
+ methodDesc == nullptr ||
+ MethodDescBackpatchInfoTracker::MayHaveEntryPointSlotsToBackpatch(PTR_MethodDesc(methodDesc)));
+}
+
+#ifndef DACCESS_COMPILE
+
+template<class Visit>
+inline void MethodDescBackpatchInfo::ForEachDependentLoaderAllocator_Locked(Visit visit)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(MethodDescBackpatchInfoTracker::IsLockedByCurrentThread());
+ _ASSERTE(m_methodDesc != nullptr);
+
+ LoaderAllocatorSet *set = m_dependentLoaderAllocators;
+ if (set == nullptr)
+ {
+ return;
+ }
+
+ for (LoaderAllocatorSet::Iterator it = set->Begin(), itEnd = set->End(); it != itEnd; ++it)
+ {
+ visit(*it);
+ }
+}
+
+#endif // !DACCESS_COMPILE
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#undef DISABLE_COPY
+
+#endif // !CROSSGEN_COMPILE
diff --git a/src/vm/methodtable.cpp b/src/vm/methodtable.cpp
index e158436c21..ab42626de2 100644
--- a/src/vm/methodtable.cpp
+++ b/src/vm/methodtable.cpp
@@ -9773,8 +9773,7 @@ void MethodTable::SetSlot(UINT32 slotNumber, PCODE slotCode)
if (fSharedVtableChunk)
{
MethodDesc* pMD = GetMethodDescForSlotAddress(slotCode);
- _ASSERTE(pMD->HasStableEntryPoint());
- _ASSERTE(pMD->GetStableEntryPoint() == slotCode);
+ _ASSERTE(pMD->IsVersionableWithVtableSlotBackpatch() || pMD->GetStableEntryPoint() == slotCode);
}
}
#endif
diff --git a/src/vm/methodtable.h b/src/vm/methodtable.h
index bbdfc90623..20fc5e0003 100644
--- a/src/vm/methodtable.h
+++ b/src/vm/methodtable.h
@@ -3543,6 +3543,8 @@ public:
MethodTable * pMTDecl,
MethodTable * pMTImpl);
+ void CopySlotFrom(UINT32 slotNumber, MethodDataWrapper &hSourceMTData, MethodTable *pSourceMT);
+
protected:
static void CheckInitMethodDataCache();
static MethodData *FindParentMethodDataHelper(MethodTable *pMT);
diff --git a/src/vm/methodtable.inl b/src/vm/methodtable.inl
index d998f1fe36..70b31c0761 100644
--- a/src/vm/methodtable.inl
+++ b/src/vm/methodtable.inl
@@ -618,6 +618,17 @@ inline MethodDesc* MethodTable::GetMethodDescForSlot(DWORD slot)
#ifndef DACCESS_COMPILE
//==========================================================================================
+inline void MethodTable::CopySlotFrom(UINT32 slotNumber, MethodDataWrapper &hSourceMTData, MethodTable *pSourceMT)
+{
+ WRAPPER_NO_CONTRACT;
+
+ MethodDesc *pMD = hSourceMTData->GetImplMethodDesc(slotNumber);
+ _ASSERTE(CheckPointer(pMD));
+ _ASSERTE(pMD == pSourceMT->GetMethodDescForSlot(slotNumber));
+ SetSlot(slotNumber, pMD->GetInitialEntryPointForCopiedSlot());
+}
+
+//==========================================================================================
inline INT32 MethodTable::MethodIterator::GetNumMethods() const
{
LIMITED_METHOD_CONTRACT;
diff --git a/src/vm/methodtablebuilder.cpp b/src/vm/methodtablebuilder.cpp
index c33c2db564..e47474c8e9 100644
--- a/src/vm/methodtablebuilder.cpp
+++ b/src/vm/methodtablebuilder.cpp
@@ -6954,7 +6954,7 @@ MethodTableBuilder::NeedsNativeCodeSlot(bmtMDMethod * pMDMethod)
#ifdef FEATURE_TIERED_COMPILATION
- // Keep in-sync with MethodDesc::IsEligibleForTieredCompilation()
+ // Keep in-sync with MethodDesc::DetermineAndSetIsEligibleForTieredCompilation()
if (g_pConfig->TieredCompilation() &&
(pMDMethod->GetMethodType() == METHOD_TYPE_NORMAL || pMDMethod->GetMethodType() == METHOD_TYPE_INSTANTIATED))
{
@@ -8837,10 +8837,10 @@ void MethodTableBuilder::CopyExactParentSlots(MethodTable *pMT, MethodTable *pAp
// just the first indirection to detect sharing.
if (pMT->GetVtableIndirections()[0].GetValueMaybeNull() != pCanonMT->GetVtableIndirections()[0].GetValueMaybeNull())
{
+ MethodTable::MethodDataWrapper hCanonMTData(MethodTable::GetMethodData(pCanonMT, FALSE));
for (DWORD i = 0; i < nParentVirtuals; i++)
{
- PCODE target = pCanonMT->GetRestoredSlot(i);
- pMT->SetSlot(i, target);
+ pMT->CopySlotFrom(i, hCanonMTData, pCanonMT);
}
}
}
@@ -8849,11 +8849,13 @@ void MethodTableBuilder::CopyExactParentSlots(MethodTable *pMT, MethodTable *pAp
MethodTable::MethodDataWrapper hMTData(MethodTable::GetMethodData(pMT, FALSE));
MethodTable * pParentMT = pMT->GetParentMethodTable();
+ MethodTable::MethodDataWrapper hParentMTData(MethodTable::GetMethodData(pParentMT, FALSE));
for (DWORD i = 0; i < nParentVirtuals; i++)
{
// fix up wrongly-inherited method descriptors
MethodDesc* pMD = hMTData->GetImplMethodDesc(i);
+ CONSISTENCY_CHECK(CheckPointer(pMD));
CONSISTENCY_CHECK(pMD == pMT->GetMethodDescForSlot(i));
if (pMD->GetMethodTable() == pMT)
@@ -8880,8 +8882,7 @@ void MethodTableBuilder::CopyExactParentSlots(MethodTable *pMT, MethodTable *pAp
}
// The slot lives in an unshared chunk. We need to update the slot contents
- PCODE target = pParentMT->GetRestoredSlot(i);
- pMT->SetSlot(i, target);
+ pMT->CopySlotFrom(i, hParentMTData, pParentMT);
}
}
} // MethodTableBuilder::CopyExactParentSlots
@@ -10511,7 +10512,7 @@ MethodTableBuilder::SetupMethodTable2(
//
DWORD indirectionIndex = MethodTable::GetIndexOfVtableIndirection(iCurSlot);
if (GetParentMethodTable()->GetVtableIndirections()[indirectionIndex].GetValueMaybeNull() != pMT->GetVtableIndirections()[indirectionIndex].GetValueMaybeNull())
- pMT->SetSlot(iCurSlot, pMD->GetMethodEntryPoint());
+ pMT->SetSlot(iCurSlot, pMD->GetInitialEntryPointForCopiedSlot());
}
else
{
@@ -10625,22 +10626,27 @@ MethodTableBuilder::SetupMethodTable2(
// This indicates that the method body in this slot was copied here through a methodImpl.
// Thus, copy the value of the slot from which the body originally came, in case it was
// overridden, to make sure the two slots stay in sync.
- INDEBUG(MethodDesc * pMDOld; pMDOld = pMD;)
- if(pMD->GetSlot() != i &&
- pMT->GetSlot(i) != pMT->GetSlot(pMD->GetSlot()))
+ DWORD originalIndex = pMD->GetSlot();
+ if (originalIndex != i)
{
- // Copy the slot value in the method's original slot.
- pMT->SetSlot(i,pMT->GetSlot(pMD->GetSlot()));
- hMTData->InvalidateCachedVirtualSlot(i);
-
- // Update the pMD to the new method desc we just copied over ourselves with. This will
- // be used in the check for missing method block below.
- pMD = pMT->GetMethodDescForSlot(pMD->GetSlot());
-
- // This method is now duplicate
- pMD->SetDuplicate();
- INDEBUG(g_dupMethods++;)
- fChangeMade = TRUE;
+ MethodDesc *pOriginalMD = hMTData->GetImplMethodDesc(originalIndex);
+ CONSISTENCY_CHECK(CheckPointer(pOriginalMD));
+ CONSISTENCY_CHECK(pOriginalMD == pMT->GetMethodDescForSlot(originalIndex));
+ if (pMD != pOriginalMD)
+ {
+ // Copy the slot value in the method's original slot.
+ pMT->SetSlot(i, pOriginalMD->GetInitialEntryPointForCopiedSlot());
+ hMTData->InvalidateCachedVirtualSlot(i);
+
+ // Update the pMD to the new method desc we just copied over ourselves with. This will
+ // be used in the check for missing method block below.
+ pMD = pOriginalMD;
+
+ // This method is now duplicate
+ pMD->SetDuplicate();
+ INDEBUG(g_dupMethods++;)
+ fChangeMade = TRUE;
+ }
}
}
}
diff --git a/src/vm/precode.cpp b/src/vm/precode.cpp
index 9f3d37ca8b..38cf9aa82e 100644
--- a/src/vm/precode.cpp
+++ b/src/vm/precode.cpp
@@ -494,6 +494,32 @@ TADDR Precode::AllocateTemporaryEntryPoints(MethodDescChunk * pChunk,
int count = pChunk->GetCount();
+ // Determine eligibility for tiered compilation
+#ifdef HAS_COMPACT_ENTRYPOINTS
+ bool hasMethodDescVersionableWithPrecode = false;
+#endif
+ {
+ MethodDesc *pMD = pChunk->GetFirstMethodDesc();
+ for (int i = 0; i < count; ++i)
+ {
+ if (pMD->DetermineAndSetIsEligibleForTieredCompilation())
+ {
+ _ASSERTE(pMD->IsEligibleForTieredCompilation());
+ _ASSERTE(!pMD->IsVersionableWithPrecode() || pMD->RequiresStableEntryPoint());
+ }
+
+#ifdef HAS_COMPACT_ENTRYPOINTS
+ if (pMD->IsVersionableWithPrecode())
+ {
+ _ASSERTE(pMD->RequiresStableEntryPoint());
+ hasMethodDescVersionableWithPrecode = true;
+ }
+#endif
+
+ pMD = (MethodDesc *)(dac_cast<TADDR>(pMD) + pMD->SizeOf());
+ }
+ }
+
PrecodeType t = PRECODE_STUB;
bool preallocateJumpStubs = false;
@@ -521,7 +547,7 @@ TADDR Precode::AllocateTemporaryEntryPoints(MethodDescChunk * pChunk,
#ifdef HAS_COMPACT_ENTRYPOINTS
// Note that these are just best guesses to save memory. If we guessed wrong,
// we will allocate a new exact type of precode in GetOrCreatePrecode.
- BOOL fForcedPrecode = pFirstMD->RequiresStableEntryPoint(count > 1);
+ BOOL fForcedPrecode = hasMethodDescVersionableWithPrecode || pFirstMD->RequiresStableEntryPoint(count > 1);
#ifdef _TARGET_ARM_
if (pFirstMD->RequiresMethodDescCallingConvention(count > 1)
diff --git a/src/vm/prestub.cpp b/src/vm/prestub.cpp
index 29f09ab109..5c08becafa 100644
--- a/src/vm/prestub.cpp
+++ b/src/vm/prestub.cpp
@@ -46,9 +46,8 @@
#include "perfmap.h"
#endif
-#ifdef FEATURE_TIERED_COMPILATION
#include "callcounter.h"
-#endif
+#include "methoddescbackpatchinfo.h"
#if defined(FEATURE_GDBJIT)
#include "gdbjit.h"
@@ -83,58 +82,116 @@ PCODE MethodDesc::DoBackpatch(MethodTable * pMT, MethodTable *pDispatchingMT, BO
{
STANDARD_VM_CHECK;
PRECONDITION(!ContainsGenericVariables());
- PRECONDITION(HasStableEntryPoint());
PRECONDITION(pMT == GetMethodTable());
}
CONTRACTL_END;
- PCODE pTarget = GetStableEntryPoint();
- if (!HasTemporaryEntryPoint())
- return pTarget;
+ bool isVersionableWithVtableSlotBackpatch = IsVersionableWithVtableSlotBackpatch();
+ LoaderAllocator *mdLoaderAllocator = isVersionableWithVtableSlotBackpatch ? GetLoaderAllocator() : nullptr;
+
+ // Only take the lock if the method is versionable with vtable slot backpatch, for recording slots and synchronizing with
+ // backpatching slots
+ MethodDescBackpatchInfoTracker::ConditionalLockHolder lockHolder(isVersionableWithVtableSlotBackpatch);
+
+ // Get the method entry point inside the lock above to synchronize with backpatching in
+ // MethodDesc::BackpatchEntryPointSlots()
+ PCODE pTarget = GetMethodEntryPoint();
+
+ PCODE pExpected;
+ if (isVersionableWithVtableSlotBackpatch)
+ {
+ _ASSERTE(pTarget == GetEntryPointToBackpatch_Locked());
- PCODE pExpected = GetTemporaryEntryPoint();
+ pExpected = GetTemporaryEntryPoint();
+ if (pExpected == pTarget)
+ return pTarget;
- if (pExpected == pTarget)
- return pTarget;
+ // True interface methods are never backpatched and are not versionable with vtable slot backpatch
+ _ASSERTE(!(pMT->IsInterface() && !IsStatic()));
- // True interface methods are never backpatched
- if (pMT->IsInterface() && !IsStatic())
- return pTarget;
+ // Backpatching the funcptr stub:
+ // For methods versionable with vtable slot backpatch, a funcptr stub is guaranteed to point to the then-current
+ // entry point shortly after creation, and any further backpatching of it is handled by
+ // MethodDesc::BackpatchEntryPointSlots()
- if (fFullBackPatch)
+ // Backpatching the temporary entry point:
+ // The temporary entry point is never backpatched for methods versionable with vtable slot backpatch. New vtable
+ // slots inheriting the method initially point to the temporary entry point, which must keep pointing to the
+ // prestub so that calls land here, where the new vtable slot can be discovered and recorded for future
+ // backpatching.
+
+ _ASSERTE(!HasNonVtableSlot());
+ }
+ else
{
- FuncPtrStubs * pFuncPtrStubs = GetLoaderAllocator()->GetFuncPtrStubsNoCreate();
- if (pFuncPtrStubs != NULL)
+ _ASSERTE(pTarget == GetStableEntryPoint());
+
+ if (!HasTemporaryEntryPoint())
+ return pTarget;
+
+ pExpected = GetTemporaryEntryPoint();
+ if (pExpected == pTarget)
+ return pTarget;
+
+ // True interface methods are never backpatched
+ if (pMT->IsInterface() && !IsStatic())
+ return pTarget;
+
+ if (fFullBackPatch)
{
- Precode* pFuncPtrPrecode = pFuncPtrStubs->Lookup(this);
- if (pFuncPtrPrecode != NULL)
+ FuncPtrStubs * pFuncPtrStubs = GetLoaderAllocator()->GetFuncPtrStubsNoCreate();
+ if (pFuncPtrStubs != NULL)
{
- // If there is a funcptr precode to patch, we are done for this round.
- if (pFuncPtrPrecode->SetTargetInterlocked(pTarget))
- return pTarget;
+ Precode* pFuncPtrPrecode = pFuncPtrStubs->Lookup(this);
+ if (pFuncPtrPrecode != NULL)
+ {
+ // If there is a funcptr precode to patch, we are done for this round.
+ if (pFuncPtrPrecode->SetTargetInterlocked(pTarget))
+ return pTarget;
+ }
}
- }
#ifndef HAS_COMPACT_ENTRYPOINTS
- // Patch the fake entrypoint if necessary
- Precode::GetPrecodeFromEntryPoint(pExpected)->SetTargetInterlocked(pTarget);
+ // Patch the fake entrypoint if necessary
+ Precode::GetPrecodeFromEntryPoint(pExpected)->SetTargetInterlocked(pTarget);
#endif // HAS_COMPACT_ENTRYPOINTS
+ }
+
+ if (HasNonVtableSlot())
+ return pTarget;
}
- if (HasNonVtableSlot())
- return pTarget;
+ auto RecordAndBackpatchSlot = [&](MethodTable *patchedMT, DWORD slotIndex)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(isVersionableWithVtableSlotBackpatch);
+
+ RecordAndBackpatchEntryPointSlot_Locked(
+ mdLoaderAllocator,
+ patchedMT->GetLoaderAllocator(),
+ patchedMT->GetSlotPtr(slotIndex),
+ EntryPointSlots::SlotType_Vtable,
+ pTarget);
+ };
BOOL fBackpatched = FALSE;
-#define BACKPATCH(pPatchedMT) \
- do \
- { \
- if (pPatchedMT->GetSlot(dwSlot) == pExpected) \
- { \
- pPatchedMT->SetSlot(dwSlot, pTarget); \
- fBackpatched = TRUE; \
- } \
- } \
+#define BACKPATCH(pPatchedMT) \
+ do \
+ { \
+ if (pPatchedMT->GetSlot(dwSlot) == pExpected) \
+ { \
+ if (isVersionableWithVtableSlotBackpatch) \
+ { \
+ RecordAndBackpatchSlot(pPatchedMT, dwSlot); \
+ } \
+ else \
+ { \
+ pPatchedMT->SetSlot(dwSlot, pTarget); \
+ } \
+ fBackpatched = TRUE; \
+ } \
+ } \
while(0)
// The owning slot has been updated already, so there is no need to backpatch it
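The BACKPATCH macro now distinguishes two cases for a slot that still holds the expected temporary entry point: for methods versionable with vtable slot backpatch the slot is recorded with the backpatch tracker (and patched to the current entry point), while other methods simply get the stable target written once. A rough sketch of that decision with hypothetical toy types, not the runtime's:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

using EntryPoint = uintptr_t;

struct ToyBackpatchTracker          // hypothetical analogue of the backpatch tracker
{
    std::vector<EntryPoint *> recordedSlots;

    void RecordAndBackpatch(EntryPoint *slot, EntryPoint target)
    {
        recordedSlots.push_back(slot);  // remembered for future version switches
        *slot = target;                 // and patched to the current target now
    }
};

static void BackpatchSlot(
    EntryPoint *slot, EntryPoint expected, EntryPoint target,
    bool versionableWithVtableSlotBackpatch, ToyBackpatchTracker &tracker)
{
    if (*slot != expected)
        return;                         // slot was already patched or points elsewhere
    if (versionableWithVtableSlotBackpatch)
        tracker.RecordAndBackpatch(slot, target);
    else
        *slot = target;                 // one-time write of the stable entry point
}

int main()
{
    EntryPoint temporary = 0x1000, tier0 = 0x2000;
    EntryPoint slot = temporary;
    ToyBackpatchTracker tracker;
    BackpatchSlot(&slot, temporary, tier0, /*versionable*/ true, tracker);
    std::cout << std::hex << slot << " recorded=" << tracker.recordedSlots.size() << "\n";
}
```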
@@ -154,8 +211,10 @@ PCODE MethodDesc::DoBackpatch(MethodTable * pMT, MethodTable *pDispatchingMT, BO
// that it returns the stable entrypoint eventually to avoid going through the slow path all the time.
//
MethodTable * pRestoredSlotMT = pDispatchingMT->GetRestoredSlotMT(dwSlot);
-
- BACKPATCH(pRestoredSlotMT);
+ if (pRestoredSlotMT != pDispatchingMT)
+ {
+ BACKPATCH(pRestoredSlotMT);
+ }
}
}
@@ -168,7 +227,7 @@ PCODE MethodDesc::DoBackpatch(MethodTable * pMT, MethodTable *pDispatchingMT, BO
BACKPATCH(pMT);
- if (pDispatchingMT != NULL)
+ if (pDispatchingMT != NULL && pDispatchingMT != pMT)
{
BACKPATCH(pDispatchingMT);
}
@@ -185,7 +244,7 @@ PCODE MethodDesc::DoBackpatch(MethodTable * pMT, MethodTable *pDispatchingMT, BO
{
BACKPATCH(pMT);
- if (pDispatchingMT != NULL)
+ if (pDispatchingMT != NULL && pDispatchingMT != pMT)
{
BACKPATCH(pDispatchingMT);
}
@@ -1752,8 +1811,10 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
/*************************** VERSIONABLE CODE *********************/
BOOL fIsPointingToPrestub = IsPointingToPrestub();
+ bool fIsVersionableWithoutJumpStamp = false;
#ifdef FEATURE_CODE_VERSIONING
- if (IsVersionableWithPrecode() ||
+ fIsVersionableWithoutJumpStamp = IsVersionableWithoutJumpStamp();
+ if (fIsVersionableWithoutJumpStamp ||
(!fIsPointingToPrestub && IsVersionableWithJumpStamp()))
{
pCode = GetCodeVersionManager()->PublishVersionableCodeIfNecessary(this, fCanBackpatchPrestub);
@@ -1803,7 +1864,7 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
#endif // defined(FEATURE_SHARE_GENERIC_CODE)
else if (IsIL() || IsNoMetadata())
{
- if (!IsNativeCodeStableAfterInit())
+ if (!IsNativeCodeStableAfterInit() && (!fIsVersionableWithoutJumpStamp || IsVersionableWithPrecode()))
{
GetOrCreatePrecode();
}
@@ -1874,13 +1935,14 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
if (pCode != NULL)
{
- if (HasPrecode())
- GetPrecode()->SetTargetInterlocked(pCode);
- else
- if (!HasStableEntryPoint())
- {
- SetStableEntryPointInterlocked(pCode);
- }
+ if (fIsVersionableWithoutJumpStamp)
+ {
+ // Methods versionable without a jump stamp should only get here if updating the code versions above failed for
+ // some reason. Don't backpatch this time; the update will be retried on the next call.
+ return pCode;
+ }
+
+ SetCodeEntryPoint(pCode);
}
else
{
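With this change, DoPrestub leaves entry-point publication for versionable methods to the code versioning path and otherwise calls SetCodeEntryPoint, which is expected to pick the appropriate mechanism for the method. A speculative sketch of how such a helper could dispatch on the mechanisms described in the design doc; the names and layout are invented, not the runtime's implementation:

```cpp
#include <atomic>
#include <cstdint>
#include <iostream>

using EntryPoint = uintptr_t;

enum class Mechanism { StableEntryPoint, Precode, VtableSlotBackpatch };

struct ToyMethod
{
    explicit ToyMethod(Mechanism m) : mechanism(m) {}

    Mechanism mechanism;
    std::atomic<EntryPoint> stableEntryPoint{0};  // direct entry point, set once
    std::atomic<EntryPoint> precodeTarget{0};     // target of a fixup precode
    EntryPoint backpatchedSlots{0};               // stand-in for recorded vtable/stub slots

    void SetCodeEntryPoint(EntryPoint code)
    {
        switch (mechanism)
        {
        case Mechanism::StableEntryPoint:
        {
            EntryPoint expected = 0;
            stableEntryPoint.compare_exchange_strong(expected, code);  // set-once, interlocked
            break;
        }
        case Mechanism::Precode:
            precodeTarget.store(code);            // a precode's jmp target can be retargeted
            break;
        case Mechanism::VtableSlotBackpatch:
            backpatchedSlots = code;              // all recorded slots get the new target
            break;
        }
    }
};

int main()
{
    ToyMethod m(Mechanism::Precode);
    m.SetCodeEntryPoint(0x2000);
    std::cout << std::hex << m.precodeTarget.load() << "\n";
}
```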
@@ -1888,8 +1950,7 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
{
pStub->DecRef();
}
- else
- if (pStub->HasExternalEntryPoint())
+ else if (pStub->HasExternalEntryPoint())
{
// If the Stub wraps code that is outside of the Stub allocation, then we
// need to free the Stub allocation now.
@@ -2319,11 +2380,20 @@ EXTERN_C PCODE STDCALL ExternalMethodFixupWorker(TransitionBlock * pTransitionBl
pCode = pMD->GetMethodEntryPoint();
//
- // Note that we do not want to call code:MethodDesc::IsPointingToPrestub() here. It does not take remoting interception
- // into account and so it would cause otherwise intercepted methods to be JITed. It is a compat issue if the JITing fails.
+ // Note that we do not want to call code:MethodDesc::IsPointingToPrestub() here. It does not take remoting
+ // interception into account and so it would cause otherwise intercepted methods to be JITed. It is a compat
+ // issue if the JITing fails.
//
if (!DoesSlotCallPrestub(pCode))
{
+ if (pMD->IsVersionableWithVtableSlotBackpatch())
+ {
+ // The entry point for this method needs to be versionable, so use a FuncPtrStub similarly to what is done
+ // in MethodDesc::GetMultiCallableAddrOfCode()
+ GCX_COOP();
+ pCode = pMD->GetLoaderAllocator()->GetFuncPtrStubs()->GetFuncPtrStub(pMD);
+ }
+
pCode = PatchNonVirtualExternalMethod(pMD, pCode, pImportSection, pIndirection);
}
}
@@ -2380,10 +2450,21 @@ EXTERN_C PCODE VirtualMethodFixupWorker(Object * pThisPtr, CORCOMPILE_VIRTUAL_I
if (!DoesSlotCallPrestub(pCode))
{
- // Skip fixup precode jump for better perf
- PCODE pDirectTarget = Precode::TryToSkipFixupPrecode(pCode);
- if (pDirectTarget != NULL)
- pCode = pDirectTarget;
+ MethodDesc *pMD = MethodTable::GetMethodDescForSlotAddress(pCode);
+ if (pMD->IsVersionableWithVtableSlotBackpatch())
+ {
+ // The entry point for this method needs to be versionable, so use a FuncPtrStub similarly to what is done in
+ // MethodDesc::GetMultiCallableAddrOfCode()
+ GCX_COOP();
+ pCode = pMD->GetLoaderAllocator()->GetFuncPtrStubs()->GetFuncPtrStub(pMD);
+ }
+ else
+ {
+ // Skip fixup precode jump for better perf
+ PCODE pDirectTarget = Precode::TryToSkipFixupPrecode(pCode);
+ if (pDirectTarget != NULL)
+ pCode = pDirectTarget;
+ }
// Patch the thunk to the actual method body
if (EnsureWritableExecutablePagesNoThrow(&pThunk->m_pTarget, sizeof(pThunk->m_pTarget)))
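Both fixup workers now hand out a FuncPtrStub for methods versionable with vtable slot backpatch, so the address the caller records never needs to change; only the stub's target is backpatched when a new code version is published. A toy illustration of that indirection, using hypothetical types rather than the runtime's stubs:

```cpp
#include <cstdint>
#include <iostream>

using EntryPoint = uintptr_t;

struct ToyFuncPtrStub                 // hypothetical analogue of a funcptr stub
{
    EntryPoint target;                // the only word that backpatching has to update

    EntryPoint StubAddress() const { return reinterpret_cast<EntryPoint>(this); }
};

int main()
{
    ToyFuncPtrStub stub{/*tier0*/ 0x1000};

    // A caller captures the stub's address once (e.g. in an import cell).
    EntryPoint recorded = stub.StubAddress();

    // Tier promotion: backpatch only the stub's target, not every captured address.
    stub.target = /*tier1*/ 0x2000;

    auto *observed = reinterpret_cast<ToyFuncPtrStub *>(recorded);
    std::cout << std::hex << observed->target << "\n";   // 0x2000
}
```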
diff --git a/src/vm/tieredcompilation.cpp b/src/vm/tieredcompilation.cpp
index 2c24618bd8..f5745a1282 100644
--- a/src/vm/tieredcompilation.cpp
+++ b/src/vm/tieredcompilation.cpp
@@ -406,6 +406,8 @@ void WINAPI TieredCompilationManager::TieringDelayTimerCallback(PVOID parameter,
void TieredCompilationManager::TieringDelayTimerCallbackInAppDomain(LPVOID parameter)
{
WRAPPER_NO_CONTRACT;
+
+ GCX_PREEMP();
GetAppDomain()->GetTieredCompilationManager()->TieringDelayTimerCallbackWorker();
}
@@ -495,9 +497,15 @@ void TieredCompilationManager::ResumeCountingCalls(MethodDesc* pMethodDesc)
{
WRAPPER_NO_CONTRACT;
_ASSERTE(pMethodDesc != nullptr);
- _ASSERTE(pMethodDesc->IsVersionableWithPrecode());
- pMethodDesc->GetPrecode()->ResetTargetInterlocked();
+ EX_TRY
+ {
+ pMethodDesc->ResetCodeEntryPoint();
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
}
bool TieredCompilationManager::TryAsyncOptimizeMethods()
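ResumeCountingCalls now resets the method's entry point through ResetCodeEntryPoint and swallows non-terminal failures so the timer callback never throws; a failed reset just means the method keeps running its current code version. A loose sketch of that shape, using plain C++ exceptions in place of the EX_TRY macros and invented names:

```cpp
#include <iostream>
#include <stdexcept>
#include <vector>

struct ToyMethod
{
    bool resetShouldFail = false;

    void ResetCodeEntryPoint()
    {
        if (resetShouldFail)
            throw std::runtime_error("transient failure while resetting entry point");
        // ... point the entry point back at the call-counting path ...
    }
};

static void ResumeCountingCalls(ToyMethod &method)
{
    try
    {
        method.ResetCodeEntryPoint();
    }
    catch (const std::exception &)
    {
        // Swallow non-fatal failures; the method simply stays on its current code
        // version instead of resuming call counting.
    }
}

int main()
{
    std::vector<ToyMethod> methods(2);
    methods[1].resetShouldFail = true;
    for (ToyMethod &m : methods)
        ResumeCountingCalls(m);            // never throws out of the callback
    std::cout << "done\n";
}
```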
diff --git a/src/vm/virtualcallstub.cpp b/src/vm/virtualcallstub.cpp
index ef11c6ecce..875ee1cc0d 100644
--- a/src/vm/virtualcallstub.cpp
+++ b/src/vm/virtualcallstub.cpp
@@ -489,7 +489,7 @@ void VirtualCallStubManager::Init(BaseDomain *pDomain, LoaderAllocator *pLoaderA
// Record the parent domain
parentDomain = pDomain;
- isCollectible = !!pLoaderAllocator->IsCollectible();
+ m_loaderAllocator = pLoaderAllocator;
//
// Init critical sections
@@ -628,7 +628,7 @@ void VirtualCallStubManager::Init(BaseDomain *pDomain, LoaderAllocator *pLoaderA
BYTE * initReservedMem = NULL;
- if (!isCollectible)
+ if (!m_loaderAllocator->IsCollectible())
{
DWORD dwTotalReserveMemSizeCalc = indcell_heap_reserve_size +
cache_entry_heap_reserve_size +
@@ -833,7 +833,7 @@ void VirtualCallStubManager::Uninit()
{
WRAPPER_NO_CONTRACT;
- if (isCollectible)
+ if (m_loaderAllocator->IsCollectible())
{
parentDomain->GetCollectibleVSDRanges()->RemoveRanges(this);
}
@@ -891,7 +891,7 @@ VirtualCallStubManager::~VirtualCallStubManager()
// This was the block reserved by Init for the heaps.
// For the collectible case, the VSD logic does not allocate the memory.
- if (m_initialReservedMemForHeaps && !isCollectible)
+ if (m_initialReservedMemForHeaps && !m_loaderAllocator->IsCollectible())
ClrVirtualFree (m_initialReservedMemForHeaps, 0, MEM_RELEASE);
// Free critical section
@@ -2630,6 +2630,12 @@ VirtualCallStubManager::TraceResolver(
slot = pItfMD->GetMethodTable()->FindDispatchSlot(pItfMD->GetSlot(), TRUE /* throwOnConflict */);
}
+ // The dispatch slot's target may change due to code versioning shortly after it was retrieved above for the trace. This
+ // will result in the debugger getting some version of the code or the prestub, but not necessarily the exact code pointer
+ // that winds up getting executed. The debugger has code that handles this ambiguity by placing a breakpoint at the start of
+ // all native code versions, even if they aren't the one that was reported by this trace, see
+ // DebuggerController::PatchTrace() under case TRACE_MANAGED. This relieves the StubManager of having to prevent the
+ // race that occurs here.
return (StubManager::TraceStub(slot.GetTarget(), trace));
}
@@ -2791,6 +2797,16 @@ DispatchHolder *VirtualCallStubManager::GenerateDispatchStub(PCODE ad
#endif
);
+#ifdef FEATURE_CODE_VERSIONING
+ MethodDesc *pMD = MethodTable::GetMethodDescForSlotAddress(addrOfCode);
+ if (pMD->IsVersionableWithVtableSlotBackpatch())
+ {
+ EntryPointSlots::SlotType slotType;
+ TADDR slot = holder->stub()->implTargetSlot(&slotType);
+ pMD->RecordAndBackpatchEntryPointSlot(m_loaderAllocator, slot, slotType);
+ }
+#endif
+
ClrFlushInstructionCache(holder->stub(), holder->stub()->size());
AddToCollectibleVSDRangeList(holder);
@@ -2837,6 +2853,16 @@ DispatchHolder *VirtualCallStubManager::GenerateDispatchStubLong(PCODE
(size_t)pMTExpected,
DispatchStub::e_TYPE_LONG);
+#ifdef FEATURE_CODE_VERSIONING
+ MethodDesc *pMD = MethodTable::GetMethodDescForSlotAddress(addrOfCode);
+ if (pMD->IsVersionableWithVtableSlotBackpatch())
+ {
+ EntryPointSlots::SlotType slotType;
+ TADDR slot = holder->stub()->implTargetSlot(&slotType);
+ pMD->RecordAndBackpatchEntryPointSlot(m_loaderAllocator, slot, slotType);
+ }
+#endif
+
ClrFlushInstructionCache(holder->stub(), holder->stub()->size());
AddToCollectibleVSDRangeList(holder);
@@ -3005,6 +3031,17 @@ ResolveCacheElem *VirtualCallStubManager::GenerateResolveCacheElem(void *addrOfC
e->pNext = NULL;
+#ifdef FEATURE_CODE_VERSIONING
+ MethodDesc *pMD = MethodTable::GetMethodDescForSlotAddress((PCODE)addrOfCode);
+ if (pMD->IsVersionableWithVtableSlotBackpatch())
+ {
+ pMD->RecordAndBackpatchEntryPointSlot(
+ m_loaderAllocator,
+ (TADDR)&e->target,
+ EntryPointSlots::SlotType_Normal);
+ }
+#endif
+
//incr our counters
stats.cache_entry_counter++;
stats.cache_entry_space += sizeof(ResolveCacheElem);
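Each of the three VSD sites above registers the slot that embeds the method's entry point (dispatch stub target, long dispatch stub target, resolve cache entry) so it can be rewritten when a newer code version is published; the real tracker also keys slots by loader allocator so entries for collectible types can be cleaned up. A simplified registry sketch with invented names, not the actual MethodDescBackpatchInfo layout:

```cpp
#include <cstdint>
#include <iostream>
#include <unordered_map>
#include <vector>

using EntryPoint = uintptr_t;
using MethodId = int;

struct ToySlotRegistry                 // hypothetical analogue of the backpatch tracker
{
    std::unordered_map<MethodId, std::vector<EntryPoint *>> slotsByMethod;

    void RecordAndBackpatch(MethodId method, EntryPoint *slot, EntryPoint current)
    {
        slotsByMethod[method].push_back(slot);  // remembered for future versions
        *slot = current;                        // and patched to the current entry point
    }

    void BackpatchAll(MethodId method, EntryPoint newTarget)
    {
        for (EntryPoint *slot : slotsByMethod[method])
            *slot = newTarget;                  // dispatch stubs, cache entries, vtable slots...
    }
};

int main()
{
    EntryPoint dispatchStubTarget = 0, resolveCacheTarget = 0;
    ToySlotRegistry registry;

    registry.RecordAndBackpatch(/*method*/ 1, &dispatchStubTarget, /*tier0*/ 0x1000);
    registry.RecordAndBackpatch(/*method*/ 1, &resolveCacheTarget, /*tier0*/ 0x1000);

    registry.BackpatchAll(/*method*/ 1, /*tier1*/ 0x2000);
    std::cout << std::hex << dispatchStubTarget << " " << resolveCacheTarget << "\n";
}
```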
diff --git a/src/vm/virtualcallstub.h b/src/vm/virtualcallstub.h
index 360f04a7df..68badec5b5 100644
--- a/src/vm/virtualcallstub.h
+++ b/src/vm/virtualcallstub.h
@@ -278,7 +278,7 @@ public:
cache_entry_rangeList(),
vtable_rangeList(),
parentDomain(NULL),
- isCollectible(false),
+ m_loaderAllocator(NULL),
m_initialReservedMemForHeaps(NULL),
m_FreeIndCellList(NULL),
m_RecycledIndCellList(NULL),
@@ -516,7 +516,7 @@ private:
template <typename STUB_HOLDER>
void AddToCollectibleVSDRangeList(STUB_HOLDER *holder)
{
- if (isCollectible)
+ if (m_loaderAllocator->IsCollectible())
{
parentDomain->GetCollectibleVSDRanges()->AddRange(reinterpret_cast<BYTE *>(holder->stub()),
reinterpret_cast<BYTE *>(holder->stub()) + holder->stub()->size(),
@@ -639,7 +639,8 @@ private:
private:
// The parent domain of this manager
PTR_BaseDomain parentDomain;
- bool isCollectible;
+
+ PTR_LoaderAllocator m_loaderAllocator;
BYTE * m_initialReservedMemForHeaps;
diff --git a/tests/src/baseservices/TieredCompilation/TieredVtableMethodTests.cs b/tests/src/baseservices/TieredCompilation/TieredVtableMethodTests.cs
new file mode 100644
index 0000000000..039f68399c
--- /dev/null
+++ b/tests/src/baseservices/TieredCompilation/TieredVtableMethodTests.cs
@@ -0,0 +1,274 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+using System;
+using System.Reflection;
+using System.Reflection.Emit;
+using System.Runtime.CompilerServices;
+using System.Text;
+using System.Threading;
+
+public static class TieredVtableMethodTests
+{
+ private const int CallCountPerIteration = 8;
+
+ private static StringBuilder s_expectedCallSequence = new StringBuilder();
+ private static StringBuilder s_actualCallSequence = new StringBuilder();
+
+ private static int Main()
+ {
+ const int Pass = 100, Fail = 101;
+
+ var baseObj = new Base();
+ var derivedObj = new Derived();
+ var derivedForDevirtualizationObj = new DerivedForDevirtualization();
+
+ PromoteToTier1(
+ () => CallVirtualMethod(baseObj),
+ () => CallVirtualMethod(derivedObj),
+ () => CallGenericVirtualMethodWithValueType(baseObj),
+ () => CallGenericVirtualMethodWithValueType(derivedObj),
+ () => CallGenericVirtualMethodWithReferenceType(baseObj),
+ () => CallGenericVirtualMethodWithReferenceType(derivedObj),
+ () => CallVirtualMethodForDevirtualization(derivedForDevirtualizationObj),
+ () => CallInterfaceVirtualMethodPolymorhpic(baseObj),
+ () => CallInterfaceVirtualMethodPolymorhpic(derivedObj));
+
+ for (int i = 0; i < 4; ++i)
+ {
+ CallVirtualMethod(baseObj, CallCountPerIteration);
+ CallVirtualMethod(derivedObj, CallCountPerIteration);
+ CallGenericVirtualMethodWithValueType(baseObj, CallCountPerIteration);
+ CallGenericVirtualMethodWithValueType(derivedObj, CallCountPerIteration);
+ CallGenericVirtualMethodWithReferenceType(baseObj, CallCountPerIteration);
+ CallGenericVirtualMethodWithReferenceType(derivedObj, CallCountPerIteration);
+ CallVirtualMethodForDevirtualization(derivedForDevirtualizationObj, CallCountPerIteration);
+ CallInterfaceVirtualMethodMonomorphicOnBase(baseObj, CallCountPerIteration);
+ CallInterfaceVirtualMethodMonomorphicOnDerived(derivedObj, CallCountPerIteration);
+ CallInterfaceVirtualMethodPolymorhpic(baseObj, CallCountPerIteration);
+ CallInterfaceVirtualMethodPolymorhpic(derivedObj, CallCountPerIteration);
+
+ for (int j = 0; j < 2; ++j)
+ {
+ RunCollectibleIterations();
+
+ GC.Collect();
+ GC.WaitForPendingFinalizers();
+ GC.WaitForPendingFinalizers();
+ }
+ }
+
+ if (s_actualCallSequence.Equals(s_expectedCallSequence))
+ {
+ return Pass;
+ }
+
+ Console.WriteLine($"Expected: {s_expectedCallSequence}");
+ Console.WriteLine($"Actual: {s_actualCallSequence}");
+ return Fail;
+ }
+
+ /// Creates a collectible type deriving from <see cref="Base"/> similar to <see cref="Derived"/>. The collectible derived
+ /// type inherits vtable slots from the base. After multiple iterations of the test, the collectible type will be collected
+ /// and replaced with another new collectible type. This is used to cover vtable slot backpatching and cleanup of recorded
+ /// slots in collectible types.
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private static void RunCollectibleIterations()
+ {
+ Base collectibleDerivedObj = CreateCollectibleDerived();
+
+ PromoteToTier1(
+ () => CallVirtualMethod(collectibleDerivedObj),
+ () => CallGenericVirtualMethodWithValueType(collectibleDerivedObj),
+ () => CallGenericVirtualMethodWithReferenceType(collectibleDerivedObj),
+ () => CallInterfaceVirtualMethodPolymorhpic(collectibleDerivedObj));
+
+ CallVirtualMethod(collectibleDerivedObj, CallCountPerIteration);
+ CallGenericVirtualMethodWithValueType(collectibleDerivedObj, CallCountPerIteration);
+ CallGenericVirtualMethodWithReferenceType(collectibleDerivedObj, CallCountPerIteration);
+ CallInterfaceVirtualMethodPolymorhpic(collectibleDerivedObj, CallCountPerIteration);
+ }
+
+ public interface IBase
+ {
+ void InterfaceVirtualMethod();
+ }
+
+ public class Base : IBase
+ {
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ public virtual void VirtualMethod()
+ {
+ s_actualCallSequence.Append("v ");
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ public virtual void GenericVirtualMethod<T>(T t)
+ {
+ s_actualCallSequence.Append(typeof(T).IsValueType ? "gvv " : "gvr ");
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ public virtual void VirtualMethodForDevirtualization()
+ {
+ s_actualCallSequence.Append("vd ");
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ public virtual void InterfaceVirtualMethod()
+ {
+ s_actualCallSequence.Append("iv ");
+ }
+ }
+
+ private class Derived : Base
+ {
+ // Prevent this type from sharing the vtable chunk from the base
+ public virtual void VirtualMethod2()
+ {
+ }
+ }
+
+ // Derived type that is sealed for testing devirtualization of calls to inherited virtual methods
+ private sealed class DerivedForDevirtualization : Derived
+ {
+ // Prevent this type from sharing the vtable chunk from the base
+ public override void VirtualMethod()
+ {
+ }
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private static void CallVirtualMethod(Base obj, int count = 1)
+ {
+ for (int i = 0; i < count; ++i)
+ {
+ s_expectedCallSequence.Append("v ");
+ obj.VirtualMethod();
+ }
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private static void CallGenericVirtualMethodWithValueType(Base obj, int count = 1)
+ {
+ for (int i = 0; i < count; ++i)
+ {
+ s_expectedCallSequence.Append("gvv ");
+ obj.GenericVirtualMethod(0);
+ }
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private static void CallGenericVirtualMethodWithReferenceType(Base obj, int count = 1)
+ {
+ var objArg = new object();
+ for (int i = 0; i < count; ++i)
+ {
+ s_expectedCallSequence.Append("gvr ");
+ obj.GenericVirtualMethod(objArg);
+ }
+ }
+
+ /// The virtual call in this method may be devirtualized because <see cref="DerivedForDevirtualization"/> is sealed
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private static void CallVirtualMethodForDevirtualization(DerivedForDevirtualization obj, int count = 1)
+ {
+ for (int i = 0; i < count; ++i)
+ {
+ s_expectedCallSequence.Append("vd ");
+ obj.VirtualMethodForDevirtualization();
+ }
+ }
+
+ /// The interface call site in this method is monomorphic on <see cref="Base"/> and is used to cover dispatch stub
+ /// backpatching
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private static void CallInterfaceVirtualMethodMonomorphicOnBase(IBase obj, int count = 1)
+ {
+ for (int i = 0; i < count; ++i)
+ {
+ s_expectedCallSequence.Append("iv ");
+ obj.InterfaceVirtualMethod();
+ }
+ }
+
+ /// The interface call site in this method is monomorphic on <see cref="Derived"/> and is used to cover dispatch stub
+ /// backpatching
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private static void CallInterfaceVirtualMethodMonomorphicOnDerived(IBase obj, int count = 1)
+ {
+ for (int i = 0; i < count; ++i)
+ {
+ s_expectedCallSequence.Append("iv ");
+ obj.InterfaceVirtualMethod();
+ }
+ }
+
+ // The call site in this method is polymorphic and is used to cover resolve cache entry backpatching
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private static void CallInterfaceVirtualMethodPolymorhpic(IBase obj, int count = 1)
+ {
+ for (int i = 0; i < count; ++i)
+ {
+ s_expectedCallSequence.Append("iv ");
+ obj.InterfaceVirtualMethod();
+ }
+ }
+
+ private static ulong s_collectibleIndex = 0;
+
+ private static Base CreateCollectibleDerived()
+ {
+ ulong collectibleIndex = s_collectibleIndex++;
+
+ var ab =
+ AssemblyBuilder.DefineDynamicAssembly(
+ new AssemblyName($"CollectibleDerivedAssembly{collectibleIndex}"),
+ AssemblyBuilderAccess.RunAndCollect);
+ var mob = ab.DefineDynamicModule($"CollectibleDerivedModule{collectibleIndex}");
+ var tb =
+ mob.DefineType(
+ $"CollectibleDerived{collectibleIndex}",
+ TypeAttributes.Class | TypeAttributes.Public,
+ typeof(Base));
+
+ /// Add a virtual method to prevent this type from sharing the vtable chunk from the base, similarly to what is done in
+ /// <see cref="Derived"/>
+ {
+ var mb =
+ tb.DefineMethod(
+ "VirtualMethod2",
+ MethodAttributes.Public | MethodAttributes.Virtual | MethodAttributes.NewSlot);
+ var ilg = mb.GetILGenerator();
+ ilg.Emit(OpCodes.Ret);
+ }
+
+ return (Base)Activator.CreateInstance(tb.CreateTypeInfo());
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private static void PromoteToTier1(params Action[] actions)
+ {
+ // Call the methods once to register a call each for call counting
+ foreach (Action action in actions)
+ {
+ action();
+ }
+
+ // Allow time for call counting to begin
+ Thread.Sleep(500);
+
+ // Call the methods enough times to trigger tier 1 promotion
+ for (int i = 0; i < 100; ++i)
+ {
+ foreach (Action action in actions)
+ {
+ action();
+ }
+ }
+
+ // Allow time for the methods to be jitted at tier 1
+ Thread.Sleep(Math.Max(500, 100 * actions.Length));
+ }
+}
diff --git a/tests/src/baseservices/TieredCompilation/TieredVtableMethodTests.csproj b/tests/src/baseservices/TieredCompilation/TieredVtableMethodTests.csproj
new file mode 100644
index 0000000000..5c64a51cd6
--- /dev/null
+++ b/tests/src/baseservices/TieredCompilation/TieredVtableMethodTests.csproj
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="12.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <Import Project="$([MSBuild]::GetDirectoryNameOfFileAbove($(MSBuildThisFileDirectory), dir.props))\dir.props" />
+ <PropertyGroup>
+ <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
+ <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
+ <ProjectGuid>{DF8B8A76-BC07-4A5F-BD74-1B5D79B94E92}</ProjectGuid>
+ <OutputType>Exe</OutputType>
+ <LangVersion>latest</LangVersion>
+ <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
+ <CLRTestPriority>0</CLRTestPriority>
+ </PropertyGroup>
+ <!-- Default configurations to help VS understand the configurations -->
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)' == 'Debug|x64'">
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)' == 'Release|x64'">
+ </PropertyGroup>
+ <ItemGroup>
+ <Compile Include="TieredVtableMethodTests.cs" />
+ </ItemGroup>
+ <Import Project="$([MSBuild]::GetDirectoryNameOfFileAbove($(MSBuildThisFileDirectory), dir.targets))\dir.targets" />
+</Project>