author     JUNG DONG-HEON <dheon.jung@samsung.com>    2020-06-08 10:20:14 +0900
committer  이형주/Common Platform Lab(SR)/Staff Engineer/Samsung Electronics <leee.lee@samsung.com>    2020-06-18 07:38:46 +0900
commit     7d6fa13ce85654174b882c9e934c000dfd2222fe (patch)
tree       46c473fdedd5747c2ad281170c0416407b74a503
parent     488be5d790020489f7f4dd7d43680f43b101dbd4 (diff)
download   coreclr-7d6fa13ce85654174b882c9e934c000dfd2222fe.tar.gz
           coreclr-7d6fa13ce85654174b882c9e934c000dfd2222fe.tar.bz2
           coreclr-7d6fa13ce85654174b882c9e934c000dfd2222fe.zip
Implement instantiating and unboxing through portable stublinker code… (#106)
* Implement instantiating and unboxing through portable stublinker code
  - Handle only the cases with register to register moves
  - Shares abi processing logic with delegate shuffle thunk creation
  - Architecture specific logic is relatively simple
  - Do not permit use of HELPERREG in computed instantiating stubs
  - Fix GetArgLoc such that it works on all architectures and OS combinations

  Add a JIT stress test case for testing all of the various combinations
  - Use the same calling convention test architecture that was used as part of tail call work

  Rename secure delegates to wrapper delegates
  - Secure delegates are no longer a feature of the runtime
  - But the wrapper delegate lives on as a workaround for a weird detail of the ARM32 abi
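As a rough, self-contained sketch of the technique the message describes (field names and mask values mirror the ShuffleEntry convention visible later in this diff, but this is an illustration, not the committed emitter): the register-only case boils down to walking a SENTINEL-terminated entry array and performing one register-to-register move per entry.

    #include <cassert>
    #include <cstdio>

    enum : unsigned short
    {
        SENTINEL = 0xffff, // terminates the shuffle array
        REGMASK  = 0x8000, // offset names a register, not a stack slot
        OFSMASK  = 0x7fff  // register number / stack offset payload
    };

    struct ShuffleEntry
    {
        unsigned short srcofs;
        unsigned short dstofs;
    };

    // "Emit" the shuffle by simulating it over register values; a real
    // stub emitter would produce one reg-to-reg mov per entry instead.
    void ApplyShuffle(const ShuffleEntry* pEntry, unsigned long long regs[])
    {
        for (; pEntry->srcofs != SENTINEL; pEntry++)
        {
            assert(pEntry->srcofs & REGMASK); // register-to-register only:
            assert(pEntry->dstofs & REGMASK); // stack cases are not handled
            regs[pEntry->dstofs & OFSMASK] = regs[pEntry->srcofs & OFSMASK];
        }
    }

    int main()
    {
        // Shift r0->r1 and r1->r2, ordered highest-to-lowest so no value
        // is clobbered, freeing r0 for the hidden instantiation argument.
        ShuffleEntry entries[] = {
            { REGMASK | 1, REGMASK | 2 },
            { REGMASK | 0, REGMASK | 1 },
            { SENTINEL, 0 }
        };
        unsigned long long regs[4] = { 10, 20, 30, 40 };
        ApplyShuffle(entries, regs);
        printf("r1=%llu r2=%llu\n", regs[1], regs[2]); // prints r1=10 r2=20
        return 0;
    }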
-rw-r--r--clr.featuredefines.props6
-rw-r--r--clrdefinitions.cmake9
-rw-r--r--src/System.Private.CoreLib/src/System/MulticastDelegate.cs21
-rw-r--r--src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp14
-rw-r--r--src/ToolBox/superpmi/superpmi-shared/methodcontext.h4
-rw-r--r--src/debug/daccess/dacdbiimpl.cpp2
-rw-r--r--src/debug/inc/dacdbiinterface.h2
-rw-r--r--src/inc/cordebug.idl4
-rw-r--r--src/inc/corinfo.h6
-rw-r--r--src/inc/daccess.h3
-rw-r--r--src/inc/vptr_list.h4
-rw-r--r--src/jit/gentree.h2
-rw-r--r--src/jit/importer.cpp4
-rw-r--r--src/jit/morph.cpp10
-rw-r--r--src/vm/CMakeLists.txt1
-rw-r--r--src/vm/amd64/InstantiatingStub.asm149
-rw-r--r--src/vm/amd64/cgenamd64.cpp3
-rw-r--r--src/vm/amd64/cgencpu.h5
-rw-r--r--src/vm/appdomain.cpp22
-rw-r--r--src/vm/arm/cgencpu.h13
-rw-r--r--src/vm/arm/stubs.cpp925
-rw-r--r--src/vm/arm64/cgencpu.h10
-rw-r--r--src/vm/arm64/stubs.cpp67
-rw-r--r--src/vm/callingconvention.h56
-rw-r--r--src/vm/class.cpp2
-rw-r--r--src/vm/class.h2
-rw-r--r--src/vm/comdelegate.cpp675
-rw-r--r--src/vm/comdelegate.h28
-rw-r--r--src/vm/crossgencompile.cpp2
-rw-r--r--src/vm/dllimport.h8
-rw-r--r--src/vm/frames.cpp36
-rw-r--r--src/vm/frames.h108
-rw-r--r--src/vm/i386/stublinkerx86.cpp824
-rw-r--r--src/vm/i386/stublinkerx86.h14
-rw-r--r--src/vm/ilstubcache.cpp8
-rw-r--r--src/vm/ilstubresolver.cpp4
-rw-r--r--src/vm/ilstubresolver.h4
-rw-r--r--src/vm/interpreter.cpp6
-rw-r--r--src/vm/jitinterface.cpp12
-rw-r--r--src/vm/method.hpp8
-rw-r--r--src/vm/prestub.cpp81
-rw-r--r--src/vm/virtualcallstub.h2
-rw-r--r--src/zap/zapinfo.cpp2
43 files changed, 761 insertions, 2407 deletions
diff --git a/clr.featuredefines.props b/clr.featuredefines.props
index 929768fdbd..ac15abd310 100644
--- a/clr.featuredefines.props
+++ b/clr.featuredefines.props
@@ -19,6 +19,8 @@
<FeatureArrayStubAsIL>true</FeatureArrayStubAsIL>
<FeatureMulticastStubAsIL>true</FeatureMulticastStubAsIL>
+ <FeaturePortableShuffleThunks Condition="'$(Platform)' != 'x86'">true</FeaturePortableShuffleThunks>
+ <FeatureInstantiatingStubAsIL>true</FeatureInstantiatingStubAsIL>
<FeatureStubsAsIL>true</FeatureStubsAsIL>
</PropertyGroup>
@@ -34,6 +36,8 @@
<FeatureAppX>true</FeatureAppX>
<FeatureWin32Registry>true</FeatureWin32Registry>
<FeatureProfAttach>true</FeatureProfAttach>
+ <FeaturePortableShuffleThunks Condition="'$(Platform)' != 'x86'">true</FeaturePortableShuffleThunks>
+ <FeatureInstantiatingStubAsIL Condition="'$(Platform)' != 'x86'">true</FeatureInstantiatingStubAsIL>
</PropertyGroup>
<!-- Features we're currently flighting, but don't intend to ship in officially supported releases -->
@@ -46,6 +50,7 @@
<DefineConstants Condition="'$(FeatureAppX)' == 'true'">$(DefineConstants);FEATURE_APPX</DefineConstants>
<DefineConstants Condition="'$(FeatureArrayStubAsIL)' == 'true'">$(DefineConstants);FEATURE_ARRAYSTUB_AS_IL</DefineConstants>
<DefineConstants Condition="'$(FeatureMulticastStubAsIL)' == 'true'">$(DefineConstants);FEATURE_MULTICASTSTUB_AS_IL</DefineConstants>
+ <DefineConstants Condition="'$(FeatureInstantiatingStubAsIL)' == 'true'">$(DefineConstants);FEATURE_INSTANTIATINGSTUB_AS_IL</DefineConstants>
<DefineConstants Condition="'$(FeatureStubsAsIL)' == 'true'">$(DefineConstants);FEATURE_STUBS_AS_IL</DefineConstants>
<DefineConstants Condition="'$(FeatureClassicCominterop)' == 'true'">$(DefineConstants);FEATURE_CLASSIC_COMINTEROP</DefineConstants>
<DefineConstants Condition="'$(FeatureCollectibleALC)' == 'true'">$(DefineConstants);FEATURE_COLLECTIBLE_ALC</DefineConstants>
@@ -62,6 +67,7 @@
<DefineConstants Condition="'$(FeatureDefaultInterfaces)' == 'true'">$(DefineConstants);FEATURE_DEFAULT_INTERFACES</DefineConstants>
<DefineConstants Condition="'$(FeatureTypeEquivalence)' == 'true'">$(DefineConstants);FEATURE_TYPEEQUIVALENCE</DefineConstants>
<DefineConstants Condition="'$(FeatureBasicFreeze)' == 'true'">$(DefineConstants);FEATURE_BASICFREEZE</DefineConstants>
+ <DefineConstants Condition="'$(FeaturePortableShuffleThunks)' == 'true'">$(DefineConstants);FEATURE_PORTABLE_SHUFFLE_THUNKS</DefineConstants>
<DefineConstants Condition="'$(FeatureUtf8String)' == 'true'">$(DefineConstants);FEATURE_UTF8STRING</DefineConstants>
<DefineConstants Condition="'$(ProfilingSupportedBuild)' == 'true'">$(DefineConstants);PROFILING_SUPPORTED</DefineConstants>
diff --git a/clrdefinitions.cmake b/clrdefinitions.cmake
index a976e7713b..4d366e1d7e 100644
--- a/clrdefinitions.cmake
+++ b/clrdefinitions.cmake
@@ -103,6 +103,15 @@ else(WIN32)
add_definitions(-DFEATURE_ARRAYSTUB_AS_IL)
add_definitions(-DFEATURE_MULTICASTSTUB_AS_IL)
endif(WIN32)
+
+if(NOT CLR_CMAKE_TARGET_ARCH_I386)
+ add_definitions(-DFEATURE_PORTABLE_SHUFFLE_THUNKS)
+endif()
+
+if(CLR_CMAKE_PLATFORM_UNIX OR NOT CLR_CMAKE_TARGET_ARCH_I386)
+ add_definitions(-DFEATURE_INSTANTIATINGSTUB_AS_IL)
+endif()
+
add_definitions(-DFEATURE_CODE_VERSIONING)
add_definitions(-DFEATURE_COLLECTIBLE_TYPES)
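For readers unfamiliar with how these build-level defines are consumed, a minimal sketch of the gating pattern follows; both emitter functions are hypothetical stand-ins, not the runtime's actual entry points.

    #include <cstdio>

    // Hypothetical illustration: a define such as
    // FEATURE_PORTABLE_SHUFFLE_THUNKS selects the portable StubLinker
    // path over a legacy hand-written one at compile time.
    #define FEATURE_PORTABLE_SHUFFLE_THUNKS 1 // the build sets this on non-x86

    static void EmitPortableStub() { puts("portable StubLinker path"); }
    static void EmitLegacyStub()   { puts("hand-written per-arch path"); }

    int main()
    {
    #ifdef FEATURE_PORTABLE_SHUFFLE_THUNKS
        EmitPortableStub();
    #else
        EmitLegacyStub();
    #endif
        return 0;
    }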
diff --git a/src/System.Private.CoreLib/src/System/MulticastDelegate.cs b/src/System.Private.CoreLib/src/System/MulticastDelegate.cs
index 67b3ef443b..368a462798 100644
--- a/src/System.Private.CoreLib/src/System/MulticastDelegate.cs
+++ b/src/System.Private.CoreLib/src/System/MulticastDelegate.cs
@@ -17,10 +17,9 @@ namespace System
[ComVisible(true)]
public abstract class MulticastDelegate : Delegate
{
- // This is set under 3 circumstances
+ // This is set under 2 circumstances
// 1. Multicast delegate
- // 2. Secure/Wrapper delegate
- // 3. Inner delegate of secure delegate where the secure delegate security context is a collectible method
+ // 2. Wrapper delegate
private object? _invocationList; // Initialized by VM as needed
private IntPtr _invocationCount;
@@ -74,7 +73,7 @@ namespace System
{
// there are 4 kind of delegate kinds that fall into this bucket
// 1- Multicast (_invocationList is Object[])
- // 2- Secure/Wrapper (_invocationList is Delegate)
+ // 2- Wrapper (_invocationList is Delegate)
// 3- Unmanaged FntPtr (_invocationList == null)
// 4- Open virtual (_invocationCount == MethodDesc of target, _invocationList == null, LoaderAllocator, or DynamicResolver)
@@ -90,7 +89,7 @@ namespace System
// now we know 'this' is not a special one, so we can work out what the other is
if ((d._invocationList as Delegate) != null)
- // this is a secure/wrapper delegate so we need to unwrap and check the inner one
+ // this is a wrapper delegate so we need to unwrap and check the inner one
return Equals(d._invocationList);
return base.Equals(obj);
@@ -99,7 +98,7 @@ namespace System
{
if (_invocationList is Delegate invocationListDelegate)
{
- // this is a secure/wrapper delegate so we need to unwrap and check the inner one
+ // this is a wrapper delegate so we need to unwrap and check the inner one
return invocationListDelegate.Equals(obj);
}
else
@@ -124,7 +123,7 @@ namespace System
// now we know 'this' is not a special one, so we can work out what the other is
if ((d._invocationList as Delegate) != null)
- // this is a secure/wrapper delegate so we need to unwrap and check the inner one
+ // this is a wrapper delegate so we need to unwrap and check the inner one
return Equals(d._invocationList);
// now we can call on the base
@@ -472,7 +471,7 @@ namespace System
{
if (_invocationList is Delegate t)
{
- // this is a secure/wrapper delegate so we need to unwrap and check the inner one
+ // this is a wrapper delegate so we need to unwrap and check the inner one
return t.GetHashCode();
}
}
@@ -499,7 +498,7 @@ namespace System
{
// _invocationCount != 0 we are in one of these cases:
// - Multicast -> return the target of the last delegate in the list
- // - Secure/wrapper delegate -> return the target of the inner delegate
+ // - wrapper delegate -> return the target of the inner delegate
// - unmanaged function pointer - return null
// - virtual open delegate - return null
if (InvocationListLogicallyNull())
@@ -537,7 +536,7 @@ namespace System
if (_invocationList is MulticastDelegate innerDelegate)
{
- // must be a secure/wrapper delegate
+ // must be a wrapper delegate
return innerDelegate.GetMethodImpl();
}
}
@@ -562,7 +561,7 @@ namespace System
return (MethodInfo)_methodBase;
}
- // Otherwise, must be an inner delegate of a SecureDelegate of an open virtual method. In that case, call base implementation
+ // Otherwise, must be an inner delegate of a wrapper delegate of an open virtual method. In that case, call base implementation
return base.GetMethodImpl();
}
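To make the unwrap pattern in these comments concrete, here is a toy C++ model (invented types, not the BCL's): a wrapper delegate stores its inner delegate where the invocation list normally lives, so observers unwrap one level before comparing.

    #include <cstdio>

    struct Delegate
    {
        const Delegate* invocationList; // inner delegate for wrappers, else null
        const void*     target;

        bool Equals(const Delegate& other) const
        {
            if (invocationList != nullptr)
                return invocationList->Equals(other); // unwrap the wrapper
            return target == other.target;
        }
    };

    int main()
    {
        int dummy = 0;
        Delegate inner   = { nullptr, &dummy };  // delegate over some target
        Delegate wrapper = { &inner,  nullptr }; // wrapper around 'inner'
        Delegate same    = { nullptr, &dummy };
        printf("wrapper equals same: %d\n", wrapper.Equals(same)); // prints 1
        return 0;
    }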
diff --git a/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp b/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
index 00b9d1a2ca..d6e4c14891 100644
--- a/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
+++ b/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
@@ -1363,7 +1363,7 @@ void MethodContext::recGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
value.instParamLookup.accessType = (DWORD)pResult->instParamLookup.accessType;
value.instParamLookup.handle = (DWORDLONG)pResult->instParamLookup.handle;
- value.secureDelegateInvoke = (DWORD)pResult->secureDelegateInvoke;
+ value.wrapperDelegateInvoke = (DWORD)pResult->wrapperDelegateInvoke;
}
else
ZeroMemory(&value, sizeof(Agnostic_CORINFO_CALL_INFO));
@@ -1388,7 +1388,7 @@ void MethodContext::dmpGetCallInfo(const Agnostic_GetCallInfo& key, const Agnost
value.hMethod, value.methodFlags, value.classFlags,
SpmiDumpHelper::DumpAgnostic_CORINFO_SIG_INFO(value.sig).c_str(),
SpmiDumpHelper::DumpAgnostic_CORINFO_SIG_INFO(value.verSig).c_str(), value.instParamLookup.accessType,
- value.instParamLookup.handle, value.secureDelegateInvoke, value.exceptionCode,
+ value.instParamLookup.handle, value.wrapperDelegateInvoke, value.exceptionCode,
SpmiDumpHelper::DumpAgnostic_CORINFO_LOOKUP(value.stubLookup).c_str());
}
void MethodContext::repGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
@@ -1471,7 +1471,7 @@ void MethodContext::repGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
}
pResult->instParamLookup.accessType = (InfoAccessType)value.instParamLookup.accessType;
pResult->instParamLookup.handle = (CORINFO_GENERIC_HANDLE)value.instParamLookup.handle;
- pResult->secureDelegateInvoke = (BOOL)value.secureDelegateInvoke;
+ pResult->wrapperDelegateInvoke = (BOOL)value.wrapperDelegateInvoke;
*exceptionCode = (DWORD)value.exceptionCode;
DEBUG_REP(dmpGetCallInfo(key, value));
@@ -3888,7 +3888,7 @@ void MethodContext::recGetEEInfo(CORINFO_EE_INFO* pEEInfoOut)
value.offsetOfGCState = (DWORD)pEEInfoOut->offsetOfGCState;
value.offsetOfDelegateInstance = (DWORD)pEEInfoOut->offsetOfDelegateInstance;
value.offsetOfDelegateFirstTarget = (DWORD)pEEInfoOut->offsetOfDelegateFirstTarget;
- value.offsetOfSecureDelegateIndirectCell = (DWORD)pEEInfoOut->offsetOfSecureDelegateIndirectCell;
+ value.offsetOfWrapperDelegateIndirectCell = (DWORD)pEEInfoOut->offsetOfWrapperDelegateIndirectCell;
value.offsetOfTransparentProxyRP = (DWORD)pEEInfoOut->offsetOfTransparentProxyRP;
value.offsetOfRealProxyServer = (DWORD)pEEInfoOut->offsetOfRealProxyServer;
value.offsetOfObjArrayData = (DWORD)pEEInfoOut->offsetOfObjArrayData;
@@ -3914,7 +3914,7 @@ void MethodContext::dmpGetEEInfo(DWORD key, const Agnostic_CORINFO_EE_INFO& valu
value.inlinedCallFrameInfo.offsetOfCallSiteSP, value.inlinedCallFrameInfo.offsetOfCalleeSavedFP,
value.inlinedCallFrameInfo.offsetOfCallTarget, value.inlinedCallFrameInfo.offsetOfReturnAddress,
value.offsetOfThreadFrame, value.offsetOfGCState, value.offsetOfDelegateInstance,
- value.offsetOfDelegateFirstTarget, value.offsetOfSecureDelegateIndirectCell,
+ value.offsetOfDelegateFirstTarget, value.offsetOfWrapperDelegateIndirectCell,
value.offsetOfTransparentProxyRP, value.offsetOfRealProxyServer, value.offsetOfObjArrayData,
value.sizeOfReversePInvokeFrame, value.osPageSize, value.maxUncheckedOffsetForNullObject, value.targetAbi,
value.osType, value.osMajor, value.osMinor, value.osBuild);
@@ -3943,7 +3943,7 @@ void MethodContext::repGetEEInfo(CORINFO_EE_INFO* pEEInfoOut)
pEEInfoOut->offsetOfGCState = (unsigned)value.offsetOfGCState;
pEEInfoOut->offsetOfDelegateInstance = (unsigned)value.offsetOfDelegateInstance;
pEEInfoOut->offsetOfDelegateFirstTarget = (unsigned)value.offsetOfDelegateFirstTarget;
- pEEInfoOut->offsetOfSecureDelegateIndirectCell = (unsigned)value.offsetOfSecureDelegateIndirectCell;
+ pEEInfoOut->offsetOfWrapperDelegateIndirectCell = (unsigned)value.offsetOfWrapperDelegateIndirectCell;
pEEInfoOut->offsetOfTransparentProxyRP = (unsigned)value.offsetOfTransparentProxyRP;
pEEInfoOut->offsetOfRealProxyServer = (unsigned)value.offsetOfRealProxyServer;
pEEInfoOut->offsetOfObjArrayData = (unsigned)value.offsetOfObjArrayData;
@@ -3971,7 +3971,7 @@ void MethodContext::repGetEEInfo(CORINFO_EE_INFO* pEEInfoOut)
pEEInfoOut->offsetOfGCState = (unsigned)0xc;
pEEInfoOut->offsetOfDelegateInstance = (unsigned)0x8;
pEEInfoOut->offsetOfDelegateFirstTarget = (unsigned)0x18;
- pEEInfoOut->offsetOfSecureDelegateIndirectCell = (unsigned)0x40;
+ pEEInfoOut->offsetOfWrapperDelegateIndirectCell = (unsigned)0x40;
pEEInfoOut->offsetOfTransparentProxyRP = (unsigned)0x8;
pEEInfoOut->offsetOfRealProxyServer = (unsigned)0x18;
pEEInfoOut->offsetOfObjArrayData = (unsigned)0x18;
diff --git a/src/ToolBox/superpmi/superpmi-shared/methodcontext.h b/src/ToolBox/superpmi/superpmi-shared/methodcontext.h
index c28a3f7233..05c3234ec0 100644
--- a/src/ToolBox/superpmi/superpmi-shared/methodcontext.h
+++ b/src/ToolBox/superpmi/superpmi-shared/methodcontext.h
@@ -169,7 +169,7 @@ public:
DWORD offsetOfGCState;
DWORD offsetOfDelegateInstance;
DWORD offsetOfDelegateFirstTarget;
- DWORD offsetOfSecureDelegateIndirectCell;
+ DWORD offsetOfWrapperDelegateIndirectCell;
DWORD offsetOfTransparentProxyRP;
DWORD offsetOfRealProxyServer;
DWORD offsetOfObjArrayData;
@@ -306,7 +306,7 @@ public:
DWORD exactContextNeedsRuntimeLookup;
Agnostic_CORINFO_LOOKUP stubLookup; // first view of union. others are matching or subordinate
Agnostic_CORINFO_CONST_LOOKUP instParamLookup;
- DWORD secureDelegateInvoke;
+ DWORD wrapperDelegateInvoke;
DWORD exceptionCode;
};
struct Agnostic_GetMethodInfo
diff --git a/src/debug/daccess/dacdbiimpl.cpp b/src/debug/daccess/dacdbiimpl.cpp
index fb930c16fa..5341dc752a 100644
--- a/src/debug/daccess/dacdbiimpl.cpp
+++ b/src/debug/daccess/dacdbiimpl.cpp
@@ -3510,7 +3510,7 @@ HRESULT DacDbiInterfaceImpl::GetDelegateType(VMPTR_Object delegateObject, Delega
*delegateType = kTrueMulticastDelegate;
if (invocationListMT->IsDelegate())
- *delegateType = kSecureDelegate;
+ *delegateType = kWrapperDelegate;
// Cases missing: Loader allocator, or dynamic resolver.
return S_OK;
diff --git a/src/debug/inc/dacdbiinterface.h b/src/debug/inc/dacdbiinterface.h
index 77cc74d678..db70c9b4b5 100644
--- a/src/debug/inc/dacdbiinterface.h
+++ b/src/debug/inc/dacdbiinterface.h
@@ -2718,7 +2718,7 @@ public:
kOpenInstanceVSD,
kClosedStaticWithScpecialSig,
kTrueMulticastDelegate,
- kSecureDelegate,
+ kWrapperDelegate,
kUnmanagedFunctionDelegate,
kUnknownDelegateType
} DelegateType;
diff --git a/src/inc/cordebug.idl b/src/inc/cordebug.idl
index c0dcb7670b..1e27c197f3 100644
--- a/src/inc/cordebug.idl
+++ b/src/inc/cordebug.idl
@@ -6507,7 +6507,7 @@ interface ICorDebugDelegateObjectValue : IUnknown
* - NULL if the delegate function is a static function or an open delegate
* - HRESULT CORDBG_E_UNSUPPORTED_DELEGATE for curently unsupported delegates.
* In this case, the value of ppObject should not be used. Some of these
- * include: Secure wrappers, Open Virual delegates.
+ * include: Wrapper delegates, Open Virtual delegates.
*/
HRESULT GetTarget([out] ICorDebugReferenceValue **ppObject);
@@ -6519,7 +6519,7 @@ interface ICorDebugDelegateObjectValue : IUnknown
* such as generic methods, which won't contain the instantiation.
* - HRESULT CORDBG_E_UNSUPPORTED_DELEGATE for curently unsupported delegates.
* In this case, the value of ppObject should not be used. Some of these
- * include: Secure wrappers, Open Virual delegates.
+ * include: Wrapper delegates, Open Virtual delegates.
*/
HRESULT GetFunction([out] ICorDebugFunction **ppFunction);
}
diff --git a/src/inc/corinfo.h b/src/inc/corinfo.h
index 9cc849043e..c3af9f8b78 100644
--- a/src/inc/corinfo.h
+++ b/src/inc/corinfo.h
@@ -1699,7 +1699,7 @@ struct CORINFO_CALL_INFO
CORINFO_CONST_LOOKUP instParamLookup; // Used by Ready-to-Run
- BOOL secureDelegateInvoke;
+ BOOL wrapperDelegateInvoke;
};
//----------------------------------------------------------------------------
@@ -1826,8 +1826,8 @@ struct CORINFO_EE_INFO
unsigned offsetOfDelegateInstance;
unsigned offsetOfDelegateFirstTarget;
- // Secure delegate offsets
- unsigned offsetOfSecureDelegateIndirectCell;
+ // Wrapper delegate offsets
+ unsigned offsetOfWrapperDelegateIndirectCell;
// Remoting offsets
unsigned offsetOfTransparentProxyRP;
diff --git a/src/inc/daccess.h b/src/inc/daccess.h
index e4deab9207..802df17d0b 100644
--- a/src/inc/daccess.h
+++ b/src/inc/daccess.h
@@ -2165,8 +2165,7 @@ public: name(int dummy) : base(dummy) {}
#define VPTR_UNIQUE_BaseDomain (100000)
#define VPTR_UNIQUE_SystemDomain (VPTR_UNIQUE_BaseDomain + 1)
#define VPTR_UNIQUE_ComMethodFrame (VPTR_UNIQUE_SystemDomain + 1)
-#define VPTR_UNIQUE_StubHelperFrame (VPTR_UNIQUE_ComMethodFrame + 1)
-#define VPTR_UNIQUE_RedirectedThreadFrame (VPTR_UNIQUE_StubHelperFrame + 1)
+#define VPTR_UNIQUE_RedirectedThreadFrame (VPTR_UNIQUE_ComMethodFrame + 1)
#define VPTR_UNIQUE_HijackFrame (VPTR_UNIQUE_RedirectedThreadFrame + 1)
#define PTR_TO_TADDR(ptr) ((TADDR)(ptr))
diff --git a/src/inc/vptr_list.h b/src/inc/vptr_list.h
index 566cfa6e72..9d1128e208 100644
--- a/src/inc/vptr_list.h
+++ b/src/inc/vptr_list.h
@@ -77,7 +77,6 @@ VPTR_CLASS(HelperMethodFrame_PROTECTOBJ)
VPTR_CLASS(HijackFrame)
#endif
VPTR_CLASS(InlinedCallFrame)
-VPTR_CLASS(SecureDelegateFrame)
VPTR_CLASS(MulticastFrame)
VPTR_CLASS(PInvokeCalliFrame)
VPTR_CLASS(PrestubMethodFrame)
@@ -92,9 +91,6 @@ VPTR_CLASS(ExternalMethodFrame)
#ifdef FEATURE_READYTORUN
VPTR_CLASS(DynamicHelperFrame)
#endif
-#if !defined(_TARGET_X86_)
-VPTR_CLASS(StubHelperFrame)
-#endif
#if defined(_TARGET_X86_)
VPTR_CLASS(UMThkCallFrame)
#endif
diff --git a/src/jit/gentree.h b/src/jit/gentree.h
index c52fc9694e..b638e68d53 100644
--- a/src/jit/gentree.h
+++ b/src/jit/gentree.h
@@ -3444,7 +3444,7 @@ struct GenTreeCall final : public GenTree
#define GTF_CALL_M_R2R_REL_INDIRECT 0x00002000 // GT_CALL -- ready to run call is indirected through a relative address
#define GTF_CALL_M_DOES_NOT_RETURN 0x00004000 // GT_CALL -- call does not return
-#define GTF_CALL_M_SECURE_DELEGATE_INV 0x00008000 // GT_CALL -- call is in secure delegate
+#define GTF_CALL_M_WRAPPER_DELEGATE_INV 0x00008000 // GT_CALL -- call is in wrapper delegate
#define GTF_CALL_M_FAT_POINTER_CHECK 0x00010000 // GT_CALL -- CoreRT managed calli needs transformation, that checks
// special bit in calli address. If it is set, then it is necessary
// to restore real function address and load hidden argument
diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp
index 50ddc58d89..98aa46d237 100644
--- a/src/jit/importer.cpp
+++ b/src/jit/importer.cpp
@@ -7842,9 +7842,9 @@ var_types Compiler::impImportCall(OPCODE opcode,
/* Set the delegate flag */
call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
- if (callInfo->secureDelegateInvoke)
+ if (callInfo->wrapperDelegateInvoke)
{
- call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
+ call->gtCall.gtCallMoreFlags |= GTF_CALL_M_WRAPPER_DELEGATE_INV;
}
if (opcode == CEE_CALLVIRT)
diff --git a/src/jit/morph.cpp b/src/jit/morph.cpp
index afdb3c9512..becb0bb215 100644
--- a/src/jit/morph.cpp
+++ b/src/jit/morph.cpp
@@ -2782,13 +2782,13 @@ void Compiler::fgInitArgInfo(GenTreeCall* call)
}
#endif // defined(_TARGET_X86_) || defined(_TARGET_ARM_)
#if defined(_TARGET_ARM_)
- // A non-standard calling convention using secure delegate invoke is used on ARM, only, but not for secure
+ // A non-standard calling convention using wrapper delegate invoke is used on ARM only, for wrapper
// delegates. It is used for VSD delegate calls where the VSD custom calling convention ABI requires passing
// R4, a callee-saved register, with a special value. Since R4 is a callee-saved register, its value needs
- // to be preserved. Thus, the VM uses a secure delegate IL stub, which preserves R4 and also sets up R4
- // correctly for the VSD call. The VM is simply reusing an existing mechanism (secure delegate IL stub)
+ // to be preserved. Thus, the VM uses a wrapper delegate IL stub, which preserves R4 and also sets up R4
+ // correctly for the VSD call. The VM is simply reusing an existing mechanism (wrapper delegate IL stub)
// to achieve its goal for delegate VSD call. See COMDelegate::NeedsWrapperDelegate() in the VM for details.
- else if (call->gtCallMoreFlags & GTF_CALL_M_SECURE_DELEGATE_INV)
+ else if (call->gtCallMoreFlags & GTF_CALL_M_WRAPPER_DELEGATE_INV)
{
GenTree* arg = call->gtCallObjp;
if (arg->OperIsLocal())
@@ -2805,7 +2805,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call)
noway_assert(arg != nullptr);
GenTree* newArg = new (this, GT_ADDR)
- GenTreeAddrMode(TYP_BYREF, arg, nullptr, 0, eeGetEEInfo()->offsetOfSecureDelegateIndirectCell);
+ GenTreeAddrMode(TYP_BYREF, arg, nullptr, 0, eeGetEEInfo()->offsetOfWrapperDelegateIndirectCell);
// Append newArg as the last arg
GenTreeArgList** insertionPoint = &call->gtCallArgs;
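A minimal sketch of what that appended argument is, under an invented object layout (in the real code the offset comes from eeGetEEInfo()->offsetOfWrapperDelegateIndirectCell):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Toy model of the extra argument appended above: the address of the
    // wrapper delegate's indirect cell, i.e. the delegate 'this' pointer
    // plus a constant offset. The layout below is invented for illustration.
    struct FakeWrapperDelegate
    {
        char      otherFields[0x40]; // fields preceding the cell (assumed)
        uintptr_t indirectCell;      // cell the ARM VSD stub reads via R4
    };

    uintptr_t* ComputeExtraArg(FakeWrapperDelegate* pThis, size_t offsetOfCell)
    {
        // Equivalent of the GenTreeAddrMode(TYP_BYREF, this, 0, offset)
        // node built in fgInitArgInfo: base pointer + constant offset.
        return (uintptr_t*)((char*)pThis + offsetOfCell);
    }

    int main()
    {
        FakeWrapperDelegate d = {};
        d.indirectCell = 0x1234;
        uintptr_t* cell =
            ComputeExtraArg(&d, offsetof(FakeWrapperDelegate, indirectCell));
        printf("cell contents: 0x%llx\n", (unsigned long long)*cell); // 0x1234
        return 0;
    }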
diff --git a/src/vm/CMakeLists.txt b/src/vm/CMakeLists.txt
index 7d887c1c03..e5cf6839c5 100644
--- a/src/vm/CMakeLists.txt
+++ b/src/vm/CMakeLists.txt
@@ -658,7 +658,6 @@ if(CLR_CMAKE_TARGET_ARCH_AMD64)
${ARCH_SOURCES_DIR}/GenericComCallStubs.asm
${ARCH_SOURCES_DIR}/GenericComPlusCallStubs.asm
${ARCH_SOURCES_DIR}/getstate.asm
- ${ARCH_SOURCES_DIR}/InstantiatingStub.asm
${ARCH_SOURCES_DIR}/JitHelpers_Fast.asm
${ARCH_SOURCES_DIR}/JitHelpers_FastWriteBarriers.asm
${ARCH_SOURCES_DIR}/JitHelpers_InlineGetThread.asm
diff --git a/src/vm/amd64/InstantiatingStub.asm b/src/vm/amd64/InstantiatingStub.asm
deleted file mode 100644
index 8601e4ae44..0000000000
--- a/src/vm/amd64/InstantiatingStub.asm
+++ /dev/null
@@ -1,149 +0,0 @@
-; Licensed to the .NET Foundation under one or more agreements.
-; The .NET Foundation licenses this file to you under the MIT license.
-; See the LICENSE file in the project root for more information.
-
-; ==++==
-;
-
-;
-; ==--==
-
-include <AsmMacros.inc>
-include AsmConstants.inc
-
-extern s_pStubHelperFrameVPtr:qword
-extern JIT_FailFast:proc
-extern s_gsCookie:qword
-
-
-OFFSETOF_SECRET_PARAMS equ 0h
-OFFSETOF_GSCOOKIE equ OFFSETOF_SECRET_PARAMS + \
- 18h + 8h ; +8 for stack alignment padding
-OFFSETOF_FRAME equ OFFSETOF_GSCOOKIE + \
- 8h
-OFFSETOF_FRAME_REGISTERS equ OFFSETOF_FRAME + \
- SIZEOF__Frame
-SIZEOF_FIXED_FRAME equ OFFSETOF_FRAME_REGISTERS + \
- SIZEOF_CalleeSavedRegisters + 8h ; +8 for return address
-
-.errnz SIZEOF_FIXED_FRAME mod 16, SIZEOF_FIXED_FRAME not aligned
-
-;
-; This method takes three secret parameters on the stack:
-;
-; incoming:
-;
-; rsp -> nStackSlots
-; entrypoint of shared MethodDesc
-; extra stack param
-; <space for StubHelperFrame>
-; return address
-; rcx home
-; rdx home
-; :
-;
-;
-; Stack Layout:
-;
-; rsp-> callee scratch
-; + 8h callee scratch
-; +10h callee scratch
-; +18h callee scratch
-; :
-; stack arguments
-; :
-; rbp-> nStackSlots
-; + 8h entrypoint of shared MethodDesc
-; +10h extra stack param
-; +18h padding
-; +20h gsCookie
-; +28h __VFN_table
-; +30h m_Next
-; +38h m_calleeSavedRegisters
-; +98h m_ReturnAddress
-; +a0h rcx home
-; +a8h rdx home
-; +b0h r8 home
-; +b8h r9 home
-;
-NESTED_ENTRY InstantiatingMethodStubWorker, _TEXT
- .allocstack SIZEOF_FIXED_FRAME - 8h ; -8 for return address
-
- SAVE_CALLEE_SAVED_REGISTERS OFFSETOF_FRAME_REGISTERS
-
- SAVE_ARGUMENT_REGISTERS SIZEOF_FIXED_FRAME
-
- set_frame rbp, 0
- END_PROLOGUE
-
- sub rsp, SIZEOF_MAX_OUTGOING_ARGUMENT_HOMES
-
- ;
- ; fully initialize the StubHelperFrame
- ;
- mov rax, s_pStubHelperFrameVPtr
- mov [rbp + OFFSETOF_FRAME], rax
-
- mov rax, s_gsCookie
- mov [rbp + OFFSETOF_GSCOOKIE], rax
-
- ;
- ; link the StubHelperFrame
- ;
- INLINE_GETTHREAD r12
- mov rdx, [r12 + OFFSETOF__Thread__m_pFrame]
- mov [rbp + OFFSETOF_FRAME + OFFSETOF__Frame__m_Next], rdx
- lea rcx, [rbp + OFFSETOF_FRAME]
- mov [r12 + OFFSETOF__Thread__m_pFrame], rcx
-
- add rsp, SIZEOF_MAX_OUTGOING_ARGUMENT_HOMES
-
- mov rcx, [rbp + OFFSETOF_SECRET_PARAMS + 0h] ; nStackSlots (includes padding for stack alignment)
-
- lea rsi, [rbp + SIZEOF_FIXED_FRAME + SIZEOF_MAX_OUTGOING_ARGUMENT_HOMES + 8 * rcx]
-
-StackCopyLoop: ; copy the arguments to stack top-down to carefully probe for sufficient stack space
- sub rsi, 8
- push qword ptr [rsi]
- dec rcx
- jnz StackCopyLoop
-
- push qword ptr [rbp+OFFSETOF_SECRET_PARAMS + 10h] ; push extra stack arg
- sub rsp, SIZEOF_MAX_OUTGOING_ARGUMENT_HOMES
-
- mov rcx, [rbp + SIZEOF_FIXED_FRAME + 00h]
- mov rdx, [rbp + SIZEOF_FIXED_FRAME + 08h]
- mov r8, [rbp + SIZEOF_FIXED_FRAME + 10h]
- mov r9, [rbp + SIZEOF_FIXED_FRAME + 18h]
-
- call qword ptr [rbp+OFFSETOF_SECRET_PARAMS + 8h] ; call target
-
-ifdef _DEBUG
- mov rcx, s_gsCookie
- cmp [rbp + OFFSETOF_GSCookie], rcx
- je GoodGSCookie
- call JIT_FailFast
-GoodGSCookie:
-endif ; _DEBUG
-
- ;
- ; unlink the StubHelperFrame
- ;
- mov rcx, [rbp + OFFSETOF_FRAME + OFFSETOF__Frame__m_Next]
- mov [r12 + OFFSETOF__Thread__m_pFrame], rcx
-
- ;
- ; epilog
- ;
-
- lea rsp, [rbp + OFFSETOF_FRAME_REGISTERS]
-
- POP_CALLEE_SAVED_REGISTERS
-
- ret
-
-NESTED_END InstantiatingMethodStubWorker, _TEXT
-
-
- end
-
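The StackCopyLoop in this deleted stub copies arguments top-down: each push lowers the stack pointer one slot before writing, so the stack grows in address order and a guard page faults cleanly if space runs out. The same idea in portable form (simulated with plain arrays, not a real stack):

    #include <cstdio>

    // Mirror of 'sub rsi, 8 / push qword ptr [rsi]': walk from the highest
    // address downward; in the stub each write doubles as a stack probe.
    void CopyArgsTopDown(const unsigned long long* srcTop,
                         unsigned long long* dstTop, unsigned nSlots)
    {
        for (unsigned i = 0; i < nSlots; i++)
        {
            --srcTop;
            --dstTop;
            *dstTop = *srcTop;
        }
    }

    int main()
    {
        unsigned long long src[3] = { 1, 2, 3 };
        unsigned long long dst[3] = { 0, 0, 0 };
        CopyArgsTopDown(src + 3, dst + 3, 3);
        printf("%llu %llu %llu\n", dst[0], dst[1], dst[2]); // prints 1 2 3
        return 0;
    }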
diff --git a/src/vm/amd64/cgenamd64.cpp b/src/vm/amd64/cgenamd64.cpp
index d6fcf16d9c..a49ba195a2 100644
--- a/src/vm/amd64/cgenamd64.cpp
+++ b/src/vm/amd64/cgenamd64.cpp
@@ -83,9 +83,6 @@ void TransitionFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
#ifndef DACCESS_COMPILE
-extern "C" TADDR s_pStubHelperFrameVPtr;
-TADDR s_pStubHelperFrameVPtr = StubHelperFrame::GetMethodFrameVPtr();
-
void TailCallFrame::InitFromContext(T_CONTEXT * pContext)
{
WRAPPER_NO_CONTRACT;
diff --git a/src/vm/amd64/cgencpu.h b/src/vm/amd64/cgencpu.h
index 619fd4ba64..61586c3d75 100644
--- a/src/vm/amd64/cgencpu.h
+++ b/src/vm/amd64/cgencpu.h
@@ -35,7 +35,6 @@ class ComCallMethodDesc;
//
// functions implemented in AMD64 assembly
//
-EXTERN_C void InstantiatingMethodStubWorker(void);
EXTERN_C void SinglecastDelegateInvokeStub();
EXTERN_C void FastCallFinalizeWorker(Object *obj, PCODE funcPtr);
@@ -274,7 +273,9 @@ typedef DPTR(struct FloatArgumentRegisters) PTR_FloatArgumentRegisters;
struct FloatArgumentRegisters {
M128A d[NUM_FLOAT_ARGUMENT_REGISTERS]; // xmm0-xmm7
};
-
+#else
+// Windows x64 calling convention uses 4 registers for floating point data
+#define NUM_FLOAT_ARGUMENT_REGISTERS 4
#endif
diff --git a/src/vm/appdomain.cpp b/src/vm/appdomain.cpp
index eb70175f37..f28e45a7d8 100644
--- a/src/vm/appdomain.cpp
+++ b/src/vm/appdomain.cpp
@@ -2567,28 +2567,14 @@ StackWalkAction SystemDomain::CallersMethodCallbackWithStackMark(CrawlFrame* pCf
if (frame && frame->GetFrameType() == Frame::TYPE_MULTICAST)
{
- // This must be either a secure delegate frame or a true multicast delegate invocation.
+ // This must be a multicast delegate invocation.
_ASSERTE(pFunc->GetMethodTable()->IsDelegate());
- DELEGATEREF del = (DELEGATEREF)((SecureDelegateFrame*)frame)->GetThis(); // This can throw.
+ DELEGATEREF del = (DELEGATEREF)((MulticastFrame*)frame)->GetThis(); // This can throw.
- if (COMDelegate::IsSecureDelegate(del))
- {
- if (del->IsWrapperDelegate())
- {
- // On ARM, we use secure delegate infrastructure to preserve R4 register.
- return SWA_CONTINUE;
- }
- // For a secure delegate frame, we should return the delegate creator instead
- // of the delegate method itself.
- pFunc = (MethodDesc*) del->GetMethodPtrAux();
- }
- else
- {
- _ASSERTE(COMDelegate::IsTrueMulticastDelegate(del));
- return SWA_CONTINUE;
- }
+ _ASSERTE(COMDelegate::IsTrueMulticastDelegate(del));
+ return SWA_CONTINUE;
}
// Return the first non-reflection/remoting frame if no stack mark was
diff --git a/src/vm/arm/cgencpu.h b/src/vm/arm/cgencpu.h
index c93780a634..937a696651 100644
--- a/src/vm/arm/cgencpu.h
+++ b/src/vm/arm/cgencpu.h
@@ -162,6 +162,7 @@ struct FloatArgumentRegisters {
double d[8]; // d0-d7
};
};
+#define NUM_FLOAT_ARGUMENT_REGISTERS 16 // Count the single-precision registers (s0-s15), as they are addressable more finely than d0-d7
// forward decl
struct REGDISPLAY;
@@ -945,9 +946,6 @@ public:
}
#endif // FEATURE_INTERPRETER
- void EmitStubLinkFrame(TADDR pFrameVptr, int offsetOfFrame, int offsetOfTransitionBlock);
- void EmitStubUnlinkFrame();
-
void ThumbEmitCondFlagJump(CodeLabel * target,UINT cond);
void ThumbEmitCondRegJump(CodeLabel *target, BOOL nonzero, ThumbReg reg);
@@ -957,15 +955,8 @@ public:
// Scratches r12.
void ThumbEmitCallManagedMethod(MethodDesc *pMD, bool fTailcall);
- void EmitUnboxMethodStub(MethodDesc* pRealMD);
- static UINT_PTR HashMulticastInvoke(MetaSig* pSig);
-
- void EmitMulticastInvoke(UINT_PTR hash);
- void EmitSecureDelegateInvoke(UINT_PTR hash);
void EmitShuffleThunk(struct ShuffleEntry *pShuffleEntryArray);
-#if defined(FEATURE_SHARE_GENERIC_CODE)
- void EmitInstantiatingMethodStub(MethodDesc* pSharedMD, void* extra);
-#endif // FEATURE_SHARE_GENERIC_CODE
+ VOID EmitComputedInstantiatingMethodStub(MethodDesc* pSharedMD, struct ShuffleEntry *pShuffleEntryArray, void* extraArg);
static Stub * CreateTailCallCopyArgsThunk(CORINFO_SIG_INFO * pSig,
MethodDesc* pMD,
diff --git a/src/vm/arm/stubs.cpp b/src/vm/arm/stubs.cpp
index 078a16cb8f..18eb9969c7 100644
--- a/src/vm/arm/stubs.cpp
+++ b/src/vm/arm/stubs.cpp
@@ -1682,526 +1682,75 @@ void StubLinkerCPU::ThumbEmitCallManagedMethod(MethodDesc *pMD, bool fTailcall)
}
}
-// Common code used to generate either an instantiating method stub or an unboxing stub (in the case where the
-// unboxing stub also needs to provide a generic instantiation parameter). The stub needs to add the
-// instantiation parameter provided in pHiddenArg and re-arrange the rest of the incoming arguments as a
-// result (since on ARM this hidden parameter is inserted before explicit user arguments we need a type of
-// shuffle thunk in the reverse direction of the type used for static delegates). If pHiddenArg == NULL it
-// indicates that we're in the unboxing case and should add sizeof(MethodTable*) to the incoming this pointer
-// before dispatching to the target. In this case the instantiating parameter is always the non-shared
-// MethodTable pointer we can deduce directly from the incoming 'this' reference.
-void StubLinkerCPU::ThumbEmitCallWithGenericInstantiationParameter(MethodDesc *pMD, void *pHiddenArg)
+VOID StubLinkerCPU::EmitComputedInstantiatingMethodStub(MethodDesc* pSharedMD, struct ShuffleEntry *pShuffleEntryArray, void* extraArg)
{
- // There is a simple case and a complex case.
- // 1) In the simple case the addition of the hidden arg doesn't push any user args onto the stack. In
- // this case we only have to re-arrange/initialize some argument registers and tail call to the
- // target.
- // 2) In the complex case we have to modify the stack by pushing some of the register based user
- // arguments. We can't tail call in this case because we've altered the size of the stack and our
- // caller doesn't expect this and can't compensate. Instead we'll need to create a stack frame
- // (including an explicit Frame to make it crawlable to the runtime) and copy the incoming arguments
- // over.
- //
- // First we need to analyze the signature of the target method both with and without the extra
- // instantiation argument. We use ArgIterator to determine the difference in location
- // (register or stack offset) for each argument between the two cases. This forms a set instructions that
- // tell us how to copy incoming arguments into outgoing arguments (and if those instructions don't include
- // any writes to stack locations in the outgoing case then we know we can generate a simple thunk).
-
- SigTypeContext sTypeContext(pMD, TypeHandle());
-
- // Incoming, source, method signature.
- MetaSig sSrcSig(pMD->GetSignature(),
- pMD->GetModule(),
- &sTypeContext,
- MetaSig::sigMember);
-
- // Outgoing, destination, method signature.
- MetaSig sDstSig(pMD->GetSignature(),
- pMD->GetModule(),
- &sTypeContext,
- MetaSig::sigMember);
-
- sDstSig.SetHasParamTypeArg();
-
- // Wrap calling convention parsers round the source and destination signatures. These will be responsible
- // for determining where each argument lives in registers or on the stack.
- ArgIterator sSrcArgLocations(&sSrcSig);
- ArgIterator sDstArgLocations(&sDstSig);
-
- // Define an argument descriptor type that describes how a single 4 byte portion of an argument is mapped
- // in the source and destination signature. We only have to worry about general registers and stack
- // locations here; floating point argument registers are left unmodified by this thunk.
- struct ArgDesc
- {
- int m_idxSrc; // Source register or stack offset
- int m_idxDst; // Destination register or stack offset
- bool m_fSrcIsReg; // Source index is a register number
- bool m_fDstIsReg; // Destination index is a register number
- };
-
- // The number of argument move descriptors we'll need is a function of the number of 4-byte registers or
- // stack slots the arguments occupy. The following calculation will over-estimate in a few side cases, but
- // not by much (it assumes all four argument registers are used plus the number of stack slots that
- // MetaSig calculates are needed for the rest of the arguments).
- DWORD cArgDescriptors = 4 + (sSrcArgLocations.SizeOfArgStack() / 4);
-
- // Allocate the array of argument descriptors.
- CQuickArray<ArgDesc> rgArgDescs;
- rgArgDescs.AllocThrows(cArgDescriptors);
-
- // We only need to map translations for arguments that could come after the instantiation parameter we're
- // inserting. On the ARM the only implicit argument that could follow is a vararg signature cookie, but
- // it's disallowed in this case. So we simply walk the user arguments.
- _ASSERTE(!sSrcSig.IsVarArg());
-
- INT srcOffset;
- INT dstOffset;
-
- DWORD idxCurrentDesc = 0;
- while ((srcOffset = sSrcArgLocations.GetNextOffset()) != TransitionBlock::InvalidOffset)
- {
- dstOffset = sDstArgLocations.GetNextOffset();
-
- // Get the placement for a single argument in the source and destination signatures (may include
- // multiple registers and/or stack locations if the argument is larger than 4 bytes).
- ArgLocDesc sSrcArgLoc;
- sSrcArgLocations.GetArgLoc(srcOffset, &sSrcArgLoc);
- ArgLocDesc sDstArgLoc;
- sDstArgLocations.GetArgLoc(dstOffset, &sDstArgLoc);
-
- // Fill in as many single-slot descriptors as the argument needs. Note that we ignore any floating
- // point register cases (m_cFloatReg > 0) since these will never change due to the hidden arg
- // insertion.
- while (sSrcArgLoc.m_cGenReg || sSrcArgLoc.m_cStack)
- {
- _ASSERTE(idxCurrentDesc < cArgDescriptors);
-
- if (sSrcArgLoc.m_cGenReg)
- {
- sSrcArgLoc.m_cGenReg--;
- rgArgDescs[idxCurrentDesc].m_idxSrc = sSrcArgLoc.m_idxGenReg++;
- rgArgDescs[idxCurrentDesc].m_fSrcIsReg = true;
- }
- else
- {
- _ASSERTE(sSrcArgLoc.m_cStack > 0);
- sSrcArgLoc.m_cStack--;
- rgArgDescs[idxCurrentDesc].m_idxSrc = sSrcArgLoc.m_idxStack++;
- rgArgDescs[idxCurrentDesc].m_fSrcIsReg = false;
- }
-
- if (sDstArgLoc.m_cGenReg)
- {
- sDstArgLoc.m_cGenReg--;
- rgArgDescs[idxCurrentDesc].m_idxDst = sDstArgLoc.m_idxGenReg++;
- rgArgDescs[idxCurrentDesc].m_fDstIsReg = true;
- }
- else
- {
- _ASSERTE(sDstArgLoc.m_cStack > 0);
- sDstArgLoc.m_cStack--;
- rgArgDescs[idxCurrentDesc].m_idxDst = sDstArgLoc.m_idxStack++;
- rgArgDescs[idxCurrentDesc].m_fDstIsReg = false;
- }
-
- idxCurrentDesc++;
- }
- }
-
- bool isRelative = MethodTable::VTableIndir2_t::isRelative
- && pMD->IsVtableSlot();
-
-#ifndef FEATURE_NGEN_RELOCS_OPTIMIZATIONS
- _ASSERTE(!isRelative);
-#endif
-
- // Update descriptor count to the actual number used.
- cArgDescriptors = idxCurrentDesc;
-
- // Note the position at which we have the first move to a stack location
- DWORD idxFirstMoveToStack = -1;
+ STANDARD_VM_CONTRACT;
- // We have a problem where register to register moves are concerned. Since we're adding an argument the
- // moves will be from a lower numbered register to a higher numbered one (e.g. r0 -> r1). But the argument
- // descriptors we just produced will order them starting from the lowest registers. If we emit move
- // instructions in this order we'll end up copying the value of the lowest register into all of the rest
- // (e.g. r0 -> r1, r1 -> r2 etc.). We don't have this problem with stack based arguments since the
- // argument stacks don't overlap in the same fashion. To solve this we'll reverse the order of the
- // descriptors with register destinations (there will be at most four of these so it's fairly cheap).
- if (cArgDescriptors > 1)
+ struct ShuffleEntry *pEntry = pShuffleEntryArray;
+ while (pEntry->srcofs != ShuffleEntry::SENTINEL)
{
- // Start by assuming we have all four register destination descriptors.
- int idxLastRegDesc = min(3, cArgDescriptors - 1);
+ _ASSERTE(pEntry->dstofs & ShuffleEntry::REGMASK);
+ _ASSERTE(pEntry->srcofs & ShuffleEntry::REGMASK);
+ _ASSERTE(!(pEntry->dstofs & ShuffleEntry::FPREGMASK));
+ _ASSERTE(!(pEntry->srcofs & ShuffleEntry::FPREGMASK));
+ _ASSERTE(pEntry->dstofs != ShuffleEntry::HELPERREG);
+ _ASSERTE(pEntry->srcofs != ShuffleEntry::HELPERREG);
- // Adjust that count to match reality.
- while (idxLastRegDesc >= 0 && !rgArgDescs[idxLastRegDesc].m_fDstIsReg)
- {
- idxLastRegDesc--;
- }
-
- if (idxLastRegDesc < 0)
- {
- // No register is used to pass any of the parameters. No need to reverse the order of the descriptors
- idxFirstMoveToStack = 0;
- }
- else
- {
- _ASSERTE(idxLastRegDesc >= 0 && ((DWORD)idxLastRegDesc) < cArgDescriptors);
-
- // First move to stack location happens after the last move to register location
- idxFirstMoveToStack = idxLastRegDesc+1;
-
- // Calculate how many descriptors we'll need to swap.
- DWORD cSwaps = (idxLastRegDesc + 1) / 2;
- // Finally we can swap the descriptors.
- int idxFirstRegDesc = 0;
- while (cSwaps)
- {
- ArgDesc sTempDesc = rgArgDescs[idxLastRegDesc];
- rgArgDescs[idxLastRegDesc] = rgArgDescs[idxFirstRegDesc];
- rgArgDescs[idxFirstRegDesc] = sTempDesc;
-
- _ASSERTE(idxFirstRegDesc < idxLastRegDesc);
- idxFirstRegDesc++;
- idxLastRegDesc--;
- cSwaps--;
- }
- }
+ ThumbEmitMovRegReg(ThumbReg(pEntry->dstofs & ShuffleEntry::OFSMASK),
+ ThumbReg(pEntry->srcofs & ShuffleEntry::OFSMASK));
+ pEntry++;
}
- // If we're ever required to write to the destination stack then we can't implement this case with a
- // simple tail call stub. (That's not technically true: there are edge cases caused by 64-bit alignment
- // requirements that might allow us to use a simple stub since the extra argument fits in a "hole" in the
- // arguments, but these are infrequent enough that it's likely not worth the effort of detecting them).
- ArgDesc *pLastArg = cArgDescriptors ? &rgArgDescs[cArgDescriptors - 1] : NULL;
- if ((pLastArg == NULL) || pLastArg->m_fDstIsReg)
+ MetaSig msig(pSharedMD);
+ ArgIterator argit(&msig);
+ if (argit.HasParamType())
{
- // Simple case where we can just rearrange a few argument registers and tail call.
-
- for (idxCurrentDesc = 0; idxCurrentDesc < cArgDescriptors; idxCurrentDesc++)
- {
- // Because we're in the simple case we know we'll never be asked to move a value onto the stack
- // and since we're adding a parameter we should never be required to move a value from the stack
- // to a register either. So all of the descriptors should be register to register moves.
- _ASSERTE(rgArgDescs[idxCurrentDesc].m_fSrcIsReg && rgArgDescs[idxCurrentDesc].m_fDstIsReg);
- ThumbEmitMovRegReg(ThumbReg(rgArgDescs[idxCurrentDesc].m_idxDst),
- ThumbReg(rgArgDescs[idxCurrentDesc].m_idxSrc));
- }
-
// Place instantiation parameter into the correct register.
ArgLocDesc sInstArgLoc;
- sDstArgLocations.GetParamTypeLoc(&sInstArgLoc);
+ argit.GetParamTypeLoc(&sInstArgLoc);
int regHidden = sInstArgLoc.m_idxGenReg;
_ASSERTE(regHidden != -1);
- if (pHiddenArg)
- {
- // mov regHidden, #pHiddenArg
- ThumbEmitMovConstant(ThumbReg(regHidden), (TADDR)pHiddenArg);
- }
- else
- {
- // Extract MethodTable pointer (the hidden arg) from the object instance.
- // ldr regHidden, [r0]
- ThumbEmitLoadRegIndirect(ThumbReg(regHidden), ThumbReg(0), 0);
- }
-
- if (pHiddenArg == NULL)
- {
- // Unboxing stub case.
-
- // Skip over the MethodTable* to find the address of the unboxed value type.
- // add r0, #sizeof(MethodTable*)
- ThumbEmitIncrement(ThumbReg(0), sizeof(MethodTable*));
- }
-
- // Emit a tail call to the target method.
- if (isRelative)
+ if (extraArg == NULL)
{
- ThumbEmitProlog(1, 0, FALSE);
- }
-
- ThumbEmitCallManagedMethod(pMD, true);
-
- if (isRelative)
- {
- ThumbEmitEpilog();
- }
- }
- else
- {
- // Complex case where we need to emit a new stack frame and copy the arguments.
-
- // Calculate the size of the new stack frame:
- //
- // +------------+
- // SP -> | | <-- Space for helper arg, if isRelative is true
- // +------------+
- // | | <-+
- // : : | Outgoing arguments
- // | | <-+
- // +------------+
- // | Padding | <-- Optional, maybe required so that SP is 64-bit aligned
- // +------------+
- // | GS Cookie |
- // +------------+
- // +-> | vtable ptr |
- // | +------------+
- // | | m_Next |
- // | +------------+
- // | | R4 | <-+
- // Stub | +------------+ |
- // Helper | : : |
- // Frame | +------------+ | Callee saved registers
- // | | R11 | |
- // | +------------+ |
- // | | LR/RetAddr | <-+
- // | +------------+
- // | | R0 | <-+
- // | +------------+ |
- // | : : | Argument registers
- // | +------------+ |
- // +-> | R3 | <-+
- // +------------+
- // Old SP -> | |
- //
- DWORD cbStackArgs = (pLastArg->m_idxDst + 1) * 4;
- DWORD cbStackFrame = cbStackArgs + sizeof(GSCookie) + sizeof(StubHelperFrame);
- cbStackFrame = ALIGN_UP(cbStackFrame, 8);
-
- if (isRelative)
- {
- cbStackFrame += 4;
- }
-
- DWORD cbStackFrameWithoutSavedRegs = cbStackFrame - (13 * 4); // r0-r11,lr
-
- // Prolog:
- ThumbEmitProlog(8, // Save r4-r11,lr (count doesn't include lr)
- cbStackFrameWithoutSavedRegs, // Additional space in the stack frame required
- TRUE); // Push argument registers
-
- DWORD offsetOfFrame = cbStackFrame - sizeof(StubHelperFrame);
-
- // Initialize and link the StubHelperFrame and associated GS cookie.
- EmitStubLinkFrame(StubHelperFrame::GetMethodFrameVPtr(), offsetOfFrame, StubHelperFrame::GetOffsetOfTransitionBlock());
-
- // Initialize temporary registers used when copying arguments:
- // r6 == pointer to first incoming stack-based argument
- // r7 == pointer to first outgoing stack-based argument
-
- // add r6, sp, #cbStackFrame
- ThumbEmitAdd(ThumbReg(6), thumbRegSp, cbStackFrame);
-
- // mov r7, sp
- ThumbEmitMovRegReg(ThumbReg(7), thumbRegSp);
-
- // Copy incoming to outgoing arguments. Stack arguments are generally written consecutively and as
- // such we use post-increment forms of register indirect addressing to keep our input (r6) and output
- // (r7) pointers up to date. But sometimes we'll skip four bytes due to 64-bit alignment requirements
- // and need to bump one or both of the pointers to compensate. We determine
- //
- // At this point, the ArgumentDescriptor array is divied into two parts:
- //
- // 1) Reverse sorted register to register moves (see the comment earlier in the method for details)
- // 2) Register or Stack to Stack moves (if any) in the original order.
- //
- // Its possible that the register to register moves may move to a target register that happens
- // to be a source for the register -> stack move. If this happens, and we emit the argument moves
- // in the current order, then we can lose the contents of the register involved in register->stack
- // move (stack->stack moves are not a problem as the locations dont overlap).
- //
- // To address this, we will emit the argument moves in two loops:
- //
- // 1) First loop will emit the moves that have stack location as the target
- // 2) Second loop will emit moves that have register as the target.
- DWORD idxCurrentLoopBegin = 0, idxCurrentLoopEnd = cArgDescriptors;
- if (idxFirstMoveToStack != -1)
- {
- _ASSERTE(idxFirstMoveToStack < cArgDescriptors);
- idxCurrentLoopBegin = idxFirstMoveToStack;
-
- for (idxCurrentDesc = idxCurrentLoopBegin; idxCurrentDesc < idxCurrentLoopEnd; idxCurrentDesc++)
+ if (pSharedMD->RequiresInstMethodTableArg())
{
- ArgDesc *pArgDesc = &rgArgDescs[idxCurrentDesc];
-
- if (pArgDesc->m_fSrcIsReg)
- {
- // Source value is in a register.
-
- _ASSERTE(!pArgDesc->m_fDstIsReg);
- // Register to stack. Calculate delta from last stack write; normally it will be 4 bytes
- // and our pointer has already been set up correctly by the post increment of the last
- // write. But in some cases we need to skip four bytes due to a 64-bit alignment
- // requirement. In those cases we need to emit an extra add to keep the pointer correct.
- // Note that the first stack argument is guaranteed to be 64-bit aligned by the ABI and as
- // such the first stack slot is never skipped.
- if ((pArgDesc->m_idxDst > 0) &&
- (pArgDesc->m_idxDst != (rgArgDescs[idxCurrentDesc - 1].m_idxDst + 1)))
- {
- _ASSERTE(pArgDesc->m_idxDst == (rgArgDescs[idxCurrentDesc - 1].m_idxDst + 2));
- ThumbEmitIncrement(ThumbReg(7), 4);
- }
-
- // str srcReg, [r7], #4
- ThumbEmitStoreIndirectPostIncrement(pArgDesc->m_idxSrc, ThumbReg(7), 4);
- }
- else
- {
- // Source value is on the stack. We should have no cases where a stack argument moves back to
- // a register (because we're adding an argument).
- _ASSERTE(!pArgDesc->m_fDstIsReg);
-
- // Stack to stack move. We need to use register (r6) to store the value temporarily between
- // the read and the write. See the comments above for why we need to check stack deltas and
- // possibly insert extra add instructions in some cases.
- if ((pArgDesc->m_idxSrc > 0) &&
- (pArgDesc->m_idxSrc != (rgArgDescs[idxCurrentDesc - 1].m_idxSrc + 1)))
- {
- _ASSERTE(pArgDesc->m_idxSrc == (rgArgDescs[idxCurrentDesc - 1].m_idxSrc + 2));
- ThumbEmitIncrement(ThumbReg(6), 4);
- }
- if ((pArgDesc->m_idxDst > 0) &&
- (pArgDesc->m_idxDst != (rgArgDescs[idxCurrentDesc - 1].m_idxDst + 1)))
- {
- _ASSERTE(pArgDesc->m_idxDst == (rgArgDescs[idxCurrentDesc - 1].m_idxDst + 2));
- ThumbEmitIncrement(ThumbReg(7), 4);
- }
-
- // ldr r8, [r6], #4
- ThumbEmitLoadIndirectPostIncrement(ThumbReg(8), ThumbReg(6), 4);
-
- // str r8, [r7], #4
- ThumbEmitStoreIndirectPostIncrement(ThumbReg(8), ThumbReg(7), 4);
- }
+ // Unboxing stub case
+ // Extract MethodTable pointer (the hidden arg) from the object instance.
+ // ldr regHidden, [r0]
+ ThumbEmitLoadRegIndirect(ThumbReg(regHidden), ThumbReg(0), 0);
}
-
- // Update the indexes to be used for the second loop
- idxCurrentLoopEnd = idxCurrentLoopBegin;
- idxCurrentLoopBegin = 0;
- }
-
- // Now, perform the register to register moves
- for (idxCurrentDesc = idxCurrentLoopBegin; idxCurrentDesc < idxCurrentLoopEnd; idxCurrentDesc++)
- {
- ArgDesc *pArgDesc = &rgArgDescs[idxCurrentDesc];
-
- // All moves to stack locations have been done (if applicable).
- // Since we are moving to a register destination, the source
- // will also be a register and cannot be a stack location (refer to the previous loop).
- _ASSERTE(pArgDesc->m_fSrcIsReg && pArgDesc->m_fDstIsReg);
-
- // Register to register case.
- ThumbEmitMovRegReg(pArgDesc->m_idxDst, pArgDesc->m_idxSrc);
- }
-
-
- // Place instantiation parameter into the correct register.
- ArgLocDesc sInstArgLoc;
- sDstArgLocations.GetParamTypeLoc(&sInstArgLoc);
- int regHidden = sInstArgLoc.m_idxGenReg;
- _ASSERTE(regHidden != -1);
- if (pHiddenArg)
- {
- // mov regHidden, #pHiddenArg
- ThumbEmitMovConstant(ThumbReg(regHidden), (TADDR)pHiddenArg);
}
else
{
- // Extract MethodTable pointer (the hidden arg) from the object instance.
- // ldr regHidden, [r0]
- ThumbEmitLoadRegIndirect(ThumbReg(regHidden), ThumbReg(0), 0);
- }
-
- if (pHiddenArg == NULL)
- {
- // Unboxing stub case.
-
- // Skip over the MethodTable* to find the address of the unboxed value type.
- // add r0, #sizeof(MethodTable*)
- ThumbEmitIncrement(ThumbReg(0), sizeof(MethodTable*));
+ // mov regHidden, #extraArg
+ ThumbEmitMovConstant(ThumbReg(regHidden), (TADDR)extraArg);
}
-
- // Emit a regular (non-tail) call to the target method.
- ThumbEmitCallManagedMethod(pMD, false);
-
- // Unlink the StubHelperFrame.
- EmitStubUnlinkFrame();
-
- // Epilog
- ThumbEmitEpilog();
- }
-}
-
-#if defined(FEATURE_SHARE_GENERIC_CODE)
-// The stub generated by this method passes an extra dictionary argument before jumping to
-// shared-instantiation generic code.
-//
-// pSharedMD is either
-// * An InstantiatedMethodDesc for a generic method whose code is shared across instantiations.
-// In this case, the extra argument is the InstantiatedMethodDesc for the instantiation-specific stub itself.
-// or * A MethodDesc for a static method in a generic class whose code is shared across instantiations.
-// In this case, the extra argument is the MethodTable pointer of the instantiated type.
-VOID StubLinkerCPU::EmitInstantiatingMethodStub(MethodDesc* pSharedMD, void* extra)
-{
- CONTRACTL
- {
- THROWS;
- GC_TRIGGERS;
- INJECT_FAULT(COMPlusThrowOM(););
- PRECONDITION(pSharedMD->RequiresInstMethodTableArg() || pSharedMD->RequiresInstMethodDescArg());
}
- CONTRACTL_END;
-
- // Share code with the instantiating version of the unboxing stub (see below).
- ThumbEmitCallWithGenericInstantiationParameter(pSharedMD, extra);
-}
-#endif // FEATURE_SHARE_GENERIC_CODE
-
-void StubLinkerCPU::EmitUnboxMethodStub(MethodDesc *pMD)
-{
- if (pMD->RequiresInstMethodTableArg())
+ if (extraArg == NULL)
{
- // In this case we also have to add an instantiating parameter (which is always the MethodTable* from
- // the instance we're called on). Most of this code is shared with the instantiating method stub
- // above, the NULL parameter informs the emitter that we're both an unboxing stub and that the extra
- // parameter can be deduced from the 'this' reference.
- ThumbEmitCallWithGenericInstantiationParameter(pMD, NULL);
+ // Unboxing stub case
+ // Skip over the MethodTable* to find the address of the unboxed value type.
+ // add r0, #sizeof(MethodTable*)
+ ThumbEmitIncrement(ThumbReg(0), sizeof(MethodTable*));
}
- else
- {
- // We assume that we'll never see a case where a boxed value type method will require an instantiated
- // method desc as a parameter. The stubs on other platforms make this assumption (and indeed this
- // method isn't even passed an additional instantiation parameter). This is trivially true for the
- // non-interface call case: the only methods callable directly on the boxed instance are the methods
- // of Object, none of which are generic. For the interface dispatch case we're relying on the fact
- // that the jit always provides the instantiating argument explicitly.
- _ASSERTE(!pMD->RequiresInstMethodDescArg());
-
- // Address of the value type is address of the boxed instance plus four.
- // add r0, #4
- ThumbEmitIncrement(ThumbReg(0), 4);
- bool isRelative = MethodTable::VTableIndir2_t::isRelative
- && pMD->IsVtableSlot();
+ bool isRelative = MethodTable::VTableIndir2_t::isRelative
+ && pSharedMD->IsVtableSlot();
#ifndef FEATURE_NGEN_RELOCS_OPTIMIZATIONS
- _ASSERTE(!isRelative);
+ _ASSERTE(!isRelative);
#endif
+ if (isRelative)
+ {
+ ThumbEmitProlog(1, 0, FALSE);
+ }
- if (isRelative)
- {
- ThumbEmitProlog(1, 0, FALSE);
- }
-
- // Tail call the real target.
- ThumbEmitCallManagedMethod(pMD, true /* tail call */);
+ ThumbEmitCallManagedMethod(pSharedMD, true);
- if (isRelative)
- {
- ThumbEmitEpilog();
- }
+ if (isRelative)
+ {
+ ThumbEmitEpilog();
}
}
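To make the unboxing branch of the new emitter concrete, a toy model follows (invented types, not runtime code): when extraArg is NULL, the hidden instantiation argument is loaded from the first pointer-sized field of the object (its MethodTable*), and 'this' is advanced past it to reach the unboxed value.

    #include <cstdio>

    struct FakeMethodTable { const char* name; };

    struct BoxedInt
    {
        FakeMethodTable* pMT;   // object header: MethodTable pointer first
        int              value; // the unboxed payload follows it
    };

    void UnboxDispatch(BoxedInt* pThis)
    {
        // ldr regHidden, [r0] : hidden arg is the MethodTable*
        FakeMethodTable* hiddenArg = pThis->pMT;
        // add r0, #sizeof(MethodTable*) : skip the header to the value
        int* unboxedThis = (int*)((char*)pThis + sizeof(FakeMethodTable*));
        printf("hidden=%s value=%d\n", hiddenArg->name, *unboxedThis);
    }

    int main()
    {
        FakeMethodTable mt = { "System.Int32" };
        BoxedInt boxed = { &mt, 42 };
        UnboxDispatch(&boxed); // prints hidden=System.Int32 value=42
        return 0;
    }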
@@ -2558,355 +2107,6 @@ void InitJITHelpers1()
}
}
-// +64 stack-based arguments here
-// -- MulticastFrame end
-// +48 r0-r3 argument registers
-// +44 lr return address
-// +40 fp frame pointer
-// +12 r4-r10 callee saved registers
-// +8 datum (typically a MethodDesc*)
-// +4 m_Next
-// +0 the frame vptr
-// -- MulticastFrame start
-// -4 gs cookie
-// -... floating point argument registers
-void StubLinkerCPU::EmitMulticastInvoke(UINT_PTR hash)
-{
- //Decode Multicast Delegate hash
- unsigned int numStackBytes = hash >> 8;
- _ASSERTE(numStackBytes <= 0x7fff);
-
- unsigned int numFPRegs = (hash & 0xf8) >> 3;
- _ASSERTE(numFPRegs <= 16);
-
- unsigned int numGenRegs = hash & 0x7;
- _ASSERTE(numGenRegs <= 4);
-
- DWORD offsetOfFPRegs = 0;
-
- DWORD cbStackFrame = numStackBytes;
- if (numFPRegs)
- {
- cbStackFrame = ALIGN_UP(cbStackFrame, 8);
- offsetOfFPRegs = cbStackFrame;
- cbStackFrame += 4 * numFPRegs;
- }
- cbStackFrame += sizeof(GSCookie) + sizeof(MulticastFrame);
- cbStackFrame = ALIGN_UP(cbStackFrame, 8);
- DWORD cbStackFrameWithoutSavedRegs = cbStackFrame - (13 * 4); // r0-r11,lr
-
- // Prolog:
- ThumbEmitProlog(8, // Save r4-r11,lr (count doesn't include lr)
- cbStackFrameWithoutSavedRegs, // Additional space in the stack frame required
- TRUE); // Push argument registers
-
- DWORD offsetOfFrame = cbStackFrame - sizeof(MulticastFrame);
-
- // Move the MethodDesc* we're calling to r12.
- // ldr r12, [r0, #offsetof(DelegateObject, _methodPtrAux)]
- ThumbEmitLoadRegIndirect(ThumbReg(12), ThumbReg(0), DelegateObject::GetOffsetOfMethodPtrAux());
-
- // Initialize MulticastFrame::m_pMD to the MethodDesc* we're calling
- // str r12, [sp + #(offsetOfFrame + offsetof(MulticastFrame, m_pMD))]
- ThumbEmitStoreRegIndirect(ThumbReg(12), thumbRegSp, offsetOfFrame + MulticastFrame::GetOffsetOfDatum());
-
- if (numFPRegs)
- {
- ThumbEmitAdd(ThumbReg(4), thumbRegSp, offsetOfFPRegs);
-
- // save floating point arguments at offsetOfFPRegs
- //vstm{IA} R4,{s0-s(numFPRegs -1)}
- Emit16(0xec84);
- Emit16(0x0a00 | (WORD)numFPRegs);
- }
-
- // Initialize and link the MulticastFrame and associated GS cookie.
- EmitStubLinkFrame(MulticastFrame::GetMethodFrameVPtr(), offsetOfFrame, MulticastFrame::GetOffsetOfTransitionBlock());
-
- //r7 as counter. Initialize it to 0.
- // mov r7, 0
- ThumbEmitMovConstant(ThumbReg(7), 0);
-
- //initialize r9 to _invocationCount
- ThumbEmitLoadRegIndirect(ThumbReg(9), ThumbReg(0), DelegateObject::GetOffsetOfInvocationCount());
-
- CodeLabel *pLoopLabel = NewCodeLabel();
- CodeLabel *pEndLoopLabel = NewCodeLabel();
-
- //loop:
- EmitLabel(pLoopLabel);
-
- // cmp r7, r9
- ThumbEmitCmpReg(ThumbReg(7), ThumbReg(9));
-
- // if equal goto endloop
- // beq endloop
- ThumbEmitCondFlagJump(pEndLoopLabel, 0);
-
- UINT32 count = 0;
- if(numStackBytes)
- {
- //r1 = pos for stack args in Frame
- ThumbEmitAdd(ThumbReg(1), ThumbReg(4), MulticastFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgs());
-
- //r2 = stack pos for args of calling func
- ThumbEmitMovRegReg(ThumbReg(2), thumbRegSp);
-
- // ..move stack args..
- _ASSERTE(numStackBytes%4 == 0);
- while (count != numStackBytes)
- {
- ThumbEmitLoadIndirectPostIncrement(ThumbReg(0), ThumbReg(1), 4);
- ThumbEmitStoreIndirectPostIncrement(ThumbReg(0), ThumbReg(2), 4);
- count += 4;
- }
- }
-
- count = 1;
- while(count < numGenRegs)
- {
- ThumbEmitLoadRegIndirect(ThumbReg(count), ThumbReg(4), MulticastFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgumentRegisters() + count*4);
- count++;
- }
-
- if(numFPRegs)
- {
- ThumbEmitAdd(ThumbReg(0), thumbRegSp, offsetOfFPRegs);
- //vldm{IA}.32 R0, s0-s(numFPRegs-1)
- Emit16(0xec90);
- Emit16(0x0a00 | (WORD)numFPRegs);
- }
-
- //ldr r0, [r4+0x30] // get the first argument
- ThumbEmitLoadRegIndirect(ThumbReg(0),ThumbReg(4), MulticastFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgumentRegisters());
-
- // ldr r6, [r0+0x14] //invocationList
- ThumbEmitLoadRegIndirect(ThumbReg(6), ThumbReg(0), DelegateObject::GetOffsetOfInvocationList());
-
- // r6 - address of first delegate in invocation list
- // add r6,r6,0xC
- ThumbEmitAdd(ThumbReg(6), ThumbReg(6), PtrArray::GetDataOffset());
-
- //ldr r8,[r6+r7*4] //get delegate object
- ThumbEmitLoadOffsetScaledReg(ThumbReg(8), ThumbReg(6), ThumbReg(7), 2);
-
- // ldr r0, [r8+0x04] //_target from the delegate
- ThumbEmitLoadRegIndirect(ThumbReg(0), ThumbReg(8), DelegateObject::GetOffsetOfTarget());
-
- // ldr r8, [r8+0xC] // methodPtr from the delegate
- ThumbEmitLoadRegIndirect(ThumbReg(8), ThumbReg(8), DelegateObject::GetOffsetOfMethodPtr());
-
- //call delegate
- ThumbEmitCallRegister(ThumbReg(8));
-
- //increment counter
- ThumbEmitAdd(ThumbReg(7), ThumbReg(7), 1);
-
- // The debugger may need to stop here, so grab the offset of this code.
- EmitPatchLabel();
-
- //goto loop
- ThumbEmitNearJump(pLoopLabel);
-
- //endloop:
- EmitLabel(pEndLoopLabel);
-
-
- //At this point of the stub:
- //r4 must point to Frame
- //and r5 must be current Thread*
-
- EmitStubUnlinkFrame();
-
- // Epilog
- ThumbEmitEpilog();
-}
-
-void StubLinkerCPU::EmitSecureDelegateInvoke(UINT_PTR hash)
-{
- //Decode Multicast Delegate hash
- unsigned int numStackBytes = hash >> 8;
- _ASSERTE(numStackBytes <= 0x7fff);
-
- DWORD cbStackFrame = numStackBytes + sizeof(GSCookie) + sizeof(SecureDelegateFrame);
- cbStackFrame = ALIGN_UP(cbStackFrame, 8);
- DWORD cbStackFrameWithoutSavedRegs = cbStackFrame - (13 * 4); // r0-r11,lr
-
- // Prolog:
- ThumbEmitProlog(8, // Save r4-r11,lr (count doesn't include lr)
- cbStackFrameWithoutSavedRegs, // Additional space in the stack frame required
- TRUE); // Push argument registers
-
- DWORD offsetOfFrame = cbStackFrame - sizeof(SecureDelegateFrame);
-
- // Move the MethodDesc* we're calling to r12.
- // ldr r12, [r0, #offsetof(DelegateObject, _invocationCount)]
- ThumbEmitLoadRegIndirect(ThumbReg(12), ThumbReg(0), DelegateObject::GetOffsetOfInvocationCount());
-
- // Initialize SecureDelegateFrame::m_pMD to the MethodDesc* we're calling
- // str r12, [sp + #(offsetOfFrame + offsetof(SecureDelegateFrame, m_pMD))]
- ThumbEmitStoreRegIndirect(ThumbReg(12), thumbRegSp, offsetOfFrame + SecureDelegateFrame::GetOffsetOfDatum());
-
- // Initialize and link the SecureDelegateFrame and associated GS cookie.
- EmitStubLinkFrame(SecureDelegateFrame::GetMethodFrameVPtr(), offsetOfFrame, SecureDelegateFrame::GetOffsetOfTransitionBlock());
-
- // At this point:
- // r0 : secure delegate
- // r4 : SecureDelegateFrame *
- // r5 : Thread *
-
- if (numStackBytes)
- {
- // Copy stack based arguments from the calling frame into this one. Use the following registers:
- // r6 : pointer to source arguments
- // r7 : pointer to destination arguments
- // r8 : temporary storage during copy operation
-
- // add r6, r4, #MulticastFrame::GetOffsetOfArgs()
- ThumbEmitAdd(ThumbReg(6), ThumbReg(4), MulticastFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgs());
-
- // mov r7, sp
- ThumbEmitMovRegReg(ThumbReg(7), thumbRegSp);
-
- // Unrolled loop to copy the stack based arguments. Might want to consider a second path with a loop
- // for large argument lists if anyone complains about this.
- _ASSERTE((numStackBytes % 4) == 0);
- for (unsigned int i = 0; i < numStackBytes; i += 4)
- {
- // Read one 4-byte value from the source stack and copy it to the new stack, post-incrementing
- // both source and destination as we go.
- // ldr r8, [r6], #4
- // str r8, [r7], #4
- ThumbEmitLoadIndirectPostIncrement(ThumbReg(8), ThumbReg(6), 4);
- ThumbEmitStoreIndirectPostIncrement(ThumbReg(8), ThumbReg(7), 4);
- }
- }
-
- // Stack-based arguments are copied. Floating point argument registers and r1-r3 are all still correct.
- // All we need to do now is calculate the real value for r0 and the target address. Secure delegates wrap
- // an inner delegate (kept in _invocationList). We retrieve this inner delegate and then perform the usual
- // delegate invocation pattern on that.
-
- // Get "real" delegate.
- // ldr r0, [r0, #offsetof(DelegateObject, _invocationList)]
- ThumbEmitLoadRegIndirect(ThumbReg(0), ThumbReg(0), DelegateObject::GetOffsetOfInvocationList());
-
- // Load the destination address from the inner delegate.
- // ldr r12, [r0, #offsetof(DelegateObject, _methodPtr)]
- ThumbEmitLoadRegIndirect(ThumbReg(12), ThumbReg(0), DelegateObject::GetOffsetOfMethodPtr());
-
-    // This is only required for unbound delegates which use VSD stubs, but it does no harm if done unconditionally
- // add r4, r0+#offsetof(DelegateObject, _methodPtrAux) ; // r4 now contains indirection cell
- ThumbEmitAdd(ThumbReg(4), ThumbReg(0), DelegateObject::GetOffsetOfMethodPtrAux());
-
- // Replace the delegate reference with the object cached as the delegate's target.
- // ldr r0, [r0, #offsetof(DelegateObject, _target)]
- ThumbEmitLoadRegIndirect(ThumbReg(0), ThumbReg(0), DelegateObject::GetOffsetOfTarget());
-
- // Perform the call.
- // blx r12
- ThumbEmitCallRegister(ThumbReg(12));
-
- // restore frame pointer in r4
- ThumbEmitAdd(ThumbReg(4), thumbRegSp, offsetOfFrame);
-
- // Unlink SecureDelegateFrame. This requires the frame pointer in r4 and the thread pointer in r5.
- EmitStubUnlinkFrame();
-
- // Epilog
- ThumbEmitEpilog();
-}
-
-//The function expects r4 to point to frame
-//and r5 must be current Thread*
-void StubLinkerCPU::EmitStubUnlinkFrame()
-{
-#ifdef _DEBUG
- // EmitStubUnlinkFrame is emitted just before the epilog.
- // Thus, at this point, all other callee-saved registers
- // could be used since we are anyways going to restore them
- // via epilog execution.
-
- // Ensure that GSCookie is valid
- //
- // ldr r6, [r4-4]; Load the value of GSCookie
- ThumbEmitSub(ThumbReg(6), ThumbReg(4), 4);
- ThumbEmitLoadRegIndirect(ThumbReg(6), ThumbReg(6), 0);
-
- // mov r7, s_gsCookie
- ThumbEmitMovConstant(ThumbReg(7), GetProcessGSCookie());
-
- // cmp r6, r7 ; Are the GSCookie values in sync?
- ThumbEmitCmpReg(ThumbReg(6), ThumbReg(7));
-
- CodeLabel *pAllDoneLabel = NewCodeLabel();
-
- // beq AllDone; yes, GSCookie is good.
- ThumbEmitCondFlagJump(pAllDoneLabel, 0);
-
- // If we are here, then GSCookie was bad.
- // Call into DoJITFailFast.
- //
- // mov r12, DoJITFailFast
- ThumbEmitMovConstant(ThumbReg(12), (int)DoJITFailFast);
- // bl r12
- ThumbEmitCallRegister(ThumbReg(12));
- // Emit a breakpoint - we are not expected to come here at all
- // if we performed a FailFast.
- ThumbEmitBreakpoint();
-
- //AllDone:
- EmitLabel(pAllDoneLabel);
-#endif // _DEBUG
-
- // Unlink the MulticastFrame.
- // ldr r6, [r4 + #offsetof(MulticastFrame, m_Next)]
- // str r6, [r5 + #offsetof(Thread, m_pFrame)]
- ThumbEmitLoadRegIndirect(ThumbReg(6), ThumbReg(4), Frame::GetOffsetOfNextLink());
- ThumbEmitStoreRegIndirect(ThumbReg(6), ThumbReg(5), offsetof(Thread, m_pFrame));
-
-}
-
-//pFrameVptr = vtable ptr of Frame
-//offsetOfFrame = Frame offset in bytes from sp
-//After this method: r4 points to the Frame on stack
-// and r5 has current Thread*
-void StubLinkerCPU::EmitStubLinkFrame(TADDR pFrameVptr, int offsetOfFrame, int offsetOfTransitionBlock)
-{
- // Initialize r4 to point to where we start filling the frame.
- ThumbEmitAdd(ThumbReg(4), thumbRegSp, offsetOfFrame - sizeof(GSCookie));
-
- // Write the initial GS cookie value
- // mov r5, s_gsCookie
- // str r5, [r4]
- ThumbEmitMovConstant(ThumbReg(5), s_gsCookie);
- ThumbEmitStoreIndirectPostIncrement(ThumbReg(5), ThumbReg(4), 4);
-
- // Initialize the vtable pointer.
- // mov r5, #vfptr
- // str r5, [r4 + #offsetof(Frame, _vfptr)]
- ThumbEmitMovConstant(ThumbReg(5), pFrameVptr);
- ThumbEmitStoreRegIndirect(ThumbReg(5), ThumbReg(4), 0);
-
- // Link the frame to the thread's frame chain.
- // r5 <- current Thread*
- // ldr r6, [r5 + #offsetof(Thread, m_pFrame)]
- // str r6, [r4 + #offsetof(MulticastFrame, m_Next)]
- // str r4, [r5 + #offsetof(Thread, m_pFrame)]
-
- ThumbEmitGetThread(ThumbReg(5));
-#ifdef FEATURE_PAL
- // reload argument registers that could have been corrupted by the call
- for (int reg = 0; reg < 4; reg++)
- ThumbEmitLoadRegIndirect(ThumbReg(reg), ThumbReg(4),
- offsetOfTransitionBlock + TransitionBlock::GetOffsetOfArgumentRegisters() + offsetof(ArgumentRegisters, r[reg]));
-#endif
-
- ThumbEmitLoadRegIndirect(ThumbReg(6), ThumbReg(5), Thread::GetOffsetOfCurrentFrame());
- ThumbEmitStoreRegIndirect(ThumbReg(6), ThumbReg(4), Frame::GetOffsetOfNextLink());
- ThumbEmitStoreRegIndirect(ThumbReg(4), ThumbReg(5), Thread::GetOffsetOfCurrentFrame());
-}
-
#endif // CROSSGEN_COMPILE
void StubLinkerCPU::ThumbEmitNearJump(CodeLabel *target)
@@ -2931,49 +2131,6 @@ void StubLinkerCPU::ThumbEmitCondRegJump(CodeLabel *target, BOOL nonzero, ThumbR
EmitLabelRef(target, reinterpret_cast<ThumbCondJump&>(gThumbCondJump), variation);
}
-UINT_PTR StubLinkerCPU::HashMulticastInvoke(MetaSig *pSig)
-{
- // Generate a hash key as follows:
- // Bit0-2 : num of general purpose registers used
- // Bit3-7 : num of FP regs used (counting in terms of s0,s1...)
- // Bit8-22 : num of stack bytes used
-
- ArgIterator delegateCallConv(pSig);
-
- UINT numStackBytes = delegateCallConv.SizeOfArgStack();
-
- if (numStackBytes > 0x7FFF)
- COMPlusThrow(kNotSupportedException, W("NotSupported_TooManyArgs"));
-
- int cGenReg = 1; // r0 is always used for this pointer
- int cFPReg = 0;
-
- // if it has a return buffer argument r1 is also used
- if(delegateCallConv.HasRetBuffArg())
- cGenReg = 2;
-
- int argOffset;
- while ((argOffset = delegateCallConv.GetNextOffset()) != TransitionBlock::InvalidOffset)
- {
- ArgLocDesc currArgLoc;
- delegateCallConv.GetArgLoc(argOffset, &currArgLoc);
-
- if(currArgLoc.m_idxGenReg != -1)
- cGenReg = currArgLoc.m_idxGenReg + currArgLoc.m_cGenReg;
-
- if(currArgLoc.m_idxFloatReg != -1)
- cFPReg = currArgLoc.m_idxFloatReg + currArgLoc.m_cFloatReg;
- }
-
- // only r0-r3 can be used for arguments
- _ASSERTE(cGenReg <= 4);
-
- // only s0-s15 can be used for arguments
- _ASSERTE(cFPReg <= 16);
-
- return (numStackBytes << 8 | cFPReg << 3 | cGenReg);
-}
-
void StubLinkerCPU::ThumbCopyOneTailCallArg(UINT * pnSrcAlign, const ArgLocDesc * pArgLoc, UINT * pcbStackSpace)
{
if (pArgLoc->m_fRequires64BitAlignment && (*pnSrcAlign & 1)) {
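For reference, the hash contract shared by the removed HashMulticastInvoke and EmitMulticastInvoke above can be sketched as a standalone encode/decode pair. This is an illustrative reconstruction of the bit layout documented in the removed comments, not code from this tree:

#include <cassert>
#include <cstdint>

// Sketch of the removed ARM32 multicast stub hash layout:
//   bits 0-2  : general purpose argument registers used (r0-r3)
//   bits 3-7  : floating point argument registers used (s0-s15)
//   bits 8-22 : stack bytes used by arguments
static uintptr_t EncodeMulticastHash(unsigned genRegs, unsigned fpRegs, unsigned stackBytes)
{
    assert(genRegs <= 4 && fpRegs <= 16 && stackBytes <= 0x7fff);
    return ((uintptr_t)stackBytes << 8) | (fpRegs << 3) | genRegs;
}

static void DecodeMulticastHash(uintptr_t hash, unsigned *genRegs, unsigned *fpRegs, unsigned *stackBytes)
{
    *stackBytes = (unsigned)(hash >> 8);  // numStackBytes in EmitMulticastInvoke
    *fpRegs     = (hash & 0xf8) >> 3;     // numFPRegs
    *genRegs    = hash & 0x7;             // numGenRegs
}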
diff --git a/src/vm/arm64/cgencpu.h b/src/vm/arm64/cgencpu.h
index 9a5fc84420..9b81f72d9e 100644
--- a/src/vm/arm64/cgencpu.h
+++ b/src/vm/arm64/cgencpu.h
@@ -159,6 +159,7 @@ struct FloatArgumentRegisters {
NEON128 q[8]; // q0-q7
};
+#define NUM_FLOAT_ARGUMENT_REGISTERS 8
//**********************************************************************
// Exception handling
@@ -453,14 +454,17 @@ public:
};
- static void Init();
-
- void EmitUnboxMethodStub(MethodDesc* pRealMD);
+ static void Init();
+
void EmitCallManagedMethod(MethodDesc *pMD, BOOL fTailCall);
void EmitCallLabel(CodeLabel *target, BOOL fTailCall, BOOL fIndirect);
void EmitShuffleThunk(struct ShuffleEntry *pShuffleEntryArray);
+#if defined(FEATURE_SHARE_GENERIC_CODE)
+ void EmitComputedInstantiatingMethodStub(MethodDesc* pSharedMD, struct ShuffleEntry *pShuffleEntryArray, void* extraArg);
+#endif // FEATURE_SHARE_GENERIC_CODE
+
#ifdef _DEBUG
void EmitNop() { Emit32(0xD503201F); }
#endif
diff --git a/src/vm/arm64/stubs.cpp b/src/vm/arm64/stubs.cpp
index b5dee2e23c..35e0ee74cd 100644
--- a/src/vm/arm64/stubs.cpp
+++ b/src/vm/arm64/stubs.cpp
@@ -1801,6 +1801,61 @@ VOID StubLinkerCPU::EmitShuffleThunk(ShuffleEntry *pShuffleEntryArray)
EmitJumpRegister(IntReg(16));
}
+// Emits code to shuffle the register arguments and supply the hidden instantiation argument before tail calling the shared target.
+VOID StubLinkerCPU::EmitComputedInstantiatingMethodStub(MethodDesc* pSharedMD, struct ShuffleEntry *pShuffleEntryArray, void* extraArg)
+{
+ STANDARD_VM_CONTRACT;
+
+ for (ShuffleEntry* pEntry = pShuffleEntryArray; pEntry->srcofs != ShuffleEntry::SENTINEL; pEntry++)
+ {
+ _ASSERTE(pEntry->dstofs & ShuffleEntry::REGMASK);
+ _ASSERTE(pEntry->srcofs & ShuffleEntry::REGMASK);
+ _ASSERTE(!(pEntry->dstofs & ShuffleEntry::FPREGMASK));
+ _ASSERTE(!(pEntry->srcofs & ShuffleEntry::FPREGMASK));
+ _ASSERTE(pEntry->dstofs != ShuffleEntry::HELPERREG);
+ _ASSERTE(pEntry->srcofs != ShuffleEntry::HELPERREG);
+
+ EmitMovReg(IntReg(pEntry->dstofs & ShuffleEntry::OFSMASK), IntReg(pEntry->srcofs & ShuffleEntry::OFSMASK));
+ }
+
+ MetaSig msig(pSharedMD);
+ ArgIterator argit(&msig);
+
+ if (argit.HasParamType())
+ {
+ ArgLocDesc sInstArgLoc;
+ argit.GetParamTypeLoc(&sInstArgLoc);
+ int regHidden = sInstArgLoc.m_idxGenReg;
+ _ASSERTE(regHidden != -1);
+
+ if (extraArg == NULL)
+ {
+ if (pSharedMD->RequiresInstMethodTableArg())
+ {
+ // Unboxing stub case
+                // Fill the hidden param arg with the MethodTable of the 'this' pointer
+ // ldr regHidden, [x0, #0]
+ EmitLoadStoreRegImm(eLOAD, IntReg(regHidden), IntReg(0), 0);
+ }
+ }
+ else
+ {
+ EmitMovConstant(IntReg(regHidden), (UINT64)extraArg);
+ }
+ }
+
+ if (extraArg == NULL)
+ {
+ // Unboxing stub case
+ // Address of the value type is address of the boxed instance plus sizeof(MethodDesc*).
+ // add x0, #sizeof(MethodDesc*)
+ EmitAddImm(IntReg(0), IntReg(0), sizeof(MethodDesc*));
+ }
+
+ // Tail call the real target.
+ EmitCallManagedMethod(pSharedMD, TRUE /* tail call */);
+}
+
void StubLinkerCPU::EmitCallLabel(CodeLabel *target, BOOL fTailCall, BOOL fIndirect)
{
BranchInstructionFormat::VariationCodes variationCode = BranchInstructionFormat::VariationCodes::BIF_VAR_JUMP;
@@ -1828,18 +1883,6 @@ void StubLinkerCPU::EmitCallManagedMethod(MethodDesc *pMD, BOOL fTailCall)
#ifndef CROSSGEN_COMPILE
-void StubLinkerCPU::EmitUnboxMethodStub(MethodDesc *pMD)
-{
- _ASSERTE(!pMD->RequiresInstMethodDescArg());
-
- // Address of the value type is address of the boxed instance plus sizeof(MethodDesc*).
- // add x0, #sizeof(MethodDesc*)
- EmitAddImm(IntReg(0), IntReg(0), sizeof(MethodDesc*));
-
- // Tail call the real target.
- EmitCallManagedMethod(pMD, TRUE /* tail call */);
-}
-
#ifdef FEATURE_READYTORUN
//
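The unboxing path of the new EmitComputedInstantiatingMethodStub comes down to two pointer operations before the tail call. A minimal host-side sketch, assuming only that a boxed object begins with a MethodTable*-sized header (BoxedInt is a hypothetical stand-in, not a runtime type):

#include <cstdio>

// Hypothetical boxed layout: a MethodTable*-sized header, then the payload.
struct BoxedInt
{
    void *pMethodTable;
    int   payload;
};

int main()
{
    int dummyMT = 0;
    BoxedInt box = { &dummyMT, 42 };

    char *pThis = reinterpret_cast<char *>(&box);
    void *hiddenArg = *reinterpret_cast<void **>(pThis); // ldr regHidden, [x0, #0]
    pThis += sizeof(void *);                             // add x0, x0, #sizeof(MethodDesc*)

    // The shared target now sees the unboxed payload as 'this' and the
    // MethodTable as the hidden instantiation argument.
    std::printf("hidden=%p unboxed=%d\n", hiddenArg, *reinterpret_cast<int *>(pThis));
    return 0;
}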
diff --git a/src/vm/callingconvention.h b/src/vm/callingconvention.h
index cbc6aad5c4..43cc93bd85 100644
--- a/src/vm/callingconvention.h
+++ b/src/vm/callingconvention.h
@@ -546,6 +546,29 @@ public:
#endif
}
+#ifdef _TARGET_X86_
+ // Get layout information for the argument that the ArgIterator is currently visiting.
+ void GetArgLoc(int argOffset, ArgLocDesc *pLoc)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ pLoc->Init();
+
+ int cSlots = (GetArgSize() + 3) / 4;
+ if (!TransitionBlock::IsStackArgumentOffset(argOffset))
+ {
+ pLoc->m_idxGenReg = TransitionBlock::GetArgumentIndexFromOffset(argOffset);
+ _ASSERTE(cSlots == 1);
+ pLoc->m_cGenReg = cSlots;
+ }
+ else
+ {
+ pLoc->m_idxStack = TransitionBlock::GetStackArgumentIndexFromOffset(argOffset);
+ pLoc->m_cStack = cSlots;
+ }
+ }
+#endif
+
#ifdef _TARGET_ARM_
// Get layout information for the argument that the ArgIterator is currently visiting.
void GetArgLoc(int argOffset, ArgLocDesc *pLoc)
@@ -643,7 +666,7 @@ public:
}
#endif // _TARGET_ARM64_
-#if defined(_TARGET_AMD64_) && defined(UNIX_AMD64_ABI)
+#if defined(_TARGET_AMD64_)
// Get layout information for the argument that the ArgIterator is currently visiting.
void GetArgLoc(int argOffset, ArgLocDesc* pLoc)
{
@@ -655,7 +678,6 @@ public:
*pLoc = m_argLocDescForStructInRegs;
return;
}
-#endif // UNIX_AMD64_ABI
if (argOffset == TransitionBlock::StructInRegsOffset)
{
@@ -664,27 +686,47 @@ public:
_ASSERTE(false);
return;
}
+#endif // UNIX_AMD64_ABI
pLoc->Init();
+#if defined(UNIX_AMD64_ABI)
if (TransitionBlock::IsFloatArgumentRegisterOffset(argOffset))
{
// Dividing by 16 as size of each register in FloatArgumentRegisters is 16 bytes.
pLoc->m_idxFloatReg = (argOffset - TransitionBlock::GetOffsetOfFloatArgumentRegisters()) / 16;
pLoc->m_cFloatReg = 1;
}
- else if (!TransitionBlock::IsStackArgumentOffset(argOffset))
+ else
+#endif // UNIX_AMD64_ABI
+ if (!TransitionBlock::IsStackArgumentOffset(argOffset))
{
- pLoc->m_idxGenReg = TransitionBlock::GetArgumentIndexFromOffset(argOffset);
- pLoc->m_cGenReg = 1;
+#if !defined(UNIX_AMD64_ABI)
+ // On Windows x64, we re-use the location in the transition block for both the integer and floating point registers
+ if ((m_argType == ELEMENT_TYPE_R4) || (m_argType == ELEMENT_TYPE_R8))
+ {
+ pLoc->m_idxFloatReg = TransitionBlock::GetArgumentIndexFromOffset(argOffset);
+ pLoc->m_cFloatReg = 1;
+ }
+ else
+#endif
+ {
+ pLoc->m_idxGenReg = TransitionBlock::GetArgumentIndexFromOffset(argOffset);
+ pLoc->m_cGenReg = 1;
+ }
}
else
{
pLoc->m_idxStack = TransitionBlock::GetStackArgumentIndexFromOffset(argOffset);
- pLoc->m_cStack = (GetArgSize() + STACK_ELEM_SIZE - 1) / STACK_ELEM_SIZE;
+ int argOnStackSize;
+ if (IsArgPassedByRef())
+ argOnStackSize = STACK_ELEM_SIZE;
+ else
+ argOnStackSize = GetArgSize();
+ pLoc->m_cStack = (argOnStackSize + STACK_ELEM_SIZE - 1) / STACK_ELEM_SIZE;
}
}
-#endif // _TARGET_AMD64_ && UNIX_AMD64_ABI
+#endif // _TARGET_AMD64_
protected:
DWORD m_dwFlags; // Cached flags
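The slot arithmetic in the GetArgLoc implementations above is plain ceiling division over the stack element size. A small self-checking sketch, assuming the usual 4-byte x86 and 8-byte AMD64 stack elements:

#include <cstddef>

// Round an argument's byte size up to whole stack slots, as the x86 and
// AMD64 GetArgLoc bodies do when filling m_cStack / m_cGenReg.
constexpr size_t SlotCount(size_t argSizeBytes, size_t stackElemSize)
{
    return (argSizeBytes + stackElemSize - 1) / stackElemSize;
}

static_assert(SlotCount(1, 4)  == 1, "sub-slot args still take one slot");
static_assert(SlotCount(12, 4) == 3, "x86: 12 bytes -> three 4-byte slots");
static_assert(SlotCount(12, 8) == 2, "AMD64: 12 bytes -> two 8-byte slots");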
diff --git a/src/vm/class.cpp b/src/vm/class.cpp
index 1cc1358d4e..a24f213c35 100644
--- a/src/vm/class.cpp
+++ b/src/vm/class.cpp
@@ -2941,7 +2941,7 @@ void EEClass::Fixup(DataImage *image, MethodTable *pMT)
image->ZeroPointerField(this, offsetof(DelegateEEClass, m_pUMThunkMarshInfo));
image->ZeroPointerField(this, offsetof(DelegateEEClass, m_pStaticCallStub));
image->ZeroPointerField(this, offsetof(DelegateEEClass, m_pMultiCastInvokeStub));
- image->ZeroPointerField(this, offsetof(DelegateEEClass, m_pSecureDelegateInvokeStub));
+ image->ZeroPointerField(this, offsetof(DelegateEEClass, m_pWrapperDelegateInvokeStub));
image->ZeroPointerField(this, offsetof(DelegateEEClass, m_pMarshalStub));
#ifdef FEATURE_COMINTEROP
diff --git a/src/vm/class.h b/src/vm/class.h
index 8d5e73febd..33676b57b2 100644
--- a/src/vm/class.h
+++ b/src/vm/class.h
@@ -2175,7 +2175,7 @@ public:
PTR_Stub m_pInstRetBuffCallStub;
RelativePointer<PTR_MethodDesc> m_pInvokeMethod;
PTR_Stub m_pMultiCastInvokeStub;
- PTR_Stub m_pSecureDelegateInvokeStub;
+ PTR_Stub m_pWrapperDelegateInvokeStub;
UMThunkMarshInfo* m_pUMThunkMarshInfo;
RelativePointer<PTR_MethodDesc> m_pBeginInvokeMethod;
RelativePointer<PTR_MethodDesc> m_pEndInvokeMethod;
diff --git a/src/vm/comdelegate.cpp b/src/vm/comdelegate.cpp
index 9d92a7c6e5..ae041112a3 100644
--- a/src/vm/comdelegate.cpp
+++ b/src/vm/comdelegate.cpp
@@ -35,11 +35,7 @@
#ifndef DACCESS_COMPILE
-#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)
-
-// ShuffleOfs not needed
-
-#elif defined(_TARGET_X86_)
+#if defined(_TARGET_X86_)
// Return an encoded shuffle entry describing a general register or stack offset that needs to be shuffled.
static UINT16 ShuffleOfs(INT ofs, UINT stackSizeDelta = 0)
@@ -64,8 +60,9 @@ static UINT16 ShuffleOfs(INT ofs, UINT stackSizeDelta = 0)
return static_cast<UINT16>(ofs);
}
+#endif
-#else // Portable default implementation
+#ifdef FEATURE_PORTABLE_SHUFFLE_THUNKS
// Iterator for extracting shuffle entries for the argument described by an ArgLocDesc.
// Used when calculating shuffle array entries in GenerateShuffleArray below.
@@ -149,9 +146,7 @@ public:
bool HasNextOfs()
{
return (m_currentGenRegIndex < m_argLocDesc->m_cGenReg) ||
-#if defined(UNIX_AMD64_ABI)
(m_currentFloatRegIndex < m_argLocDesc->m_cFloatReg) ||
-#endif
(m_currentStackSlotIndex < m_argLocDesc->m_cStack);
}
@@ -168,6 +163,7 @@ public:
{
return GetNextOfsInStruct();
}
+#endif // UNIX_AMD64_ABI
// Shuffle float registers first
if (m_currentFloatRegIndex < m_argLocDesc->m_cFloatReg)
@@ -177,7 +173,6 @@ public:
return (UINT16)index | ShuffleEntry::REGMASK | ShuffleEntry::FPREGMASK;
}
-#endif // UNIX_AMD64_ABI
// Shuffle any registers first (the order matters since otherwise we could end up shuffling a stack slot
// over a register we later need to shuffle down as well).
@@ -211,9 +206,6 @@ public:
}
};
-#endif
-
-#if defined(UNIX_AMD64_ABI)
// Return an index of argument slot. First indices are reserved for general purpose registers,
// the following ones for float registers and then the rest for stack slots.
// This index is independent of how many registers are actually used to pass arguments.
@@ -232,7 +224,11 @@ int GetNormalizedArgumentSlotIndex(UINT16 offset)
else
{
// stack slot
- index = NUM_ARGUMENT_REGISTERS + NUM_FLOAT_ARGUMENT_REGISTERS + (offset & ShuffleEntry::OFSMASK);
+ index = NUM_ARGUMENT_REGISTERS
+#ifdef NUM_FLOAT_ARGUMENT_REGISTERS
+ + NUM_FLOAT_ARGUMENT_REGISTERS
+#endif
+ + (offset & ShuffleEntry::OFSMASK);
}
return index;
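The normalized numbering GetNormalizedArgumentSlotIndex describes can be pictured with a few constexpr helpers; the register counts below are placeholders for illustration, not any real ABI's values:

// Placeholder register counts, purely for illustration.
constexpr int kGenRegs   = 6;
constexpr int kFloatRegs = 8;

constexpr int GenRegSlot(int i)   { return i; }                         // general registers come first
constexpr int FloatRegSlot(int i) { return kGenRegs + i; }              // float registers next
constexpr int StackSlot(int i)    { return kGenRegs + kFloatRegs + i; } // stack slots last

static_assert(StackSlot(0) == kGenRegs + kFloatRegs, "stack slots follow all registers");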
@@ -253,169 +249,133 @@ struct ShuffleGraphNode
UINT8 isMarked;
};
-#endif // UNIX_AMD64_ABI
-
-VOID GenerateShuffleArray(MethodDesc* pInvoke, MethodDesc *pTargetMeth, SArray<ShuffleEntry> * pShuffleEntryArray)
+BOOL AddNextShuffleEntryToArray(ArgLocDesc sArgSrc, ArgLocDesc sArgDst, SArray<ShuffleEntry> * pShuffleEntryArray, ShuffleComputationType shuffleType)
{
- STANDARD_VM_CONTRACT;
-
ShuffleEntry entry;
ZeroMemory(&entry, sizeof(entry));
-#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)
- MetaSig msig(pInvoke);
- ArgIterator argit(&msig);
+ ShuffleIterator iteratorSrc(&sArgSrc);
+ ShuffleIterator iteratorDst(&sArgDst);
- if (argit.HasRetBuffArg())
+ // Shuffle each slot in the argument (register or stack slot) from source to destination.
+ while (iteratorSrc.HasNextOfs())
{
- if (!pTargetMeth->IsStatic())
- {
- // Use ELEMENT_TYPE_END to signal the special handling required by
- // instance method with return buffer. "this" needs to come from
- // the first argument.
- entry.argtype = ELEMENT_TYPE_END;
- pShuffleEntryArray->Append(entry);
+ // We should have slots to shuffle in the destination at the same time as the source.
+ _ASSERTE(iteratorDst.HasNextOfs());
- msig.NextArgNormalized();
- }
- else
+ // Locate the next slot to shuffle in the source and destination and encode the transfer into a
+ // shuffle entry.
+ entry.srcofs = iteratorSrc.GetNextOfs();
+ entry.dstofs = iteratorDst.GetNextOfs();
+
+ // Only emit this entry if it's not a no-op (i.e. the source and destination locations are
+ // different).
+ if (entry.srcofs != entry.dstofs)
{
- entry.argtype = ELEMENT_TYPE_PTR;
+ if (shuffleType == ShuffleComputationType::InstantiatingStub)
+ {
+ // Instantiating Stub shuffles only support general register to register moves. More complex cases are handled by IL stubs
+ if (!(entry.srcofs & ShuffleEntry::REGMASK) || !(entry.dstofs & ShuffleEntry::REGMASK))
+ {
+ return FALSE;
+ }
+ if ((entry.srcofs == ShuffleEntry::HELPERREG) || (entry.dstofs == ShuffleEntry::HELPERREG))
+ {
+ return FALSE;
+ }
+ }
pShuffleEntryArray->Append(entry);
}
}
- CorElementType sigType;
+ // We should have run out of slots to shuffle in the destination at the same time as the source.
+ _ASSERTE(!iteratorDst.HasNextOfs());
- while ((sigType = msig.NextArgNormalized()) != ELEMENT_TYPE_END)
- {
- ZeroMemory(&entry, sizeof(entry));
- entry.argtype = sigType;
- pShuffleEntryArray->Append(entry);
- }
+ return TRUE;
+}
- ZeroMemory(&entry, sizeof(entry));
- entry.srcofs = ShuffleEntry::SENTINEL;
- pShuffleEntryArray->Append(entry);
+BOOL GenerateShuffleArrayPortable(MethodDesc* pMethodSrc, MethodDesc *pMethodDst, SArray<ShuffleEntry> * pShuffleEntryArray, ShuffleComputationType shuffleType)
+{
+ STANDARD_VM_CONTRACT;
-#elif defined(_TARGET_X86_)
- // Must create independent msigs to prevent the argiterators from
-    // interfering with each other.
- MetaSig sSigSrc(pInvoke);
- MetaSig sSigDst(pTargetMeth);
+ ShuffleEntry entry;
+ ZeroMemory(&entry, sizeof(entry));
- _ASSERTE(sSigSrc.HasThis());
+ MetaSig sSigSrc(pMethodSrc);
+ MetaSig sSigDst(pMethodDst);
+ // Initialize helpers that determine how each argument for the source and destination signatures is placed
+ // in registers or on the stack.
ArgIterator sArgPlacerSrc(&sSigSrc);
ArgIterator sArgPlacerDst(&sSigDst);
- UINT stackSizeSrc = sArgPlacerSrc.SizeOfArgStack();
- UINT stackSizeDst = sArgPlacerDst.SizeOfArgStack();
-
- if (stackSizeDst > stackSizeSrc)
+ if (shuffleType == ShuffleComputationType::InstantiatingStub)
{
- // we can drop arguments but we can never make them up - this is definitely not allowed
- COMPlusThrow(kVerificationException);
+ // Instantiating Stub shuffles only support register to register moves. More complex cases are handled by IL stubs
+ UINT stackSizeSrc = sArgPlacerSrc.SizeOfArgStack();
+ UINT stackSizeDst = sArgPlacerDst.SizeOfArgStack();
+ if (stackSizeSrc != stackSizeDst)
+ return FALSE;
}
- UINT stackSizeDelta;
-
-#ifdef UNIX_X86_ABI
- // Stack does not shrink as UNIX_X86_ABI uses CDECL (instead of STDCALL).
- stackSizeDelta = 0;
-#else
- stackSizeDelta = stackSizeSrc - stackSizeDst;
-#endif
-
- INT ofsSrc, ofsDst;
-
- // if the function is non static we need to place the 'this' first
- if (!pTargetMeth->IsStatic())
- {
- entry.srcofs = ShuffleOfs(sArgPlacerSrc.GetNextOffset());
- entry.dstofs = ShuffleEntry::REGMASK | 4;
- pShuffleEntryArray->Append(entry);
- }
- else if (sArgPlacerSrc.HasRetBuffArg())
- {
- // the first register is used for 'this'
- entry.srcofs = ShuffleOfs(sArgPlacerSrc.GetRetBuffArgOffset());
- entry.dstofs = ShuffleOfs(sArgPlacerDst.GetRetBuffArgOffset(), stackSizeDelta);
- if (entry.srcofs != entry.dstofs)
- pShuffleEntryArray->Append(entry);
- }
+ UINT stackSizeDelta = 0;
- while (TransitionBlock::InvalidOffset != (ofsSrc = sArgPlacerSrc.GetNextOffset()))
+#if defined(_TARGET_X86_) && !defined(UNIX_X86_ABI)
{
- ofsDst = sArgPlacerDst.GetNextOffset();
-
- int cbSize = sArgPlacerDst.GetArgSize();
+ UINT stackSizeSrc = sArgPlacerSrc.SizeOfArgStack();
+ UINT stackSizeDst = sArgPlacerDst.SizeOfArgStack();
- do
+ // Windows X86 calling convention requires the stack to shrink when removing
+ // arguments, as it is callee pop
+ if (stackSizeDst > stackSizeSrc)
{
- entry.srcofs = ShuffleOfs(ofsSrc);
- entry.dstofs = ShuffleOfs(ofsDst, stackSizeDelta);
-
- ofsSrc += STACK_ELEM_SIZE;
- ofsDst += STACK_ELEM_SIZE;
-
- if (entry.srcofs != entry.dstofs)
- pShuffleEntryArray->Append(entry);
-
- cbSize -= STACK_ELEM_SIZE;
+ // we can drop arguments but we can never make them up - this is definitely not allowed
+ COMPlusThrow(kVerificationException);
}
- while (cbSize > 0);
- }
- if (stackSizeDelta != 0)
- {
- // Emit code to move the return address
- entry.srcofs = 0; // retaddress is assumed to be at esp
- entry.dstofs = static_cast<UINT16>(stackSizeDelta);
- pShuffleEntryArray->Append(entry);
+ stackSizeDelta = stackSizeSrc - stackSizeDst;
}
-
- entry.srcofs = ShuffleEntry::SENTINEL;
- entry.dstofs = static_cast<UINT16>(stackSizeDelta);
- pShuffleEntryArray->Append(entry);
-
-#else // Portable default implementation
- MetaSig sSigSrc(pInvoke);
- MetaSig sSigDst(pTargetMeth);
-
- // Initialize helpers that determine how each argument for the source and destination signatures is placed
- // in registers or on the stack.
- ArgIterator sArgPlacerSrc(&sSigSrc);
- ArgIterator sArgPlacerDst(&sSigDst);
+#endif // Callee pop architectures - defined(_TARGET_X86_) && !defined(UNIX_X86_ABI)
INT ofsSrc;
INT ofsDst;
ArgLocDesc sArgSrc;
ArgLocDesc sArgDst;
-#if defined(UNIX_AMD64_ABI)
- int argSlots = NUM_FLOAT_ARGUMENT_REGISTERS + NUM_ARGUMENT_REGISTERS + sArgPlacerSrc.SizeOfArgStack() / sizeof(size_t);
-#endif // UNIX_AMD64_ABI
+ unsigned int argSlots = NUM_ARGUMENT_REGISTERS
+#ifdef NUM_FLOAT_ARGUMENT_REGISTERS
+ + NUM_FLOAT_ARGUMENT_REGISTERS
+#endif
+ + sArgPlacerSrc.SizeOfArgStack() / sizeof(size_t);
    // If the target method is non-static (this happens for open instance delegates), we need to account for
// the implicit this parameter.
if (sSigDst.HasThis())
{
- // The this pointer is an implicit argument for the destination signature. But on the source side it's
- // just another regular argument and needs to be iterated over by sArgPlacerSrc and the MetaSig.
- sArgPlacerSrc.GetArgLoc(sArgPlacerSrc.GetNextOffset(), &sArgSrc);
-
- sArgPlacerSrc.GetThisLoc(&sArgDst);
-
- ShuffleIterator iteratorSrc(&sArgSrc);
- ShuffleIterator iteratorDst(&sArgDst);
+ if (shuffleType == ShuffleComputationType::DelegateShuffleThunk)
+ {
+ // The this pointer is an implicit argument for the destination signature. But on the source side it's
+ // just another regular argument and needs to be iterated over by sArgPlacerSrc and the MetaSig.
+ sArgPlacerSrc.GetArgLoc(sArgPlacerSrc.GetNextOffset(), &sArgSrc);
+ sArgPlacerSrc.GetThisLoc(&sArgDst);
+ }
+ else if (shuffleType == ShuffleComputationType::InstantiatingStub)
+ {
+ _ASSERTE(sSigSrc.HasThis()); // Instantiating stubs should have the same HasThis flag
+ sArgPlacerDst.GetThisLoc(&sArgDst);
+ sArgPlacerSrc.GetThisLoc(&sArgSrc);
+ }
+ else
+ {
+ _ASSERTE(FALSE); // Unknown shuffle type being generated
+ }
- entry.srcofs = iteratorSrc.GetNextOfs();
- entry.dstofs = iteratorDst.GetNextOfs();
- pShuffleEntryArray->Append(entry);
+ if (!AddNextShuffleEntryToArray(sArgSrc, sArgDst, pShuffleEntryArray, shuffleType))
+ return FALSE;
}
// Handle any return buffer argument.
+ _ASSERTE(!!sArgPlacerDst.HasRetBuffArg() == !!sArgPlacerSrc.HasRetBuffArg());
if (sArgPlacerDst.HasRetBuffArg())
{
// The return buffer argument is implicit in both signatures.
@@ -427,17 +387,8 @@ VOID GenerateShuffleArray(MethodDesc* pInvoke, MethodDesc *pTargetMeth, SArray<S
sArgPlacerSrc.GetRetBuffArgLoc(&sArgSrc);
sArgPlacerDst.GetRetBuffArgLoc(&sArgDst);
- ShuffleIterator iteratorSrc(&sArgSrc);
- ShuffleIterator iteratorDst(&sArgDst);
-
- entry.srcofs = iteratorSrc.GetNextOfs();
- entry.dstofs = iteratorDst.GetNextOfs();
-
- // Depending on the type of target method (static vs instance) the return buffer argument may end up
- // in the same register in both signatures. So we only commit the entry (by moving the entry pointer
- // along) in the case where it's not a no-op (i.e. the source and destination ops are different).
- if (entry.srcofs != entry.dstofs)
- pShuffleEntryArray->Append(entry);
+ if (!AddNextShuffleEntryToArray(sArgSrc, sArgDst, pShuffleEntryArray, shuffleType))
+ return FALSE;
#endif // !defined(_TARGET_ARM64_) || !defined(CALLDESCR_RETBUFFARGREG)
}
@@ -453,148 +404,243 @@ VOID GenerateShuffleArray(MethodDesc* pInvoke, MethodDesc *pTargetMeth, SArray<S
sArgPlacerSrc.GetArgLoc(ofsSrc, &sArgSrc);
sArgPlacerDst.GetArgLoc(ofsDst, &sArgDst);
- ShuffleIterator iteratorSrc(&sArgSrc);
- ShuffleIterator iteratorDst(&sArgDst);
-
- // Shuffle each slot in the argument (register or stack slot) from source to destination.
- while (iteratorSrc.HasNextOfs())
- {
- // Locate the next slot to shuffle in the source and destination and encode the transfer into a
- // shuffle entry.
- entry.srcofs = iteratorSrc.GetNextOfs();
- entry.dstofs = iteratorDst.GetNextOfs();
+ if (!AddNextShuffleEntryToArray(sArgSrc, sArgDst, pShuffleEntryArray, shuffleType))
+ return FALSE;
+ }
- // Only emit this entry if it's not a no-op (i.e. the source and destination locations are
- // different).
- if (entry.srcofs != entry.dstofs)
- pShuffleEntryArray->Append(entry);
- }
+ if (shuffleType == ShuffleComputationType::InstantiatingStub
+#if defined(UNIX_AMD64_ABI)
+ || true
+#endif // UNIX_AMD64_ABI
+ )
+ {
+ // The Unix AMD64 ABI can cause a struct to be passed on stack for the source and in registers for the destination.
+ // That can cause some arguments that are passed on stack for the destination to be passed in registers in the source.
+ // An extreme example of that is e.g.:
+ // void fn(int, int, int, int, int, struct {int, double}, double, double, double, double, double, double, double, double, double, double)
+ // For this signature, the shuffle needs to move slots as follows (please note the "forward" movement of xmm registers):
+ // RDI->RSI, RDX->RCX, R8->RDX, R9->R8, stack[0]->R9, xmm0->xmm1, xmm1->xmm2, ... xmm6->xmm7, xmm7->stack[0], stack[1]->xmm0, stack[2]->stack[1], stack[3]->stack[2]
+    // To prevent overwriting slots before they are moved, we need to perform the shuffling in the correct order
- // We should have run out of slots to shuffle in the destination at the same time as the source.
- _ASSERTE(!iteratorDst.HasNextOfs());
- }
+ NewArrayHolder<ShuffleGraphNode> pGraphNodes = new ShuffleGraphNode[argSlots];
+ // Initialize the graph array
+ for (unsigned int i = 0; i < argSlots; i++)
+ {
+ pGraphNodes[i].prev = ShuffleGraphNode::NoNode;
+ pGraphNodes[i].isMarked = true;
+ pGraphNodes[i].isSource = false;
+ }
-#if defined(UNIX_AMD64_ABI)
- // The Unix AMD64 ABI can cause a struct to be passed on stack for the source and in registers for the destination.
- // That can cause some arguments that are passed on stack for the destination to be passed in registers in the source.
- // An extreme example of that is e.g.:
- // void fn(int, int, int, int, int, struct {int, double}, double, double, double, double, double, double, double, double, double, double)
- // For this signature, the shuffle needs to move slots as follows (please note the "forward" movement of xmm registers):
- // RDI->RSI, RDX->RCX, R8->RDX, R9->R8, stack[0]->R9, xmm0->xmm1, xmm1->xmm2, ... xmm6->xmm7, xmm7->stack[0], stack[1]->xmm0, stack[2]->stack[1], stack[3]->stack[2]
-    // To prevent overwriting slots before they are moved, we need to perform the shuffling in the correct order
+ // Build the directed graph representing register and stack slot shuffling.
+ // The links are directed from destination to source.
+ // During the build also set isSource flag for nodes that are sources of data.
+ // The ones that don't have the isSource flag set are beginnings of non-cyclic
+ // segments of the graph.
+ for (unsigned int i = 0; i < pShuffleEntryArray->GetCount(); i++)
+ {
+ ShuffleEntry entry = (*pShuffleEntryArray)[i];
- NewArrayHolder<ShuffleGraphNode> pGraphNodes = new ShuffleGraphNode[argSlots];
+ int srcIndex = GetNormalizedArgumentSlotIndex(entry.srcofs);
+ int dstIndex = GetNormalizedArgumentSlotIndex(entry.dstofs);
- // Initialize the graph array
- for (unsigned int i = 0; i < argSlots; i++)
- {
- pGraphNodes[i].prev = ShuffleGraphNode::NoNode;
- pGraphNodes[i].isMarked = true;
- pGraphNodes[i].isSource = false;
- }
+ _ASSERTE((srcIndex >= 0) && ((unsigned int)srcIndex < argSlots));
+ _ASSERTE((dstIndex >= 0) && ((unsigned int)dstIndex < argSlots));
- // Build the directed graph representing register and stack slot shuffling.
- // The links are directed from destination to source.
- // During the build also set isSource flag for nodes that are sources of data.
- // The ones that don't have the isSource flag set are beginnings of non-cyclic
- // segments of the graph.
- for (unsigned int i = 0; i < pShuffleEntryArray->GetCount(); i++)
- {
- ShuffleEntry entry = (*pShuffleEntryArray)[i];
+ // Unmark the node to indicate that it was not processed yet
+ pGraphNodes[srcIndex].isMarked = false;
+ // The node contains a register / stack slot that is a source from which we move data to a destination one
+ pGraphNodes[srcIndex].isSource = true;
+ pGraphNodes[srcIndex].ofs = entry.srcofs;
- int srcIndex = GetNormalizedArgumentSlotIndex(entry.srcofs);
- int dstIndex = GetNormalizedArgumentSlotIndex(entry.dstofs);
+ // Unmark the node to indicate that it was not processed yet
+ pGraphNodes[dstIndex].isMarked = false;
+ // Link to the previous node in the graph (source of data for the current node)
+ pGraphNodes[dstIndex].prev = srcIndex;
+ pGraphNodes[dstIndex].ofs = entry.dstofs;
+ }
- // Unmark the node to indicate that it was not processed yet
- pGraphNodes[srcIndex].isMarked = false;
- // The node contains a register / stack slot that is a source from which we move data to a destination one
- pGraphNodes[srcIndex].isSource = true;
- pGraphNodes[srcIndex].ofs = entry.srcofs;
+    // Now that we've built the graph, clear the array; we will regenerate it from the graph, ensuring a proper shuffle order
+ pShuffleEntryArray->Clear();
- // Unmark the node to indicate that it was not processed yet
- pGraphNodes[dstIndex].isMarked = false;
- // Link to the previous node in the graph (source of data for the current node)
- pGraphNodes[dstIndex].prev = srcIndex;
- pGraphNodes[dstIndex].ofs = entry.dstofs;
- }
+ // Add all non-cyclic subgraphs to the target shuffle array and mark their nodes as visited
+ for (unsigned int startIndex = 0; startIndex < argSlots; startIndex++)
+ {
+ unsigned int index = startIndex;
-    // Now that we've built the graph, clear the array; we will regenerate it from the graph, ensuring a proper shuffle order
- pShuffleEntryArray->Clear();
+ if (!pGraphNodes[index].isMarked && !pGraphNodes[index].isSource)
+ {
+ // This node is not a source, that means it is an end of shuffle chain
+ // Generate shuffle array entries for all nodes in the chain in a correct
+ // order.
+ UINT16 dstOfs = ShuffleEntry::SENTINEL;
+
+ do
+ {
+ _ASSERTE(index < argSlots);
+ pGraphNodes[index].isMarked = true;
+ if (dstOfs != ShuffleEntry::SENTINEL)
+ {
+ entry.srcofs = pGraphNodes[index].ofs;
+ entry.dstofs = dstOfs;
+ pShuffleEntryArray->Append(entry);
+ }
- // Add all non-cyclic subgraphs to the target shuffle array and mark their nodes as visited
- for (unsigned int startIndex = 0; startIndex < argSlots; startIndex++)
- {
- unsigned int index = startIndex;
+ dstOfs = pGraphNodes[index].ofs;
+ index = pGraphNodes[index].prev;
+ }
+ while (index != ShuffleGraphNode::NoNode);
+ }
+ }
- if (!pGraphNodes[index].isMarked && !pGraphNodes[index].isSource)
+ // Process all cycles in the graph
+ for (unsigned int startIndex = 0; startIndex < argSlots; startIndex++)
{
- // This node is not a source, that means it is an end of shuffle chain
- // Generate shuffle array entries for all nodes in the chain in a correct
- // order.
- UINT16 dstOfs = ShuffleEntry::SENTINEL;
+ unsigned int index = startIndex;
- do
+ if (!pGraphNodes[index].isMarked)
{
- pGraphNodes[index].isMarked = true;
- if (dstOfs != ShuffleEntry::SENTINEL)
+ if (shuffleType == ShuffleComputationType::InstantiatingStub)
{
+ // Use of the helper reg isn't supported for these stubs.
+ return FALSE;
+ }
+ // This node is part of a new cycle as all non-cyclic parts of the graphs were already visited
+
+ // Move the first node register / stack slot to a helper reg
+ UINT16 dstOfs = ShuffleEntry::HELPERREG;
+
+ do
+ {
+ _ASSERTE(index < argSlots);
+ pGraphNodes[index].isMarked = true;
+
entry.srcofs = pGraphNodes[index].ofs;
entry.dstofs = dstOfs;
pShuffleEntryArray->Append(entry);
+
+ dstOfs = pGraphNodes[index].ofs;
+ index = pGraphNodes[index].prev;
}
+ while (index != startIndex);
- dstOfs = pGraphNodes[index].ofs;
- index = pGraphNodes[index].prev;
+ // Move helper reg to the last node register / stack slot
+ entry.srcofs = ShuffleEntry::HELPERREG;
+ entry.dstofs = dstOfs;
+ pShuffleEntryArray->Append(entry);
}
- while (index != ShuffleGraphNode::NoNode);
}
}
- // Process all cycles in the graph
- for (unsigned int startIndex = 0; startIndex < argSlots; startIndex++)
- {
- unsigned int index = startIndex;
+ entry.srcofs = ShuffleEntry::SENTINEL;
+ entry.dstofs = 0;
+ pShuffleEntryArray->Append(entry);
- if (!pGraphNodes[index].isMarked)
- {
- // This node is part of a new cycle as all non-cyclic parts of the graphs were already visited
+ return TRUE;
+}
+#endif // FEATURE_PORTABLE_SHUFFLE_THUNKS
- // Move the first node register / stack slot to a helper reg
- UINT16 dstOfs = ShuffleEntry::HELPERREG;
+VOID GenerateShuffleArray(MethodDesc* pInvoke, MethodDesc *pTargetMeth, SArray<ShuffleEntry> * pShuffleEntryArray)
+{
+ STANDARD_VM_CONTRACT;
- do
- {
- pGraphNodes[index].isMarked = true;
+#ifdef FEATURE_PORTABLE_SHUFFLE_THUNKS
+ // Portable default implementation
+ GenerateShuffleArrayPortable(pInvoke, pTargetMeth, pShuffleEntryArray, ShuffleComputationType::DelegateShuffleThunk);
+#elif defined(_TARGET_X86_)
+ ShuffleEntry entry;
+ ZeroMemory(&entry, sizeof(entry));
- entry.srcofs = pGraphNodes[index].ofs;
- entry.dstofs = dstOfs;
- pShuffleEntryArray->Append(entry);
+ // Must create independent msigs to prevent the argiterators from
+    // interfering with each other.
+ MetaSig sSigSrc(pInvoke);
+ MetaSig sSigDst(pTargetMeth);
- dstOfs = pGraphNodes[index].ofs;
- index = pGraphNodes[index].prev;
- }
- while (index != startIndex);
+ _ASSERTE(sSigSrc.HasThis());
+
+ ArgIterator sArgPlacerSrc(&sSigSrc);
+ ArgIterator sArgPlacerDst(&sSigDst);
+
+ UINT stackSizeSrc = sArgPlacerSrc.SizeOfArgStack();
+ UINT stackSizeDst = sArgPlacerDst.SizeOfArgStack();
+
+ if (stackSizeDst > stackSizeSrc)
+ {
+ // we can drop arguments but we can never make them up - this is definitely not allowed
+ COMPlusThrow(kVerificationException);
+ }
+
+ UINT stackSizeDelta;
+
+#ifdef UNIX_X86_ABI
+ // Stack does not shrink as UNIX_X86_ABI uses CDECL (instead of STDCALL).
+ stackSizeDelta = 0;
+#else
+ stackSizeDelta = stackSizeSrc - stackSizeDst;
+#endif
+
+ INT ofsSrc, ofsDst;
- // Move helper reg to the last node register / stack slot
- entry.srcofs = ShuffleEntry::HELPERREG;
- entry.dstofs = dstOfs;
+    // if the function is non-static we need to place the 'this' first
+ if (!pTargetMeth->IsStatic())
+ {
+ entry.srcofs = ShuffleOfs(sArgPlacerSrc.GetNextOffset());
+ entry.dstofs = ShuffleEntry::REGMASK | 4;
+ pShuffleEntryArray->Append(entry);
+ }
+ else if (sArgPlacerSrc.HasRetBuffArg())
+ {
+ // the first register is used for 'this'
+ entry.srcofs = ShuffleOfs(sArgPlacerSrc.GetRetBuffArgOffset());
+ entry.dstofs = ShuffleOfs(sArgPlacerDst.GetRetBuffArgOffset(), stackSizeDelta);
+ if (entry.srcofs != entry.dstofs)
pShuffleEntryArray->Append(entry);
- }
}
-#endif // UNIX_AMD64_ABI
+ while (TransitionBlock::InvalidOffset != (ofsSrc = sArgPlacerSrc.GetNextOffset()))
+ {
+ ofsDst = sArgPlacerDst.GetNextOffset();
+
+ int cbSize = sArgPlacerDst.GetArgSize();
+
+ do
+ {
+ entry.srcofs = ShuffleOfs(ofsSrc);
+ entry.dstofs = ShuffleOfs(ofsDst, stackSizeDelta);
+
+ ofsSrc += STACK_ELEM_SIZE;
+ ofsDst += STACK_ELEM_SIZE;
+
+ if (entry.srcofs != entry.dstofs)
+ pShuffleEntryArray->Append(entry);
+
+ cbSize -= STACK_ELEM_SIZE;
+ }
+ while (cbSize > 0);
+ }
+
+ if (stackSizeDelta != 0)
+ {
+ // Emit code to move the return address
+ entry.srcofs = 0; // retaddress is assumed to be at esp
+ entry.dstofs = static_cast<UINT16>(stackSizeDelta);
+ pShuffleEntryArray->Append(entry);
+ }
entry.srcofs = ShuffleEntry::SENTINEL;
- entry.dstofs = 0;
+ entry.dstofs = static_cast<UINT16>(stackSizeDelta);
pShuffleEntryArray->Append(entry);
+
+#else
+#error Unsupported architecture
#endif
}
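A worked example of the Windows x86 stackSizeDelta computed in the path above (byte counts are hypothetical): with STDCALL the callee pops its own arguments, so when the target consumes fewer stack bytes than Invoke passes, the thunk must slide the return address by the difference.

#include <cassert>

int main()
{
    unsigned stackSizeSrc = 12; // hypothetical: stack bytes the Invoke signature passes
    unsigned stackSizeDst = 8;  // hypothetical: stack bytes the target consumes
    assert(stackSizeDst <= stackSizeSrc); // may drop arguments, never invent them
    unsigned stackSizeDelta = stackSizeSrc - stackSizeDst;
    assert(stackSizeDelta == 4); // return address shifts up by one 4-byte slot
    return 0;
}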
ShuffleThunkCache *COMDelegate::m_pShuffleThunkCache = NULL;
-MulticastStubCache *COMDelegate::m_pSecureDelegateStubCache = NULL;
+#ifndef FEATURE_MULTICASTSTUB_AS_IL
MulticastStubCache *COMDelegate::m_pMulticastStubCache = NULL;
+#endif
CrstStatic COMDelegate::s_DelegateToFPtrHashCrst;
PtrHashMap* COMDelegate::s_pDelegateToFPtrHash = NULL;
@@ -619,8 +665,9 @@ void COMDelegate::Init()
s_pDelegateToFPtrHash->Init(TRUE, &lock);
m_pShuffleThunkCache = new ShuffleThunkCache(SystemDomain::GetGlobalLoaderAllocator()->GetStubHeap());
+#ifndef FEATURE_MULTICASTSTUB_AS_IL
m_pMulticastStubCache = new MulticastStubCache();
- m_pSecureDelegateStubCache = new MulticastStubCache();
+#endif
}
#ifdef FEATURE_COMINTEROP
@@ -948,7 +995,7 @@ FCIMPLEND
// This method is called (in the late bound case only) once a target method has been decided on. All the consistency checks
// (signature matching etc.) have been done at this point and the only major reason we could fail now is on security grounds
// (someone trying to create a delegate over a method that's not visible to them for instance). This method will initialize the
-// delegate (wrapping it in a secure delegate if necessary). Upon return the delegate should be ready for invocation.
+// delegate (wrapping it in a wrapper delegate if necessary). Upon return the delegate should be ready for invocation.
void COMDelegate::BindToMethod(DELEGATEREF *pRefThis,
OBJECTREF *pRefFirstArg,
MethodDesc *pTargetMethod,
@@ -968,8 +1015,8 @@ void COMDelegate::BindToMethod(DELEGATEREF *pRefThis,
}
CONTRACTL_END;
- // We might have to wrap the delegate in a secure delegate depending on the location of the target method. The following local
- // keeps track of the real (i.e. non-secure) delegate whether or not this is required.
+ // We might have to wrap the delegate in a wrapper delegate depending on the target method. The following local
+ // keeps track of the real (i.e. non-wrapper) delegate whether or not this is required.
DELEGATEREF refRealDelegate = NULL;
GCPROTECT_BEGIN(refRealDelegate);
@@ -1004,11 +1051,11 @@ void COMDelegate::BindToMethod(DELEGATEREF *pRefThis,
pTargetMethod);
}
- // If we didn't wrap the real delegate in a secure delegate then the real delegate is the one passed in.
+ // If we didn't wrap the real delegate in a wrapper delegate then the real delegate is the one passed in.
if (refRealDelegate == NULL)
{
if (NeedsWrapperDelegate(pTargetMethod))
- refRealDelegate = CreateSecureDelegate(*pRefThis, NULL, pTargetMethod);
+ refRealDelegate = CreateWrapperDelegate(*pRefThis, pTargetMethod);
else
refRealDelegate = *pRefThis;
}
@@ -1774,7 +1821,7 @@ FCIMPL3(void, COMDelegate::DelegateConstruct, Object* refThisUNSAFE, Object* tar
}
if (NeedsWrapperDelegate(pMeth))
- gc.refThis = CreateSecureDelegate(gc.refThis, NULL, pMeth);
+ gc.refThis = CreateWrapperDelegate(gc.refThis, pMeth);
if (pMeth->GetLoaderAllocator()->IsCollectible())
gc.refThis->SetMethodBase(pMeth->GetLoaderAllocator()->GetExposedObject());
@@ -1887,10 +1934,10 @@ MethodDesc *COMDelegate::GetMethodDesc(OBJECTREF orDelegate)
// this is one of the following:
// - multicast - _invocationList is Array && _invocationCount != 0
    //  - unmanaged ftn ptr - _invocationList == NULL && _invocationCount == -1
- // - secure delegate - _invocationList is Delegate && _invocationCount != NULL
+ // - wrapper delegate - _invocationList is Delegate && _invocationCount != NULL
// - virtual delegate - _invocationList == null && _invocationCount == (target MethodDesc)
- // or _invocationList points to a LoaderAllocator/DynamicResolver (inner open virtual delegate of a Secure Delegate)
- // in the secure delegate case we want to unwrap and return the method desc of the inner delegate
+ // or _invocationList points to a LoaderAllocator/DynamicResolver (inner open virtual delegate of a Wrapper Delegate)
+ // in the wrapper delegate case we want to unwrap and return the method desc of the inner delegate
// in the other cases we return the method desc for the invoke
innerDel = (DELEGATEREF) thisDel->GetInvocationList();
bool fOpenVirtualDelegate = false;
@@ -1971,10 +2018,10 @@ OBJECTREF COMDelegate::GetTargetObject(OBJECTREF obj)
// this is one of the following:
// - multicast
// - unmanaged ftn ptr
- // - secure delegate
+ // - wrapper delegate
// - virtual delegate - _invocationList == null && _invocationCount == (target MethodDesc)
- // or _invocationList points to a LoaderAllocator/DynamicResolver (inner open virtual delegate of a Secure Delegate)
- // in the secure delegate case we want to unwrap and return the object of the inner delegate
+ // or _invocationList points to a LoaderAllocator/DynamicResolver (inner open virtual delegate of a Wrapper Delegate)
+ // in the wrapper delegate case we want to unwrap and return the object of the inner delegate
innerDel = (DELEGATEREF) thisDel->GetInvocationList();
if (innerDel != NULL)
{
@@ -2161,9 +2208,9 @@ BOOL COMDelegate::NeedsWrapperDelegate(MethodDesc* pTargetMD)
#ifdef _TARGET_ARM_
// For arm VSD expects r4 to contain the indirection cell. However r4 is a non-volatile register
// and its value must be preserved. So we need to erect a frame and store indirection cell in r4 before calling
- // virtual stub dispatch. Erecting frame is already done by secure delegates so the secureDelegate infrastructure
+ // virtual stub dispatch. Erecting frame is already done by wrapper delegates so the Wrapper Delegate infrastructure
    // can easily be used for our purpose.
- // set needsSecureDelegate flag in order to erect a frame. (Secure Delegate stub also loads the right value in r4)
+ // set needsWrapperDelegate flag in order to erect a frame. (Wrapper Delegate stub also loads the right value in r4)
if (!pTargetMD->IsStatic() && pTargetMD->IsVirtual() && !pTargetMD->GetMethodTable()->IsValueType())
return TRUE;
#endif
@@ -2174,14 +2221,13 @@ BOOL COMDelegate::NeedsWrapperDelegate(MethodDesc* pTargetMD)
#ifndef CROSSGEN_COMPILE
-// to create a secure delegate wrapper we need:
+// to create a wrapper delegate we need:
// - the delegate to forward to -> _invocationList
-// - the creator assembly -> _methodAuxPtr
// - the delegate invoke MethodDesc -> _count
// the 2 fields used for invocation will contain:
// - the delegate itself -> _pORField
-// - the secure stub -> _pFPField
-DELEGATEREF COMDelegate::CreateSecureDelegate(DELEGATEREF delegate, MethodDesc* pCreatorMethod, MethodDesc* pTargetMD)
+// - the wrapper stub -> _pFPField
+DELEGATEREF COMDelegate::CreateWrapperDelegate(DELEGATEREF delegate, MethodDesc* pTargetMD)
{
CONTRACTL
{
@@ -2195,10 +2241,10 @@ DELEGATEREF COMDelegate::CreateSecureDelegate(DELEGATEREF delegate, MethodDesc*
MethodDesc *pMD = ((DelegateEEClass*)(pDelegateType->GetClass()))->GetInvokeMethod();
// allocate the object
struct _gc {
- DELEGATEREF refSecDel;
+ DELEGATEREF refWrapperDel;
DELEGATEREF innerDel;
} gc;
- gc.refSecDel = delegate;
+ gc.refWrapperDel = delegate;
gc.innerDel = NULL;
GCPROTECT_BEGIN(gc);
@@ -2207,38 +2253,17 @@ DELEGATEREF COMDelegate::CreateSecureDelegate(DELEGATEREF delegate, MethodDesc*
//
// Object reference field...
- gc.refSecDel->SetTarget(gc.refSecDel);
+ gc.refWrapperDel->SetTarget(gc.refWrapperDel);
- // save the secure invoke stub. GetSecureInvoke() can trigger GC.
- PCODE tmp = GetSecureInvoke(pMD);
- gc.refSecDel->SetMethodPtr(tmp);
- // save the assembly
- gc.refSecDel->SetMethodPtrAux((PCODE)(void *)pCreatorMethod);
+    // save the wrapper invoke stub. GetWrapperInvoke() can trigger GC.
+ PCODE tmp = GetWrapperInvoke(pMD);
+ gc.refWrapperDel->SetMethodPtr(tmp);
// save the delegate MethodDesc for the frame
- gc.refSecDel->SetInvocationCount((INT_PTR)pMD);
-
+ gc.refWrapperDel->SetInvocationCount((INT_PTR)pMD);
+
// save the delegate to forward to
gc.innerDel = (DELEGATEREF) pDelegateType->Allocate();
- gc.refSecDel->SetInvocationList(gc.innerDel);
-
- if (pCreatorMethod != NULL)
- {
- // If the pCreatorMethod is a collectible method, then stash a reference to the
- // LoaderAllocator/DynamicResolver of the collectible assembly/method in the invocationList
- // of the inner delegate
-    // (The invocationList of the inner delegate is the only field guaranteed to be unused for
- // other purposes at this time.)
- if (pCreatorMethod->IsLCGMethod())
- {
- OBJECTREF refCollectible = pCreatorMethod->AsDynamicMethodDesc()->GetLCGMethodResolver()->GetManagedResolver();
- gc.innerDel->SetInvocationList(refCollectible);
- }
- else if (pCreatorMethod->GetLoaderAllocator()->IsCollectible())
- {
- OBJECTREF refCollectible = pCreatorMethod->GetLoaderAllocator()->GetExposedObject();
- gc.innerDel->SetInvocationList(refCollectible);
- }
- }
+ gc.refWrapperDel->SetInvocationList(gc.innerDel);
GCPROTECT_END();
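A purely illustrative picture of the field assignments CreateWrapperDelegate makes above; the names mirror the managed Delegate fields the comments reference, not the runtime's actual layout:

#include <cstdint>

struct WrapperDelegateSketch
{
    void         *_target;          // set to the wrapper delegate itself
    void         *_methodPtr;       // the wrapper invoke stub from GetWrapperInvoke
    void         *_invocationList;  // the inner delegate being forwarded to
    std::intptr_t _invocationCount; // the delegate Invoke MethodDesc, consumed by the frame
};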
@@ -2482,8 +2507,7 @@ FCIMPL1(PCODE, COMDelegate::GetMulticastInvoke, Object* refThisIn)
FCIMPLEND
#endif // FEATURE_MULTICASTSTUB_AS_IL
-#ifdef FEATURE_STUBS_AS_IL
-PCODE COMDelegate::GetSecureInvoke(MethodDesc* pMD)
+PCODE COMDelegate::GetWrapperInvoke(MethodDesc* pMD)
{
CONTRACTL
{
@@ -2495,7 +2519,7 @@ PCODE COMDelegate::GetSecureInvoke(MethodDesc* pMD)
MethodTable * pDelegateMT = pMD->GetMethodTable();
DelegateEEClass* delegateEEClass = (DelegateEEClass*) pDelegateMT->GetClass();
- Stub *pStub = delegateEEClass->m_pSecureDelegateInvokeStub;
+ Stub *pStub = delegateEEClass->m_pWrapperDelegateInvokeStub;
if (pStub == NULL)
{
@@ -2534,7 +2558,7 @@ PCODE COMDelegate::GetSecureInvoke(MethodDesc* pMD)
MethodDesc* pStubMD =
ILStubCache::CreateAndLinkNewILStubMethodDesc(pMD->GetLoaderAllocator(),
pMD->GetMethodTable(),
- ILSTUB_SECUREDELEGATE_INVOKE,
+ ILSTUB_WRAPPERDELEGATE_INVOKE,
pMD->GetModule(),
pSig, cbSig,
NULL,
@@ -2544,69 +2568,14 @@ PCODE COMDelegate::GetSecureInvoke(MethodDesc* pMD)
g_IBCLogger.LogEEClassCOWTableAccess(pDelegateMT);
- InterlockedCompareExchangeT<PTR_Stub>(EnsureWritablePages(&delegateEEClass->m_pSecureDelegateInvokeStub), pStub, NULL);
+ InterlockedCompareExchangeT<PTR_Stub>(EnsureWritablePages(&delegateEEClass->m_pWrapperDelegateInvokeStub), pStub, NULL);
}
return pStub->GetEntryPoint();
}
-#else // FEATURE_STUBS_AS_IL
-PCODE COMDelegate::GetSecureInvoke(MethodDesc* pMD)
-{
- CONTRACT (PCODE)
- {
- THROWS;
- GC_TRIGGERS;
- MODE_ANY;
- POSTCONDITION(RETVAL != NULL);
- }
- CONTRACT_END;
-
- MethodTable * pDelegateMT = pMD->GetMethodTable();
- DelegateEEClass* delegateEEClass = (DelegateEEClass*) pDelegateMT->GetClass();
-
- Stub *pStub = delegateEEClass->m_pSecureDelegateInvokeStub;
-
- if (pStub == NULL)
- {
- GCX_PREEMP();
-
- MetaSig sig(pMD);
-
- UINT_PTR hash = CPUSTUBLINKER::HashMulticastInvoke(&sig);
-
- pStub = m_pSecureDelegateStubCache->GetStub(hash);
- if (!pStub)
- {
- CPUSTUBLINKER sl;
-
- LOG((LF_CORDB,LL_INFO10000, "COMD::GIMS making a multicast delegate\n"));
- sl.EmitSecureDelegateInvoke(hash);
-
- // The cache is process-wide, based on signature. It never unloads
- Stub *pCandidate = sl.Link(SystemDomain::GetGlobalLoaderAllocator()->GetStubHeap(), NEWSTUB_FL_MULTICAST);
-
- Stub *pWinner = m_pSecureDelegateStubCache->AttemptToSetStub(hash, pCandidate);
- pCandidate->DecRef();
- if (!pWinner)
- COMPlusThrowOM();
-
- LOG((LF_CORDB,LL_INFO10000, "Putting a MC stub at 0x%x (code:0x%x)\n",
- pWinner, (BYTE*)pWinner+sizeof(Stub)));
-
- pStub = pWinner;
- }
-
- g_IBCLogger.LogEEClassCOWTableAccess(pDelegateMT);
- EnsureWritablePages(&delegateEEClass->m_pSecureDelegateInvokeStub);
- delegateEEClass->m_pSecureDelegateInvokeStub = pStub;
- }
- RETURN (pStub->GetEntryPoint());
-}
-#endif // FEATURE_STUBS_AS_IL
#endif // CROSSGEN_COMPILE
-
static BOOL IsLocationAssignable(TypeHandle fromHandle, TypeHandle toHandle, BOOL relaxedMatch, BOOL fromHandleIsBoxed)
{
CONTRACTL
@@ -3102,7 +3071,7 @@ MethodDesc* COMDelegate::GetDelegateCtor(TypeHandle delegateType, MethodDesc *pT
if (NeedsWrapperDelegate(pTargetMethod))
{
- // If we need a wrapper even it is not a secure delegate, go through slow path
+ // If we need a wrapper, go through slow path
return NULL;
}
@@ -3130,7 +3099,7 @@ MethodDesc* COMDelegate::GetDelegateCtor(TypeHandle delegateType, MethodDesc *pT
// 4- Static closed first arg target method null null 0
// 5- Static closed (special sig) delegate specialSig thunk target method first arg 0
// 6- Static opened delegate shuffle thunk target method null 0
- // 7- Secure delegate call thunk MethodDesc (frame) target delegate creator assembly
+ // 7- Wrapper delegate call thunk MethodDesc (frame) target delegate (arm only, VSD indirection cell address)
//
// Delegate invoke arg count == target method arg count - 2, 3, 6
// Delegate invoke arg count == 1 + target method arg count - 1, 4, 5
@@ -3299,7 +3268,7 @@ BOOL COMDelegate::ValidateCtor(TypeHandle instHnd,
return IsMethodDescCompatible(instHnd, ftnParentHnd, pFtn, dlgtHnd, pDlgtInvoke, DBF_RelaxedSignature, pfIsOpenDelegate);
}
-BOOL COMDelegate::IsSecureDelegate(DELEGATEREF dRef)
+BOOL COMDelegate::IsWrapperDelegate(DELEGATEREF dRef)
{
CONTRACTL
{
@@ -3314,7 +3283,7 @@ BOOL COMDelegate::IsSecureDelegate(DELEGATEREF dRef)
innerDel = (DELEGATEREF) dRef->GetInvocationList();
if (innerDel != NULL && innerDel->GetMethodTable()->IsDelegate())
{
- // We have a secure delegate
+ // We have a wrapper delegate
return TRUE;
}
}
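The ordering pass at the heart of GenerateShuffleArrayPortable can be modeled in isolation: treat each move as a dst->src link, emit acyclic chains from their pure-destination ends, then rotate any remaining cycle through a helper slot. A simplified standalone sketch; slots are plain ints and HELPER stands in for ShuffleEntry::HELPERREG:

#include <cstdio>
#include <vector>

struct Move { int src, dst; };
constexpr int HELPER = -1; // stands in for ShuffleEntry::HELPERREG
constexpr int NONODE = -1; // stands in for ShuffleGraphNode::NoNode

// Reorder (src -> dst) moves so no slot is overwritten before it is read,
// mirroring the graph walk above: nodes link dst -> src, chains are emitted
// from their non-source ends, cycles are rotated through the helper slot.
std::vector<Move> OrderMoves(const std::vector<Move> &moves, int slotCount)
{
    std::vector<int>  prev(slotCount, NONODE);
    std::vector<bool> isSource(slotCount, false);
    std::vector<bool> marked(slotCount, true);

    for (const Move &m : moves)
    {
        marked[m.src] = marked[m.dst] = false;
        isSource[m.src] = true;
        prev[m.dst] = m.src;
    }

    std::vector<Move> ordered;

    // Acyclic chains: start at nodes nothing reads from, walk back to the source.
    for (int start = 0; start < slotCount; start++)
    {
        if (marked[start] || isSource[start])
            continue;
        int dst = -2; // "no destination yet" sentinel
        for (int i = start; i != NONODE; i = prev[i])
        {
            marked[i] = true;
            if (dst != -2)
                ordered.push_back({i, dst});
            dst = i;
        }
    }

    // Remaining unmarked nodes lie on cycles: break each via the helper slot.
    for (int start = 0; start < slotCount; start++)
    {
        if (marked[start])
            continue;
        int dst = HELPER;
        int i = start;
        do
        {
            marked[i] = true;
            ordered.push_back({i, dst});
            dst = i;
            i = prev[i];
        } while (i != start);
        ordered.push_back({HELPER, dst});
    }
    return ordered;
}

int main()
{
    // Swap slots 0 and 1, and shift 3 -> 2: only the swap needs the helper.
    for (const Move &m : OrderMoves({{0, 1}, {1, 0}, {3, 2}}, 4))
        std::printf("%d -> %d\n", m.src, m.dst);
    return 0;
}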
diff --git a/src/vm/comdelegate.h b/src/vm/comdelegate.h
index 330f1f8886..d459c45477 100644
--- a/src/vm/comdelegate.h
+++ b/src/vm/comdelegate.h
@@ -18,10 +18,18 @@ class ShuffleThunkCache;
#include "dllimportcallback.h"
#include "stubcache.h"
+#ifndef FEATURE_MULTICASTSTUB_AS_IL
typedef ArgBasedStubCache MulticastStubCache;
+#endif
VOID GenerateShuffleArray(MethodDesc* pInvoke, MethodDesc *pTargetMeth, struct ShuffleEntry * pShuffleEntryArray, size_t nEntries);
+enum class ShuffleComputationType
+{
+ InstantiatingStub,
+ DelegateShuffleThunk
+};
+BOOL GenerateShuffleArrayPortable(MethodDesc* pMethodSrc, MethodDesc *pMethodDst, SArray<ShuffleEntry> * pShuffleEntryArray, ShuffleComputationType shuffleType);
// This class represents the native methods for the Delegate class
class COMDelegate
@@ -31,12 +39,12 @@ private:
// friend VOID CPUSTUBLINKER::EmitShuffleThunk(...);
friend class CPUSTUBLINKER;
friend class DelegateInvokeStubManager;
- friend class SecureDelegateFrame;
friend BOOL MulticastFrame::TraceFrame(Thread *thread, BOOL fromPatch,
TraceDestination *trace, REGDISPLAY *regs);
- static MulticastStubCache* m_pSecureDelegateStubCache;
+#ifndef FEATURE_MULTICASTSTUB_AS_IL
static MulticastStubCache* m_pMulticastStubCache;
+#endif
static CrstStatic s_DelegateToFPtrHashCrst; // Lock for the following hash.
static PtrHashMap* s_pDelegateToFPtrHash; // Hash table containing the Delegate->FPtr pairs
@@ -68,11 +76,11 @@ public:
// Get the invoke method for the delegate. Used to transition delegates to multicast delegates.
static FCDECL1(PCODE, GetMulticastInvoke, Object* refThis);
static FCDECL1(MethodDesc*, GetInvokeMethod, Object* refThis);
- static PCODE GetSecureInvoke(MethodDesc* pMD);
+ static PCODE GetWrapperInvoke(MethodDesc* pMD);
// determines whether the delegate needs to be wrapped for non-security reasons
static BOOL NeedsWrapperDelegate(MethodDesc* pTargetMD);
// on entry delegate points to the delegate to wrap
- static DELEGATEREF CreateSecureDelegate(DELEGATEREF delegate, MethodDesc* pCreatorMethod, MethodDesc* pTargetMD);
+ static DELEGATEREF CreateWrapperDelegate(DELEGATEREF delegate, MethodDesc* pTargetMD);
// Marshals a delegate to an unmanaged callback.
static LPVOID ConvertToCallback(OBJECTREF pDelegate);
@@ -104,8 +112,8 @@ public:
// Decides if pcls derives from Delegate.
static BOOL IsDelegate(MethodTable *pMT);
- // Decides if this is a secure delegate
- static BOOL IsSecureDelegate(DELEGATEREF dRef);
+ // Decides if this is a wrapper delegate
+ static BOOL IsWrapperDelegate(DELEGATEREF dRef);
// Get the cpu stub for a delegate invoke.
static PCODE GetInvokeMethodStub(EEImplMethodDesc* pMD);
@@ -197,20 +205,12 @@ struct ShuffleEntry
HELPERREG = 0xcfff, // Use a helper register as source or destination (used to handle cycles in the shuffling)
};
-#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)
- union {
- UINT16 srcofs;
- CorElementType argtype; // AMD64: shuffle array is just types
- };
-#else
-
UINT16 srcofs;
union {
UINT16 dstofs; //if srcofs != SENTINEL
UINT16 stacksizedelta; //if dstofs == SENTINEL, difference in stack size between virtual and static sigs
};
-#endif // _TARGET_AMD64_
};
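With the AMD64-only union gone, every architecture now consumes the same shuffle encoding: one (srcofs, dstofs) pair per move, terminated by an entry whose srcofs is SENTINEL. A minimal walker sketch, assuming the REGMASK/OFSREGMASK bits this change uses elsewhere in the enum; the two Move helpers are hypothetical stand-ins for the per-architecture emitters:

// Sketch: how a portable shuffle array is meant to be consumed.
void ApplyShuffleSketch(const ShuffleEntry* pEntry)
{
    for (; pEntry->srcofs != ShuffleEntry::SENTINEL; pEntry++)
    {
        if ((pEntry->srcofs & ShuffleEntry::REGMASK) &&
            (pEntry->dstofs & ShuffleEntry::REGMASK))
        {
            // register-to-register move - the only case computed
            // instantiating stubs permit (hypothetical helper)
            MoveRegToReg(pEntry->dstofs & ShuffleEntry::OFSREGMASK,
                         pEntry->srcofs & ShuffleEntry::OFSREGMASK);
        }
        else
        {
            // stack slot involved (hypothetical helper)
            MoveStackSlot(pEntry->dstofs, pEntry->srcofs);
        }
    }
}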
diff --git a/src/vm/crossgencompile.cpp b/src/vm/crossgencompile.cpp
index de07c81ef2..b533deb1e5 100644
--- a/src/vm/crossgencompile.cpp
+++ b/src/vm/crossgencompile.cpp
@@ -303,7 +303,7 @@ void Frame::Pop()
{
}
-PCODE COMDelegate::GetSecureInvoke(MethodDesc* pMD)
+PCODE COMDelegate::GetWrapperInvoke(MethodDesc* pMD)
{
return (PCODE)(0x12345);
}
diff --git a/src/vm/dllimport.h b/src/vm/dllimport.h
index 740a5e596e..bbff6fb09d 100644
--- a/src/vm/dllimport.h
+++ b/src/vm/dllimport.h
@@ -183,11 +183,11 @@ enum ILStubTypes
#ifdef FEATURE_MULTICASTSTUB_AS_IL
ILSTUB_MULTICASTDELEGATE_INVOKE = 0x80000010,
#endif
-#ifdef FEATURE_STUBS_AS_IL
+#ifdef FEATURE_INSTANTIATINGSTUB_AS_IL
ILSTUB_UNBOXINGILSTUB = 0x80000020,
ILSTUB_INSTANTIATINGSTUB = 0x80000040,
- ILSTUB_SECUREDELEGATE_INVOKE = 0x80000080,
#endif
+ ILSTUB_WRAPPERDELEGATE_INVOKE = 0x80000080,
};
#ifdef FEATURE_COMINTEROP
@@ -219,8 +219,8 @@ inline bool SF_IsArrayOpStub (DWORD dwStubFlags) { LIMITED_METHOD_CONT
inline bool SF_IsMulticastDelegateStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags == ILSTUB_MULTICASTDELEGATE_INVOKE); }
#endif
-#ifdef FEATURE_STUBS_AS_IL
-inline bool SF_IsSecureDelegateStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags == ILSTUB_SECUREDELEGATE_INVOKE); }
+inline bool SF_IsWrapperDelegateStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags == ILSTUB_WRAPPERDELEGATE_INVOKE); }
+#ifdef FEATURE_INSTANTIATINGSTUB_AS_IL
inline bool SF_IsUnboxingILStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags == ILSTUB_UNBOXINGILSTUB); }
inline bool SF_IsInstantiatingStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags == ILSTUB_INSTANTIATINGSTUB); }
#endif
diff --git a/src/vm/frames.cpp b/src/vm/frames.cpp
index cddfd9b75a..03b8e0fc8f 100644
--- a/src/vm/frames.cpp
+++ b/src/vm/frames.cpp
@@ -1825,42 +1825,6 @@ BOOL HelperMethodFrame::InsureInit(bool initialInit,
#include "comdelegate.h"
-Assembly* SecureDelegateFrame::GetAssembly()
-{
- WRAPPER_NO_CONTRACT;
-
-#if !defined(DACCESS_COMPILE)
- // obtain the frame off the delegate pointer
- DELEGATEREF delegate = (DELEGATEREF) GetThis();
- _ASSERTE(delegate);
- if (!delegate->IsWrapperDelegate())
- {
- MethodDesc* pMethod = (MethodDesc*) delegate->GetMethodPtrAux();
- Assembly* pAssembly = pMethod->GetAssembly();
- _ASSERTE(pAssembly != NULL);
- return pAssembly;
- }
- else
- return NULL;
-#else
- DacNotImpl();
- return NULL;
-#endif
-}
-
-BOOL SecureDelegateFrame::TraceFrame(Thread *thread, BOOL fromPatch, TraceDestination *trace, REGDISPLAY *regs)
-{
- WRAPPER_NO_CONTRACT;
-
- _ASSERTE(!fromPatch);
-
- // Unlike multicast delegates, secure delegates only call one method. So, we should just return false here
- // and let the step out logic continue to the caller of the secure delegate stub.
- LOG((LF_CORDB, LL_INFO1000, "SDF::TF: return FALSE\n"));
-
- return FALSE;
-}
-
BOOL MulticastFrame::TraceFrame(Thread *thread, BOOL fromPatch,
TraceDestination *trace, REGDISPLAY *regs)
{
diff --git a/src/vm/frames.h b/src/vm/frames.h
index 7f35dd0ad8..276096522c 100644
--- a/src/vm/frames.h
+++ b/src/vm/frames.h
@@ -69,11 +69,7 @@
// | | to either a EE runtime helper function or
// | | a framed method.
// | |
-// | +-StubHelperFrame - for instantiating stubs that need to grow stack arguments
-// | |
-// | +-SecureDelegateFrame - represents a call Delegate.Invoke for secure delegate
-// | |
-// | +-MulticastFrame - this frame protects arguments to a MulticastDelegate
+// | +-MulticastFrame - this frame protects arguments to a MulticastDelegate
// | Invoke() call while calling each subscriber.
// |
// | +-FramedMethodFrame - this abstract frame represents a call to a method
@@ -220,7 +216,6 @@ FRAME_TYPE_NAME(HelperMethodFrame_2OBJ)
FRAME_TYPE_NAME(HelperMethodFrame_3OBJ)
FRAME_TYPE_NAME(HelperMethodFrame_PROTECTOBJ)
FRAME_ABSTRACT_TYPE_NAME(FramedMethodFrame)
-FRAME_TYPE_NAME(SecureDelegateFrame)
FRAME_TYPE_NAME(MulticastFrame)
FRAME_ABSTRACT_TYPE_NAME(UnmanagedToManagedFrame)
#ifdef FEATURE_COMINTEROP
@@ -238,9 +233,6 @@ FRAME_TYPE_NAME(ExternalMethodFrame)
#ifdef FEATURE_READYTORUN
FRAME_TYPE_NAME(DynamicHelperFrame)
#endif
-#if !defined(_TARGET_X86_)
-FRAME_TYPE_NAME(StubHelperFrame)
-#endif
FRAME_TYPE_NAME(GCFrame)
#ifdef FEATURE_INTERPRETER
FRAME_TYPE_NAME(InterpreterFrame)
@@ -1743,31 +1735,20 @@ protected:
}
};
-//+----------------------------------------------------------------------------
-//
-// Class: TPMethodFrame private
-//
-// Synopsis: This frame is pushed onto the stack for calls on transparent
-// proxy
-//
-//
-//+----------------------------------------------------------------------------
-
//------------------------------------------------------------------------
-// This represents a call Delegate.Invoke for secure delegate
-// It's only used to gc-protect the arguments during the call.
-// Actually the only reason to have this frame is so a proper
-// Assembly can be reported
+// This represents a call to MulticastDelegate.Invoke. It's only used to gc-protect
+// the arguments during the iteration.
//------------------------------------------------------------------------
-class SecureDelegateFrame : public TransitionFrame
+class MulticastFrame : public TransitionFrame
{
- VPTR_VTABLE_CLASS(SecureDelegateFrame, TransitionFrame)
+ VPTR_VTABLE_CLASS(MulticastFrame, TransitionFrame)
PTR_MethodDesc m_pMD;
TransitionBlock m_TransitionBlock;
public:
+
virtual MethodDesc* GetFunction()
{
LIMITED_METHOD_CONTRACT;
@@ -1777,20 +1758,14 @@ public:
virtual TADDR GetTransitionBlock()
{
LIMITED_METHOD_DAC_CONTRACT;
- return PTR_HOST_MEMBER_TADDR(SecureDelegateFrame, this,
+ return PTR_HOST_MEMBER_TADDR(MulticastFrame, this,
m_TransitionBlock);
}
- static BYTE GetOffsetOfDatum()
- {
- LIMITED_METHOD_DAC_CONTRACT;
- return offsetof(SecureDelegateFrame, m_pMD);
- }
-
static int GetOffsetOfTransitionBlock()
{
LIMITED_METHOD_DAC_CONTRACT;
- return offsetof(SecureDelegateFrame, m_TransitionBlock);
+ return offsetof(MulticastFrame, m_TransitionBlock);
}
virtual void GcScanRoots(promote_func *fn, ScanContext* sc)
@@ -1800,8 +1775,6 @@ public:
PromoteCallerStack(fn, sc);
}
- virtual Assembly *GetAssembly();
-
int GetFrameType()
{
LIMITED_METHOD_DAC_CONTRACT;
@@ -1822,37 +1795,6 @@ public:
TraceDestination *trace, REGDISPLAY *regs);
// Keep as last entry in class
- DEFINE_VTABLE_GETTER_AND_CTOR_AND_DTOR(SecureDelegateFrame)
-};
-
-
-//------------------------------------------------------------------------
-// This represents a call Multicast.Invoke. It's only used to gc-protect
-// the arguments during the iteration.
-//------------------------------------------------------------------------
-
-class MulticastFrame : public SecureDelegateFrame
-{
- VPTR_VTABLE_CLASS(MulticastFrame, SecureDelegateFrame)
-
- public:
-
- virtual Assembly *GetAssembly()
- {
- WRAPPER_NO_CONTRACT;
- return Frame::GetAssembly();
- }
-
- int GetFrameType()
- {
- LIMITED_METHOD_DAC_CONTRACT;
- return TYPE_MULTICAST;
- }
-
- virtual BOOL TraceFrame(Thread *thread, BOOL fromPatch,
- TraceDestination *trace, REGDISPLAY *regs);
-
- // Keep as last entry in class
DEFINE_VTABLE_GETTER_AND_CTOR_AND_DTOR(MulticastFrame)
};
@@ -2382,40 +2324,6 @@ public:
typedef VPTR(class DynamicHelperFrame) PTR_DynamicHelperFrame;
#endif // FEATURE_READYTORUN
-//------------------------------------------------------------------------
-// This frame is used for instantiating stubs when the argument transform
-// is too complex to generate a tail-calling stub.
-//------------------------------------------------------------------------
-#if !defined(_TARGET_X86_)
-class StubHelperFrame : public TransitionFrame
-{
- friend class CheckAsmOffsets;
- friend class StubLinkerCPU;
-
- VPTR_VTABLE_CLASS(StubHelperFrame, TransitionFrame)
- VPTR_UNIQUE(VPTR_UNIQUE_StubHelperFrame)
-
- TransitionBlock m_TransitionBlock;
-
- virtual TADDR GetTransitionBlock()
- {
- LIMITED_METHOD_DAC_CONTRACT;
- return PTR_HOST_MEMBER_TADDR(StubHelperFrame, this,
- m_TransitionBlock);
- }
-
- static int GetOffsetOfTransitionBlock()
- {
- LIMITED_METHOD_DAC_CONTRACT;
- return offsetof(StubHelperFrame, m_TransitionBlock);
- }
-
-private:
- // Keep as last entry in class
- DEFINE_VTABLE_GETTER_AND_CTOR_AND_DTOR(StubHelperFrame)
-};
-#endif // _TARGET_X86_
-
#ifdef FEATURE_COMINTEROP
//------------------------------------------------------------------------
diff --git a/src/vm/i386/stublinkerx86.cpp b/src/vm/i386/stublinkerx86.cpp
index 983fc3f36a..a658307bb2 100644
--- a/src/vm/i386/stublinkerx86.cpp
+++ b/src/vm/i386/stublinkerx86.cpp
@@ -2936,7 +2936,7 @@ void StubLinkerCPU::EmitSharedComMethodStubEpilog(TADDR pFrameVptr,
//========================================================================
#endif // defined(FEATURE_COMINTEROP) && defined(_TARGET_X86_)
-#ifndef FEATURE_STUBS_AS_IL
+#if !defined(FEATURE_STUBS_AS_IL) && defined(_TARGET_X86_)
/*==============================================================================
Pushes a TransitionFrame on the stack
If you make any changes to the prolog instruction sequence, be sure
@@ -2955,44 +2955,6 @@ VOID StubLinkerCPU::EmitMethodStubProlog(TADDR pFrameVptr, int transitionBlockOf
{
STANDARD_VM_CONTRACT;
-#ifdef _TARGET_AMD64_
- X86EmitPushReg(kR15); // CalleeSavedRegisters
- X86EmitPushReg(kR14);
- X86EmitPushReg(kR13);
- X86EmitPushReg(kR12);
- X86EmitPushReg(kRBP);
- X86EmitPushReg(kRBX);
- X86EmitPushReg(kRSI);
- X86EmitPushReg(kRDI);
-
- // Push m_datum
- X86EmitPushReg(SCRATCH_REGISTER_X86REG);
-
- // push edx ;leave room for m_next (edx is an arbitrary choice)
- X86EmitPushReg(kEDX);
-
- // push Frame vptr
- X86EmitPushImmPtr((LPVOID) pFrameVptr);
-
- // mov rsi, rsp
- X86EmitR2ROp(0x8b, kRSI, (X86Reg)4 /*kESP*/);
- UnwindSetFramePointer(kRSI);
-
- // Save ArgumentRegisters
- #define ARGUMENT_REGISTER(regname) X86EmitRegSave(k##regname, SecureDelegateFrame::GetOffsetOfTransitionBlock() + \
- sizeof(TransitionBlock) + offsetof(ArgumentRegisters, regname));
- ENUM_ARGUMENT_REGISTERS();
- #undef ARGUMENT_REGISTER
-
- _ASSERTE(((Frame*)&pFrameVptr)->GetGSCookiePtr() == PTR_GSCookie(PBYTE(&pFrameVptr) - sizeof(GSCookie)));
- X86EmitPushImmPtr((LPVOID)GetProcessGSCookie());
-
- // sub rsp, 4*sizeof(void*) ;; allocate callee scratch area and ensure rsp is 16-byte-aligned
- const INT32 padding = sizeof(ArgumentRegisters) + ((sizeof(FramedMethodFrame) % (2 * sizeof(LPVOID))) ? 0 : sizeof(LPVOID));
- X86EmitSubEsp(padding);
-#endif // _TARGET_AMD64_
-
-#ifdef _TARGET_X86_
// push ebp ;; save callee-saved register
// mov ebp,esp
// push ebx ;; save callee-saved register
@@ -3022,7 +2984,6 @@ VOID StubLinkerCPU::EmitMethodStubProlog(TADDR pFrameVptr, int transitionBlockOf
X86EmitMovRegSP(kESI);
X86EmitPushImmPtr((LPVOID)GetProcessGSCookie());
-#endif // _TARGET_X86_
// ebx <-- GetThread()
X86EmitCurrentThreadFetch(kEBX, 0);
@@ -3030,14 +2991,7 @@ VOID StubLinkerCPU::EmitMethodStubProlog(TADDR pFrameVptr, int transitionBlockOf
#if _DEBUG
// call ObjectRefFlush
-#ifdef _TARGET_AMD64_
-
- // mov rcx, rbx
- X86EmitR2ROp(0x8b, kECX, kEBX); // arg in reg
-
-#else // !_TARGET_AMD64_
X86EmitPushReg(kEBX); // arg on stack
-#endif // _TARGET_AMD64_
// Make the call
X86EmitCall(NewExternalCodeLabel((LPVOID) Thread::ObjectRefFlush), sizeof(void*));
@@ -3058,40 +3012,16 @@ VOID StubLinkerCPU::EmitMethodStubProlog(TADDR pFrameVptr, int transitionBlockOf
if (Frame::ShouldLogTransitions())
{
// call LogTransition
-#ifdef _TARGET_AMD64_
-
- // mov rcx, rsi
- X86EmitR2ROp(0x8b, kECX, kESI); // arg in reg
-
-#else // !_TARGET_AMD64_
X86EmitPushReg(kESI); // arg on stack
-#endif // _TARGET_AMD64_
-
- X86EmitCall(NewExternalCodeLabel((LPVOID) Frame::LogTransition), sizeof(void*));
-
-#ifdef _TARGET_AMD64_
- // Reload parameter registers
- // mov r, [esp+offs]
- #define ARGUMENT_REGISTER(regname) X86EmitEspOffset(0x8b, k##regname, sizeof(ArgumentRegisters) + \
- sizeof(TransitionFrame) + offsetof(ArgumentRegisters, regname));
- ENUM_ARGUMENT_REGISTERS();
- #undef ARGUMENT_REGISTER
-#endif // _TARGET_AMD64_
+ X86EmitCall(NewExternalCodeLabel((LPVOID) Frame::LogTransition), sizeof(void*));
}
#endif // _DEBUG
-#ifdef _TARGET_AMD64_
- // OK for the debugger to examine the new frame now
- // (Note that if it's not OK yet for some stub, another patch label
- // can be emitted later which will override this one.)
- EmitPatchLabel();
-#else
// For x86, the patch label can be specified only after the GSCookie is pushed
// Otherwise the debugger will see a Frame without a valid GSCookie
-#endif
}
/*==============================================================================
@@ -3115,15 +3045,9 @@ VOID StubLinkerCPU::EmitMethodStubEpilog(WORD numArgBytes, int transitionBlockOf
// mov [ebx + Thread.GetFrame()], edi ;; restore previous frame
X86EmitIndexRegStore(kEBX, Thread::GetOffsetOfCurrentFrame(), kEDI);
-#ifdef _TARGET_X86_
// deallocate Frame
X86EmitAddEsp(sizeof(GSCookie) + transitionBlockOffset + TransitionBlock::GetOffsetOfCalleeSavedRegisters());
-#elif defined(_TARGET_AMD64_)
- // lea rsp, [rsi + <offset of preserved registers>]
- X86EmitOffsetModRM(0x8d, (X86Reg)4 /*kRSP*/, kRSI, transitionBlockOffset + TransitionBlock::GetOffsetOfCalleeSavedRegisters());
-#endif // _TARGET_AMD64_
-
// pop edi ; restore callee-saved registers
// pop esi
// pop ebx
@@ -3133,14 +3057,7 @@ VOID StubLinkerCPU::EmitMethodStubEpilog(WORD numArgBytes, int transitionBlockOf
X86EmitPopReg(kEBX);
X86EmitPopReg(kEBP);
-#ifdef _TARGET_AMD64_
- X86EmitPopReg(kR12);
- X86EmitPopReg(kR13);
- X86EmitPopReg(kR14);
- X86EmitPopReg(kR15);
-#endif
-
-#if defined(_TARGET_AMD64_) || defined(UNIX_X86_ABI)
+#if defined(UNIX_X86_ABI)
// Caller deallocates argument space. (Bypasses ASSERT in
// X86EmitReturn.)
numArgBytes = 0;
@@ -3158,23 +3075,19 @@ VOID StubLinkerCPU::EmitCheckGSCookie(X86Reg frameReg, int gsCookieOffset)
#ifdef _DEBUG
// cmp dword ptr[frameReg-gsCookieOffset], gsCookie
-#ifdef _TARGET_X86_
X86EmitCmpRegIndexImm32(frameReg, gsCookieOffset, GetProcessGSCookie());
-#else
- X64EmitCmp32RegIndexImm32(frameReg, gsCookieOffset, (INT32)GetProcessGSCookie());
-#endif
-
+
CodeLabel * pLabel = NewCodeLabel();
X86EmitCondJump(pLabel, X86CondCode::kJE);
-
+
X86EmitCall(NewExternalCodeLabel((LPVOID) JIT_FailFast), 0);
EmitLabel(pLabel);
#endif
}
-#endif // !FEATURE_STUBS_AS_IL
-
+#endif // !defined(FEATURE_STUBS_AS_IL) && defined(_TARGET_X86_)
+#ifdef _TARGET_X86_
// This method unboxes the THIS pointer and then calls pRealMD
// If it's shared code for a method in a generic value class, then also extract the vtable pointer
// and pass it as an extra argument. Thus this stub generator really covers both
@@ -3189,7 +3102,7 @@ VOID StubLinkerCPU::EmitUnboxMethodStub(MethodDesc* pUnboxMD)
}
CONTRACTL_END;
-#ifdef FEATURE_STUBS_AS_IL
+#ifdef FEATURE_INSTANTIATINGSTUB_AS_IL
_ASSERTE(!pUnboxMD->RequiresInstMethodTableArg());
#else
if (pUnboxMD->RequiresInstMethodTableArg())
@@ -3202,42 +3115,109 @@ VOID StubLinkerCPU::EmitUnboxMethodStub(MethodDesc* pUnboxMD)
//
// unboxing a value class simply means adding sizeof(void*) to the THIS pointer
//
-#ifdef _TARGET_AMD64_
X86EmitAddReg(THIS_kREG, sizeof(void*));
+ EmitTailJumpToMethod(pUnboxMD);
+}
+#endif //_TARGET_X86_
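For the simple x86 case, the whole unboxing transform is the two emitted operations above. Expressed as the runtime effect rather than emitter calls (a sketch, assuming the usual boxed layout of a MethodTable* followed by the value-type payload):

// What the generated stub does when invoked:
void* UnboxedThisSketch(void* boxedThis)
{
    // add THIS_kREG, sizeof(void*) - step over the MethodTable* header
    return (unsigned char*)boxedThis + sizeof(void*);
}
// ...then EmitTailJumpToMethod(pUnboxMD) transfers control with the
// remaining arguments untouched and no new frame.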
- // Use direct call if possible
- if (pUnboxMD->HasStableEntryPoint())
+#if defined(FEATURE_SHARE_GENERIC_CODE) && defined(_TARGET_AMD64_)
+VOID StubLinkerCPU::EmitComputedInstantiatingMethodStub(MethodDesc* pSharedMD, struct ShuffleEntry *pShuffleEntryArray, void* extraArg)
+{
+ STANDARD_VM_CONTRACT;
+
+ for (ShuffleEntry* pEntry = pShuffleEntryArray; pEntry->srcofs != ShuffleEntry::SENTINEL; pEntry++)
{
- X86EmitRegLoad(kRAX, pUnboxMD->GetStableEntryPoint());// MOV RAX, DWORD
+ _ASSERTE((pEntry->srcofs & ShuffleEntry::REGMASK) && (pEntry->dstofs & ShuffleEntry::REGMASK));
+ // Source in a general purpose or float register, destination in the same kind of a register or on stack
+ int srcRegIndex = pEntry->srcofs & ShuffleEntry::OFSREGMASK;
+
+ // Both the srcofs and dstofs must be of the same kind of registers - float or general purpose.
+ _ASSERTE((pEntry->dstofs & ShuffleEntry::FPREGMASK) == (pEntry->srcofs & ShuffleEntry::FPREGMASK));
+ int dstRegIndex = pEntry->dstofs & ShuffleEntry::OFSREGMASK;
+
+ if (pEntry->srcofs & ShuffleEntry::FPREGMASK)
+ {
+ // movdqa dstReg, srcReg
+ X64EmitMovXmmXmm((X86Reg)(kXMM0 + dstRegIndex), (X86Reg)(kXMM0 + srcRegIndex));
+ }
+ else
+ {
+ // mov dstReg, srcReg
+ X86EmitMovRegReg(c_argRegs[dstRegIndex], c_argRegs[srcRegIndex]);
+ }
}
- else
+
+ MetaSig msig(pSharedMD);
+ ArgIterator argit(&msig);
+
+ if (argit.HasParamType())
{
- X86EmitRegLoad(kRAX, (UINT_PTR)pUnboxMD->GetAddrOfSlot()); // MOV RAX, DWORD
-
- X86EmitIndexRegLoad(kRAX, kRAX); // MOV RAX, [RAX]
+ int paramTypeArgOffset = argit.GetParamTypeArgOffset();
+ int paramTypeArgIndex = TransitionBlock::GetArgumentIndexFromOffset(paramTypeArgOffset);
+
+ if (extraArg == NULL)
+ {
+ if (pSharedMD->RequiresInstMethodTableArg())
+ {
+ // Unboxing stub case
+ // Extract MethodTable pointer (the hidden arg) from the object instance.
+ X86EmitIndexRegLoad(c_argRegs[paramTypeArgIndex], THIS_kREG);
+ }
+ }
+ else
+ {
+ X86EmitRegLoad(c_argRegs[paramTypeArgIndex], (UINT_PTR)extraArg);
+ }
}
- Emit16(X86_INSTR_JMP_EAX); // JMP EAX
-#else // _TARGET_AMD64_
- X86EmitAddReg(THIS_kREG, sizeof(void*));
+ if (extraArg == NULL)
+ {
+ // Unboxing stub case
+ // Skip over the MethodTable* to find the address of the unboxed value type.
+ X86EmitAddReg(THIS_kREG, sizeof(void*));
+ }
+
+ EmitTailJumpToMethod(pSharedMD);
+}
+#endif // defined(FEATURE_SHARE_GENERIC_CODE) && defined(_TARGET_AMD64_)
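Stripped of emitter plumbing, the stub this function produces has three phases. A pseudo-assembly summary of the code generated above, restating only what the hunk shows:

// Phase 1 - replay the shuffle (register-to-register only; both asserts above):
//     movdqa xmm<dst>, xmm<src>          ; FPREGMASK entries
//     mov    <argreg dst>, <argreg src>  ; general purpose entries
// Phase 2 - materialize the hidden instantiation argument, when the target takes one:
//     extraArg != NULL : mov <paramTypeArgReg>, extraArg   ; known dictionary/MethodTable
//     extraArg == NULL : mov <paramTypeArgReg>, [this]     ; unboxing: MethodTable* from the box
//                        add this, sizeof(void*)           ; expose the unboxed payload
// Phase 3 - EmitTailJumpToMethod(pSharedMD):
//     mov rax, <target or slot> ; jmp rax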
+#ifdef _TARGET_AMD64_
+VOID StubLinkerCPU::EmitLoadMethodAddressIntoAX(MethodDesc *pMD)
+{
+ if (pMD->HasStableEntryPoint())
+ {
+ X86EmitRegLoad(kRAX, pMD->GetStableEntryPoint());// MOV RAX, DWORD
+ }
+ else
+ {
+ X86EmitRegLoad(kRAX, (UINT_PTR)pMD->GetAddrOfSlot()); // MOV RAX, DWORD
+
+ X86EmitIndexRegLoad(kRAX, kRAX); // MOV RAX, [RAX]
+ }
+}
+#endif
+VOID StubLinkerCPU::EmitTailJumpToMethod(MethodDesc *pMD)
+{
+#ifdef _TARGET_AMD64_
+ EmitLoadMethodAddressIntoAX(pMD);
+ Emit16(X86_INSTR_JMP_EAX);
+#else
// Use direct call if possible
- if (pUnboxMD->HasStableEntryPoint())
+ if (pMD->HasStableEntryPoint())
{
- X86EmitNearJump(NewExternalCodeLabel((LPVOID) pUnboxMD->GetStableEntryPoint()));
+ X86EmitNearJump(NewExternalCodeLabel((LPVOID) pMD->GetStableEntryPoint()));
}
else
{
// jmp [slot]
Emit16(0x25ff);
- Emit32((DWORD)(size_t)pUnboxMD->GetAddrOfSlot());
+ Emit32((DWORD)(size_t)pMD->GetAddrOfSlot());
}
#endif //_TARGET_AMD64_
}
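One encoding detail in the x86 slot path that is easy to misread: Emit16(0x25ff) is the jmp [disp32] opcode pair. Emit16 writes its operand in target byte order, so on little-endian x86 the instruction stream receives FF 25 <imm32>, i.e. JMP m32 (FF /4) with a disp32-only ModRM byte of 0x25. A short demonstration of the byte-order assumption:

// Assumes a little-endian target, as on all x86/amd64 hosts.
unsigned short op = 0x25ff;
unsigned char bytes[2];
memcpy(bytes, &op, 2);   // bytes[0] == 0xFF (opcode), bytes[1] == 0x25 (ModRM)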
-
-#if defined(FEATURE_SHARE_GENERIC_CODE) && !defined(FEATURE_STUBS_AS_IL)
-// The stub generated by this method passes an extra dictionary argument before jumping to
+#if defined(FEATURE_SHARE_GENERIC_CODE) && !defined(FEATURE_INSTANTIATINGSTUB_AS_IL) && defined(_TARGET_X86_)
+// The stub generated by this method passes an extra dictionary argument before jumping to
// shared-instantiation generic code.
//
// pMD is either
@@ -3258,126 +3238,6 @@ VOID StubLinkerCPU::EmitInstantiatingMethodStub(MethodDesc* pMD, void* extra)
MetaSig msig(pMD);
ArgIterator argit(&msig);
-#ifdef _TARGET_AMD64_
- int paramTypeArgOffset = argit.GetParamTypeArgOffset();
- int paramTypeArgIndex = TransitionBlock::GetArgumentIndexFromOffset(paramTypeArgOffset);
-
- CorElementType argTypes[5];
-
- int firstRealArg = paramTypeArgIndex + 1;
- int argNum = firstRealArg;
-
- //
- // Compute types of the 4 register args and first stack arg
- //
-
- CorElementType sigType;
- while ((sigType = msig.NextArgNormalized()) != ELEMENT_TYPE_END)
- {
- argTypes[argNum++] = sigType;
- if (argNum > 4)
- break;
- }
- msig.Reset();
-
- BOOL fUseInstantiatingMethodStubWorker = FALSE;
-
- if (argNum > 4)
- {
- //
- // We will need to go through assembly helper.
- //
- fUseInstantiatingMethodStubWorker = TRUE;
-
- // Allocate space for frame before pushing the arguments for the assembly helper
- X86EmitSubEsp((INT32)(AlignUp(sizeof(void *) /* extra stack param */ + sizeof(GSCookie) + sizeof(StubHelperFrame), 16) - sizeof(void *) /* return address */));
-
- //
- // Store extra arg stack arg param for the helper.
- //
- CorElementType argType = argTypes[--argNum];
- switch (argType)
- {
- case ELEMENT_TYPE_R4:
- // movss dword ptr [rsp], xmm?
- X64EmitMovSSToMem(kXMM3, (X86Reg)4 /*kRSP*/);
- break;
- case ELEMENT_TYPE_R8:
- // movsd qword ptr [rsp], xmm?
- X64EmitMovSDToMem(kXMM3, (X86Reg)4 /*kRSP*/);
- break;
- default:
- X86EmitIndexRegStoreRSP(0, kR9);
- break;
- }
- }
-
- //
- // Shuffle the register arguments
- //
- while (argNum > firstRealArg)
- {
- CorElementType argType = argTypes[--argNum];
-
- switch (argType)
- {
- case ELEMENT_TYPE_R4:
- case ELEMENT_TYPE_R8:
- // mov xmm#, xmm#-1
- X64EmitMovXmmXmm((X86Reg)argNum, (X86Reg)(argNum - 1));
- break;
- default:
- //mov reg#, reg#-1
- X86EmitMovRegReg(c_argRegs[argNum], c_argRegs[argNum-1]);
- break;
- }
- }
-
- //
- // Setup the hidden instantiation argument
- //
- if (extra != NULL)
- {
- X86EmitRegLoad(c_argRegs[paramTypeArgIndex], (UINT_PTR)extra);
- }
- else
- {
- X86EmitIndexRegLoad(c_argRegs[paramTypeArgIndex], THIS_kREG);
-
- X86EmitAddReg(THIS_kREG, sizeof(void*));
- }
-
- // Use direct call if possible
- if (pMD->HasStableEntryPoint())
- {
- X86EmitRegLoad(kRAX, pMD->GetStableEntryPoint());// MOV RAX, DWORD
- }
- else
- {
- X86EmitRegLoad(kRAX, (UINT_PTR)pMD->GetAddrOfSlot()); // MOV RAX, DWORD
-
- X86EmitIndexRegLoad(kRAX, kRAX); // MOV RAX, [RAX]
- }
-
- if (fUseInstantiatingMethodStubWorker)
- {
- X86EmitPushReg(kRAX);
-
- UINT cbStack = argit.SizeOfArgStack();
- _ASSERTE(cbStack > 0);
-
- X86EmitPushImm32((AlignUp(cbStack, 16) / sizeof(void*)) - 1); // -1 for extra stack arg
-
- X86EmitRegLoad(kRAX, GetEEFuncEntryPoint(InstantiatingMethodStubWorker));// MOV RAX, DWORD
- }
- else
- {
- _ASSERTE(argit.SizeOfArgStack() == 0);
- }
-
- Emit16(X86_INSTR_JMP_EAX);
-
-#else
int paramTypeArgOffset = argit.GetParamTypeArgOffset();
// It's on the stack
@@ -3421,21 +3281,9 @@ VOID StubLinkerCPU::EmitInstantiatingMethodStub(MethodDesc* pMD, void* extra)
// Unboxing stub case.
X86EmitAddReg(THIS_kREG, sizeof(void*));
}
-
- // Use direct call if possible
- if (pMD->HasStableEntryPoint())
- {
- X86EmitNearJump(NewExternalCodeLabel((LPVOID) pMD->GetStableEntryPoint()));
- }
- else
- {
- // jmp [slot]
- Emit16(0x25ff);
- Emit32((DWORD)(size_t)pMD->GetAddrOfSlot());
- }
-#endif //
+ EmitTailJumpToMethod(pMD);
}
-#endif // FEATURE_SHARE_GENERIC_CODE && FEATURE_STUBS_AS_IL
+#endif // defined(FEATURE_SHARE_GENERIC_CODE) && !defined(FEATURE_INSTANTIATINGSTUB_AS_IL) && defined(_TARGET_X86_)
#if defined(_DEBUG) && defined(STUBLINKER_GENERATES_UNWIND_INFO)
@@ -3840,7 +3688,6 @@ VOID StubLinkerCPU::EmitShuffleThunk(ShuffleEntry *pShuffleEntryArray)
// Unix: mov r11, rdi
X86EmitMovRegReg(kR11, THIS_kREG);
-#ifdef UNIX_AMD64_ABI
for (ShuffleEntry* pEntry = pShuffleEntryArray; pEntry->srcofs != ShuffleEntry::SENTINEL; pEntry++)
{
if (pEntry->srcofs == ShuffleEntry::HELPERREG)
@@ -3962,80 +3809,6 @@ VOID StubLinkerCPU::EmitShuffleThunk(ShuffleEntry *pShuffleEntryArray)
X86EmitIndexRegStore (SCRATCH_REGISTER_X86REG, (pEntry->dstofs + 1) * sizeof(void*), kR10);
}
}
-#else // UNIX_AMD64_ABI
- UINT step = 1;
-
- if (pShuffleEntryArray->argtype == ELEMENT_TYPE_END)
- {
- // Special handling of open instance methods with return buffer. Move "this"
- // by two slots, and leave the "retbufptr" between the two slots intact.
-
- // mov rcx, r8
- X86EmitMovRegReg(kRCX, kR8);
-
- // Skip this entry
- pShuffleEntryArray++;
-
- // Skip this entry and leave retbufptr intact
- step += 2;
- }
-
- // Now shuffle the args by one position:
- // steps 1-3 : reg args (rcx, rdx, r8)
- // step 4 : stack->reg arg (r9)
- // step >4 : stack args
-
- for(;
- pShuffleEntryArray->srcofs != ShuffleEntry::SENTINEL;
- step++, pShuffleEntryArray++)
- {
- switch (step)
- {
- case 1:
- case 2:
- case 3:
- switch (pShuffleEntryArray->argtype)
- {
- case ELEMENT_TYPE_R4:
- case ELEMENT_TYPE_R8:
- // mov xmm-1#, xmm#
- X64EmitMovXmmXmm((X86Reg)(step - 1), (X86Reg)(step));
- break;
- default:
- // mov argRegs[step-1], argRegs[step]
- X86EmitMovRegReg(c_argRegs[step-1], c_argRegs[step]);
- break;
- }
- break;
-
- case 4:
- {
- switch (pShuffleEntryArray->argtype)
- {
- case ELEMENT_TYPE_R4:
- X64EmitMovSSFromMem(kXMM3, kRAX, 0x28);
- break;
-
- case ELEMENT_TYPE_R8:
- X64EmitMovSDFromMem(kXMM3, kRAX, 0x28);
- break;
-
- default:
- // mov r9, [rax + 28h]
- X86EmitIndexRegLoad (kR9, SCRATCH_REGISTER_X86REG, 5*sizeof(void*));
- }
- break;
- }
- default:
-
- // mov r10, [rax + (step+1)*sizeof(void*)]
- X86EmitIndexRegLoad (kR10, SCRATCH_REGISTER_X86REG, (step+1)*sizeof(void*));
-
- // mov [rax + step*sizeof(void*)], r10
- X86EmitIndexRegStore (SCRATCH_REGISTER_X86REG, step*sizeof(void*), kR10);
- }
- }
-#endif // UNIX_AMD64_ABI
// mov r10, [r11 + Delegate._methodptraux]
X86EmitIndexRegLoad(kR10, kR11, DelegateObject::GetOffsetOfMethodPtrAux());
@@ -4140,6 +3913,7 @@ VOID StubLinkerCPU::EmitShuffleThunk(ShuffleEntry *pShuffleEntryArray)
#if !defined(CROSSGEN_COMPILE) && !defined(FEATURE_STUBS_AS_IL)
+#if defined(_TARGET_X86_) && !defined(FEATURE_MULTICASTSTUB_AS_IL)
//===========================================================================
// Computes hash code for MulticastDelegate.Invoke()
UINT_PTR StubLinkerCPU::HashMulticastInvoke(MetaSig* pSig)
@@ -4155,58 +3929,9 @@ UINT_PTR StubLinkerCPU::HashMulticastInvoke(MetaSig* pSig)
UINT numStackBytes = argit.SizeOfArgStack();
- if (numStackBytes > 0x7FFF)
+ if (numStackBytes > 0x7FFF)
COMPlusThrow(kNotSupportedException, W("NotSupported_TooManyArgs"));
-#ifdef _TARGET_AMD64_
- // Generate a hash key as follows:
- // UINT Arg0Type:2; // R4 (1), R8 (2), other (3)
- // UINT Arg1Type:2; // R4 (1), R8 (2), other (3)
- // UINT Arg2Type:2; // R4 (1), R8 (2), other (3)
- // UINT Arg3Type:2; // R4 (1), R8 (2), other (3)
- // UINT NumArgs:24; // number of arguments
- // (This should cover all the prestub variations)
-
- _ASSERTE(!(numStackBytes & 7));
- UINT hash = (numStackBytes / sizeof(void*)) << 8;
-
- UINT argNum = 0;
-
- // NextArg() doesn't take into account the "this" pointer.
- // That's why we have to special case it here.
- if (argit.HasThis())
- {
- hash |= 3 << (2*argNum);
- argNum++;
- }
-
- if (argit.HasRetBuffArg())
- {
- hash |= 3 << (2*argNum);
- argNum++;
- }
-
- for (; argNum < 4; argNum++)
- {
- switch (pSig->NextArgNormalized())
- {
- case ELEMENT_TYPE_END:
- argNum = 4;
- break;
- case ELEMENT_TYPE_R4:
- hash |= 1 << (2*argNum);
- break;
- case ELEMENT_TYPE_R8:
- hash |= 2 << (2*argNum);
- break;
- default:
- hash |= 3 << (2*argNum);
- break;
- }
- }
-
-#else // _TARGET_AMD64_
-
// check if the function is returning a float, in which case the stub has to take
// care of popping the floating point stack except for the last invocation
@@ -4218,10 +3943,10 @@ UINT_PTR StubLinkerCPU::HashMulticastInvoke(MetaSig* pSig)
{
hash |= 2;
}
-#endif // _TARGET_AMD64_
return hash;
}
+#endif // defined(_TARGET_X86_) && !defined(FEATURE_MULTICASTSTUB_AS_IL)
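After this change the hash is x86-only and much simpler than the deleted AMD64 key: the upper bits carry the stack-argument byte count (hence the 0x7FFF guard above) and the low two bits flag floating-point returns (the hash |= 2 in the tail of this function). EmitMulticastInvoke later recovers the byte count with hash & ~3. A worked example under that reading; the exact packing is inferred from those two uses:

// 12 bytes of stack arguments, target returns a float/double:
UINT_PTR hash = 12;                                       // numStackBytes (multiple of 4)
hash |= 2;                                                // FP-return flag, as above
UINT16 numStackBytes = static_cast<UINT16>(hash & ~3);    // == 12, see EmitMulticastInvoke
bool popsFpStack = (hash & 2) != 0;                       // == true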
#ifdef _TARGET_X86_
//===========================================================================
@@ -4260,6 +3985,7 @@ VOID StubLinkerCPU::EmitDelegateInvoke()
}
#endif // _TARGET_X86_
+#if defined(_TARGET_X86_) && !defined(FEATURE_MULTICASTSTUB_AS_IL)
VOID StubLinkerCPU::EmitMulticastInvoke(UINT_PTR hash)
{
STANDARD_VM_CONTRACT;
@@ -4274,60 +4000,8 @@ VOID StubLinkerCPU::EmitMulticastInvoke(UINT_PTR hash)
// Push a MulticastFrame on the stack.
EmitMethodStubProlog(MulticastFrame::GetMethodFrameVPtr(), MulticastFrame::GetOffsetOfTransitionBlock());
-#ifdef _TARGET_X86_
// Frame is ready to be inspected by debugger for patch location
EmitPatchLabel();
-#else // _TARGET_AMD64_
-
- // Save register arguments in their home locations.
- // Non-FP registers are already saved by EmitMethodStubProlog.
- // (Assumes Sig.NextArg() does not enum RetBuffArg or "this".)
-
- int argNum = 0;
- __int32 argOfs = MulticastFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgs();
- CorElementType argTypes[4];
- CorElementType argType;
-
- // 'this'
- argOfs += sizeof(void*);
- argTypes[argNum] = ELEMENT_TYPE_I8;
- argNum++;
-
- do
- {
- argType = ELEMENT_TYPE_END;
-
- switch ((hash >> (2 * argNum)) & 3)
- {
- case 0:
- argType = ELEMENT_TYPE_END;
- break;
- case 1:
- argType = ELEMENT_TYPE_R4;
-
- // movss dword ptr [rsp + argOfs], xmm?
- X64EmitMovSSToMem((X86Reg)argNum, kRSI, argOfs);
- break;
- case 2:
- argType = ELEMENT_TYPE_R8;
-
- // movsd qword ptr [rsp + argOfs], xmm?
- X64EmitMovSDToMem((X86Reg)argNum, kRSI, argOfs);
- break;
- default:
- argType = ELEMENT_TYPE_I;
- break;
- }
-
- argOfs += sizeof(void*);
- argTypes[argNum] = argType;
- argNum++;
- }
- while (argNum < 4 && ELEMENT_TYPE_END != argType);
-
- _ASSERTE(4 == argNum || ELEMENT_TYPE_END == argTypes[argNum-1]);
-
-#endif // _TARGET_AMD64_
// TODO: on AMD64, pick different regs for locals so we don't need the pushes
@@ -4354,102 +4028,6 @@ VOID StubLinkerCPU::EmitMulticastInvoke(UINT_PTR hash)
// je ENDLOOP
X86EmitCondJump(pEndLoopLabel, X86CondCode::kJZ);
-#ifdef _TARGET_AMD64_
-
- INT32 numStackBytes = (INT32)((hash >> 8) * sizeof(void *));
-
- INT32 stackUsed, numStackArgs, ofs;
-
- // Push any stack args, plus an extra location
- // for rsp alignment if needed
-
- numStackArgs = numStackBytes / sizeof(void*);
-
- // 1 push above, so stack is currently misaligned
- const unsigned STACK_ALIGN_ADJUST = 8;
-
- if (!numStackArgs)
- {
- // sub rsp, 28h ;; 4 reg arg home locs + rsp alignment
- stackUsed = 0x20 + STACK_ALIGN_ADJUST;
- X86EmitSubEsp(stackUsed);
- }
- else
- {
- stackUsed = numStackArgs * sizeof(void*);
-
- // If the stack is misaligned, then an odd number of arguments
- // will naturally align the stack.
- if ( ((numStackArgs & 1) == 0)
- != (STACK_ALIGN_ADJUST == 0))
- {
- X86EmitPushReg(kRAX);
- stackUsed += sizeof(void*);
- }
-
- ofs = MulticastFrame::GetOffsetOfTransitionBlock() +
- TransitionBlock::GetOffsetOfArgs() + sizeof(ArgumentRegisters) + numStackBytes;
-
- while (numStackArgs--)
- {
- ofs -= sizeof(void*);
-
- // push [rsi + ofs] ;; Push stack args
- X86EmitIndexPush(kESI, ofs);
- }
-
- // sub rsp, 20h ;; Create 4 reg arg home locations
- X86EmitSubEsp(0x20);
-
- stackUsed += 0x20;
- }
-
- for(
- argNum = 0, argOfs = MulticastFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgs();
- argNum < 4 && argTypes[argNum] != ELEMENT_TYPE_END;
- argNum++, argOfs += sizeof(void*)
- )
- {
- switch (argTypes[argNum])
- {
- case ELEMENT_TYPE_R4:
- // movss xmm?, dword ptr [rsi + argOfs]
- X64EmitMovSSFromMem((X86Reg)argNum, kRSI, argOfs);
- break;
- case ELEMENT_TYPE_R8:
- // movsd xmm?, qword ptr [rsi + argOfs]
- X64EmitMovSDFromMem((X86Reg)argNum, kRSI, argOfs);
- break;
- default:
- if (c_argRegs[argNum] != THIS_kREG)
- {
- // mov r*, [rsi + dstOfs]
- X86EmitIndexRegLoad(c_argRegs[argNum], kESI,argOfs);
- }
- break;
- } // switch
- }
-
- // mov SCRATCHREG, [rcx+Delegate._invocationList] ;;fetch invocation list
- X86EmitIndexRegLoad(SCRATCH_REGISTER_X86REG, THIS_kREG, DelegateObject::GetOffsetOfInvocationList());
-
- // mov SCRATCHREG, [SCRATCHREG+m_Array+rdi*8] ;; index into invocation list
- X86EmitOp(0x8b, kEAX, SCRATCH_REGISTER_X86REG, static_cast<int>(PtrArray::GetDataOffset()), kEDI, sizeof(void*), k64BitOp);
-
- // mov THISREG, [SCRATCHREG+Delegate.object] ;;replace "this" pointer
- X86EmitIndexRegLoad(THIS_kREG, SCRATCH_REGISTER_X86REG, DelegateObject::GetOffsetOfTarget());
-
- // call [SCRATCHREG+Delegate.target] ;; call current subscriber
- X86EmitOffsetModRM(0xff, (X86Reg)2, SCRATCH_REGISTER_X86REG, DelegateObject::GetOffsetOfMethodPtr());
-
- // add rsp, stackUsed ;; Clean up stack
- X86EmitAddEsp(stackUsed);
-
- // inc edi
- Emit16(0xC7FF);
-
-#else // _TARGET_AMD64_
-
UINT16 numStackBytes = static_cast<UINT16>(hash & ~3);
// ..repush & reenregister args..
@@ -4508,8 +4086,6 @@ VOID StubLinkerCPU::EmitMulticastInvoke(UINT_PTR hash)
EmitLabel(pNoFloatStackPopLabel);
}
-#endif // _TARGET_AMD64_
-
// The debugger may need to stop here, so grab the offset of this code.
EmitPatchLabel();
@@ -4527,210 +4103,8 @@ VOID StubLinkerCPU::EmitMulticastInvoke(UINT_PTR hash)
// Epilog
EmitMethodStubEpilog(numStackBytes, MulticastFrame::GetOffsetOfTransitionBlock());
}
+#endif // defined(_TARGET_X86_) && !defined(FEATURE_MULTICASTSTUB_AS_IL)
-VOID StubLinkerCPU::EmitSecureDelegateInvoke(UINT_PTR hash)
-{
- STANDARD_VM_CONTRACT;
-
- int thisRegOffset = SecureDelegateFrame::GetOffsetOfTransitionBlock() +
- TransitionBlock::GetOffsetOfArgumentRegisters() + offsetof(ArgumentRegisters, THIS_REG);
-
- // push the methoddesc on the stack
- // mov eax, [ecx + offsetof(_invocationCount)]
- X86EmitIndexRegLoad(SCRATCH_REGISTER_X86REG, THIS_kREG, DelegateObject::GetOffsetOfInvocationCount());
-
- // Push a SecureDelegateFrame on the stack.
- EmitMethodStubProlog(SecureDelegateFrame::GetMethodFrameVPtr(), SecureDelegateFrame::GetOffsetOfTransitionBlock());
-
-#ifdef _TARGET_X86_
- // Frame is ready to be inspected by debugger for patch location
- EmitPatchLabel();
-#else // _TARGET_AMD64_
-
- // Save register arguments in their home locations.
- // Non-FP registers are already saved by EmitMethodStubProlog.
- // (Assumes Sig.NextArg() does not enum RetBuffArg or "this".)
-
- int argNum = 0;
- __int32 argOfs = SecureDelegateFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgs();
- CorElementType argTypes[4];
- CorElementType argType;
-
- // 'this'
- argOfs += sizeof(void*);
- argTypes[argNum] = ELEMENT_TYPE_I8;
- argNum++;
-
- do
- {
- argType = ELEMENT_TYPE_END;
-
- switch ((hash >> (2 * argNum)) & 3)
- {
- case 0:
- argType = ELEMENT_TYPE_END;
- break;
- case 1:
- argType = ELEMENT_TYPE_R4;
-
- // movss dword ptr [rsp + argOfs], xmm?
- X64EmitMovSSToMem((X86Reg)argNum, kRSI, argOfs);
- break;
- case 2:
- argType = ELEMENT_TYPE_R8;
-
- // movsd qword ptr [rsp + argOfs], xmm?
- X64EmitMovSSToMem((X86Reg)argNum, kRSI, argOfs);
- break;
- default:
- argType = ELEMENT_TYPE_I;
- break;
- }
-
- argOfs += sizeof(void*);
- argTypes[argNum] = argType;
- argNum++;
- }
- while (argNum < 4 && ELEMENT_TYPE_END != argType);
-
- _ASSERTE(4 == argNum || ELEMENT_TYPE_END == argTypes[argNum-1]);
-
-#endif // _TARGET_AMD64_
-
- // mov ecx, [esi + this] ;; get delegate
- X86EmitIndexRegLoad(THIS_kREG, kESI, thisRegOffset);
-
-#ifdef _TARGET_AMD64_
-
- INT32 numStackBytes = (INT32)((hash >> 8) * sizeof(void *));
-
- INT32 stackUsed, numStackArgs, ofs;
-
- // Push any stack args, plus an extra location
- // for rsp alignment if needed
-
- numStackArgs = numStackBytes / sizeof(void*);
-
- // 1 push above, so stack is currently misaligned
- const unsigned STACK_ALIGN_ADJUST = 0;
-
- if (!numStackArgs)
- {
- // sub rsp, 28h ;; 4 reg arg home locs + rsp alignment
- stackUsed = 0x20 + STACK_ALIGN_ADJUST;
- X86EmitSubEsp(stackUsed);
- }
- else
- {
- stackUsed = numStackArgs * sizeof(void*);
-
- // If the stack is misaligned, then an odd number of arguments
- // will naturally align the stack.
- if ( ((numStackArgs & 1) == 0)
- != (STACK_ALIGN_ADJUST == 0))
- {
- X86EmitPushReg(kRAX);
- stackUsed += sizeof(void*);
- }
-
- ofs = SecureDelegateFrame::GetOffsetOfTransitionBlock() +
- TransitionBlock::GetOffsetOfArgs() + sizeof(ArgumentRegisters) + numStackBytes;
-
- while (numStackArgs--)
- {
- ofs -= sizeof(void*);
-
- // push [rsi + ofs] ;; Push stack args
- X86EmitIndexPush(kESI, ofs);
- }
-
- // sub rsp, 20h ;; Create 4 reg arg home locations
- X86EmitSubEsp(0x20);
-
- stackUsed += 0x20;
- }
-
- int thisArgNum = 0;
-
- for(
- argNum = 0, argOfs = SecureDelegateFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgs();
- argNum < 4 && argTypes[argNum] != ELEMENT_TYPE_END;
- argNum++, argOfs += sizeof(void*)
- )
- {
- switch (argTypes[argNum])
- {
- case ELEMENT_TYPE_R4:
- // movss xmm?, dword ptr [rsi + argOfs]
- X64EmitMovSSFromMem((X86Reg)argNum, kRSI, argOfs);
- break;
- case ELEMENT_TYPE_R8:
- // movsd xmm?, qword ptr [rsi + argOfs]
- X64EmitMovSDFromMem((X86Reg)argNum, kRSI, argOfs);
- break;
- default:
- if (c_argRegs[argNum] != THIS_kREG)
- {
- // mov r*, [rsi + dstOfs]
- X86EmitIndexRegLoad(c_argRegs[argNum], kESI,argOfs);
- }
- break;
- } // switch
- }
-
- // mov SCRATCHREG, [rcx+Delegate._invocationList] ;;fetch the inner delegate
- X86EmitIndexRegLoad(SCRATCH_REGISTER_X86REG, THIS_kREG, DelegateObject::GetOffsetOfInvocationList());
-
- // mov THISREG, [SCRATCHREG+Delegate.object] ;;replace "this" pointer
- X86EmitIndexRegLoad(c_argRegs[thisArgNum], SCRATCH_REGISTER_X86REG, DelegateObject::GetOffsetOfTarget());
-
- // call [SCRATCHREG+Delegate.target] ;; call current subscriber
- X86EmitOffsetModRM(0xff, (X86Reg)2, SCRATCH_REGISTER_X86REG, DelegateObject::GetOffsetOfMethodPtr());
-
- // add rsp, stackUsed ;; Clean up stack
- X86EmitAddEsp(stackUsed);
-
-#else // _TARGET_AMD64_
-
- UINT16 numStackBytes = static_cast<UINT16>(hash & ~3);
-
- // ..repush & reenregister args..
- INT32 ofs = numStackBytes + SecureDelegateFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgs();
- while (ofs != SecureDelegateFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgs())
- {
- ofs -= sizeof(void*);
- X86EmitIndexPush(kESI, ofs);
- }
-
- #define ARGUMENT_REGISTER(regname) if (k##regname != THIS_kREG) { X86EmitIndexRegLoad(k##regname, kESI, \
- offsetof(ArgumentRegisters, regname) + SecureDelegateFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgumentRegisters()); }
-
- ENUM_ARGUMENT_REGISTERS_BACKWARD();
-
- #undef ARGUMENT_REGISTER
-
- // mov SCRATCHREG, [ecx+Delegate._invocationList] ;;fetch the inner delegate
- X86EmitIndexRegLoad(SCRATCH_REGISTER_X86REG, THIS_kREG, DelegateObject::GetOffsetOfInvocationList());
-
- // mov THISREG, [SCRATCHREG+Delegate.object] ;;replace "this" pointer
- X86EmitIndexRegLoad(THIS_kREG, SCRATCH_REGISTER_X86REG, DelegateObject::GetOffsetOfTarget());
-
- // call [SCRATCHREG+Delegate.target] ;; call current subscriber
- X86EmitOffsetModRM(0xff, (X86Reg)2, SCRATCH_REGISTER_X86REG, DelegateObject::GetOffsetOfMethodPtr());
- INDEBUG(Emit8(0x90)); // Emit a nop after the call in debug so that
- // we know that this is a call that can directly call
- // managed code
-
-#endif // _TARGET_AMD64_
-
- // The debugger may need to stop here, so grab the offset of this code.
- EmitPatchLabel();
-
- EmitCheckGSCookie(kESI, SecureDelegateFrame::GetOffsetOfGSCookie());
-
- // Epilog
- EmitMethodStubEpilog(numStackBytes, SecureDelegateFrame::GetOffsetOfTransitionBlock());
-}
#endif // !CROSSGEN_COMPILE && !FEATURE_STUBS_AS_IL
#if !defined(CROSSGEN_COMPILE) && !defined(FEATURE_ARRAYSTUB_AS_IL)
diff --git a/src/vm/i386/stublinkerx86.h b/src/vm/i386/stublinkerx86.h
index 49731a8ce4..ef1c113403 100644
--- a/src/vm/i386/stublinkerx86.h
+++ b/src/vm/i386/stublinkerx86.h
@@ -366,10 +366,18 @@ class StubLinkerCPU : public StubLinker
#endif // _TARGET_X86_
#endif // !FEATURE_STUBS_AS_IL
+#ifdef _TARGET_X86_
VOID EmitUnboxMethodStub(MethodDesc* pRealMD);
+#endif // _TARGET_X86_
+ VOID EmitTailJumpToMethod(MethodDesc *pMD);
+#ifdef _TARGET_AMD64_
+ VOID EmitLoadMethodAddressIntoAX(MethodDesc *pMD);
+#endif
+
#if defined(FEATURE_SHARE_GENERIC_CODE)
VOID EmitInstantiatingMethodStub(MethodDesc* pSharedMD, void* extra);
#endif // FEATURE_SHARE_GENERIC_CODE
+ VOID EmitComputedInstantiatingMethodStub(MethodDesc* pSharedMD, struct ShuffleEntry *pShuffleEntryArray, void* extraArg);
#if defined(FEATURE_COMINTEROP) && defined(_TARGET_X86_)
//========================================================================
@@ -393,13 +401,11 @@ class StubLinkerCPU : public StubLinker
VOID EmitDelegateInvoke();
#endif // _TARGET_X86_
+#if defined(_TARGET_X86_) && !defined(FEATURE_MULTICASTSTUB_AS_IL)
//===========================================================================
// Emits code for MulticastDelegate.Invoke() - sig specific
VOID EmitMulticastInvoke(UINT_PTR hash);
-
- //===========================================================================
- // Emits code for Delegate.Invoke() on delegates that recorded creator assembly
- VOID EmitSecureDelegateInvoke(UINT_PTR hash);
+#endif // defined(_TARGET_X86_) && !defined(FEATURE_MULTICASTSTUB_AS_IL)
#endif // !FEATURE_STUBS_AS_IL
//===========================================================================
diff --git a/src/vm/ilstubcache.cpp b/src/vm/ilstubcache.cpp
index a1b591fd6b..8c2cf19a1b 100644
--- a/src/vm/ilstubcache.cpp
+++ b/src/vm/ilstubcache.cpp
@@ -224,13 +224,13 @@ MethodDesc* ILStubCache::CreateNewMethodDesc(LoaderHeap* pCreationHeap, MethodTa
}
else
#endif
-#ifdef FEATURE_STUBS_AS_IL
- if (SF_IsSecureDelegateStub(dwStubFlags))
+ if (SF_IsWrapperDelegateStub(dwStubFlags))
{
- pMD->m_dwExtendedFlags |= DynamicMethodDesc::nomdSecureDelegateStub;
- pMD->GetILStubResolver()->SetStubType(ILStubResolver::SecureDelegateStub);
+ pMD->m_dwExtendedFlags |= DynamicMethodDesc::nomdWrapperDelegateStub;
+ pMD->GetILStubResolver()->SetStubType(ILStubResolver::WrapperDelegateStub);
}
else
+#ifdef FEATURE_INSTANTIATINGSTUB_AS_IL
if (SF_IsUnboxingILStub(dwStubFlags))
{
pMD->m_dwExtendedFlags |= DynamicMethodDesc::nomdUnboxingILStub;
diff --git a/src/vm/ilstubresolver.cpp b/src/vm/ilstubresolver.cpp
index 7f5964b927..d1f875816c 100644
--- a/src/vm/ilstubresolver.cpp
+++ b/src/vm/ilstubresolver.cpp
@@ -83,11 +83,11 @@ LPCUTF8 ILStubResolver::GetStubMethodName()
#ifdef FEATURE_MULTICASTSTUB_AS_IL
case MulticastDelegateStub: return "IL_STUB_MulticastDelegate_Invoke";
#endif
-#ifdef FEATURE_STUBS_AS_IL
+#ifdef FEATURE_INSTANTIATINGSTUB_AS_IL
case UnboxingILStub: return "IL_STUB_UnboxingStub";
case InstantiatingStub: return "IL_STUB_InstantiatingStub";
- case SecureDelegateStub: return "IL_STUB_SecureDelegate_Invoke";
#endif
+ case WrapperDelegateStub: return "IL_STUB_WrapperDelegate_Invoke";
default:
UNREACHABLE_MSG("Unknown stub type");
}
diff --git a/src/vm/ilstubresolver.h b/src/vm/ilstubresolver.h
index 6f49398a96..f15cc7fd4d 100644
--- a/src/vm/ilstubresolver.h
+++ b/src/vm/ilstubresolver.h
@@ -86,8 +86,8 @@ protected:
#ifdef FEATURE_MULTICASTSTUB_AS_IL
MulticastDelegateStub,
#endif
-#ifdef FEATURE_STUBS_AS_IL
- SecureDelegateStub,
+ WrapperDelegateStub,
+#ifdef FEATURE_INSTANTIATINGSTUB_AS_IL
UnboxingILStub,
InstantiatingStub,
#endif
diff --git a/src/vm/interpreter.cpp b/src/vm/interpreter.cpp
index df68465cb8..6930dd464c 100644
--- a/src/vm/interpreter.cpp
+++ b/src/vm/interpreter.cpp
@@ -232,9 +232,9 @@ void InterpreterMethodInfo::InitArgInfo(CEEInfo* comp, CORINFO_METHOD_INFO* meth
if (GetFlag<Flag_hasThisArg>())
{
m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_UNDEF);
-#ifdef FEATURE_STUBS_AS_IL
+#ifdef FEATURE_INSTANTIATINGSTUB_AS_IL
MethodDesc *pMD = reinterpret_cast<MethodDesc*>(methInfo->ftn);
- // The signature of the ILStubs may be misleading.
+ // The signature of the ILStubs may be misleading.
// If a StubTarget is ever set, we'll find the correct type by inspecting the
// target, rather than the stub.
if (pMD->IsILStub())
@@ -263,7 +263,7 @@ void InterpreterMethodInfo::InitArgInfo(CEEInfo* comp, CORINFO_METHOD_INFO* meth
}
}
-#endif // FEATURE_STUBS_AS_IL
+#endif // FEATURE_INSTANTIATINGSTUB_AS_IL
if (m_argDescs[k].m_type == InterpreterType(CORINFO_TYPE_UNDEF))
{
CORINFO_CLASS_HANDLE cls = comp->getMethodClass(methInfo->ftn);
diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
index f1d41a266e..bf4abe12f5 100644
--- a/src/vm/jitinterface.cpp
+++ b/src/vm/jitinterface.cpp
@@ -5789,18 +5789,16 @@ void CEEInfo::getCallInfo(
}
}
- pResult->secureDelegateInvoke = FALSE;
+ pResult->wrapperDelegateInvoke = FALSE;
-#ifdef FEATURE_STUBS_AS_IL
if (m_pMethodBeingCompiled->IsDynamicMethod())
{
auto pMD = m_pMethodBeingCompiled->AsDynamicMethodDesc();
- if (pMD->IsILStub() && pMD->IsSecureDelegateStub())
+ if (pMD->IsILStub() && pMD->IsWrapperDelegateStub())
{
- pResult->secureDelegateInvoke = TRUE;
+ pResult->wrapperDelegateInvoke = TRUE;
}
}
-#endif
EE_TO_JIT_TRANSITION();
}
@@ -10327,8 +10325,8 @@ void CEEInfo::getEEInfo(CORINFO_EE_INFO *pEEInfoOut)
pEEInfoOut->offsetOfDelegateInstance = OFFSETOF__DelegateObject__target;
pEEInfoOut->offsetOfDelegateFirstTarget = OFFSETOF__DelegateObject__methodPtr;
- // Secure delegate offsets
- pEEInfoOut->offsetOfSecureDelegateIndirectCell = OFFSETOF__DelegateObject__methodPtrAux;
+ // Wrapper delegate offsets
+ pEEInfoOut->offsetOfWrapperDelegateIndirectCell = OFFSETOF__DelegateObject__methodPtrAux;
// Remoting offsets
pEEInfoOut->offsetOfTransparentProxyRP = (DWORD)-1;
diff --git a/src/vm/method.hpp b/src/vm/method.hpp
index 91575da8d7..151d1cc40b 100644
--- a/src/vm/method.hpp
+++ b/src/vm/method.hpp
@@ -2559,7 +2559,7 @@ protected:
nomdStubNeedsCOMStarted = 0x0800, // EnsureComStarted must be called before executing the method
nomdMulticastStub = 0x1000,
nomdUnboxingILStub = 0x2000,
- nomdSecureDelegateStub = 0x4000,
+ nomdWrapperDelegateStub = 0x4000,
nomdILStub = 0x00010000,
nomdLCGMethod = 0x00020000,
@@ -2668,12 +2668,12 @@ public:
return !!(m_dwExtendedFlags & nomdMulticastStub);
}
#endif
-#ifdef FEATURE_STUBS_AS_IL
- bool IsSecureDelegateStub() {
+ bool IsWrapperDelegateStub() {
LIMITED_METHOD_DAC_CONTRACT;
_ASSERTE(IsILStub());
- return !!(m_dwExtendedFlags & nomdSecureDelegateStub);
+ return !!(m_dwExtendedFlags & nomdWrapperDelegateStub);
}
+#ifdef FEATURE_INSTANTIATINGSTUB_AS_IL
bool IsUnboxingILStub() {
LIMITED_METHOD_DAC_CONTRACT;
_ASSERTE(IsILStub());
diff --git a/src/vm/prestub.cpp b/src/vm/prestub.cpp
index 7d756c6728..a8e35a2842 100644
--- a/src/vm/prestub.cpp
+++ b/src/vm/prestub.cpp
@@ -1341,7 +1341,7 @@ CORJIT_FLAGS VersionedPrepareCodeConfig::GetJitCompilationFlags()
#endif //FEATURE_CODE_VERSIONING
-#ifdef FEATURE_STUBS_AS_IL
+#ifdef FEATURE_INSTANTIATINGSTUB_AS_IL
// CreateInstantiatingILStubTargetSig:
// This method is used to create the signature of the target of the ILStub
@@ -1603,18 +1603,58 @@ Stub * MakeUnboxingStubWorker(MethodDesc *pMD)
_ASSERTE(pUnboxedMD != NULL && pUnboxedMD != pMD);
-#ifdef FEATURE_STUBS_AS_IL
- if (pUnboxedMD->RequiresInstMethodTableArg())
+#ifdef FEATURE_PORTABLE_SHUFFLE_THUNKS
+ StackSArray<ShuffleEntry> portableShuffle;
+ BOOL usePortableShuffle = FALSE;
+ if (!pUnboxedMD->RequiresInstMethodTableArg())
{
- pstub = CreateUnboxingILStubForSharedGenericValueTypeMethods(pUnboxedMD);
+ ShuffleEntry entry;
+ entry.srcofs = ShuffleEntry::SENTINEL;
+ entry.dstofs = 0;
+ portableShuffle.Append(entry);
+ usePortableShuffle = TRUE;
}
else
-#endif
+ {
+ usePortableShuffle = GenerateShuffleArrayPortable(pMD, pUnboxedMD, &portableShuffle, ShuffleComputationType::InstantiatingStub);
+ }
+
+ if (usePortableShuffle)
{
CPUSTUBLINKER sl;
- sl.EmitUnboxMethodStub(pUnboxedMD);
+ _ASSERTE(pUnboxedMD != NULL && pUnboxedMD != pMD);
+
+ // The shuffle for an unboxing stub of a method that doesn't capture the
+ // type of the this pointer must be a no-op
+ _ASSERTE(pUnboxedMD->RequiresInstMethodTableArg() || (portableShuffle.GetCount() == 1));
+
+ sl.EmitComputedInstantiatingMethodStub(pUnboxedMD, &portableShuffle[0], NULL);
+
pstub = sl.Link(pMD->GetLoaderAllocator()->GetStubHeap());
}
+ else
+#endif
+ {
+#ifdef FEATURE_INSTANTIATINGSTUB_AS_IL
+#ifndef FEATURE_PORTABLE_SHUFFLE_THUNKS
+ if (pUnboxedMD->RequiresInstMethodTableArg())
+#endif // !FEATURE_PORTABLE_SHUFFLE_THUNKS
+ {
+ _ASSERTE(pUnboxedMD->RequiresInstMethodTableArg());
+ pstub = CreateUnboxingILStubForSharedGenericValueTypeMethods(pUnboxedMD);
+ }
+#ifndef FEATURE_PORTABLE_SHUFFLE_THUNKS
+ else
+#endif // !FEATURE_PORTABLE_SHUFFLE_THUNKS
+#endif // FEATURE_INSTANTIATINGSTUB_AS_IL
+#ifndef FEATURE_PORTABLE_SHUFFLE_THUNKS
+ {
+ CPUSTUBLINKER sl;
+ sl.EmitUnboxMethodStub(pUnboxedMD);
+ pstub = sl.Link(pMD->GetLoaderAllocator()->GetStubHeap());
+ }
+#endif // !FEATURE_PORTABLE_SHUFFLE_THUNKS
+ }
RETURN pstub;
}
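Flattening the preprocessor blocks, the rewritten worker tries the portable route first and falls back to the older stubs only when the shuffle cannot be expressed as pure register moves. A condensed sketch of the flow when both FEATURE_PORTABLE_SHUFFLE_THUNKS and FEATURE_INSTANTIATINGSTUB_AS_IL are defined; TryBuildPortableShuffle is a hypothetical wrapper around the two branches above:

Stub* MakeUnboxingStubSketch(MethodDesc* pMD, MethodDesc* pUnboxedMD)
{
    StackSArray<ShuffleEntry> shuffle;
    // No hidden MethodTable arg: the shuffle is a lone SENTINEL (a no-op).
    // Otherwise GenerateShuffleArrayPortable must prove it is reg-to-reg only.
    if (TryBuildPortableShuffle(pMD, pUnboxedMD, &shuffle))   // hypothetical
    {
        CPUSTUBLINKER sl;
        sl.EmitComputedInstantiatingMethodStub(pUnboxedMD, &shuffle[0], NULL);
        return sl.Link(pMD->GetLoaderAllocator()->GetStubHeap());
    }
    // Fallback: the IL unboxing stub for shared generic value-type methods.
    return CreateUnboxingILStubForSharedGenericValueTypeMethods(pUnboxedMD);
}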
@@ -1653,17 +1693,32 @@ Stub * MakeInstantiatingStubWorker(MethodDesc *pMD)
// It's a per-instantiation static method
extraArg = pMD->GetMethodTable();
}
+
Stub *pstub = NULL;
+
+#ifdef FEATURE_PORTABLE_SHUFFLE_THUNKS
+ StackSArray<ShuffleEntry> portableShuffle;
+ if (GenerateShuffleArrayPortable(pMD, pSharedMD, &portableShuffle, ShuffleComputationType::InstantiatingStub))
+ {
+ CPUSTUBLINKER sl;
+ _ASSERTE(pSharedMD != NULL && pSharedMD != pMD);
+ sl.EmitComputedInstantiatingMethodStub(pSharedMD, &portableShuffle[0], extraArg);
-#ifdef FEATURE_STUBS_AS_IL
- pstub = CreateInstantiatingILStub(pSharedMD, extraArg);
+ pstub = sl.Link(pMD->GetLoaderAllocator()->GetStubHeap());
+ }
+ else
+#endif
+ {
+#ifdef FEATURE_INSTANTIATINGSTUB_AS_IL
+ pstub = CreateInstantiatingILStub(pSharedMD, extraArg);
#else
- CPUSTUBLINKER sl;
- _ASSERTE(pSharedMD != NULL && pSharedMD != pMD);
- sl.EmitInstantiatingMethodStub(pSharedMD, extraArg);
+ CPUSTUBLINKER sl;
+ _ASSERTE(pSharedMD != NULL && pSharedMD != pMD);
+ sl.EmitInstantiatingMethodStub(pSharedMD, extraArg);
- pstub = sl.Link(pMD->GetLoaderAllocator()->GetStubHeap());
+ pstub = sl.Link(pMD->GetLoaderAllocator()->GetStubHeap());
#endif
+ }
RETURN pstub;
}
@@ -3231,7 +3286,7 @@ PCODE DynamicHelperFixup(TransitionBlock * pTransitionBlock, TADDR * pCell, DWOR
if (ctorData.pArg4 != NULL || ctorData.pArg5 != NULL)
{
- // This should never happen - we should never get collectible or secure delegates here
+ // This should never happen - we should never get collectible or wrapper delegates here
_ASSERTE(false);
pDelegateCtor = NULL;
}
diff --git a/src/vm/virtualcallstub.h b/src/vm/virtualcallstub.h
index 496d317cc5..332295d792 100644
--- a/src/vm/virtualcallstub.h
+++ b/src/vm/virtualcallstub.h
@@ -125,7 +125,7 @@ private:
// In these cases all calls are made by the platform equivalent of "call [addr]".
//
// DelegateCallSite are particular in that they can come in a variety of forms:
+ // a direct delegate call has a sequence defined by the jit, but multicast or wrapper delegates
+ // a direct delegate call has a sequence defined by the jit but a multicast or wrapper delegate
// are defined in a stub and have a different shape
//
PTR_PCODE m_siteAddr; // Stores the address of an indirection cell
diff --git a/src/zap/zapinfo.cpp b/src/zap/zapinfo.cpp
index dd49d6ab2b..f3be112cb6 100644
--- a/src/zap/zapinfo.cpp
+++ b/src/zap/zapinfo.cpp
@@ -2867,7 +2867,7 @@ CORINFO_METHOD_HANDLE ZapInfo::GetDelegateCtor(CORINFO_METHOD_HANDLE methHnd,
{
if (pCtorData->pArg4)
{
- // cannot optimize any secure delegate, give up
+ // cannot optimize any wrapper delegate, give up
delegateCtor = methHnd;
}
else if (pCtorData->pArg3)