Diffstat (limited to 'src/vm/dllimportcallback.cpp')
-rw-r--r--  src/vm/dllimportcallback.cpp | 259
1 file changed, 139 insertions(+), 120 deletions(-)
diff --git a/src/vm/dllimportcallback.cpp b/src/vm/dllimportcallback.cpp
index 12613cb96b..c70d52d02e 100644
--- a/src/vm/dllimportcallback.cpp
+++ b/src/vm/dllimportcallback.cpp
@@ -39,45 +39,6 @@ EXTERN_C void STDCALL UM2MThunk_WrapperHelper(void *pThunkArgs,
UMEntryThunk *pEntryThunk,
Thread *pThread);
-EXTERN_C void __fastcall ReverseEnterRuntimeHelper(Thread *pThread)
-{
- CONTRACTL
- {
- THROWS;
- GC_TRIGGERS;
- SO_TOLERANT;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- // ReverseEnterRuntimeThrowComplus probes.
- //BEGIN_ENTRYPOINT_THROWS;
-
- _ASSERTE (pThread == GetThread());
-
-#ifdef FEATURE_STACK_PROBE
- // The thread is calling into managed code. If we have the following sequence on the stack
- // Managed code 1 -> Unmanaged code -> Managed code 2,
- // and we hit an SO in managed code 2, then in order to unwind the stack for managed code 1,
- // we need to make sure the thread is in cooperative GC mode. Due to the unmanaged code in
- // between, when we reach managed code 1, the thread is in preemptive GC mode. In order to
- // switch to cooperative, we need to have enough stack. This means that we need to reclaim
- // stack for managed code 2. Therefore we require some amount of stack before entering
- // managed code 2.
- RetailStackProbe(static_cast<UINT>(ADJUST_PROBE(BACKOUT_CODE_STACK_LIMIT)),pThread);
-#endif
- pThread->ReverseEnterRuntimeThrowComplus();
- //END_ENTRYPOINT_THROWS
-}
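
The probe comment above explains why stack headroom must be guaranteed before reentering
managed code. As a rough, self-contained sketch of the condition the removed RetailStackProbe
call enforces (hypothetical helper, Windows-only, not part of this patch):

    #include <windows.h>

    // Hypothetical sketch: check that at least cbHeadroom bytes remain above
    // the thread's low stack limit, i.e. that there is enough room left to
    // switch to cooperative mode and unwind later.
    static bool HasStackHeadroom(SIZE_T cbHeadroom)
    {
        ULONG_PTR low = 0, high = 0;
        GetCurrentThreadStackLimits(&low, &high); // this thread's stack bounds (Win8+)
        ULONG_PTR sp = (ULONG_PTR)&low;           // address of a local approximates SP
        return (sp - low) >= cbHeadroom;          // enough headroom left?
    }
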
-
-EXTERN_C void __fastcall ReverseLeaveRuntimeHelper(Thread *pThread)
-{
- WRAPPER_NO_CONTRACT;
-
- _ASSERTE (pThread == GetThread());
- pThread->ReverseLeaveRuntime();
-}
-
#ifdef MDA_SUPPORTED
EXTERN_C void __fastcall CallbackOnCollectedDelegateHelper(UMEntryThunk *pEntryThunk)
{
@@ -378,53 +339,6 @@ VOID UMEntryThunk::CompileUMThunkWorker(UMThunkStubInfo *pInfo,
// would deadlock).
pcpusl->EmitLabel(pDoADCallBackStartLabel);
-#ifdef FEATURE_INCLUDE_ALL_INTERFACES
- if (NDirect::IsHostHookEnabled())
- {
- // We call ReverseEnterRuntimeHelper before we link a frame.
- // So we know that when an exception unwinds through our ReverseEnterRuntimeFrame,
- // we need to call ReverseLeaveRuntime.
-
- // save registers
- pcpusl->X86EmitPushReg(kEAXentryThunk);
- pcpusl->X86EmitPushReg(kECXthread);
-
- // ecx still has Thread
- // ReverseEnterRuntimeHelper is a fast call
- pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID)ReverseEnterRuntimeHelper), 0);
-
- // restore registers
- pcpusl->X86EmitPopReg(kECXthread);
- pcpusl->X86EmitPopReg(kEAXentryThunk);
-
- // push reg; leave room for m_next
- pcpusl->X86EmitPushReg(kDummyPushReg);
-
- // push IMM32 ; push Frame vptr
- pcpusl->X86EmitPushImm32((UINT32)(size_t)ReverseEnterRuntimeFrame::GetMethodFrameVPtr());
-
- // mov edx, esp ;; set EDX -> new frame
- pcpusl->X86EmitMovRegSP(kEDX);
-
- // push IMM32 ; push gsCookie
- pcpusl->X86EmitPushImmPtr((LPVOID)GetProcessGSCookie());
-
- // save UMEntryThunk
- pcpusl->X86EmitPushReg(kEAXentryThunk);
-
- // mov eax,[ecx + Thread.GetFrame()] ;; get previous frame
- pcpusl->X86EmitIndexRegLoad(kEAXentryThunk, kECXthread, Thread::GetOffsetOfCurrentFrame());
-
- // mov [edx + Frame.m_next], eax
- pcpusl->X86EmitIndexRegStore(kEDX, Frame::GetOffsetOfNextLink(), kEAX);
-
- // mov [ecx + Thread.GetFrame()], edx
- pcpusl->X86EmitIndexRegStore(kECXthread, Thread::GetOffsetOfCurrentFrame(), kEDX);
-
- // restore EAX
- pcpusl->X86EmitPopReg(kEAXentryThunk);
- }
-#endif
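
The block removed above hand-assembles a push onto the thread's singly linked frame chain.
In C++, with hypothetical stand-ins for the real Thread and Frame types (the real ones live
in threads.h and frames.h), the linking step amounts to:

    // Hypothetical simplified types, for illustration only.
    struct Frame  { Frame *m_next; };
    struct Thread {
        Frame *m_pFrame;
        Frame *GetFrame()            { return m_pFrame; }
        void   SetFrame(Frame *pNew) { m_pFrame = pNew; }
    };

    // Equivalent of the emitted assembly: splice pNewFrame in at the head.
    static void LinkFrame(Thread *pThread, Frame *pNewFrame)
    {
        pNewFrame->m_next = pThread->GetFrame(); // mov eax,[ecx+Frame]; mov [edx+m_next],eax
        pThread->SetFrame(pNewFrame);            // mov [ecx+Frame], edx
    }
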
#ifdef MDA_SUPPORTED
if ((pInfo->m_wFlags & umtmlSkipStub) && !(pInfo->m_wFlags & umtmlIsStatic) &&
@@ -624,39 +538,6 @@ VOID UMEntryThunk::CompileUMThunkWorker(UMThunkStubInfo *pInfo,
// restore the thread pointer
pcpusl->X86EmitPopReg(kECXthread);
-#ifdef FEATURE_INCLUDE_ALL_INTERFACES
- if (NDirect::IsHostHookEnabled())
- {
-#ifdef _DEBUG
- // lea edx, [esp + sizeof(GSCookie)] ; edx <- current Frame
- pcpusl->X86EmitEspOffset(0x8d, kEDX, sizeof(GSCookie));
- pcpusl->EmitCheckGSCookie(kEDX, ReverseEnterRuntimeFrame::GetOffsetOfGSCookie());
-#endif
-
- // Remove our frame
- // Get the previous frame into EDX
- // mov edx, [esp + GSCookie + Frame.m_next]
- static const BYTE initArg1[] = { 0x8b, 0x54, 0x24, 0x08 }; // mov edx, [esp+8]
- _ASSERTE(ReverseEnterRuntimeFrame::GetNegSpaceSize() + Frame::GetOffsetOfNextLink() == 0x8);
- pcpusl->EmitBytes(initArg1, sizeof(initArg1));
-
- // mov [ecx + Thread.GetFrame()], edx
- pcpusl->X86EmitIndexRegStore(kECXthread, Thread::GetOffsetOfCurrentFrame(), kEDX);
-
- // pop off stack
- // add esp, 8
- pcpusl->X86EmitAddEsp(sizeof(GSCookie) + sizeof(ReverseEnterRuntimeFrame));
-
- // Save pThread
- pcpusl->X86EmitPushReg(kECXthread);
-
- // ReverseEnterRuntimeHelper is a fast call
- pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID)ReverseLeaveRuntimeHelper), 0);
-
- // Restore pThread
- pcpusl->X86EmitPopReg(kECXthread);
- }
-#endif
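
The epilog block removed here is the mirror image: it pops the frame by restoring the saved
m_next link. Reusing the hypothetical Frame/Thread stand-ins from the earlier sketch:

    // Counterpart to LinkFrame: restore the previous head of the chain.
    static void UnlinkFrame(Thread *pThread, Frame *pCurFrame)
    {
        pThread->SetFrame(pCurFrame->m_next);    // mov [ecx+Frame], edx
    }
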
// Check whether we got here via the switch AD case. We can tell this by looking at whether the
// caller's arguments immediately precede our EBP frame (they will for the non-switch case but
@@ -1491,13 +1372,71 @@ VOID UMThunkMarshInfo::RunTimeInit()
pStubMD = GetILStubMethodDesc(pMD, &sigInfo, dwStubFlags);
pFinalILStub = JitILStub(pStubMD);
+
}
}
+#if defined(_TARGET_X86_)
+ MetaSig sig(pMD);
+ int numRegistersUsed = 0;
+ UINT16 cbRetPop = 0;
+
+ //
+ // m_cbStackArgSize represents the number of arg bytes for the MANAGED signature
+ //
+ m_cbStackArgSize = 0;
+
+ int offs = 0;
+
+#ifdef UNIX_X86_ABI
+ if (HasRetBuffArgUnmanagedFixup(&sig))
+ {
+ // callee should pop retbuf
+ numRegistersUsed += 1;
+ offs += STACK_ELEM_SIZE;
+ cbRetPop += STACK_ELEM_SIZE;
+ }
+#endif // UNIX_X86_ABI
+
+ for (UINT i = 0 ; i < sig.NumFixedArgs(); i++)
+ {
+ TypeHandle thValueType;
+ CorElementType type = sig.NextArgNormalized(&thValueType);
+ int cbSize = sig.GetElemSize(type, thValueType);
+ if (ArgIterator::IsArgumentInRegister(&numRegistersUsed, type))
+ {
+ offs += STACK_ELEM_SIZE;
+ }
+ else
+ {
+ offs += StackElemSize(cbSize);
+ m_cbStackArgSize += StackElemSize(cbSize);
+ }
+ }
+ m_cbActualArgSize = (pStubMD != NULL) ? pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize() : offs;
+
+ PInvokeStaticSigInfo sigInfo;
+ if (pMD != NULL)
+ new (&sigInfo) PInvokeStaticSigInfo(pMD);
+ else
+ new (&sigInfo) PInvokeStaticSigInfo(GetSignature(), GetModule());
+ if (sigInfo.GetCallConv() == pmCallConvCdecl)
+ {
+ m_cbRetPop = cbRetPop;
+ }
+ else
+ {
+ // For all calling conventions other than cdecl, the callee pops the stack arguments
+ m_cbRetPop = cbRetPop + static_cast<UINT16>(m_cbActualArgSize);
+ }
+#else // _TARGET_X86_
//
// m_cbActualArgSize gets the number of arg bytes for the NATIVE signature
//
- m_cbActualArgSize = (pStubMD != NULL) ? pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize() : pMD->SizeOfArgStack();
+ m_cbActualArgSize =
+ (pStubMD != NULL) ? pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize() : pMD->SizeOfArgStack();
+
+#endif // _TARGET_X86_
#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL
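
The m_cbRetPop computation added above encodes the standard x86 split between caller-pop and
callee-pop conventions. As a minimal illustration with hypothetical functions in MSVC syntax
(not from this patch), for two int arguments a __stdcall callee returns with `ret 8`, and that
byte count is exactly what the stub stores in m_cbRetPop:

    // cdecl: the caller removes the arguments after the call returns.
    int __cdecl   SumCdecl(int a, int b)   { return a + b; } // caller: add esp, 8
    // stdcall: the callee removes them itself on return.
    int __stdcall SumStdcall(int a, int b) { return a + b; } // callee: ret 8
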
@@ -1505,6 +1444,86 @@ VOID UMThunkMarshInfo::RunTimeInit()
InterlockedCompareExchangeT<PCODE>(&m_pILStub, pFinalILStub, (PCODE)1);
}
+#if defined(_TARGET_X86_) && defined(FEATURE_STUBS_AS_IL)
+VOID UMThunkMarshInfo::SetupArguments(char *pSrc, ArgumentRegisters *pArgRegs, char *pDst)
+{
+ MethodDesc *pMD = GetMethod();
+
+ _ASSERTE(pMD);
+
+ //
+ // x86 native uses the following stack layout:
+ // | saved eip |
+ // | --------- | <- CFA
+ // | stkarg 0 |
+ // | stkarg 1 |
+ // | ... |
+ // | stkarg N |
+ //
+ // x86 managed, however, uses a slightly different stack layout:
+ // | saved eip |
+ // | --------- | <- CFA
+ // | stkarg M | (NATIVE/MANAGED may have a different number of stack arguments)
+ // | ... |
+ // | stkarg 1 |
+ // | stkarg 0 |
+ //
+ // This stub bridges the gap between them.
+ //
+ char *pCurSrc = pSrc;
+ char *pCurDst = pDst + m_cbStackArgSize;
+
+ MetaSig sig(pMD);
+
+ int numRegistersUsed = 0;
+
+#ifdef UNIX_X86_ABI
+ if (HasRetBuffArgUnmanagedFixup(&sig))
+ {
+ // Pass retbuf via Ecx
+ numRegistersUsed += 1;
+ pArgRegs->Ecx = *((UINT32 *)pCurSrc);
+ pCurSrc += STACK_ELEM_SIZE;
+ }
+#endif // UNIX_X86_ABI
+
+ for (UINT i = 0 ; i < sig.NumFixedArgs(); i++)
+ {
+ TypeHandle thValueType;
+ CorElementType type = sig.NextArgNormalized(&thValueType);
+ int cbSize = sig.GetElemSize(type, thValueType);
+ int elemSize = StackElemSize(cbSize);
+
+ if (ArgIterator::IsArgumentInRegister(&numRegistersUsed, type))
+ {
+ _ASSERTE(elemSize == STACK_ELEM_SIZE);
+
+ if (numRegistersUsed == 1)
+ pArgRegs->Ecx = *((UINT32 *)pCurSrc);
+ else if (numRegistersUsed == 2)
+ pArgRegs->Edx = *((UINT32 *)pCurSrc);
+ }
+ else
+ {
+ pCurDst -= elemSize;
+ memcpy(pCurDst, pCurSrc, elemSize);
+ }
+
+ pCurSrc += elemSize;
+ }
+
+ _ASSERTE(pDst == pCurDst);
+}
+
+EXTERN_C VOID STDCALL UMThunkStubSetupArgumentsWorker(UMThunkMarshInfo *pMarshInfo,
+ char *pSrc,
+ UMThunkMarshInfo::ArgumentRegisters *pArgRegs,
+ char *pDst)
+{
+ pMarshInfo->SetupArguments(pSrc, pArgRegs, pDst);
+}
+#endif // _TARGET_X86_ && FEATURE_STUBS_AS_IL
+
#ifdef _DEBUG
void STDCALL LogUMTransition(UMEntryThunk* thunk)
{
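
The heart of SetupArguments above is reversing the stack-slot order between the native and
managed layouts while routing the first register-eligible arguments to ECX/EDX. A stripped-down,
self-contained sketch of just the slot reversal, with a hypothetical helper (the real code also
sizes each element via MetaSig and skips register-passed arguments):

    #include <string.h>
    #include <stddef.h>

    enum { STACK_SLOT = 4 }; // x86 stack element size

    // Copy numSlots 4-byte stack slots from native order (arg 0 at the lowest
    // address) into managed order (arg 0 at the highest address), mirroring how
    // SetupArguments walks pSrc forward while filling pDst from the top down.
    static void ReverseStackSlots(const char *pSrc, char *pDst, size_t numSlots)
    {
        char *pCurDst = pDst + numSlots * STACK_SLOT;
        for (size_t i = 0; i < numSlots; i++)
        {
            pCurDst -= STACK_SLOT;
            memcpy(pCurDst, pSrc, STACK_SLOT);
            pSrc += STACK_SLOT;
        }
    }
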