summaryrefslogtreecommitdiff
path: root/src/vm/prestub.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/vm/prestub.cpp')
-rw-r--r--src/vm/prestub.cpp1372
1 files changed, 775 insertions, 597 deletions
diff --git a/src/vm/prestub.cpp b/src/vm/prestub.cpp
index 31a5670e00..f96135008d 100644
--- a/src/vm/prestub.cpp
+++ b/src/vm/prestub.cpp
@@ -13,12 +13,10 @@
#include "common.h"
#include "vars.hpp"
-#include "security.h"
#include "eeconfig.h"
#include "dllimport.h"
#include "comdelegate.h"
#include "dbginterface.h"
-#include "listlock.inl"
#include "stubgen.h"
#include "eventtrace.h"
#include "array.h"
@@ -52,15 +50,18 @@
#include "callcounter.h"
#endif
-#ifndef DACCESS_COMPILE
-
-EXTERN_C void STDCALL ThePreStub();
-
-#if defined(HAS_COMPACT_ENTRYPOINTS) && defined (_TARGET_ARM_)
+#if defined(FEATURE_GDBJIT)
+#include "gdbjit.h"
+#endif // FEATURE_GDBJIT
-EXTERN_C void STDCALL ThePreStubCompactARM();
+#ifndef DACCESS_COMPILE
-#endif // defined(HAS_COMPACT_ENTRYPOINTS) && defined (_TARGET_ARM_)
+#if defined(FEATURE_JIT_PITCHING)
+EXTERN_C void CheckStacksAndPitch();
+EXTERN_C void SavePitchingCandidate(MethodDesc* pMD, ULONG sizeOfCode);
+EXTERN_C void DeleteFromPitchingCandidate(MethodDesc* pMD);
+EXTERN_C void MarkMethodNotPitchingCandidate(MethodDesc* pMD);
+#endif
EXTERN_C void STDCALL ThePreStubPatch();
@@ -72,17 +73,11 @@ PCODE MethodDesc::DoBackpatch(MethodTable * pMT, MethodTable *pDispatchingMT, BO
{
STANDARD_VM_CHECK;
PRECONDITION(!ContainsGenericVariables());
-#ifndef FEATURE_INTERPRETER
PRECONDITION(HasStableEntryPoint());
-#endif // FEATURE_INTERPRETER
PRECONDITION(pMT == GetMethodTable());
}
CONTRACTL_END;
-#ifdef FEATURE_INTERPRETER
- PCODE pTarget = GetMethodEntryPoint();
-#else
PCODE pTarget = GetStableEntryPoint();
-#endif
if (!HasTemporaryEntryPoint())
return pTarget;
@@ -231,17 +226,13 @@ void DACNotifyCompilationFinished(MethodDesc *methodDesc)
_ASSERTE(modulePtr);
-#ifndef FEATURE_GDBJIT
// Are we listed?
USHORT jnt = jn.Requested((TADDR) modulePtr, t);
if (jnt & CLRDATA_METHNOTIFY_GENERATED)
{
// If so, throw an exception!
-#endif
DACNotify::DoJITNotification(methodDesc);
-#ifndef FEATURE_GDBJIT
}
-#endif
}
}
@@ -250,90 +241,350 @@ void DACNotifyCompilationFinished(MethodDesc *methodDesc)
#endif
// </TODO>
+PCODE MethodDesc::PrepareInitialCode()
+{
+ STANDARD_VM_CONTRACT;
+ PrepareCodeConfig config(NativeCodeVersion(this), TRUE, TRUE);
+ PCODE pCode = PrepareCode(&config);
-// ********************************************************************
-// README!!
-// ********************************************************************
+#if defined(FEATURE_GDBJIT) && defined(FEATURE_PAL) && !defined(CROSSGEN_COMPILE)
+ NotifyGdb::MethodPrepared(this);
+#endif
-// MakeJitWorker is the thread safe way to invoke the JIT compiler
-// If multiple threads get in here for the same pMD, ALL of them
-// MUST return the SAME value for pstub.
-//
-// This function creates a DeadlockAware list of methods being jitted
-// which prevents us from trying to JIT the same method more that once.
+ return pCode;
+}
+PCODE MethodDesc::PrepareCode(NativeCodeVersion codeVersion)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef FEATURE_CODE_VERSIONING
+ if (codeVersion.IsDefaultVersion())
+ {
+#endif
+ // fast path
+ PrepareCodeConfig config(codeVersion, TRUE, TRUE);
+ return PrepareCode(&config);
+#ifdef FEATURE_CODE_VERSIONING
+ }
+ else
+ {
+ // a bit slower path (+1 usec?)
+ VersionedPrepareCodeConfig config;
+ {
+ CodeVersionManager::TableLockHolder lock(GetCodeVersionManager());
+ config = VersionedPrepareCodeConfig(codeVersion);
+ }
+ config.FinishConfiguration();
+ return PrepareCode(&config);
+ }
+#endif
+
+}
-PCODE MethodDesc::MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, CORJIT_FLAGS flags)
+PCODE MethodDesc::PrepareCode(PrepareCodeConfig* pConfig)
{
STANDARD_VM_CONTRACT;
- BOOL fIsILStub = IsILStub(); // @TODO: understand the need for this special case
+ // If other kinds of code need multi-versioning we could add more cases here,
+ // but for now generation of all other code/stubs occurs in other code paths
+ _ASSERTE(IsIL() || IsNoMetadata());
+ return PrepareILBasedCode(pConfig);
+}
- LOG((LF_JIT, LL_INFO1000000,
- "MakeJitWorker(" FMT_ADDR ", %s) for %s:%s\n",
- DBG_ADDR(this),
- fIsILStub ? " TRUE" : "FALSE",
- GetMethodTable()->GetDebugClassName(),
- m_pszDebugMethodName));
+PCODE MethodDesc::PrepareILBasedCode(PrepareCodeConfig* pConfig)
+{
+ STANDARD_VM_CONTRACT;
+ PCODE pCode = NULL;
+
+ if (pConfig->MayUsePrecompiledCode())
+ {
+ pCode = GetPrecompiledCode(pConfig);
+ }
+ if (pCode == NULL)
+ {
+ LOG((LF_CLASSLOADER, LL_INFO1000000,
+ " In PrepareILBasedCode, calling JitCompileCode\n"));
+ // Mark the code as hot in case the method ends up in the native image
+ g_IBCLogger.LogMethodCodeAccess(this);
+ pCode = JitCompileCode(pConfig);
+ }
+ return pCode;
+}
+
+PCODE MethodDesc::GetPrecompiledCode(PrepareCodeConfig* pConfig)
+{
+ STANDARD_VM_CONTRACT;
PCODE pCode = NULL;
- ULONG sizeOfCode = 0;
-#if defined(FEATURE_INTERPRETER) || defined(FEATURE_TIERED_COMPILATION)
- BOOL fStable = TRUE; // True iff the new code address (to be stored in pCode), is a stable entry point.
+
+#ifdef FEATURE_PREJIT
+ pCode = GetPrecompiledNgenCode();
#endif
-#ifdef FEATURE_INTERPRETER
- PCODE pPreviousInterpStub = NULL;
- BOOL fInterpreted = FALSE;
+
+#ifdef FEATURE_READYTORUN
+ if (pCode == NULL)
+ {
+ pCode = GetPrecompiledR2RCode();
+ if (pCode != NULL)
+ {
+ pConfig->SetNativeCode(pCode, &pCode);
+ }
+ }
+#endif // FEATURE_READYTORUN
+
+ return pCode;
+}
+
+PCODE MethodDesc::GetPrecompiledNgenCode()
+{
+ STANDARD_VM_CONTRACT;
+ PCODE pCode = NULL;
+
+#ifdef FEATURE_PREJIT
+ pCode = GetPreImplementedCode();
+
+#ifdef PROFILING_SUPPORTED
+
+ // The pre-existing cache search callbacks aren't implemented as you might expect.
+ // Instead of sending a cache search started for all methods, we only send the notification
+ // when we already know a pre-compiled version of the method exists. In the NGEN case we also
+ // don't send callbacks unless the method triggers the prestub which excludes a lot of methods.
+ // From the profiler's perspective this technique is only reliable/predictable when using profiler
+ // instrumented NGEN images (that virtually no profilers use). As-is the callback only
+ // gives an opportunity for the profiler to say whether or not it wants to use the ngen'ed
+ // code.
+ //
+ // Despite those oddities I am leaving this behavior as-is during refactoring because trying to
+ // improve it probably offers little value vs. the potential for compat issues and creating more
+ // complexity reasoning how the API behavior changed across runtime releases.
+ if (pCode != NULL)
+ {
+ BOOL fShouldSearchCache = TRUE;
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackCacheSearches());
+ g_profControlBlock.pProfInterface->JITCachedFunctionSearchStarted((FunctionID)this, &fShouldSearchCache);
+ END_PIN_PROFILER();
+ }
+
+ if (!fShouldSearchCache)
+ {
+ SetNativeCodeInterlocked(NULL, pCode);
+ _ASSERTE(!IsPreImplemented());
+ pCode = NULL;
+ }
+ }
+#endif // PROFILING_SUPPORTED
+
+ if (pCode != NULL)
+ {
+ LOG((LF_ZAP, LL_INFO10000,
+ "ZAP: Using code" FMT_ADDR "for %s.%s sig=\"%s\" (token %x).\n",
+ DBG_ADDR(pCode),
+ m_pszDebugClassName,
+ m_pszDebugMethodName,
+ m_pszDebugMethodSignature,
+ GetMemberDef()));
+
+ TADDR pFixupList = GetFixupList();
+ if (pFixupList != NULL)
+ {
+ Module *pZapModule = GetZapModule();
+ _ASSERTE(pZapModule != NULL);
+ if (!pZapModule->FixupDelayList(pFixupList))
+ {
+ _ASSERTE(!"FixupDelayList failed");
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ }
+
+#ifdef HAVE_GCCOVER
+ if (GCStress<cfg_instr_ngen>::IsEnabled())
+ SetupGcCoverage(this, (BYTE*)pCode);
+#endif // HAVE_GCCOVER
+
+#ifdef PROFILING_SUPPORTED
+ /*
+ * This notifies the profiler that a search to find a
+ * cached jitted function has been made.
+ */
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackCacheSearches());
+ g_profControlBlock.pProfInterface->
+ JITCachedFunctionSearchFinished((FunctionID)this, COR_PRF_CACHED_FUNCTION_FOUND);
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ }
+#endif // FEATURE_PREJIT
+
+ return pCode;
+}
+
+
+PCODE MethodDesc::GetPrecompiledR2RCode()
+{
+ STANDARD_VM_CONTRACT;
+
+ PCODE pCode = NULL;
+#ifdef FEATURE_READYTORUN
+ Module * pModule = GetModule();
+ if (pModule->IsReadyToRun())
+ {
+ pCode = pModule->GetReadyToRunInfo()->GetEntryPoint(this);
+ }
#endif
+ return pCode;
+}
+PCODE MethodDesc::GetMulticoreJitCode()
+{
+ STANDARD_VM_CONTRACT;
+
+ PCODE pCode = NULL;
#ifdef FEATURE_MULTICOREJIT
+ // Quick check before calling expensive out of line function on this method's domain has code JITted by background thread
MulticoreJitManager & mcJitManager = GetAppDomain()->GetMulticoreJitManager();
-
- bool fBackgroundThread = flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_MCJIT_BACKGROUND);
+ if (mcJitManager.GetMulticoreJitCodeStorage().GetRemainingMethodCount() > 0)
+ {
+ if (MulticoreJitManager::IsMethodSupported(this))
+ {
+ pCode = mcJitManager.RequestMethodCode(this); // Query multi-core JIT manager for compiled code
+ }
+ }
#endif
+ return pCode;
+}
- // If this is the first stage of a tiered compilation progression, use tier0, otherwise
- // use default compilation options
-#ifdef FEATURE_TIERED_COMPILATION
- if (!IsEligibleForTieredCompilation())
+COR_ILMETHOD_DECODER* MethodDesc::GetAndVerifyMetadataILHeader(PrepareCodeConfig* pConfig, COR_ILMETHOD_DECODER* pDecoderMemory)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(!IsNoMetadata());
+
+ COR_ILMETHOD_DECODER* pHeader = NULL;
+ COR_ILMETHOD* ilHeader = pConfig->GetILHeader();
+ if (ilHeader == NULL)
+ {
+#ifdef FEATURE_COMINTEROP
+ // Abstract methods can be called through WinRT derivation if the deriving type
+ // is not implemented in managed code, and calls through the CCW to the abstract
+ // method. Throw a sensible exception in that case.
+ if (GetMethodTable()->IsExportedToWinRT() && IsAbstract())
+ {
+ COMPlusThrowHR(E_NOTIMPL);
+ }
+#endif // FEATURE_COMINTEROP
+
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL);
+ }
+
+ COR_ILMETHOD_DECODER::DecoderStatus status = COR_ILMETHOD_DECODER::FORMAT_ERROR;
+ {
+ // Decoder ctor can AV on a malformed method header
+ AVInRuntimeImplOkayHolder AVOkay;
+ pHeader = new (pDecoderMemory) COR_ILMETHOD_DECODER(ilHeader, GetMDImport(), &status);
+ }
+
+ if (status == COR_ILMETHOD_DECODER::FORMAT_ERROR)
+ {
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL);
+ }
+
+#ifdef _VER_EE_VERIFICATION_ENABLED
+ static ConfigDWORD peVerify;
+
+ if (peVerify.val(CLRConfig::EXTERNAL_PEVerify))
+ m_pMethod->Verify(pHeader, TRUE, FALSE); // Throws a VerifierException if verification fails
+#endif // _VER_EE_VERIFICATION_ENABLED
+
+ return pHeader;
+}
+
+COR_ILMETHOD_DECODER* MethodDesc::GetAndVerifyNoMetadataILHeader()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (IsILStub())
+ {
+ ILStubResolver* pResolver = AsDynamicMethodDesc()->GetILStubResolver();
+ return pResolver->GetILHeader();
+ }
+ else
+ {
+ return NULL;
+ }
+
+ // NoMetadata currently doesn't verify the IL. I'm not sure if that was
+ // a deliberate decision in the past or not, but I've left the behavior
+ // as-is during refactoring.
+}
+
+COR_ILMETHOD_DECODER* MethodDesc::GetAndVerifyILHeader(PrepareCodeConfig* pConfig, COR_ILMETHOD_DECODER* pIlDecoderMemory)
+{
+ STANDARD_VM_CONTRACT;
+ _ASSERTE(IsIL() || IsNoMetadata());
+
+ if (IsNoMetadata())
{
- fStable = TRUE;
+ // The NoMetadata version already has a decoder to use, it doesn't need the stack allocated one
+ return GetAndVerifyNoMetadataILHeader();
}
else
{
- fStable = FALSE;
- flags.Add(CORJIT_FLAGS(CORJIT_FLAGS::CORJIT_FLAG_TIER0));
+ return GetAndVerifyMetadataILHeader(pConfig, pIlDecoderMemory);
}
+}
+
+// ********************************************************************
+// README!!
+// ********************************************************************
+
+// JitCompileCode is the thread safe way to invoke the JIT compiler
+// If multiple threads get in here for the same config, ALL of them
+// MUST return the SAME value for pcode.
+//
+// This function creates a DeadlockAware list of methods being jitted
+// which prevents us from trying to JIT the same method more that once.
+
+PCODE MethodDesc::JitCompileCode(PrepareCodeConfig* pConfig)
+{
+ STANDARD_VM_CONTRACT;
+
+ LOG((LF_JIT, LL_INFO1000000,
+ "JitCompileCode(" FMT_ADDR ", %s) for %s:%s\n",
+ DBG_ADDR(this),
+ IsILStub() ? " TRUE" : "FALSE",
+ GetMethodTable()->GetDebugClassName(),
+ m_pszDebugMethodName));
+
+#if defined(FEATURE_JIT_PITCHING)
+ CheckStacksAndPitch();
#endif
+ PCODE pCode = NULL;
{
// Enter the global lock which protects the list of all functions being JITd
- ListLockHolder pJitLock (GetDomain()->GetJitLock());
+ JitListLock::LockHolder pJitLock(GetDomain()->GetJitLock());
// It is possible that another thread stepped in before we entered the global lock for the first time.
- pCode = GetNativeCode();
- if (pCode != NULL)
+ if ((pCode = pConfig->IsJitCancellationRequested()))
{
-#ifdef FEATURE_INTERPRETER
- if (Interpreter::InterpretationStubToMethodInfo(pCode) == this)
- {
- pPreviousInterpStub = pCode;
- }
- else
-#endif // FEATURE_INTERPRETER
- goto Done;
+ return pCode;
}
const char *description = "jit lock";
INDEBUG(description = m_pszDebugMethodName;)
- ListLockEntryHolder pEntry(ListLockEntry::Find(pJitLock, this, description));
+ ReleaseHolder<JitListLockEntry> pEntry(JitListLockEntry::Find(
+ pJitLock, pConfig->GetCodeVersion(), description));
// We have an entry now, we can release the global lock
pJitLock.Release();
// Take the entry lock
{
- ListLockEntryLockHolder pEntryLock(pEntry, FALSE);
+ JitListLockEntry::LockHolder pEntryLock(pEntry, FALSE);
if (pEntryLock.DeadlockAwareAcquire())
{
@@ -374,313 +625,458 @@ PCODE MethodDesc::MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, CORJIT_FLAGS fla
}
// It is possible that another thread stepped in before we entered the lock.
- pCode = GetNativeCode();
-#ifdef FEATURE_INTERPRETER
- if (pCode != NULL && (pCode != pPreviousInterpStub))
-#else
- if (pCode != NULL)
-#endif // FEATURE_INTERPRETER
+ if ((pCode = pConfig->IsJitCancellationRequested()))
{
- goto Done;
+ return pCode;
}
- SString namespaceOrClassName, methodName, methodSignature;
-
- PCODE pOtherCode = NULL; // Need to move here due to 'goto GotNewCode'
-
-#ifdef FEATURE_MULTICOREJIT
-
- bool fCompiledInBackground = false;
-
- // If not called from multi-core JIT thread,
- if (! fBackgroundThread)
+ pCode = GetMulticoreJitCode();
+ if (pCode != NULL)
{
- // Quick check before calling expensive out of line function on this method's domain has code JITted by background thread
- if (mcJitManager.GetMulticoreJitCodeStorage().GetRemainingMethodCount() > 0)
- {
- if (MulticoreJitManager::IsMethodSupported(this))
- {
- pCode = mcJitManager.RequestMethodCode(this); // Query multi-core JIT manager for compiled code
-
- // Multicore JIT manager starts background thread to pre-compile methods, but it does not back-patch it/notify profiler/notify DAC,
- // Jumtp to GotNewCode to do so
- if (pCode != NULL)
- {
- fCompiledInBackground = true;
-
-#ifdef DEBUGGING_SUPPORTED
- // Notify the debugger of the jitted function
- if (g_pDebugInterface != NULL)
- {
- g_pDebugInterface->JITComplete(this, pCode);
- }
-#endif
-
- goto GotNewCode;
- }
- }
- }
+ pConfig->SetNativeCode(pCode, &pCode);
+ pEntry->m_hrResultCode = S_OK;
+ return pCode;
}
-#endif
-
- if (fIsILStub)
+ else
{
- // we race with other threads to JIT the code for an IL stub and the
- // IL header is released once one of the threads completes. As a result
- // we must be inside the lock to reliably get the IL header for the
- // stub.
-
- ILStubResolver* pResolver = AsDynamicMethodDesc()->GetILStubResolver();
- ILHeader = pResolver->GetILHeader();
+ return JitCompileCodeLockedEventWrapper(pConfig, pEntryLock);
}
+ }
+ }
+}
+
+PCODE MethodDesc::JitCompileCodeLockedEventWrapper(PrepareCodeConfig* pConfig, JitListLockEntry* pEntry)
+{
+ STANDARD_VM_CONTRACT;
+
+ PCODE pCode = NULL;
+ ULONG sizeOfCode = 0;
+ CORJIT_FLAGS flags;
#ifdef MDA_SUPPORTED
- MdaJitCompilationStart* pProbe = MDA_GET_ASSISTANT(JitCompilationStart);
- if (pProbe)
- pProbe->NowCompiling(this);
+ MdaJitCompilationStart* pProbe = MDA_GET_ASSISTANT(JitCompilationStart);
+ if (pProbe)
+ pProbe->NowCompiling(this);
#endif // MDA_SUPPORTED
#ifdef PROFILING_SUPPORTED
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
+ // For methods with non-zero rejit id we send ReJITCompilationStarted, otherwise
+ // JITCompilationStarted. It isn't clear if this is the ideal policy for these
+ // notifications yet.
+ ReJITID rejitId = pConfig->GetCodeVersion().GetILCodeVersionId();
+ if (rejitId != 0)
+ {
+ g_profControlBlock.pProfInterface->ReJITCompilationStarted((FunctionID)this,
+ rejitId,
+ TRUE);
+ }
+ else
// If profiling, need to give a chance for a tool to examine and modify
// the IL before it gets to the JIT. This allows one to add probe calls for
// things like code coverage, performance, or whatever.
+ {
+ if (!IsNoMetadata())
{
- BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
+ g_profControlBlock.pProfInterface->JITCompilationStarted((FunctionID)this, TRUE);
-#ifdef FEATURE_MULTICOREJIT
- // Multicore JIT should be disabled when CORProfilerTrackJITInfo is on
- // But there could be corner case in which profiler is attached when multicore background thread is calling MakeJitWorker
- // Disable this block when calling from multicore JIT background thread
- if (!fBackgroundThread)
-#endif
- {
- if (!IsNoMetadata())
- {
- g_profControlBlock.pProfInterface->JITCompilationStarted((FunctionID) this, TRUE);
- // The profiler may have changed the code on the callback. Need to
- // pick up the new code. Note that you have to be fully trusted in
- // this mode and the code will not be verified.
- COR_ILMETHOD *pilHeader = GetILHeader(TRUE);
- new (ILHeader) COR_ILMETHOD_DECODER(pilHeader, GetMDImport(), NULL);
- }
- else
- {
- unsigned int ilSize, unused;
- CorInfoOptions corOptions;
- LPCBYTE ilHeaderPointer = this->AsDynamicMethodDesc()->GetResolver()->GetCodeInfo(&ilSize, &unused, &corOptions, &unused);
-
- g_profControlBlock.pProfInterface->DynamicMethodJITCompilationStarted((FunctionID) this, TRUE, ilHeaderPointer, ilSize);
- }
- }
- END_PIN_PROFILER();
}
-#endif // PROFILING_SUPPORTED
-#ifdef FEATURE_INTERPRETER
- // We move the ETW event for start of JITting inward, after we make the decision
- // to JIT rather than interpret.
-#else // FEATURE_INTERPRETER
- // Fire an ETW event to mark the beginning of JIT'ing
- ETW::MethodLog::MethodJitting(this, &namespaceOrClassName, &methodName, &methodSignature);
-#endif // FEATURE_INTERPRETER
-
-#ifdef FEATURE_STACK_SAMPLING
-#ifdef FEATURE_MULTICOREJIT
- if (!fBackgroundThread)
-#endif // FEATURE_MULTICOREJIT
+ else
{
- StackSampler::RecordJittingInfo(this, flags);
- }
-#endif // FEATURE_STACK_SAMPLING
+ unsigned int ilSize, unused;
+ CorInfoOptions corOptions;
+ LPCBYTE ilHeaderPointer = this->AsDynamicMethodDesc()->GetResolver()->GetCodeInfo(&ilSize, &unused, &corOptions, &unused);
- EX_TRY
- {
- pCode = UnsafeJitFunction(this, ILHeader, flags, &sizeOfCode);
- }
- EX_CATCH
- {
- // If the current thread threw an exception, but a competing thread
- // somehow succeeded at JITting the same function (e.g., out of memory
- // encountered on current thread but not competing thread), then go ahead
- // and swallow this current thread's exception, since we somehow managed
- // to successfully JIT the code on the other thread.
- //
- // Note that if a deadlock cycle is broken, that does not result in an
- // exception--the thread would just pass through the lock and JIT the
- // function in competition with the other thread (with the winner of the
- // race decided later on when we do SetNativeCodeInterlocked). This
- // try/catch is purely to deal with the (unusual) case where a competing
- // thread succeeded where we aborted.
-
- pOtherCode = GetNativeCode();
-
- if (pOtherCode == NULL)
- {
- pEntry->m_hrResultCode = E_FAIL;
- EX_RETHROW;
- }
+ g_profControlBlock.pProfInterface->DynamicMethodJITCompilationStarted((FunctionID)this, TRUE, ilHeaderPointer, ilSize);
}
- EX_END_CATCH(RethrowTerminalExceptions)
+ }
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
- if (pOtherCode != NULL)
- {
- // Somebody finished jitting recursively while we were jitting the method.
- // Just use their method & leak the one we finished. (Normally we hope
- // not to finish our JIT in this case, as we will abort early if we notice
- // a reentrant jit has occurred. But we may not catch every place so we
- // do a definitive final check here.
- pCode = pOtherCode;
- goto Done;
- }
+ if (!ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_VERBOSE,
+ CLR_JIT_KEYWORD))
+ {
+ pCode = JitCompileCodeLocked(pConfig, pEntry, &sizeOfCode, &flags);
+ }
+ else
+ {
+ SString namespaceOrClassName, methodName, methodSignature;
- _ASSERTE(pCode != NULL);
+ // Methods that may be interpreted defer this notification until it is certain
+ // we are jitting and not interpreting in CompileMethodWithEtwWrapper.
+ // Some further refactoring could consolidate the notification to always
+ // occur at the point the interpreter does it, but it might even better
+ // to fix the issues that cause us to avoid generating jit notifications
+ // for interpreted methods in the first place. The interpreter does generate
+ // a small stub of native code but no native-IL mapping.
+#ifndef FEATURE_INTERPRETER
+ ETW::MethodLog::MethodJitting(this,
+ &namespaceOrClassName,
+ &methodName,
+ &methodSignature);
+#endif
-#ifdef HAVE_GCCOVER
- if (GCStress<cfg_instr_jit>::IsEnabled())
- {
- SetupGcCoverage(this, (BYTE*) pCode);
- }
-#endif // HAVE_GCCOVER
+ pCode = JitCompileCodeLocked(pConfig, pEntry, &sizeOfCode, &flags);
+ // Interpretted methods skip this notification
#ifdef FEATURE_INTERPRETER
- // Determine whether the new code address is "stable"...= is not an interpreter stub.
- fInterpreted = (Interpreter::InterpretationStubToMethodInfo(pCode) == this);
- fStable = !fInterpreted;
-#endif // FEATURE_INTERPRETER
-
-#ifdef FEATURE_MULTICOREJIT
-
- // If called from multi-core JIT background thread, store code under lock, delay patching until code is queried from application threads
- if (fBackgroundThread)
- {
- // Fire an ETW event to mark the end of JIT'ing
- ETW::MethodLog::MethodJitted(this, &namespaceOrClassName, &methodName, &methodSignature, pCode, 0 /* ReJITID */);
-
-#ifdef FEATURE_PERFMAP
- // Save the JIT'd method information so that perf can resolve JIT'd call frames.
- PerfMap::LogJITCompiledMethod(this, pCode, sizeOfCode);
+ if (Interpreter::InterpretationStubToMethodInfo(pCode) == NULL)
#endif
-
- mcJitManager.GetMulticoreJitCodeStorage().StoreMethodCode(this, pCode);
-
- goto Done;
- }
+ {
+ // Fire an ETW event to mark the end of JIT'ing
+ ETW::MethodLog::MethodJitted(this,
+ &namespaceOrClassName,
+ &methodName,
+ &methodSignature,
+ pCode,
+ pConfig->GetCodeVersion().GetVersionId());
+ }
-GotNewCode:
-#endif
- // If this function had already been requested for rejit (before its original
- // code was jitted), then give the rejit manager a chance to jump-stamp the
- // code we just compiled so the first thread entering the function will jump
- // to the prestub and trigger the rejit. Note that the PublishMethodHolder takes
- // a lock to avoid a particular kind of rejit race. See
- // code:ReJitManager::PublishMethodHolder::PublishMethodHolder#PublishCode for
- // details on the rejit race.
- //
- // Aside from rejit, performing a SetNativeCodeInterlocked at this point
- // generally ensures that there is only one winning version of the native
- // code. This also avoid races with profiler overriding ngened code (see
- // matching SetNativeCodeInterlocked done after
- // JITCachedFunctionSearchStarted)
-#ifdef FEATURE_INTERPRETER
- PCODE pExpected = pPreviousInterpStub;
- if (pExpected == NULL) pExpected = GetTemporaryEntryPoint();
-#endif
- {
- ReJitPublishMethodHolder publishWorker(this, pCode);
- if (!SetNativeCodeInterlocked(pCode
-#ifdef FEATURE_INTERPRETER
- , pExpected, fStable
-#endif
- ))
- {
- // Another thread beat us to publishing its copy of the JITted code.
- pCode = GetNativeCode();
- goto Done;
- }
- }
+ }
-#ifdef FEATURE_INTERPRETER
- // State for dynamic methods cannot be freed if the method was ever interpreted,
- // since there is no way to ensure that it is not in use at the moment.
- if (IsDynamicMethod() && !fInterpreted && (pPreviousInterpStub == NULL))
- {
- AsDynamicMethodDesc()->GetResolver()->FreeCompileTimeState();
- }
-#endif // FEATURE_INTERPRETER
+#ifdef FEATURE_STACK_SAMPLING
+ StackSampler::RecordJittingInfo(this, flags);
+#endif // FEATURE_STACK_SAMPLING
- // We succeeded in jitting the code, and our jitted code is the one that's going to run now.
- pEntry->m_hrResultCode = S_OK;
+#ifdef PROFILING_SUPPORTED
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
+ // For methods with non-zero rejit id we send ReJITCompilationFinished, otherwise
+ // JITCompilationFinished. It isn't clear if this is the ideal policy for these
+ // notifications yet.
+ ReJITID rejitId = pConfig->GetCodeVersion().GetILCodeVersionId();
+ if (rejitId != 0)
+ {
- #ifdef PROFILING_SUPPORTED
+ g_profControlBlock.pProfInterface->ReJITCompilationFinished((FunctionID)this,
+ rejitId,
+ S_OK,
+ TRUE);
+ }
+ else
// Notify the profiler that JIT completed.
// Must do this after the address has been set.
// @ToDo: Why must we set the address before notifying the profiler ??
- // Note that if IsInterceptedForDeclSecurity is set no one should access the jitted code address anyway.
+ {
+ if (!IsNoMetadata())
{
- BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
- if (!IsNoMetadata())
- {
- g_profControlBlock.pProfInterface->
- JITCompilationFinished((FunctionID) this,
- pEntry->m_hrResultCode,
- TRUE);
- }
- else
- {
- g_profControlBlock.pProfInterface->DynamicMethodJITCompilationFinished((FunctionID) this, pEntry->m_hrResultCode, TRUE);
- }
- END_PIN_PROFILER();
+ g_profControlBlock.pProfInterface->
+ JITCompilationFinished((FunctionID)this,
+ pEntry->m_hrResultCode,
+ TRUE);
+ }
+ else
+ {
+ g_profControlBlock.pProfInterface->DynamicMethodJITCompilationFinished((FunctionID)this, pEntry->m_hrResultCode, TRUE);
}
+ }
+ END_PIN_PROFILER();
+ }
#endif // PROFILING_SUPPORTED
-#ifdef FEATURE_MULTICOREJIT
- if (! fCompiledInBackground)
-#endif
#ifdef FEATURE_INTERPRETER
- // If we didn't JIT, but rather, created an interpreter stub (i.e., fStable is false), don't tell ETW that we did.
- if (fStable)
-#endif // FEATURE_INTERPRETER
- {
- // Fire an ETW event to mark the end of JIT'ing
- ETW::MethodLog::MethodJitted(this, &namespaceOrClassName, &methodName, &methodSignature, pCode, 0 /* ReJITID */);
+ bool isJittedMethod = (Interpreter::InterpretationStubToMethodInfo(pCode) == NULL);
+#endif
+ // Interpretted methods skip this notification
+#ifdef FEATURE_INTERPRETER
+ if (isJittedMethod)
+#endif
+ {
#ifdef FEATURE_PERFMAP
- // Save the JIT'd method information so that perf can resolve JIT'd call frames.
- PerfMap::LogJITCompiledMethod(this, pCode, sizeOfCode);
+ // Save the JIT'd method information so that perf can resolve JIT'd call frames.
+ PerfMap::LogJITCompiledMethod(this, pCode, sizeOfCode);
#endif
- }
-
+ }
-#ifdef FEATURE_MULTICOREJIT
- // If not called from multi-core JIT thread, not got code from storage, quick check before calling out of line function
- if (! fBackgroundThread && ! fCompiledInBackground && mcJitManager.IsRecorderActive())
+#ifdef FEATURE_MULTICOREJIT
+ // Non-initial code versions and multicore jit initial compilation all skip this
+ if (pConfig->NeedsMulticoreJitNotification())
+ {
+ MulticoreJitManager & mcJitManager = GetAppDomain()->GetMulticoreJitManager();
+ if (mcJitManager.IsRecorderActive())
+ {
+ if (MulticoreJitManager::IsMethodSupported(this))
{
- if (MulticoreJitManager::IsMethodSupported(this))
- {
- mcJitManager.RecordMethodJit(this); // Tell multi-core JIT manager to record method on successful JITting
- }
+ mcJitManager.RecordMethodJit(this); // Tell multi-core JIT manager to record method on successful JITting
}
+ }
+ }
#endif
- if (!fIsILStub)
- {
- // The notification will only occur if someone has registered for this method.
- DACNotifyCompilationFinished(this);
- }
+#ifdef FEATURE_INTERPRETER
+ if (isJittedMethod)
+#endif
+ {
+ // The notification will only occur if someone has registered for this method.
+ DACNotifyCompilationFinished(this);
+ }
+
+ return pCode;
+}
+
+PCODE MethodDesc::JitCompileCodeLocked(PrepareCodeConfig* pConfig, JitListLockEntry* pEntry, ULONG* pSizeOfCode, CORJIT_FLAGS* pFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+ PCODE pCode = NULL;
+
+ // The profiler may have changed the code on the callback. Need to
+ // pick up the new code.
+ COR_ILMETHOD_DECODER ilDecoderTemp;
+ COR_ILMETHOD_DECODER *pilHeader = GetAndVerifyILHeader(pConfig, &ilDecoderTemp);
+ *pFlags = pConfig->GetJitCompilationFlags();
+ PCODE pOtherCode = NULL;
+ EX_TRY
+ {
+ pCode = UnsafeJitFunction(this, pilHeader, *pFlags, pSizeOfCode);
+ }
+ EX_CATCH
+ {
+ // If the current thread threw an exception, but a competing thread
+ // somehow succeeded at JITting the same function (e.g., out of memory
+ // encountered on current thread but not competing thread), then go ahead
+ // and swallow this current thread's exception, since we somehow managed
+ // to successfully JIT the code on the other thread.
+ //
+ // Note that if a deadlock cycle is broken, that does not result in an
+ // exception--the thread would just pass through the lock and JIT the
+ // function in competition with the other thread (with the winner of the
+ // race decided later on when we do SetNativeCodeInterlocked). This
+ // try/catch is purely to deal with the (unusual) case where a competing
+ // thread succeeded where we aborted.
+
+ if (!(pOtherCode = pConfig->IsJitCancellationRequested()))
+ {
+ pEntry->m_hrResultCode = E_FAIL;
+ EX_RETHROW;
}
}
+ EX_END_CATCH(RethrowTerminalExceptions)
-Done:
+ if (pOtherCode != NULL)
+ {
+ // Somebody finished jitting recursively while we were jitting the method.
+ // Just use their method & leak the one we finished. (Normally we hope
+ // not to finish our JIT in this case, as we will abort early if we notice
+ // a reentrant jit has occurred. But we may not catch every place so we
+ // do a definitive final check here.)
+ return pOtherCode;
+ }
- // We must have a code by now.
_ASSERTE(pCode != NULL);
+
+ // Aside from rejit, performing a SetNativeCodeInterlocked at this point
+ // generally ensures that there is only one winning version of the native
+ // code. This also avoids races with profiler overriding ngened code (see
+ // matching SetNativeCodeInterlocked done after
+ // JITCachedFunctionSearchStarted)
+ {
+ if (!pConfig->SetNativeCode(pCode, &pOtherCode))
+ {
+ // Another thread beat us to publishing its copy of the JITted code.
+ return pOtherCode;
+ }
+#if defined(FEATURE_JIT_PITCHING)
+ else
+ {
+ SavePitchingCandidate(this, *pSizeOfCode);
+ }
+#endif
+ }
+
+#ifdef HAVE_GCCOVER
+ if (GCStress<cfg_instr_jit>::IsEnabled())
+ {
+ SetupGcCoverage(this, (BYTE*)pCode);
+ }
+#endif // HAVE_GCCOVER
- LOG((LF_CORDB, LL_EVERYTHING, "MethodDesc::MakeJitWorker finished. Stub is" FMT_ADDR "\n",
- DBG_ADDR(pCode)));
+ // We succeeded in jitting the code, and our jitted code is the one that's going to run now.
+ pEntry->m_hrResultCode = S_OK;
return pCode;
}
+
+
+PrepareCodeConfig::PrepareCodeConfig() {}
+
+PrepareCodeConfig::PrepareCodeConfig(NativeCodeVersion codeVersion, BOOL needsMulticoreJitNotification, BOOL mayUsePrecompiledCode) :
+ m_pMethodDesc(codeVersion.GetMethodDesc()),
+ m_nativeCodeVersion(codeVersion),
+ m_needsMulticoreJitNotification(needsMulticoreJitNotification),
+ m_mayUsePrecompiledCode(mayUsePrecompiledCode)
+{}
+
+MethodDesc* PrepareCodeConfig::GetMethodDesc()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pMethodDesc;
+}
+
+PCODE PrepareCodeConfig::IsJitCancellationRequested()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pMethodDesc->GetNativeCode();
+}
+
+BOOL PrepareCodeConfig::NeedsMulticoreJitNotification()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_needsMulticoreJitNotification;
+}
+
+NativeCodeVersion PrepareCodeConfig::GetCodeVersion()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_nativeCodeVersion;
+}
+
+BOOL PrepareCodeConfig::SetNativeCode(PCODE pCode, PCODE * ppAlternateCodeToUse)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // If this function had already been requested for rejit (before its original
+ // code was jitted), then give the CodeVersionManager a chance to jump-stamp the
+ // code we just compiled so the first thread entering the function will jump
+ // to the prestub and trigger the rejit. Note that the PublishMethodHolder takes
+ // a lock to avoid a particular kind of rejit race. See
+ // code:CodeVersionManager::PublishMethodHolder::PublishMethodHolder#PublishCode for
+ // details on the rejit race.
+ //
+ if (m_pMethodDesc->IsVersionableWithJumpStamp())
+ {
+ PublishMethodHolder publishWorker(GetMethodDesc(), pCode);
+ if (m_pMethodDesc->SetNativeCodeInterlocked(pCode, NULL))
+ {
+ return TRUE;
+ }
+ }
+ else
+ {
+ if (m_pMethodDesc->SetNativeCodeInterlocked(pCode, NULL))
+ {
+ return TRUE;
+ }
+ }
+
+ *ppAlternateCodeToUse = m_pMethodDesc->GetNativeCode();
+ return FALSE;
+}
+
+COR_ILMETHOD* PrepareCodeConfig::GetILHeader()
+{
+ STANDARD_VM_CONTRACT;
+ return m_pMethodDesc->GetILHeader(TRUE);
+}
+
+CORJIT_FLAGS PrepareCodeConfig::GetJitCompilationFlags()
+{
+ STANDARD_VM_CONTRACT;
+
+ CORJIT_FLAGS flags;
+ if (m_pMethodDesc->IsILStub())
+ {
+ ILStubResolver* pResolver = m_pMethodDesc->AsDynamicMethodDesc()->GetILStubResolver();
+ flags = pResolver->GetJitFlags();
+ }
+#ifdef FEATURE_TIERED_COMPILATION
+ flags.Add(TieredCompilationManager::GetJitFlags(m_nativeCodeVersion));
+#endif
+ return flags;
+}
+
+BOOL PrepareCodeConfig::MayUsePrecompiledCode()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_mayUsePrecompiledCode;
+}
+
+#ifdef FEATURE_CODE_VERSIONING
+VersionedPrepareCodeConfig::VersionedPrepareCodeConfig() {}
+
+VersionedPrepareCodeConfig::VersionedPrepareCodeConfig(NativeCodeVersion codeVersion) :
+ PrepareCodeConfig(codeVersion, TRUE, FALSE)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!m_nativeCodeVersion.IsDefaultVersion());
+ _ASSERTE(m_pMethodDesc->GetCodeVersionManager()->LockOwnedByCurrentThread());
+ m_ilCodeVersion = m_nativeCodeVersion.GetILCodeVersion();
+}
+
+HRESULT VersionedPrepareCodeConfig::FinishConfiguration()
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(!GetMethodDesc()->GetCodeVersionManager()->LockOwnedByCurrentThread());
+
+ // Any code build stages that do just in time configuration should
+ // be configured now
+#ifdef FEATURE_REJIT
+ if (m_ilCodeVersion.GetRejitState() != ILCodeVersion::kStateActive)
+ {
+ ReJitManager::ConfigureILCodeVersion(m_ilCodeVersion);
+ }
+ _ASSERTE(m_ilCodeVersion.GetRejitState() == ILCodeVersion::kStateActive);
+#endif
+
+ return S_OK;
+}
+
+PCODE VersionedPrepareCodeConfig::IsJitCancellationRequested()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_nativeCodeVersion.GetNativeCode();
+}
+
+BOOL VersionedPrepareCodeConfig::SetNativeCode(PCODE pCode, PCODE * ppAlternateCodeToUse)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ //This isn't the default version so jumpstamp is never needed
+ _ASSERTE(!m_nativeCodeVersion.IsDefaultVersion());
+ if (m_nativeCodeVersion.SetNativeCodeInterlocked(pCode, NULL))
+ {
+ return TRUE;
+ }
+ else
+ {
+ *ppAlternateCodeToUse = m_nativeCodeVersion.GetNativeCode();
+ return FALSE;
+ }
+}
+
+COR_ILMETHOD* VersionedPrepareCodeConfig::GetILHeader()
+{
+ STANDARD_VM_CONTRACT;
+ return m_ilCodeVersion.GetIL();
+}
+
+CORJIT_FLAGS VersionedPrepareCodeConfig::GetJitCompilationFlags()
+{
+ STANDARD_VM_CONTRACT;
+ CORJIT_FLAGS flags;
+
+#ifdef FEATURE_REJIT
+ DWORD profilerFlags = m_ilCodeVersion.GetJitFlags();
+ flags.Add(ReJitManager::JitFlagsFromProfCodegenFlags(profilerFlags));
+#endif
+
+#ifdef FEATURE_TIERED_COMPILATION
+ flags.Add(TieredCompilationManager::GetJitFlags(m_nativeCodeVersion));
+#endif
+
+ return flags;
+}
+
+#endif //FEATURE_CODE_VERSIONING
+
#ifdef FEATURE_STUBS_AS_IL
// CreateInstantiatingILStubTargetSig:
@@ -1264,21 +1660,6 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
GCStress<cfg_any, EeconfigFastGcSPolicy, CoopGcModePolicy>::MaybeTrigger();
- // Are we in the prestub because of a rejit request? If so, let the ReJitManager
- // take it from here.
- pCode = ReJitManager::DoReJitIfNecessary(this);
- if (pCode != NULL)
- {
- // A ReJIT was performed, so nothing left for DoPrestub() to do. Return now.
- //
- // The stable entrypoint will either be a pointer to the original JITted code
- // (with a jmp at the top to jump to the newly-rejitted code) OR a pointer to any
- // stub code that must be executed first (e.g., a remoting stub), which in turn
- // will call the original JITted code (which then jmps to the newly-rejitted
- // code).
- RETURN GetStableEntryPoint();
- }
-
#ifdef FEATURE_COMINTEROP
/************************** INTEROP *************************/
@@ -1317,40 +1698,54 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
pMT->CheckRunClassInitThrowing();
}
- /************************** BACKPATCHING *************************/
- // See if the addr of code has changed from the pre-stub
-#ifdef FEATURE_INTERPRETER
- if (!IsReallyPointingToPrestub())
-#else
- if (!IsPointingToPrestub())
+
+ /*************************** CALL COUNTER ***********************/
+ // If we are counting calls for tiered compilation, leave the prestub
+ // in place so that we can continue intercepting method invocations.
+ // When the TieredCompilationManager has received enough call notifications
+ // for this method only then do we back-patch it.
+ BOOL fCanBackpatchPrestub = TRUE;
+#ifdef FEATURE_TIERED_COMPILATION
+ BOOL fEligibleForTieredCompilation = IsEligibleForTieredCompilation();
+ if (fEligibleForTieredCompilation)
+ {
+ CallCounter * pCallCounter = GetCallCounter();
+ fCanBackpatchPrestub = pCallCounter->OnMethodCalled(this);
+ }
#endif
+
+ /*************************** VERSIONABLE CODE *********************/
+
+ BOOL fIsPointingToPrestub = IsPointingToPrestub();
+#ifdef FEATURE_CODE_VERSIONING
+ if (IsVersionableWithPrecode() ||
+ (!fIsPointingToPrestub && IsVersionableWithJumpStamp()))
{
- // If we are counting calls for tiered compilation, leave the prestub
- // in place so that we can continue intercepting method invocations.
- // When the TieredCompilationManager has received enough call notifications
- // for this method only then do we back-patch it.
-#ifdef FEATURE_TIERED_COMPILATION
- PCODE pNativeCode = GetNativeCode();
- if (pNativeCode && IsEligibleForTieredCompilation())
- {
- CallCounter * pCallCounter = GetAppDomain()->GetCallCounter();
- BOOL doBackPatch = pCallCounter->OnMethodCalled(this);
- if (!doBackPatch)
- {
- return pNativeCode;
- }
- }
+ pCode = GetCodeVersionManager()->PublishVersionableCodeIfNecessary(this, fCanBackpatchPrestub);
+ fIsPointingToPrestub = IsPointingToPrestub();
+ }
#endif
+
+ /************************** BACKPATCHING *************************/
+ // See if the addr of code has changed from the pre-stub
+ if (!fIsPointingToPrestub)
+ {
LOG((LF_CLASSLOADER, LL_INFO10000,
" In PreStubWorker, method already jitted, backpatching call point\n"));
-
+#if defined(FEATURE_JIT_PITCHING)
+ MarkMethodNotPitchingCandidate(this);
+#endif
RETURN DoBackpatch(pMT, pDispatchingMT, TRUE);
}
-
- // record if remoting needs to intercept this call
- BOOL fRemotingIntercepted = IsRemotingInterceptedViaPrestub();
-
- BOOL fReportCompilationFinished = FALSE;
+
+ if (pCode)
+ {
+ // The only reason we are still pointing to prestub is because the call counter
+ // prevented it. We should still short circuit and return the code without
+ // backpatching.
+ _ASSERTE(!fCanBackpatchPrestub);
+ RETURN pCode;
+ }
/************************** CODE CREATION *************************/
if (IsUnboxingStub())
@@ -1365,209 +1760,11 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
#endif // defined(FEATURE_SHARE_GENERIC_CODE)
else if (IsIL() || IsNoMetadata())
{
- // remember if we need to backpatch the MethodTable slot
- BOOL fBackpatch = !fRemotingIntercepted
- && IsNativeCodeStableAfterInit();
-
-#ifdef FEATURE_PREJIT
- //
- // See if we have any prejitted code to use.
- //
-
- pCode = GetPreImplementedCode();
-
-#ifdef PROFILING_SUPPORTED
- if (pCode != NULL)
- {
- BOOL fShouldSearchCache = TRUE;
-
- {
- BEGIN_PIN_PROFILER(CORProfilerTrackCacheSearches());
- g_profControlBlock.pProfInterface->
- JITCachedFunctionSearchStarted((FunctionID) this,
- &fShouldSearchCache);
- END_PIN_PROFILER();
- }
-
- if (!fShouldSearchCache)
- {
-#ifdef FEATURE_INTERPRETER
- SetNativeCodeInterlocked(NULL, pCode, FALSE);
-#else
- SetNativeCodeInterlocked(NULL, pCode);
-#endif
- _ASSERTE(!IsPreImplemented());
- pCode = NULL;
- }
- }
-#endif // PROFILING_SUPPORTED
-
- if (pCode != NULL)
+ if (!IsNativeCodeStableAfterInit())
{
- LOG((LF_ZAP, LL_INFO10000,
- "ZAP: Using code" FMT_ADDR "for %s.%s sig=\"%s\" (token %x).\n",
- DBG_ADDR(pCode),
- m_pszDebugClassName,
- m_pszDebugMethodName,
- m_pszDebugMethodSignature,
- GetMemberDef()));
-
- TADDR pFixupList = GetFixupList();
- if (pFixupList != NULL)
- {
- Module *pZapModule = GetZapModule();
- _ASSERTE(pZapModule != NULL);
- if (!pZapModule->FixupDelayList(pFixupList))
- {
- _ASSERTE(!"FixupDelayList failed");
- ThrowHR(COR_E_BADIMAGEFORMAT);
- }
- }
-
-#ifdef HAVE_GCCOVER
- if (GCStress<cfg_instr_ngen>::IsEnabled())
- SetupGcCoverage(this, (BYTE*) pCode);
-#endif // HAVE_GCCOVER
-
-#ifdef PROFILING_SUPPORTED
- /*
- * This notifies the profiler that a search to find a
- * cached jitted function has been made.
- */
- {
- BEGIN_PIN_PROFILER(CORProfilerTrackCacheSearches());
- g_profControlBlock.pProfInterface->
- JITCachedFunctionSearchFinished((FunctionID) this, COR_PRF_CACHED_FUNCTION_FOUND);
- END_PIN_PROFILER();
- }
-#endif // PROFILING_SUPPORTED
- }
-
- //
- // If not, try to jit it
- //
-
-#endif // FEATURE_PREJIT
-
-#ifdef FEATURE_READYTORUN
- if (pCode == NULL)
- {
- Module * pModule = GetModule();
- if (pModule->IsReadyToRun())
- {
- pCode = pModule->GetReadyToRunInfo()->GetEntryPoint(this);
- if (pCode != NULL)
- fReportCompilationFinished = TRUE;
- }
+ GetOrCreatePrecode();
}
-#endif // FEATURE_READYTORUN
-
- if (pCode == NULL)
- {
- NewHolder<COR_ILMETHOD_DECODER> pHeader(NULL);
- // Get the information on the method
- if (!IsNoMetadata())
- {
- COR_ILMETHOD* ilHeader = GetILHeader(TRUE);
- if(ilHeader == NULL)
- {
-#ifdef FEATURE_COMINTEROP
- // Abstract methods can be called through WinRT derivation if the deriving type
- // is not implemented in managed code, and calls through the CCW to the abstract
- // method. Throw a sensible exception in that case.
- if (pMT->IsExportedToWinRT() && IsAbstract())
- {
- COMPlusThrowHR(E_NOTIMPL);
- }
-#endif // FEATURE_COMINTEROP
-
- COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL);
- }
-
- COR_ILMETHOD_DECODER::DecoderStatus status = COR_ILMETHOD_DECODER::FORMAT_ERROR;
-
- {
- // Decoder ctor can AV on a malformed method header
- AVInRuntimeImplOkayHolder AVOkay;
- pHeader = new COR_ILMETHOD_DECODER(ilHeader, GetMDImport(), &status);
- if(pHeader == NULL)
- status = COR_ILMETHOD_DECODER::FORMAT_ERROR;
- }
-
- if (status == COR_ILMETHOD_DECODER::VERIFICATION_ERROR &&
- Security::CanSkipVerification(GetModule()->GetDomainAssembly()))
- {
- status = COR_ILMETHOD_DECODER::SUCCESS;
- }
-
- if (status != COR_ILMETHOD_DECODER::SUCCESS)
- {
- if (status == COR_ILMETHOD_DECODER::VERIFICATION_ERROR)
- {
- // Throw a verification HR
- COMPlusThrowHR(COR_E_VERIFICATION);
- }
- else
- {
- COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL);
- }
- }
-
-#ifdef _VER_EE_VERIFICATION_ENABLED
- static ConfigDWORD peVerify;
-
- if (peVerify.val(CLRConfig::EXTERNAL_PEVerify))
- Verify(pHeader, TRUE, FALSE); // Throws a VerifierException if verification fails
-#endif // _VER_EE_VERIFICATION_ENABLED
- } // end if (!IsNoMetadata())
-
- // JIT it
- LOG((LF_CLASSLOADER, LL_INFO1000000,
- " In PreStubWorker, calling MakeJitWorker\n"));
-
- // Create the precode eagerly if it is going to be needed later.
- if (!fBackpatch)
- {
- GetOrCreatePrecode();
- }
-
- // Mark the code as hot in case the method ends up in the native image
- g_IBCLogger.LogMethodCodeAccess(this);
-
- pCode = MakeJitWorker(pHeader, CORJIT_FLAGS());
-
-#ifdef FEATURE_INTERPRETER
- if ((pCode != NULL) && !HasStableEntryPoint())
- {
- // We don't yet have a stable entry point, so don't do backpatching yet.
- // But we do have to handle some extra cases that occur in backpatching.
- // (Perhaps I *should* get to the backpatching code, but in a mode where we know
- // we're not dealing with the stable entry point...)
- if (HasNativeCodeSlot())
- {
- // We called "SetNativeCodeInterlocked" in MakeJitWorker, which updated the native
- // code slot, but I think we also want to update the regular slot...
- PCODE tmpEntry = GetTemporaryEntryPoint();
- PCODE pFound = FastInterlockCompareExchangePointer(GetAddrOfSlot(), pCode, tmpEntry);
- // Doesn't matter if we failed -- if we did, it's because somebody else made progress.
- if (pFound != tmpEntry) pCode = pFound;
- }
-
- // Now we handle the case of a FuncPtrPrecode.
- FuncPtrStubs * pFuncPtrStubs = GetLoaderAllocator()->GetFuncPtrStubsNoCreate();
- if (pFuncPtrStubs != NULL)
- {
- Precode* pFuncPtrPrecode = pFuncPtrStubs->Lookup(this);
- if (pFuncPtrPrecode != NULL)
- {
- // If there is a funcptr precode to patch, attempt to patch it. If we lose, that's OK,
- // somebody else made progress.
- pFuncPtrPrecode->SetTargetInterlocked(pCode);
- }
- }
- }
-#endif // FEATURE_INTERPRETER
- } // end if (pCode == NULL)
+ pCode = PrepareInitialCode();
} // end else if (IsIL() || IsNoMetadata())
else if (IsNDirect())
{
@@ -1603,13 +1800,7 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
}
/************************** POSTJIT *************************/
-#ifndef FEATURE_INTERPRETER
_ASSERTE(pCode == NULL || GetNativeCode() == NULL || pCode == GetNativeCode());
-#else // FEATURE_INTERPRETER
- // Interpreter adds a new possiblity == someone else beat us to installing an intepreter stub.
- _ASSERTE(pCode == NULL || GetNativeCode() == NULL || pCode == GetNativeCode()
- || Interpreter::InterpretationStubToMethodInfo(pCode) == this);
-#endif // FEATURE_INTERPRETER
// At this point we must have either a pointer to managed code or to a stub. All of the above code
// should have thrown an exception if it couldn't make a stub.
@@ -1638,42 +1829,15 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
MemoryBarrier();
#endif
- // If we are counting calls for tiered compilation, leave the prestub
- // in place so that we can continue intercepting method invocations.
- // When the TieredCompilationManager has received enough call notifications
- // for this method only then do we back-patch it.
-#ifdef FEATURE_TIERED_COMPILATION
- if (pCode && IsEligibleForTieredCompilation())
- {
- CallCounter * pCallCounter = GetAppDomain()->GetCallCounter();
- BOOL doBackPatch = pCallCounter->OnMethodCalled(this);
- if (!doBackPatch)
- {
- return pCode;
- }
- }
-#endif
-
if (pCode != NULL)
{
if (HasPrecode())
GetPrecode()->SetTargetInterlocked(pCode);
else
- if (!HasStableEntryPoint())
- {
- // Is the result an interpreter stub?
-#ifdef FEATURE_INTERPRETER
- if (Interpreter::InterpretationStubToMethodInfo(pCode) == this)
- {
- SetEntryPointInterlocked(pCode);
- }
- else
-#endif // FEATURE_INTERPRETER
+ if (!HasStableEntryPoint())
{
- ReJitPublishMethodHolder publishWorker(this, pCode);
SetStableEntryPointInterlocked(pCode);
}
- }
}
else
{
@@ -1690,15 +1854,8 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
}
}
-#ifdef FEATURE_INTERPRETER
- _ASSERTE(!IsReallyPointingToPrestub());
-#else // FEATURE_INTERPRETER
_ASSERTE(!IsPointingToPrestub());
_ASSERTE(HasStableEntryPoint());
-#endif // FEATURE_INTERPRETER
-
- if (fReportCompilationFinished)
- DACNotifyCompilationFinished(this);
RETURN DoBackpatch(pMT, pDispatchingMT, FALSE);
}
@@ -2127,6 +2284,10 @@ EXTERN_C PCODE STDCALL ExternalMethodFixupWorker(TransitionBlock * pTransitionBl
pCode = PatchNonVirtualExternalMethod(pMD, pCode, pImportSection, pIndirection);
}
}
+
+#if defined (FEATURE_JIT_PITCHING)
+ DeleteFromPitchingCandidate(pMD);
+#endif
}
// Force a GC on every jit if the stress level is high enough
@@ -2385,6 +2546,7 @@ void ProcessDynamicDictionaryLookup(TransitionBlock * pTransitionBlock
pResult->signature = NULL;
pResult->indirectFirstOffset = 0;
+ pResult->indirectSecondOffset = 0;
pResult->indirections = CORINFO_USEHELPER;
@@ -2424,6 +2586,9 @@ void ProcessDynamicDictionaryLookup(TransitionBlock * pTransitionBlock
//
// Optimization cases
//
+ // TODO-ARM : If the optimization cases are implemented in CreateDictionaryLookupHelper,
+ // this #ifndef for ARM will be removed.
+#ifndef _TARGET_ARM_
if (signatureKind == ENCODE_TYPE_HANDLE)
{
SigPointer sigptr(pBlob, -1);
@@ -2457,9 +2622,16 @@ void ProcessDynamicDictionaryLookup(TransitionBlock * pTransitionBlock
IfFailThrow(sigptr.GetData(&data));
pResult->offsets[2] = sizeof(TypeHandle) * data;
+ if (MethodTable::IsPerInstInfoRelative())
+ {
+ pResult->indirectFirstOffset = 1;
+ pResult->indirectSecondOffset = 1;
+ }
+
return;
}
}
+#endif // !_TARGET_ARM_
if (pContextMT != NULL && pContextMT->GetNumDicts() > 0xFFFF)
ThrowHR(COR_E_BADIMAGEFORMAT);
@@ -2502,6 +2674,12 @@ void ProcessDynamicDictionaryLookup(TransitionBlock * pTransitionBlock
// Next indirect through the dictionary appropriate to this instantiated type
pResult->offsets[1] = sizeof(TypeHandle*) * (pContextMT->GetNumDicts() - 1);
+ if (MethodTable::IsPerInstInfoRelative())
+ {
+ pResult->indirectFirstOffset = 1;
+ pResult->indirectSecondOffset = 1;
+ }
+
*pDictionaryIndexAndSlot |= dictionarySlot;
}
}