Diffstat (limited to 'src/vm/codeman.h')
-rw-r--r--  src/vm/codeman.h | 1883
 1 file changed, 1883 insertions(+), 0 deletions(-)
diff --git a/src/vm/codeman.h b/src/vm/codeman.h
new file mode 100644
index 0000000000..f143dd642c
--- /dev/null
+++ b/src/vm/codeman.h
@@ -0,0 +1,1883 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+
+
+
+/******************************************************************************
+
+Module Name:
+
+ codeman.h
+
+Abstract:
+
+    Wrapper to facilitate support for multiple JIT compilers in the COM+ Runtime
+
+ The ExecutionManager is responsible for managing the RangeSections.
+ Given an IP, it can find the RangeSection which holds that IP.
+
+    RangeSections contain the JITted code. Each RangeSection knows the
+ IJitManager which created it.
+
+ An IJitManager knows about which method bodies live in each RangeSection.
+ It can handle methods of one given CodeType. It can map a method body to
+ a MethodDesc. It knows where the GCInfo about the method lives.
+    Today, we have three IJitManagers viz.
+    1. EEJitManager for JIT-compiled code generated by clrjit.dll
+    2. NativeImageJitManager for NGen'ed code.
+    3. ReadyToRunJitManager for version-resilient ReadyToRun code
+
+ An ICodeManager knows how to crack a specific format of GCInfo. There is
+ a default format (handled by ExecutionManager::GetDefaultCodeManager())
+ which can be shared by different IJitManagers/IJitCompilers.
+
+ An ICorJitCompiler knows how to generate code for a method IL, and produce
+ GCInfo in a format which the corresponding IJitManager's ICodeManager
+ can handle.
+
+ ExecutionManager
+ |
+ +-----------+---------------+---------------+-----------+--- ...
+ | | | |
+ CodeType | CodeType |
+ | | | |
+ v v v v
++---------------+ +--------+<---- R +---------------+ +--------+<---- R
+|ICorJitCompiler|<---->|IJitMan |<---- R |ICorJitCompiler|<---->|IJitMan |<---- R
++---------------+ +--------+<---- R +---------------+ +--------+<---- R
+ | x . | x .
+ | \ . | \ .
+ v \ . v \ .
+ +--------+ R +--------+ R
+ |ICodeMan| |ICodeMan| (RangeSections)
+ +--------+ +--------+
+
+******************************************************************************/
+
+#ifndef __CODEMAN_HPP__
+
+#define __CODEMAN_HPP__
+
+#include "crst.h"
+#include "eetwain.h"
+#include "ceeload.h"
+#include "jitinterface.h"
+#include "debuginfostore.h"
+#include "shash.h"
+#include "pedecoder.h"
+#include "gcinfo.h"
+
+class MethodDesc;
+class ICorJitCompiler;
+class IJitManager;
+class EEJitManager;
+class NativeImageJitManager;
+class ReadyToRunJitManager;
+class ExecutionManager;
+class Thread;
+class CrawlFrame;
+struct EE_ILEXCEPTION;
+struct EE_ILEXCEPTION_CLAUSE;
+typedef struct
+{
+ unsigned iCurrentPos;
+ TADDR pExceptionClauseArray;
+} EH_CLAUSE_ENUMERATOR;
+class EECodeInfo;
+
+#define PAGE_MASK (PAGE_SIZE-1)
+#define PAGE_ALIGN ~(PAGE_MASK)
+#define ROUND_DOWN_TO_PAGE(x) ( (size_t) (x) & PAGE_ALIGN)
+#define ROUND_UP_TO_PAGE(x) (((size_t) (x) + PAGE_MASK) & PAGE_ALIGN)
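+
+// Example (illustrative, assuming PAGE_SIZE == 0x1000):
+//   ROUND_DOWN_TO_PAGE(0x4321) == 0x4000
+//   ROUND_UP_TO_PAGE(0x4321)   == 0x5000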
+
+enum StubCodeBlockKind : int
+{
+ STUB_CODE_BLOCK_UNKNOWN,
+ STUB_CODE_BLOCK_JUMPSTUB,
+ STUB_CODE_BLOCK_PRECODE,
+ STUB_CODE_BLOCK_DYNAMICHELPER,
+ // Last valid value. Note that the definition is duplicated in debug\daccess\fntableaccess.cpp
+ STUB_CODE_BLOCK_LAST = 0xF,
+ // Placeholders returned by code:GetStubCodeBlockKind
+ STUB_CODE_BLOCK_NOCODE,
+ STUB_CODE_BLOCK_MANAGED,
+ STUB_CODE_BLOCK_STUBLINK,
+    // Placeholders used by NGen images
+ STUB_CODE_BLOCK_VIRTUAL_METHOD_THUNK,
+ STUB_CODE_BLOCK_EXTERNAL_METHOD_THUNK,
+    // Placeholders used by ReadyToRun images
+ STUB_CODE_BLOCK_METHOD_CALL_THUNK,
+};
+
+//-----------------------------------------------------------------------------
+// Method header which exists just before the code.
+// Every IJitManager could have its own format for the header.
+// Today CodeHeader is used by the EEJitManager.
+// The GCInfo version in this header is always the current GCINFO_VERSION.
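+//
+// Layout sketch (direct, non-indirect case; illustrative):
+//
+//   [ CodeHeader | JITted code ... ]
+//                 ^ GetCodeStartAddress()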
+
+#ifdef USE_INDIRECT_CODEHEADER
+typedef DPTR(struct _hpRealCodeHdr) PTR_RealCodeHeader;
+typedef DPTR(struct _hpCodeHdr) PTR_CodeHeader;
+
+#else // USE_INDIRECT_CODEHEADER
+typedef DPTR(struct _hpCodeHdr) PTR_CodeHeader;
+
+#endif // USE_INDIRECT_CODEHEADER
+
+#ifdef USE_INDIRECT_CODEHEADER
+typedef struct _hpRealCodeHdr
+#else // USE_INDIRECT_CODEHEADER
+typedef struct _hpCodeHdr
+#endif // USE_INDIRECT_CODEHEADER
+{
+public:
+ PTR_BYTE phdrDebugInfo;
+
+ // Note - *(&(pCodeHeader->phdrJitEHInfo) - sizeof(size_t))
+ // contains the number of EH clauses, See EEJitManager::allocEHInfo
+ PTR_EE_ILEXCEPTION phdrJitEHInfo;
+ PTR_BYTE phdrJitGCInfo;
+
+ PTR_MethodDesc phdrMDesc;
+
+#ifdef WIN64EXCEPTIONS
+ DWORD nUnwindInfos;
+ T_RUNTIME_FUNCTION unwindInfos[0];
+#endif // WIN64EXCEPTIONS
+
+public:
+#ifndef USE_INDIRECT_CODEHEADER
+ //
+    // Note that the JITted code follows immediately after the MethodDesc*
+ //
+ PTR_BYTE GetDebugInfo()
+ {
+ SUPPORTS_DAC;
+
+ return phdrDebugInfo;
+ }
+ PTR_EE_ILEXCEPTION GetEHInfo()
+ {
+ return phdrJitEHInfo;
+ }
+ PTR_BYTE GetGCInfo()
+ {
+ SUPPORTS_DAC;
+ return phdrJitGCInfo;
+ }
+ PTR_MethodDesc GetMethodDesc()
+ {
+ SUPPORTS_DAC;
+ return phdrMDesc;
+ }
+ TADDR GetCodeStartAddress()
+ {
+ SUPPORTS_DAC;
+ return dac_cast<TADDR>(dac_cast<PTR_CodeHeader>(this) + 1);
+ }
+ StubCodeBlockKind GetStubCodeBlockKind()
+ {
+ SUPPORTS_DAC;
+ return (StubCodeBlockKind)dac_cast<TADDR>(phdrMDesc);
+ }
+ BOOL IsStubCodeBlock()
+ {
+ SUPPORTS_DAC;
+ // Note that it is important for this comparison to be unsigned
+ return dac_cast<TADDR>(phdrMDesc) <= (TADDR)STUB_CODE_BLOCK_LAST;
+ }
+
+ void SetDebugInfo(PTR_BYTE pDI)
+ {
+ phdrDebugInfo = pDI;
+ }
+ void SetEHInfo(PTR_EE_ILEXCEPTION pEH)
+ {
+ phdrJitEHInfo = pEH;
+ }
+ void SetGCInfo(PTR_BYTE pGC)
+ {
+ phdrJitGCInfo = pGC;
+ }
+ void SetMethodDesc(PTR_MethodDesc pMD)
+ {
+ phdrMDesc = pMD;
+ }
+ void SetStubCodeBlockKind(StubCodeBlockKind kind)
+ {
+ phdrMDesc = (PTR_MethodDesc)kind;
+ }
+#endif // !USE_INDIRECT_CODEHEADER
+
+// If we're using the indirect code headers, then all enumeration is done by the code header.
+#ifndef USE_INDIRECT_CODEHEADER
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags, IJitManager* pJitMan);
+#endif // DACCESS_COMPILE
+#endif // USE_INDIRECT_CODEHEADER
+#ifdef USE_INDIRECT_CODEHEADER
+} RealCodeHeader;
+#else // USE_INDIRECT_CODEHEADER
+} CodeHeader;
+#endif // USE_INDIRECT_CODEHEADER
+
+#ifdef USE_INDIRECT_CODEHEADER
+typedef struct _hpCodeHdr
+{
+ PTR_RealCodeHeader pRealCodeHeader;
+
+public:
+ PTR_BYTE GetDebugInfo()
+ {
+ SUPPORTS_DAC;
+ return pRealCodeHeader->phdrDebugInfo;
+ }
+ PTR_EE_ILEXCEPTION GetEHInfo()
+ {
+ return pRealCodeHeader->phdrJitEHInfo;
+ }
+ PTR_BYTE GetGCInfo()
+ {
+ SUPPORTS_DAC;
+ return pRealCodeHeader->phdrJitGCInfo;
+ }
+ PTR_MethodDesc GetMethodDesc()
+ {
+ SUPPORTS_DAC;
+ return pRealCodeHeader->phdrMDesc;
+ }
+ TADDR GetCodeStartAddress()
+ {
+ SUPPORTS_DAC;
+ return dac_cast<PCODE>(dac_cast<PTR_CodeHeader>(this) + 1);
+ }
+ StubCodeBlockKind GetStubCodeBlockKind()
+ {
+ SUPPORTS_DAC;
+ return (StubCodeBlockKind)dac_cast<TADDR>(pRealCodeHeader);
+ }
+ BOOL IsStubCodeBlock()
+ {
+ SUPPORTS_DAC;
+ // Note that it is important for this comparison to be unsigned
+ return dac_cast<TADDR>(pRealCodeHeader) <= (TADDR)STUB_CODE_BLOCK_LAST;
+ }
+
+ void SetRealCodeHeader(BYTE* pRCH)
+ {
+ pRealCodeHeader = PTR_RealCodeHeader((RealCodeHeader*)pRCH);
+ }
+
+ void SetDebugInfo(PTR_BYTE pDI)
+ {
+ pRealCodeHeader->phdrDebugInfo = pDI;
+ }
+ void SetEHInfo(PTR_EE_ILEXCEPTION pEH)
+ {
+ pRealCodeHeader->phdrJitEHInfo = pEH;
+ }
+ void SetGCInfo(PTR_BYTE pGC)
+ {
+ pRealCodeHeader->phdrJitGCInfo = pGC;
+ }
+ void SetMethodDesc(PTR_MethodDesc pMD)
+ {
+ pRealCodeHeader->phdrMDesc = pMD;
+ }
+ void SetStubCodeBlockKind(StubCodeBlockKind kind)
+ {
+ pRealCodeHeader = (PTR_RealCodeHeader)kind;
+ }
+
+#if defined(WIN64EXCEPTIONS)
+ UINT GetNumberOfUnwindInfos()
+ {
+ SUPPORTS_DAC;
+ return pRealCodeHeader->nUnwindInfos;
+ }
+ void SetNumberOfUnwindInfos(UINT nUnwindInfos)
+ {
+ LIMITED_METHOD_CONTRACT;
+ pRealCodeHeader->nUnwindInfos = nUnwindInfos;
+ }
+ PTR_RUNTIME_FUNCTION GetUnwindInfo(UINT iUnwindInfo)
+ {
+ SUPPORTS_DAC;
+ _ASSERTE(iUnwindInfo < GetNumberOfUnwindInfos());
+ return dac_cast<PTR_RUNTIME_FUNCTION>(
+ PTR_TO_MEMBER_TADDR(RealCodeHeader, pRealCodeHeader, unwindInfos) + iUnwindInfo * sizeof(T_RUNTIME_FUNCTION));
+ }
+#endif // WIN64EXCEPTIONS
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags, IJitManager* pJitMan);
+#endif // DACCESS_COMPILE
+
+} CodeHeader;
+#endif // USE_INDIRECT_CODEHEADER
+
+
+//-----------------------------------------------------------------------------
+// This is a structure used to consolidate the information that we
+// need when creating new code heaps.
+// When creating new JumpStubs we have a constraint that the address used
+// should be in the range [loAddr..hiAddr]
+//
+struct CodeHeapRequestInfo
+{
+ MethodDesc * m_pMD;
+ LoaderAllocator* m_pAllocator;
+ const BYTE * m_loAddr; // lowest address to use to satisfy our request (0 -- don't care)
+    const BYTE * m_hiAddr; // highest address to use to satisfy our request (0 -- don't care)
+ size_t m_requestSize; // minimum size that must be made available
+    size_t m_reserveSize; // Amount that VirtualAlloc will reserve
+ bool m_isDynamicDomain;
+ bool m_isCollectible;
+
+ bool IsDynamicDomain() { return m_isDynamicDomain; }
+ bool IsCollectible() { return m_isCollectible; }
+
+ size_t getRequestSize() { return m_requestSize; }
+ void setRequestSize(size_t requestSize) { m_requestSize = requestSize; }
+
+ size_t getReserveSize() { return m_reserveSize; }
+ void setReserveSize(size_t reserveSize) { m_reserveSize = reserveSize; }
+
+ void Init();
+
+ CodeHeapRequestInfo(MethodDesc *pMD)
+ : m_pMD(pMD), m_pAllocator(0),
+ m_loAddr(0), m_hiAddr(0),
+ m_requestSize(0), m_reserveSize(0)
+ { WRAPPER_NO_CONTRACT; Init(); }
+
+ CodeHeapRequestInfo(MethodDesc *pMD, LoaderAllocator* pAllocator,
+ BYTE * loAddr, BYTE * hiAddr)
+ : m_pMD(pMD), m_pAllocator(pAllocator),
+ m_loAddr(loAddr), m_hiAddr(hiAddr),
+ m_requestSize(0), m_reserveSize(0)
+ { WRAPPER_NO_CONTRACT; Init(); }
+};
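+
+// Example (illustrative): requesting code memory that must be reachable from
+// the range [loAddr..hiAddr], e.g. so that rel32 jump stubs can target it:
+//
+//   CodeHeapRequestInfo info(pMD, pAllocator, loAddr, hiAddr);
+//   info.setRequestSize(blockSize);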
+
+//-----------------------------------------------------------------------------
+//
+// A CodeHeap is the abstraction the IJitManager uses to allocate the memory
+// needed for the jitting of a method.
+// The CodeHeap works together with the HeapList to manage a contiguous block of memory.
+// The CodeHeap is a non-growable chunk of memory (it can be reserved and
+// committed on demand).
+//
+// A CodeHeap is naturally protected from multiple threads by the code heap
+// critical section - m_pCodeHeapCritSec - so if the implementation of the heap
+// is only for the code manager, no locking needs to occur.
+// It's important however that a delete operation on the CodeHeap (if any) happens
+// via EEJitManager::FreeCodeMemory(HostCodeHeap*, void*)
+//
+// The heap to be created depends on the MethodDesc that is being compiled.
+// Standard code uses the LoaderCodeHeap, a heap based on the LoaderHeap.
+// DynamicMethods - and only those - use a HostCodeHeap, a heap that does
+// normal Alloc/Free so reclamation can be performed.
+//
+// The convention is that every heap implementation provides a static create
+// function that returns a HeapList. The HeapList *must* be properly initialized
+// on return, except for the next pointer.
+//
+
+typedef VPTR(class CodeHeap) PTR_CodeHeap;
+
+class CodeHeap
+{
+ VPTR_BASE_VTABLE_CLASS(CodeHeap)
+
+public:
+ CodeHeap() {}
+
+ // virtual dtor. Clean up heap
+ virtual ~CodeHeap() {}
+
+    // Allocate the specified number of bytes for code. Returns NULL if the request does not fit.
+ // Space for header is reserved immediately before. It is not included in size.
+ virtual void* AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment) = 0;
+
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags) = 0;
+#endif
+
+protected:
+ friend class EEJitManager;
+};
+
+//-----------------------------------------------------------------------------
+// The HeapList works together with the CodeHeap to manage a contiguous block of memory.
+//
+// A single HeapList contains code only for a single AppDomain. EEJitManager uses
+// EEJitManager::DomainCodeHeapList to keep a list of HeapLists for each AppDomain.
+
+// The number of code heaps at which we increase the size of new code heaps.
+#define CODE_HEAP_SIZE_INCREASE_THRESHOLD 5
+
+typedef DPTR(struct _HeapList) PTR_HeapList;
+
+typedef struct _HeapList
+{
+ PTR_HeapList hpNext;
+
+ PTR_CodeHeap pHeap;
+
+ TADDR startAddress;
+ TADDR endAddress; // the current end of the used portion of the Heap
+
+ TADDR mapBase; // "startAddress" rounded down to PAGE_SIZE. pHdrMap is relative to this address
+ PTR_DWORD pHdrMap; // bit array used to find the start of methods
+
+ size_t maxCodeHeapSize;// Size of the entire contiguous block of memory
+ DWORD cBlocks; // Number of allocations
+    bool bFull; // Heap is considered full; do not use for new allocations
+    bool bFullForJumpStubs; // Heap is considered full; do not use for new allocations of jump stubs
+
+#if defined(_TARGET_AMD64_)
+ BYTE CLRPersonalityRoutine[JUMP_ALLOCATE_SIZE]; // jump thunk to personality routine
+#elif defined(_TARGET_ARM64_)
+ UINT32 CLRPersonalityRoutine[JUMP_ALLOCATE_SIZE/sizeof(UINT32)]; // jump thunk to personality routine
+#endif
+
+ PTR_HeapList GetNext()
+ { SUPPORTS_DAC; return hpNext; }
+
+ void SetNext(PTR_HeapList next)
+ { hpNext = next; }
+
+ void SetHeapFull()
+ { VolatileStore(&bFull, true); }
+
+ bool IsHeapFull()
+ { return VolatileLoad(&bFull); }
+
+ void SetHeapFullForJumpStubs()
+ { VolatileStore(&bFullForJumpStubs, true); }
+
+ bool IsHeapFullForJumpStubs()
+ { return VolatileLoad(&bFullForJumpStubs); }
+
+} HeapList;
+
+//-----------------------------------------------------------------------------
+// Implementation of the standard CodeHeap.
+// Use the ExplicitControlLoaderHeap for allocations
+// (Check the base class above - CodeHeap - for comments on the functions)
+//
+typedef VPTR(class LoaderCodeHeap) PTR_LoaderCodeHeap;
+
+class LoaderCodeHeap : CodeHeap
+{
+#ifdef DACCESS_COMPILE
+ friend class ClrDataAccess;
+#endif
+
+ VPTR_VTABLE_CLASS(LoaderCodeHeap, CodeHeap)
+
+private:
+ ExplicitControlLoaderHeap m_LoaderHeap;
+ SSIZE_T m_cbMinNextPad;
+
+ LoaderCodeHeap(size_t * pPrivatePCLBytes);
+
+public:
+ static HeapList* CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap *pJitMetaHeap);
+
+public:
+ virtual ~LoaderCodeHeap()
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ virtual void* AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment) DAC_EMPTY_RET(NULL);
+
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_LoaderHeap.EnumMemoryRegions(flags);
+ }
+#endif
+};
+
+#if defined(_WIN64)
+// On non-x86 platforms, the OS defines UnwindInfo (accessed from RUNTIME_FUNCTION
+// structures) to support the ability to unwind the stack. Unfortunately the pre-Win8
+// APIs defined a callback API for publishing this data dynamically that ETW does
+// not use (and really can't, because the walk happens in the kernel). In Win8
+// new APIs were defined that allow incremental publishing via a table.
+//
+// UnwindInfoTable is a class that wraps the OS APIs that we use to publish
+// this table. Its job is to allocate the table, deallocate it when we are
+// done, and allow us to add new entries one at a time (AddToUnwindInfoTable).
+//
+// Each RangeSection has a UnwindInfoTable which holds the
+// RUNTIME_FUNCTION array as well as other bookkeeping (the current and maximum
+// size of the array, and the handle used to publish it to the OS).
+//
+// Ideally we would just use this new API when it is available; however, to minimize
+// risk and to make the change perfectly pay-for-play, we use the original mechanism
+// ALWAYS, and in addition publish via the Table ONLY WHEN ETW JIT events are turned
+// on.
+//
+// This class implements a 'catchup' routine that allows us to publish existing JITted
+// methods when ETW turns on. Currently this is 'sticky' (once we start publishing
+// both ways, we do so for the life of the process).
+//
+typedef DPTR(class UnwindInfoTable) PTR_UnwindInfoTable;
+class UnwindInfoTable {
+public:
+ // All public functions are thread-safe.
+
+ // These are wrapper functions over the UnwindInfoTable functions that are specific to JIT compile code
+ static void PublishUnwindInfoForMethod(TADDR baseAddress, T_RUNTIME_FUNCTION* unwindInfo, int unwindInfoCount);
+ static void UnpublishUnwindInfoForMethod(TADDR entryPoint);
+
+ // These are lower level functions that assume you have found the list of UnwindInfoTable entries
+ // These are used by the stublinker and the high-level method functions above
+ static void AddToUnwindInfoTable(UnwindInfoTable** unwindInfoPtr, T_RUNTIME_FUNCTION* data, TADDR rangeStart, TADDR rangeEnd);
+ static void RemoveFromUnwindInfoTable(UnwindInfoTable** unwindInfoPtr, TADDR baseAddress, TADDR entryPoint);
+
+ // By default this publishing is off, this routine turns it on (and optionally publishes existing methods)
+ static void PublishUnwindInfo(bool publishExisting);
+ ~UnwindInfoTable();
+
+private:
+ void UnRegister();
+ void Register();
+ UnwindInfoTable(ULONG_PTR rangeStart, ULONG_PTR rangeEnd, ULONG size);
+ static void PublishUnwindInfoForExistingMethods();
+
+private:
+ static Volatile<bool> s_publishingActive; // Publishing to ETW is turned on
+ static class Crst* s_pUnwindInfoTableLock; // lock protects all public UnwindInfoTable functions
+
+ PVOID hHandle; // OS handle for a published RUNTIME_FUNCTION table
+ ULONG_PTR iRangeStart; // Start of memory described by this table
+ ULONG_PTR iRangeEnd; // End of memory described by this table
+ T_RUNTIME_FUNCTION* pTable; // The actual list of method unwind info, sorted by address
+ ULONG cTableCurCount;
+ ULONG cTableMaxCount;
+ int cDeletedEntries; // Number of slots we removed.
+};
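+
+// Typical use (illustrative): publish unwind data for a freshly JITted method so
+// ETW-based stack walks can resolve it, and unpublish it when the code goes away:
+//
+//   UnwindInfoTable::PublishUnwindInfoForMethod(baseAddress, unwindInfos, nUnwindInfos);
+//   ...
+//   UnwindInfoTable::UnpublishUnwindInfoForMethod(entryPoint);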
+
+#endif // defined(_WIN64)
+
+//-----------------------------------------------------------------------------
+// The ExecutionManager uses RangeSection as the abstraction of a contiguous
+// address range to track the code heaps.
+
+typedef DPTR(struct RangeSection) PTR_RangeSection;
+
+struct RangeSection
+{
+ TADDR LowAddress;
+ TADDR HighAddress;
+
+ PTR_IJitManager pjit; // The owner of this address range
+
+#ifndef DACCESS_COMPILE
+    // Volatile because the list can be walked lock-free
+ Volatile<RangeSection *> pnext; // link rangesections in a sorted list
+#else
+ PTR_RangeSection pnext;
+#endif
+
+    PTR_RangeSection pLastUsed; // for the head node only: a link to the RangeSection that was used most recently
+
+ enum RangeSectionFlags
+ {
+ RANGE_SECTION_NONE = 0x0,
+ RANGE_SECTION_COLLECTIBLE = 0x1,
+ RANGE_SECTION_CODEHEAP = 0x2,
+#ifdef FEATURE_READYTORUN
+ RANGE_SECTION_READYTORUN = 0x4,
+#endif
+ };
+
+ DWORD flags;
+
+ // union
+ // {
+ // PTR_CodeHeap pCodeHeap; // valid if RANGE_SECTION_HEAP is set
+ // PTR_Module pZapModule; // valid if RANGE_SECTION_HEAP is not set
+ // };
+ TADDR pHeapListOrZapModule;
+#if defined(_WIN64)
+ PTR_UnwindInfoTable pUnwindInfoTable; // Points to unwind information for this memory range.
+#endif // defined(_WIN64)
+};
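+
+// Illustrative sketch of the lock-free reader walk over the list above (the real
+// lookup in ExecutionManager also consults pLastUsed on the head node as a cache):
+//
+//   for (RangeSection * pRS = pHead; pRS != NULL; pRS = pRS->pnext)
+//   {
+//       if (pRS->LowAddress <= addr && addr < pRS->HighAddress)
+//           return pRS;
+//   }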
+
+/*****************************************************************************/
+
+#ifdef CROSSGEN_COMPILE
+#define CodeFragmentHeap LoaderHeap
+#else
+
+//
+// A simple linked-list based allocator to expose a code heap as a loader heap for allocation of precodes.
+// The loader-heap-like interface is necessary to support backout. It is also conveniently used to reduce space overhead
+// for the small blocks that are common for precodes.
+//
+// Allocating precodes on the code heap keeps them close to other code; this reduces the need for jump stubs and thus the chance
+// that we run into a bogus OOM because we are not able to allocate memory in a particular memory range.
+//
+class CodeFragmentHeap : public ILoaderHeapBackout
+{
+ PTR_LoaderAllocator m_pAllocator;
+
+ struct FreeBlock
+ {
+ DPTR(FreeBlock) m_pNext; // Next block
+ SIZE_T m_dwSize; // Size of this block (includes size of FreeBlock)
+ };
+ typedef DPTR(FreeBlock) PTR_FreeBlock;
+
+ PTR_FreeBlock m_pFreeBlocks;
+ StubCodeBlockKind m_kind;
+
+ Crst m_CritSec;
+
+ void AddBlock(VOID * pMem, size_t dwSize);
+ void RemoveBlock(FreeBlock ** ppBlock);
+
+public:
+ CodeFragmentHeap(LoaderAllocator * pAllocator, StubCodeBlockKind kind);
+ virtual ~CodeFragmentHeap() {}
+
+ TaggedMemAllocPtr RealAllocAlignedMem(size_t dwRequestedSize
+ ,unsigned dwAlignment
+#ifdef _DEBUG
+ ,__in __in_z const char *szFile
+ ,int lineNum
+#endif
+ );
+
+ virtual void RealBackoutMem(void *pMem
+ , size_t dwSize
+#ifdef _DEBUG
+ , __in __in_z const char *szFile
+ , int lineNum
+ , __in __in_z const char *szAllocFile
+ , int allocLineNum
+#endif
+ ) DAC_EMPTY();
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(enum CLRDataEnumMemoryFlags flags)
+ {
+ WRAPPER_NO_CONTRACT;
+ DAC_ENUM_DTHIS();
+ }
+#endif
+};
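+
+// Intended use (illustrative; in _DEBUG builds the allocation and backout calls
+// take additional file/line arguments):
+//
+//   CodeFragmentHeap fragmentHeap(pAllocator, STUB_CODE_BLOCK_PRECODE);
+//   TaggedMemAllocPtr ptr = fragmentHeap.RealAllocAlignedMem(size, alignment);
+//   ...
+//   fragmentHeap.RealBackoutMem(pMem, size);  // undo the allocation on failure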
+#endif // CROSSGEN_COMPILE
+
+typedef DPTR(class CodeFragmentHeap) PTR_CodeFragmentHeap;
+
+//-----------------------------------------------------------------------------
+//
+// Manages the CodeHeap for some of the RangeSections in the ExecutionManager
+//
+//-----------------------------------------------------------------------------
+
+class IJitManager
+{
+ VPTR_BASE_VTABLE_CLASS(IJitManager)
+
+public:
+ struct MethodRegionInfo
+ {
+ TADDR hotStartAddress;
+ size_t hotSize;
+ TADDR coldStartAddress;
+ size_t coldSize;
+ };
+
+#ifndef DACCESS_COMPILE
+ IJitManager();
+#endif // !DACCESS_COMPILE
+
+ virtual DWORD GetCodeType() = 0;
+
+ // Used to read debug info.
+ // 1) Caller passes an allocator which these functions use to allocate memory.
+ // This is b/c the store may need to decompress the information just to figure out the size.
+ // 2) Note that these methods use Uncompressed (Normal) jit data.
+ // Compression is just an implementation detail.
+ // 3) These throw on OOM (exceptional case), and may return a
+ // failing HR if no data is available (not exceptional)
+
+ virtual BOOL GetBoundariesAndVars(
+ const DebugInfoRequest & request,
+ IN FP_IDS_NEW fpNew, IN void * pNewData,
+ OUT ULONG32 * pcMap,
+ OUT ICorDebugInfo::OffsetMapping **ppMap,
+ OUT ULONG32 * pcVars,
+ OUT ICorDebugInfo::NativeVarInfo **ppVars) = 0;
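+
+    // Illustrative call pattern (fpNew/pNewData form the caller-supplied allocator,
+    // since the store may have to decompress the data just to compute sizes):
+    //
+    //   ULONG32 cMap, cVars;
+    //   ICorDebugInfo::OffsetMapping * pMap;
+    //   ICorDebugInfo::NativeVarInfo * pVars;
+    //   pJitMan->GetBoundariesAndVars(request, fpNew, pNewData,
+    //                                 &cMap, &pMap, &cVars, &pVars);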
+
+ virtual BOOL JitCodeToMethodInfo(
+ RangeSection * pRangeSection,
+ PCODE currentPC,
+ MethodDesc** ppMethodDesc,
+ OUT EECodeInfo * pCodeInfo) = 0;
+
+ virtual PCODE GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset) = 0;
+
+ virtual TADDR JitTokenToStartAddress(const METHODTOKEN& MethodToken)=0;
+ virtual void JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken, MethodRegionInfo *methodRegionInfo) = 0;
+ virtual unsigned InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState)=0;
+ virtual PTR_EXCEPTION_CLAUSE_TOKEN GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState,
+ EE_ILEXCEPTION_CLAUSE* pEHclause)=0;
+#ifndef DACCESS_COMPILE
+ virtual TypeHandle ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause,
+ CrawlFrame *pCf)=0;
+#endif // #ifndef DACCESS_COMPILE
+
+ virtual GCInfoToken GetGCInfoToken(const METHODTOKEN& MethodToken)=0;
+ PTR_VOID GetGCInfo(const METHODTOKEN& MethodToken)
+ {
+ return GetGCInfoToken(MethodToken).Info;
+ }
+
+ TADDR JitTokenToModuleBase(const METHODTOKEN& MethodToken);
+
+#if defined(WIN64EXCEPTIONS)
+ virtual PTR_RUNTIME_FUNCTION LazyGetFunctionEntry(EECodeInfo * pCodeInfo) = 0;
+
+ // GetFuncletStartAddress returns the starting address of the function or funclet indicated by the EECodeInfo address.
+ virtual TADDR GetFuncletStartAddress(EECodeInfo * pCodeInfo);
+
+ virtual DWORD GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength) = 0;
+
+ BOOL IsFunclet(EECodeInfo * pCodeInfo);
+ virtual BOOL IsFilterFunclet(EECodeInfo * pCodeInfo);
+#endif // WIN64EXCEPTIONS
+
+ virtual StubCodeBlockKind GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC) = 0;
+
+ // DAC-specific virtual functions.
+ // Note that these MUST occur below any other virtual function definitions to ensure that the vtable in
+ // DAC builds is compatible with the non-DAC one so that DAC virtual dispatch will work correctly.
+#if defined(DACCESS_COMPILE)
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+ virtual void EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD) = 0;
+#if defined(WIN64EXCEPTIONS)
+ // Enumerate the memory necessary to retrieve the unwind info for a specific method
+ virtual void EnumMemoryRegionsForMethodUnwindInfo(CLRDataEnumMemoryFlags flags, EECodeInfo * pCodeInfo) = 0;
+#endif // WIN64EXCEPTIONS
+#endif // DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+ void SetCodeManager(ICodeManager *codeMgr)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_runtimeSupport = codeMgr;
+ }
+#endif // !DACCESS_COMPILE
+
+ ICodeManager *GetCodeManager()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_runtimeSupport;
+ }
+
+protected:
+ PTR_ICodeManager m_runtimeSupport;
+};
+
+//-----------------------------------------------------------------------------
+
+class HostCodeHeap;
+typedef VPTR(class HostCodeHeap) PTR_HostCodeHeap;
+
+typedef VPTR(class EEJitManager) PTR_EEJitManager;
+typedef VPTR(class NativeImageJitManager) PTR_NativeImageJitManager;
+typedef VPTR(class ReadyToRunJitManager) PTR_ReadyToRunJitManager;
+
+struct JumpStubBlockHeader
+{
+ JumpStubBlockHeader * m_next;
+ UINT32 m_used;
+ UINT32 m_allocated;
+
+ LoaderAllocator* GetLoaderAllocator()
+ {
+ _ASSERTE(m_zero == 0);
+ return m_Allocator;
+ }
+
+ void SetLoaderAllocator(LoaderAllocator * loaderAllocator)
+ {
+ m_zero = 0;
+ m_Allocator = loaderAllocator;
+ }
+
+ HostCodeHeap* GetHostCodeHeap()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(m_zero == -1);
+ return m_CodeHeap;
+ }
+
+ void SetHostCodeHeap(HostCodeHeap * hostCodeHeap)
+ {
+ m_zero = -1;
+ m_CodeHeap = hostCodeHeap;
+ }
+
+private:
+ union {
+ HostCodeHeap *m_CodeHeap;
+ LoaderAllocator *m_Allocator;
+ };
+
+ INT64 m_zero; // 0 for normal methods and -1 for LCG methods
+};
+
+
+/*****************************************************************************/
+
+class EEJitManager : public IJitManager
+{
+#ifdef DACCESS_COMPILE
+ friend class ClrDataAccess;
+#endif
+ friend class CheckDuplicatedStructLayouts;
+ friend class CodeHeapIterator;
+
+ VPTR_VTABLE_CLASS(EEJitManager, IJitManager)
+
+public:
+
+ // Failing to load the main JIT is a failure.
+ // If the user requested an altjit and we failed to load an altjit, that is also a failure.
+ BOOL IsJitLoaded()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_jit != NULL)
+#ifdef ALLOW_SXS_JIT
+ && (!m_AltJITRequired || (m_alternateJit != NULL))
+#endif // ALLOW_SXS_JIT
+ ;
+ }
+
+#ifdef ALLOW_SXS_JIT
+ BOOL IsMainJitLoaded()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_jit != NULL);
+ }
+
+ BOOL IsAltJitLoaded()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_alternateJit != NULL);
+ }
+#endif // ALLOW_SXS_JIT
+
+ VOID ClearCache()
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ if( m_jit != NULL )
+ {
+ m_jit->clearCache();
+ }
+#ifdef ALLOW_SXS_JIT
+ if( m_alternateJit != NULL )
+ {
+ m_alternateJit->clearCache();
+ }
+#endif // ALLOW_SXS_JIT
+ }
+
+ BOOL IsCacheCleanupRequired()
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ BOOL ret = FALSE;
+
+ if( m_jit != NULL )
+ {
+ if (m_jit->isCacheCleanupRequired())
+ ret = TRUE;
+ }
+
+#ifdef ALLOW_SXS_JIT
+ if( !ret && m_alternateJit != NULL )
+ {
+ if (m_alternateJit->isCacheCleanupRequired())
+ ret = TRUE;
+ }
+#endif // ALLOW_SXS_JIT
+
+ return ret;
+ }
+
+#if !defined CROSSGEN_COMPILE && !defined DACCESS_COMPILE
+ EEJitManager();
+
+    // No destructor necessary. There is only one instance of this class, and it is destroyed at process shutdown.
+ // ~EEJitManager();
+#endif // !CROSSGEN_COMPILE && !DACCESS_COMPILE
+
+
+ virtual DWORD GetCodeType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (miManaged | miIL);
+ }
+
+#ifndef CROSSGEN_COMPILE
+ // Used to read debug info.
+ virtual BOOL GetBoundariesAndVars(
+ const DebugInfoRequest & request,
+ IN FP_IDS_NEW fpNew, IN void * pNewData,
+ OUT ULONG32 * pcMap,
+ OUT ICorDebugInfo::OffsetMapping **ppMap,
+ OUT ULONG32 * pcVars,
+ OUT ICorDebugInfo::NativeVarInfo **ppVars);
+
+ virtual PCODE GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset);
+#endif // !CROSSGEN_COMPILE
+
+ virtual BOOL JitCodeToMethodInfo(RangeSection * pRangeSection,
+ PCODE currentPC,
+ MethodDesc ** ppMethodDesc,
+ EECodeInfo * pCodeInfo);
+
+ virtual TADDR JitTokenToStartAddress(const METHODTOKEN& MethodToken);
+ virtual void JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken, MethodRegionInfo *methodRegionInfo);
+
+#ifndef CROSSGEN_COMPILE
+ virtual unsigned InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState);
+ virtual PTR_EXCEPTION_CLAUSE_TOKEN GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState,
+ EE_ILEXCEPTION_CLAUSE* pEHclause);
+#ifndef DACCESS_COMPILE
+ virtual TypeHandle ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause,
+ CrawlFrame *pCf);
+#endif // !DACCESS_COMPILE
+ GCInfoToken GetGCInfoToken(const METHODTOKEN& MethodToken);
+#endif // !CROSSGEN_COMPILE
+#if !defined DACCESS_COMPILE && !defined CROSSGEN_COMPILE
+ void RemoveJitData(CodeHeader * pCHdr, size_t GCinfo_len, size_t EHinfo_len);
+ void Unload(LoaderAllocator* pAllocator);
+ void CleanupCodeHeaps();
+
+ BOOL LoadJIT();
+
+ CodeHeader* allocCode(MethodDesc* pFD, size_t blockSize, CorJitAllocMemFlag flag
+#ifdef WIN64EXCEPTIONS
+ , UINT nUnwindInfos
+ , TADDR * pModuleBase
+#endif
+ );
+ BYTE * allocGCInfo(CodeHeader* pCodeHeader, DWORD blockSize, size_t * pAllocationSize);
+ EE_ILEXCEPTION* allocEHInfo(CodeHeader* pCodeHeader, unsigned numClauses, size_t * pAllocationSize);
+ JumpStubBlockHeader* allocJumpStubBlock(MethodDesc* pMD, DWORD numJumps,
+ BYTE * loAddr, BYTE * hiAddr,
+ LoaderAllocator *pLoaderAllocator);
+
+ void * allocCodeFragmentBlock(size_t blockSize, unsigned alignment, LoaderAllocator *pLoaderAllocator, StubCodeBlockKind kind);
+#endif // !DACCESS_COMPILE && !CROSSGEN_COMPILE
+
+ static CodeHeader * GetCodeHeader(const METHODTOKEN& MethodToken);
+ static CodeHeader * GetCodeHeaderFromStartAddress(TADDR methodStartAddress);
+
+#ifndef CROSSGEN_COMPILE
+#if defined(WIN64EXCEPTIONS)
+ // Compute function entry lazily. Do not call directly. Use EECodeInfo::GetFunctionEntry instead.
+ virtual PTR_RUNTIME_FUNCTION LazyGetFunctionEntry(EECodeInfo * pCodeInfo);
+
+ virtual DWORD GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength);
+#endif // WIN64EXCEPTIONS
+
+ virtual StubCodeBlockKind GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC);
+
+#if defined(DACCESS_COMPILE)
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+ virtual void EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD);
+#endif // DACCESS_COMPILE
+#if defined(WIN64EXCEPTIONS)
+ // Enumerate the memory necessary to retrieve the unwind info for a specific method
+ virtual void EnumMemoryRegionsForMethodUnwindInfo(CLRDataEnumMemoryFlags flags, EECodeInfo * pCodeInfo)
+ {
+        // We don't need to explicitly enumerate the memory for unwind information for JITted methods because
+ // it is stored using the Win64 standard dynamic function table mechanism, and dump generation code knows
+ // it needs to call our code:OutOfProcessFunctionTableCallback in order to save the function table including
+ // unwind information at dump generation time (since it's dynamic, it will not be otherwise
+ // available at debug time).
+ }
+#endif // WIN64EXCEPTIONS
+#endif // !CROSSGEN_COMPILE
+
+#ifndef CROSSGEN_COMPILE
+#ifndef DACCESS_COMPILE
+ // Heap Management functions
+ void NibbleMapSet(HeapList * pHp, TADDR pCode, BOOL bSet);
+#endif // !DACCESS_COMPILE
+
+ static TADDR FindMethodCode(RangeSection * pRangeSection, PCODE currentPC);
+ static TADDR FindMethodCode(PCODE currentPC);
+#endif // !CROSSGEN_COMPILE
+
+#if !defined DACCESS_COMPILE && !defined CROSSGEN_COMPILE
+ void FreeCodeMemory(HostCodeHeap *pCodeHeap, void * codeStart);
+ void RemoveFromCleanupList(HostCodeHeap *pCodeHeap);
+ void AddToCleanupList(HostCodeHeap *pCodeHeap);
+ void DeleteCodeHeap(HeapList *pHeapList);
+ void RemoveCodeHeapFromDomainList(CodeHeap *pHeap, LoaderAllocator *pAllocator);
+#endif // !DACCESS_COMPILE && !CROSSGEN_COMPILE
+
+private :
+#ifndef CROSSGEN_COMPILE
+ struct DomainCodeHeapList {
+ LoaderAllocator *m_pAllocator;
+ CDynArray<HeapList *> m_CodeHeapList;
+ DomainCodeHeapList();
+ ~DomainCodeHeapList();
+ };
+#endif
+
+#ifndef DACCESS_COMPILE
+#ifndef CROSSGEN_COMPILE
+ HeapList* NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHeapList *pADHeapList);
+ HeapList* GetCodeHeap(CodeHeapRequestInfo *pInfo);
+ bool CanUseCodeHeap(CodeHeapRequestInfo *pInfo, HeapList *pCodeHeap);
+ void* allocCodeRaw(CodeHeapRequestInfo *pInfo,
+ size_t header, size_t blockSize, unsigned align,
+ HeapList ** ppCodeHeap /* Writeback, Can be null */ );
+
+ DomainCodeHeapList *GetCodeHeapList(MethodDesc *pMD, LoaderAllocator *pAllocator, BOOL fDynamicOnly = FALSE);
+ DomainCodeHeapList *CreateCodeHeapList(CodeHeapRequestInfo *pInfo);
+ LoaderHeap* GetJitMetaHeap(MethodDesc *pMD);
+#endif // !CROSSGEN_COMPILE
+
+ HeapList * GetCodeHeapList()
+ {
+ return m_pCodeHeap;
+ }
+
+#ifndef CROSSGEN_COMPILE
+protected:
+ void * allocEHInfoRaw(CodeHeader* pCodeHeader, DWORD blockSize, size_t * pAllocationSize);
+private:
+#endif
+#endif // !DACCESS_COMPILE
+
+ PTR_HeapList m_pCodeHeap;
+
+protected :
+ Crst m_CodeHeapCritSec;
+
+#if !defined(DACCESS_COMPILE)
+public:
+ class CodeHeapIterator
+ {
+ CrstHolder m_lockHolder;
+ HeapList *m_pHeapList;
+ LoaderAllocator *m_pLoaderAllocator;
+ BaseDomain *m_pDomain;
+ MethodSectionIterator m_Iterator;
+ MethodDesc *m_pCurrent;
+
+ public:
+ CodeHeapIterator(BaseDomain *pDomainFilter = NULL, LoaderAllocator *pLoaderAllocatorFilter = NULL);
+ ~CodeHeapIterator();
+ BOOL Next();
+
+ MethodDesc *GetMethod()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pCurrent;
+ }
+
+ TADDR GetMethodCode()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (TADDR)m_Iterator.GetMethodCode();
+ }
+ };
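+
+    // Typical iteration pattern (illustrative):
+    //
+    //   EEJitManager::CodeHeapIterator it(pDomainFilter);
+    //   while (it.Next())
+    //   {
+    //       MethodDesc * pMD = it.GetMethod();
+    //       ...
+    //   }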
+#endif // !DACCESS_COMPILE
+
+private:
+ DWORD m_dwCPUCompileFlags;
+
+#if !defined CROSSGEN_COMPILE && !defined DACCESS_COMPILE
+ void SetCpuInfo();
+#endif
+
+public:
+ inline DWORD GetCPUCompileFlags()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwCPUCompileFlags;
+ }
+
+private :
+ PTR_HostCodeHeap m_cleanupList;
+ //When EH Clauses are resolved we need to atomically update the TypeHandle
+ Crst m_EHClauseCritSec;
+
+#if !defined CROSSGEN_COMPILE
+ // must hold critical section to access this structure.
+ CUnorderedArray<DomainCodeHeapList *, 5> m_DomainCodeHeaps;
+ CUnorderedArray<DomainCodeHeapList *, 5> m_DynamicDomainCodeHeaps;
+#endif
+
+#ifdef _TARGET_AMD64_
+private:
+ //
+ // List of reserved memory blocks to be used for jump stub allocation if no suitable memory block is found
+ // via the regular mechanism
+ //
+ struct EmergencyJumpStubReserve
+ {
+ EmergencyJumpStubReserve * m_pNext;
+ BYTE * m_ptr;
+ SIZE_T m_size;
+ SIZE_T m_free;
+ };
+ EmergencyJumpStubReserve * m_pEmergencyJumpStubReserveList;
+
+public:
+ BYTE * AllocateFromEmergencyJumpStubReserve(const BYTE * loAddr, const BYTE * hiAddr, SIZE_T * pReserveSize);
+ VOID EnsureJumpStubReserve(BYTE * pImageBase, SIZE_T imageSize, SIZE_T reserveSize);
+#endif
+
+public:
+ ICorJitCompiler * m_jit;
+ HINSTANCE m_JITCompiler;
+#ifdef _TARGET_AMD64_
+ HINSTANCE m_JITCompilerOther; // Stores the handle of the legacy JIT, if one is loaded.
+#endif
+
+#ifdef ALLOW_SXS_JIT
+    // Put these at the end so that we don't mess up the offsets in the DAC.
+ ICorJitCompiler * m_alternateJit;
+ HINSTANCE m_AltJITCompiler;
+ bool m_AltJITRequired;
+#endif //ALLOW_SXS_JIT
+};
+
+//*****************************************************************************
+//
+// This class manages IJitManagers and ICorJitCompilers. It has only static
+// members. It should never be constructed.
+//
+//*****************************************************************************
+
+class ExecutionManager
+{
+ friend class CorExternalDataAccess;
+ friend struct _DacGlobals;
+
+#ifdef DACCESS_COMPILE
+ friend class ClrDataAccess;
+#endif
+
+public:
+ static void Init();
+
+ enum ScanFlag
+ {
+ // When this is passed to a function, it must directly acquire a reader lock
+ // before it may continue
+ ScanReaderLock,
+
+ // This means the function need not directly acquire a reader lock; however, it
+ // may call other functions that may require other reader locks (e.g.,
+ // ExecutionManager::FindJitMan may be called with ScanNoReaderLock, but
+ // still calls IJitManager::JitCodeToMethodInfo which acquires its own
+ // IJitManager reader lock)
+ ScanNoReaderLock
+ };
+
+ // Returns default scan flag for current thread
+ static ScanFlag GetScanFlags();
+
+ // Returns whether currentPC is in managed code. Returns false for jump stubs on WIN64.
+ static BOOL IsManagedCode(PCODE currentPC);
+
+ // Special version with profiler hook
+ static BOOL IsManagedCode(PCODE currentPC, HostCallPreference hostCallPreference, BOOL *pfFailedReaderLock);
+
+ // Returns methodDesc for given PC
+ static MethodDesc * GetCodeMethodDesc(PCODE currentPC);
+
+ static IJitManager* FindJitMan(PCODE currentPC)
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ RangeSection * pRange = FindCodeRange(currentPC, GetScanFlags());
+ return (pRange != NULL) ? pRange->pjit : NULL;
+ }
+
+ static RangeSection * FindCodeRange(PCODE currentPC, ScanFlag scanFlag);
+
+ static BOOL IsCollectibleMethod(const METHODTOKEN& MethodToken);
+
+ class ReaderLockHolder
+ {
+ public:
+ ReaderLockHolder(HostCallPreference hostCallPreference = AllowHostCalls);
+ ~ReaderLockHolder();
+
+ BOOL Acquired();
+ };
+
+#ifdef _WIN64
+ static ULONG GetCLRPersonalityRoutineValue()
+ {
+ LIMITED_METHOD_CONTRACT;
+ static_assert_no_msg(offsetof(HeapList, CLRPersonalityRoutine) ==
+ (size_t)((ULONG)offsetof(HeapList, CLRPersonalityRoutine)));
+ return offsetof(HeapList, CLRPersonalityRoutine);
+ }
+#endif // _WIN64
+
+ static EEJitManager * GetEEJitManager()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pEEJitManager;
+ }
+
+#ifdef FEATURE_PREJIT
+ static NativeImageJitManager * GetNativeImageJitManager()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pNativeImageJitManager;
+ }
+#endif
+
+#ifdef FEATURE_READYTORUN
+ static ReadyToRunJitManager * GetReadyToRunJitManager()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pReadyToRunJitManager;
+ }
+#endif
+
+ static void ClearCaches( void );
+ static BOOL IsCacheCleanupRequired();
+
+ static LPCWSTR GetJitName();
+
+ static void Unload(LoaderAllocator *pLoaderAllocator);
+
+ static void AddCodeRange(TADDR StartRange, TADDR EndRange,
+ IJitManager* pJit,
+ RangeSection::RangeSectionFlags flags,
+ void * pHp);
+
+ static void AddNativeImageRange(TADDR StartRange,
+ SIZE_T Size,
+ Module * pModule);
+
+ static void DeleteRange(TADDR StartRange);
+
+ static void CleanupCodeHeaps();
+
+ static ICodeManager* GetDefaultCodeManager()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (ICodeManager *)m_pDefaultCodeMan;
+ }
+
+ static PTR_Module FindZapModule(TADDR currentData);
+ static PTR_Module FindReadyToRunModule(TADDR currentData);
+
+ // FindZapModule flavor to be used during GC to find GCRefMap
+ static PTR_Module FindModuleForGCRefMap(TADDR currentData);
+
+ static RangeSection* GetRangeSectionAndPrev(RangeSection *pRS, TADDR addr, RangeSection **ppPrev);
+
+#ifdef DACCESS_COMPILE
+ static void EnumRangeList(RangeSection* list,
+ CLRDataEnumMemoryFlags flags);
+ static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+#ifndef DACCESS_COMPILE
+ static PCODE jumpStub(MethodDesc* pMD,
+ PCODE target,
+ BYTE * loAddr,
+ BYTE * hiAddr,
+ LoaderAllocator *pLoaderAllocator = NULL);
+#endif
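+
+    // Illustrative use: obtain (or create) a jump stub within [loAddr..hiAddr]
+    // that branches to 'target', so that a rel32 call site can reach it:
+    //
+    //   PCODE stub = ExecutionManager::jumpStub(pMD, target, loAddr, hiAddr);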
+
+private:
+ static RangeSection * FindCodeRangeWithLock(PCODE currentPC);
+
+ static BOOL IsManagedCodeWithLock(PCODE currentPC);
+ static BOOL IsManagedCodeWorker(PCODE currentPC);
+
+ static RangeSection* GetRangeSection(TADDR addr);
+
+ SPTR_DECL(EECodeManager, m_pDefaultCodeMan);
+
+ SPTR_DECL(EEJitManager, m_pEEJitManager);
+#ifdef FEATURE_PREJIT
+ SPTR_DECL(NativeImageJitManager, m_pNativeImageJitManager);
+#endif
+#ifdef FEATURE_READYTORUN
+ SPTR_DECL(ReadyToRunJitManager, m_pReadyToRunJitManager);
+#endif
+
+ static CrstStatic m_JumpStubCrst;
+    static CrstStatic m_RangeCrst; // Acquire before writing into m_CodeRangeList and m_DataRangeList
+
+ // infrastructure to manage readers so we can lock them out and delete domain data
+ // make ReaderCount volatile because we have order dependency in READER_INCREMENT
+#ifndef DACCESS_COMPILE
+ static Volatile<RangeSection *> m_CodeRangeList;
+ static Volatile<LONG> m_dwReaderCount;
+ static Volatile<LONG> m_dwWriterLock;
+#else
+ SPTR_DECL(RangeSection, m_CodeRangeList);
+ SVAL_DECL(LONG, m_dwReaderCount);
+ SVAL_DECL(LONG, m_dwWriterLock);
+#endif
+
+#ifndef DACCESS_COMPILE
+ class WriterLockHolder
+ {
+ public:
+ WriterLockHolder();
+ ~WriterLockHolder();
+ };
+#endif
+
+#if defined(_DEBUG)
+ // The LOCK_TAKEN/RELEASED macros need a "pointer" to the lock object to do
+ // comparisons between takes & releases (and to provide debugging info to the
+ // developer). Since Inc/Dec Reader/Writer are static, there's no object to
+ // use. So we just use the pointer to m_dwReaderCount. Note that both
+ // readers & writers use this same pointer, which follows the general convention
+ // of other ReaderWriter locks in the EE code base: each reader/writer locking object
+ // instance protects only 1 piece of data or code. Readers & writers both access the
+ // same locking object & shared resource, so conceptually they would share the same
+ // lock pointer.
+ static void * GetPtrForLockContract()
+ {
+ return (void *) &m_dwReaderCount;
+ }
+#endif // defined(_DEBUG)
+
+ static void AddRangeHelper(TADDR StartRange,
+ TADDR EndRange,
+ IJitManager* pJit,
+ RangeSection::RangeSectionFlags flags,
+ TADDR pHeapListOrZapModule);
+ static void DeleteRangeHelper(RangeSection** ppRangeList,
+ TADDR StartRange);
+
+#ifndef DACCESS_COMPILE
+ static PCODE getNextJumpStub(MethodDesc* pMD,
+ PCODE target,
+ BYTE * loAddr, BYTE * hiAddr,
+ LoaderAllocator *pLoaderAllocator);
+#endif
+
+private:
+ // ***************************************************************************
+ // Hashtable for JumpStubs for jitted code
+
+ struct JumpStubEntry {
+ PCODE m_target;
+ PCODE m_jumpStub;
+ };
+
+ class JumpStubTraits : public DefaultSHashTraits<JumpStubEntry>
+ {
+ public:
+ typedef PCODE key_t;
+
+ static key_t GetKey(element_t e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return e.m_target;
+ }
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return k1 == k2;
+ }
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifdef _WIN64
+ return (count_t) ((size_t) k ^ ((size_t) k >> 32));
+#else
+ return (count_t)(size_t)k;
+#endif
+ }
+
+ static const element_t Null() { LIMITED_METHOD_CONTRACT; JumpStubEntry e; e.m_target = NULL; e.m_jumpStub = NULL; return e; }
+ static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return e.m_target == NULL; }
+ static const element_t Deleted() { LIMITED_METHOD_CONTRACT; JumpStubEntry e; e.m_target = (PCODE)-1; e.m_jumpStub = NULL; return e; }
+ static bool IsDeleted(const element_t &e) { LIMITED_METHOD_CONTRACT; return e.m_target == (PCODE)-1; }
+ };
+ typedef SHash<JumpStubTraits> JumpStubTable;
+
+ struct JumpStubCache
+ {
+ JumpStubCache()
+ : m_pBlocks(NULL)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ JumpStubBlockHeader * m_pBlocks;
+ JumpStubTable m_Table;
+ };
+};
+
+inline CodeHeader * EEJitManager::GetCodeHeader(const METHODTOKEN& MethodToken)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(!MethodToken.IsNull());
+ return dac_cast<PTR_CodeHeader>(MethodToken.m_pCodeHeader);
+}
+
+inline CodeHeader * EEJitManager::GetCodeHeaderFromStartAddress(TADDR methodStartAddress)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(methodStartAddress != NULL);
+ ARM_ONLY(_ASSERTE((methodStartAddress & THUMB_CODE) == 0));
+ return dac_cast<PTR_CodeHeader>(methodStartAddress - sizeof(CodeHeader));
+}
+
+inline TADDR EEJitManager::JitTokenToStartAddress(const METHODTOKEN& MethodToken)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ CodeHeader * pCH = GetCodeHeader(MethodToken);
+ return pCH->GetCodeStartAddress();
+}
+
+inline void EEJitManager::JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken,
+ MethodRegionInfo * methodRegionInfo)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ PRECONDITION(methodRegionInfo != NULL);
+ } CONTRACTL_END;
+
+ methodRegionInfo->hotStartAddress = JitTokenToStartAddress(MethodToken);
+ methodRegionInfo->hotSize = GetCodeManager()->GetFunctionSize(GetGCInfoToken(MethodToken));
+ methodRegionInfo->coldStartAddress = 0;
+ methodRegionInfo->coldSize = 0;
+}
+
+
+//-----------------------------------------------------------------------------
+#ifdef FEATURE_PREJIT
+
+//*****************************************************************************
+// Stub JitManager for Managed native.
+
+class NativeImageJitManager : public IJitManager
+{
+ VPTR_VTABLE_CLASS(NativeImageJitManager, IJitManager)
+
+public:
+#ifndef DACCESS_COMPILE
+ NativeImageJitManager();
+#endif // #ifndef DACCESS_COMPILE
+
+ virtual DWORD GetCodeType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (miManaged | miNative);
+ }
+
+ // Used to read debug info.
+ virtual BOOL GetBoundariesAndVars(
+ const DebugInfoRequest & request,
+ IN FP_IDS_NEW fpNew, IN void * pNewData,
+ OUT ULONG32 * pcMap,
+ OUT ICorDebugInfo::OffsetMapping **ppMap,
+ OUT ULONG32 * pcVars,
+ OUT ICorDebugInfo::NativeVarInfo **ppVars);
+
+ virtual BOOL JitCodeToMethodInfo(RangeSection * pRangeSection,
+ PCODE currentPC,
+ MethodDesc ** ppMethodDesc,
+ EECodeInfo * pCodeInfo);
+
+ virtual PCODE GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset);
+
+ static PTR_Module JitTokenToZapModule(const METHODTOKEN& MethodToken);
+ virtual TADDR JitTokenToStartAddress(const METHODTOKEN& MethodToken);
+ virtual void JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken, MethodRegionInfo * methodRegionInfo);
+
+ virtual unsigned InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState);
+
+ virtual PTR_EXCEPTION_CLAUSE_TOKEN GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState,
+ EE_ILEXCEPTION_CLAUSE* pEHclause);
+
+#ifndef DACCESS_COMPILE
+ virtual TypeHandle ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause,
+ CrawlFrame *pCf);
+#endif // #ifndef DACCESS_COMPILE
+
+ virtual GCInfoToken GetGCInfoToken(const METHODTOKEN& MethodToken);
+
+#if defined(WIN64EXCEPTIONS)
+ virtual PTR_RUNTIME_FUNCTION LazyGetFunctionEntry(EECodeInfo * pCodeInfo);
+
+ virtual TADDR GetFuncletStartAddress(EECodeInfo * pCodeInfo);
+ virtual DWORD GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength);
+ virtual BOOL IsFilterFunclet(EECodeInfo * pCodeInfo);
+#endif // WIN64EXCEPTIONS
+
+ virtual StubCodeBlockKind GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC);
+
+#if defined(DACCESS_COMPILE)
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+ virtual void EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD);
+#if defined(WIN64EXCEPTIONS)
+ // Enumerate the memory necessary to retrieve the unwind info for a specific method
+ virtual void EnumMemoryRegionsForMethodUnwindInfo(CLRDataEnumMemoryFlags flags, EECodeInfo * pCodeInfo);
+#endif //WIN64EXCEPTIONS
+#endif //DACCESS_COMPILE
+};
+
+class NativeExceptionInfoLookupTable
+{
+public:
+ static DWORD LookupExceptionInfoRVAForMethod(PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE pTable,
+ COUNT_T numLookupEntries,
+ DWORD methodStartRVA,
+ COUNT_T* pSize);
+};
+
+class NativeUnwindInfoLookupTable
+{
+public:
+ static int LookupUnwindInfoForMethod(DWORD codeOffset,
+ PTR_RUNTIME_FUNCTION pRuntimeFunctionTable,
+ int StartIndex,
+ int EndIndex);
+
+ static BOOL HasExceptionInfo(NGenLayoutInfo * pNgenLayout, PTR_RUNTIME_FUNCTION pMainRuntimeFunction);
+ static PTR_MethodDesc GetMethodDesc(NGenLayoutInfo * pNgenLayout, PTR_RUNTIME_FUNCTION pMainRuntimeFunction, TADDR moduleBase);
+
+private:
+ static DWORD GetMethodDescRVA(NGenLayoutInfo * pNgenLayout, PTR_RUNTIME_FUNCTION pMainRuntimeFunction);
+};
+
+inline TADDR NativeImageJitManager::JitTokenToStartAddress(const METHODTOKEN& MethodToken)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ return JitTokenToModuleBase(MethodToken) +
+ RUNTIME_FUNCTION__BeginAddress(dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader));
+}
+
+#endif // FEATURE_PREJIT
+
+#ifdef FEATURE_READYTORUN
+
+class ReadyToRunJitManager : public IJitManager
+{
+ VPTR_VTABLE_CLASS(ReadyToRunJitManager, IJitManager)
+
+public:
+#ifndef DACCESS_COMPILE
+ ReadyToRunJitManager();
+#endif // #ifndef DACCESS_COMPILE
+
+ virtual DWORD GetCodeType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (miManaged | miNative);
+ }
+
+ // Used to read debug info.
+ virtual BOOL GetBoundariesAndVars(
+ const DebugInfoRequest & request,
+ IN FP_IDS_NEW fpNew, IN void * pNewData,
+ OUT ULONG32 * pcMap,
+ OUT ICorDebugInfo::OffsetMapping **ppMap,
+ OUT ULONG32 * pcVars,
+ OUT ICorDebugInfo::NativeVarInfo **ppVars);
+
+ virtual BOOL JitCodeToMethodInfo(RangeSection * pRangeSection,
+ PCODE currentPC,
+ MethodDesc** ppMethodDesc,
+ OUT EECodeInfo * pCodeInfo);
+
+ virtual PCODE GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset);
+
+ static ReadyToRunInfo * JitTokenToReadyToRunInfo(const METHODTOKEN& MethodToken);
+ static UINT32 JitTokenToGCInfoVersion(const METHODTOKEN& MethodToken);
+
+ static PTR_RUNTIME_FUNCTION JitTokenToRuntimeFunction(const METHODTOKEN& MethodToken);
+
+ virtual TADDR JitTokenToStartAddress(const METHODTOKEN& MethodToken);
+ virtual void JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken, MethodRegionInfo * methodRegionInfo);
+
+ virtual unsigned InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState);
+
+ virtual PTR_EXCEPTION_CLAUSE_TOKEN GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState,
+ EE_ILEXCEPTION_CLAUSE* pEHclause);
+
+#ifndef DACCESS_COMPILE
+ virtual TypeHandle ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause,
+ CrawlFrame *pCf);
+#endif // #ifndef DACCESS_COMPILE
+
+ virtual GCInfoToken GetGCInfoToken(const METHODTOKEN& MethodToken);
+
+#if defined(WIN64EXCEPTIONS)
+ virtual PTR_RUNTIME_FUNCTION LazyGetFunctionEntry(EECodeInfo * pCodeInfo);
+
+ virtual TADDR GetFuncletStartAddress(EECodeInfo * pCodeInfo);
+ virtual DWORD GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength);
+ virtual BOOL IsFilterFunclet(EECodeInfo * pCodeInfo);
+#endif // WIN64EXCEPTIONS
+
+ virtual StubCodeBlockKind GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC);
+
+#if defined(DACCESS_COMPILE)
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+ virtual void EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD);
+#if defined(WIN64EXCEPTIONS)
+ // Enumerate the memory necessary to retrieve the unwind info for a specific method
+ virtual void EnumMemoryRegionsForMethodUnwindInfo(CLRDataEnumMemoryFlags flags, EECodeInfo * pCodeInfo);
+#endif //WIN64EXCEPTIONS
+#endif //DACCESS_COMPILE
+};
+
+#endif
+
+//*****************************************************************************
+// EECodeInfo provides information about the code at a particular address:
+// - Start of the method and relative offset
+// - GC Info of the method
+// etc.
+//
+// EECodeInfo caches information from IJitManager and thus avoids
+// querying IJitManager repeatedly for the same data.
+//
+class EECodeInfo
+{
+ friend BOOL EEJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection, PCODE currentPC, MethodDesc** ppMethodDesc, EECodeInfo * pCodeInfo);
+ friend BOOL NativeImageJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection, PCODE currentPC, MethodDesc** ppMethodDesc, EECodeInfo * pCodeInfo);
+#ifdef FEATURE_READYTORUN
+ friend BOOL ReadyToRunJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection, PCODE currentPC, MethodDesc** ppMethodDesc, EECodeInfo * pCodeInfo);
+#endif
+
+public:
+ EECodeInfo();
+
+ EECodeInfo(PCODE codeAddress)
+ {
+ Init(codeAddress);
+ }
+
+ // Explicit initialization
+ void Init(PCODE codeAddress);
+ void Init(PCODE codeAddress, ExecutionManager::ScanFlag scanFlag);
+
+ TADDR GetSavedMethodCode();
+
+ TADDR GetStartAddress();
+
+ BOOL IsValid()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pJM != NULL;
+ }
+
+ IJitManager* GetJitManager()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(m_pJM != NULL);
+ return m_pJM;
+ }
+
+ ICodeManager* GetCodeManager()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetJitManager()->GetCodeManager();
+ }
+
+ const METHODTOKEN& GetMethodToken()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_methodToken;
+ }
+
+ // This returns a pointer to the start of an instruction; conceptually, a PINSTR.
+ TADDR GetCodeAddress()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return PCODEToPINSTR(m_codeAddress);
+ }
+
+ MethodDesc * GetMethodDesc()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pMD;
+ }
+
+ DWORD GetRelOffset()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_relOffset;
+ }
+
+ GCInfoToken GetGCInfoToken()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetJitManager()->GetGCInfoToken(GetMethodToken());
+ }
+
+ PTR_VOID GetGCInfo()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetGCInfoToken().Info;
+ }
+
+ void GetMethodRegionInfo(IJitManager::MethodRegionInfo *methodRegionInfo)
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetJitManager()->JitTokenToMethodRegionInfo(GetMethodToken(), methodRegionInfo);
+ }
+
+ TADDR GetModuleBase()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetJitManager()->JitTokenToModuleBase(GetMethodToken());
+ }
+
+#ifdef WIN64EXCEPTIONS
+ PTR_RUNTIME_FUNCTION GetFunctionEntry();
+ BOOL IsFunclet() { WRAPPER_NO_CONTRACT; return GetJitManager()->IsFunclet(this); }
+ EECodeInfo GetMainFunctionInfo();
+ ULONG GetFixedStackSize();
+
+#if defined(_TARGET_AMD64_)
+ BOOL HasFrameRegister();
+#endif // _TARGET_AMD64_
+
+#else // WIN64EXCEPTIONS
+ ULONG GetFixedStackSize()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetCodeManager()->GetFrameSize(GetGCInfo());
+ }
+#endif // WIN64EXCEPTIONS
+
+#if defined(_TARGET_AMD64_)
+ void GetOffsetsFromUnwindInfo(ULONG* pRSPOffset, ULONG* pRBPOffset);
+
+#if defined(_DEBUG) && defined(HAVE_GCCOVER)
+ // Find first funclet inside (pvFuncletStart, pvFuncletStart + cbCode)
+ static LPVOID findNextFunclet (LPVOID pvFuncletStart, SIZE_T cbCode, LPVOID *ppvFuncletEnd);
+#endif // _DEBUG && HAVE_GCCOVER
+#endif // _TARGET_AMD64_
+
+private:
+ PCODE m_codeAddress;
+ METHODTOKEN m_methodToken;
+ MethodDesc *m_pMD;
+ IJitManager *m_pJM;
+ DWORD m_relOffset;
+#ifdef WIN64EXCEPTIONS
+ PTR_RUNTIME_FUNCTION m_pFunctionEntry;
+#endif // WIN64EXCEPTIONS
+
+#ifdef _TARGET_AMD64_
+ // Simple helper to return a pointer to the UNWIND_INFO given the offset to the unwind info.
+ UNWIND_INFO * GetUnwindInfoHelper(ULONG unwindInfoOffset);
+#endif // _TARGET_AMD64_
+};
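+
+// Typical use (illustrative): resolve an instruction pointer to its method.
+//
+//   EECodeInfo codeInfo(ip);
+//   if (codeInfo.IsValid())
+//   {
+//       MethodDesc * pMD = codeInfo.GetMethodDesc();
+//       DWORD relOffset  = codeInfo.GetRelOffset();
+//   }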
+
+#include "codeman.inl"
+
+
+#ifdef FEATURE_PREJIT
+class MethodSectionIterator;
+
+//
+// The MethodIterator class is used to iterate over all the methods in an NGen image.
+// It will match and report hot (and cold, if any) sections of a method at the same time.
+// The GCInfo version is always current.
+class MethodIterator
+{
+public:
+ enum MethodIteratorOptions
+ {
+ Hot = 0x1,
+        Unprofiled = 0x2,
+ All = Hot | Unprofiled
+ };
+private:
+ TADDR m_ModuleBase;
+ MethodIteratorOptions methodIteratorOptions;
+
+ NGenLayoutInfo * m_pNgenLayout;
+ BOOL m_fHotMethodsDone;
+ COUNT_T m_CurrentRuntimeFunctionIndex;
+ COUNT_T m_CurrentColdRuntimeFunctionIndex;
+
+ void Init(PTR_Module pModule, PEDecoder * pPEDecoder, MethodIteratorOptions mio);
+
+ public:
+ MethodIterator(PTR_Module pModule, MethodIteratorOptions mio = All);
+ MethodIterator(PTR_Module pModule, PEDecoder * pPEDecoder, MethodIteratorOptions mio = All);
+
+ BOOL Next();
+
+ PTR_MethodDesc GetMethodDesc();
+ GCInfoToken GetGCInfoToken();
+ TADDR GetMethodStartAddress();
+ TADDR GetMethodColdStartAddress();
+ ULONG GetHotCodeSize();
+
+ PTR_RUNTIME_FUNCTION GetRuntimeFunction();
+
+ void GetMethodRegionInfo(IJitManager::MethodRegionInfo *methodRegionInfo);
+};
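+
+// Typical iteration over an NGen image (illustrative):
+//
+//   MethodIterator it(pModule);
+//   while (it.Next())
+//   {
+//       PTR_MethodDesc pMD = it.GetMethodDesc();
+//       ...
+//   }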
+#endif //FEATURE_PREJIT
+
+void ThrowOutOfMemoryWithinRange();
+
+#endif // !__CODEMAN_HPP__