path: root/src/vm/codeman.h
author    Jan Kotas <jkotas@microsoft.com>    2017-11-30 23:21:40 -0800
committer GitHub <noreply@github.com>    2017-11-30 23:21:40 -0800
commit    c1e44d9db9cc0d03e5f44af665acefa3589a6883 (patch)
tree      2b9ad535324b85548de84a5eec8eabfe39ca3de5 /src/vm/codeman.h
parent    845c2c248c6450107acd37c5469f56d05b391183 (diff)
Jumpstub fixes (#15296)
- Reserve space for jump stubs for precodes and other code fragments at the end of each code heap segment. This is meant to ensure that the eventual allocation of jump stubs for precodes and other code fragments succeeds. The accounting is conservative and reserves more than strictly required; this wastes a bit of address space, but no actual memory. The reserve is not used to allocate jump stubs for JITed code, since JITing can now recover from a failure to allocate a jump stub. Fixes #14996.
- Improve the algorithm for reusing HostCodeHeap segments: maintain an estimate of the size of the largest free block in each HostCodeHeap. The estimate is updated when an allocation request fails and when memory is returned to the HostCodeHeap. Fixes #14995.
- Retry JITing on failure to allocate a jump stub (sketched below). Failure to allocate a jump stub during JITing is no longer fatal; extra memory is reserved for jump stubs on retry, to ensure that the retry succeeds in allocating the jump stubs it needs with high probability.
- Respect CodeHeapRequestInfo::getRequestSize for HostCodeHeap. CodeHeapRequestInfo::getRequestSize is used to throttle code heap segment size for large workloads; not respecting it in HostCodeHeap led to too many small code heap segments in large workloads.
- Switch the HostCodeHeap nibble map to be allocated on the regular heap. This simplified the math required to estimate the nibble map size, and allocating on the regular heap is an overall improvement since the map does not need to be executable.
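To make the recovery flow concrete, here is a minimal, self-contained C++ sketch of the retry pattern described above. It is not code from this commit: OutOfJumpStubRange, CompileMethodOnce, and EstimateJumpStubReserve are hypothetical stand-ins for the real JIT entry points and for the conservative reserve accounting.

#include <cstddef>
#include <cstdio>
#include <stdexcept>

// Hypothetical error type: the one-shot compile path reports that it could
// not place a required jump stub within branch range of the generated code.
struct OutOfJumpStubRange : std::runtime_error {
    OutOfJumpStubRange() : std::runtime_error("no jump stub within range") {}
};

// Stand-in for the conservative accounting: reserve more than strictly
// required, trading a little address space for a high probability that the
// retry succeeds. The formula here is illustrative only.
size_t EstimateJumpStubReserve(size_t codeSize)
{
    return codeSize / 16 + 64;
}

// Simulated one-shot JIT: fails unless extra jump stub space was reserved.
void* CompileMethodOnce(size_t codeSize, size_t reserveForJumpStubs)
{
    if (reserveForJumpStubs == 0)
        throw OutOfJumpStubRange();
    std::printf("compiled %zu bytes, %zu bytes reserved for jump stubs\n",
                codeSize, reserveForJumpStubs);
    return reinterpret_cast<void*>(0x1000); // dummy code address
}

// The retry pattern: the first attempt reserves nothing extra; on failure it
// retries once with a conservative reserve, so jump stub allocation is
// expected to succeed. A second failure propagates and remains fatal.
void* JitWithRetry(size_t codeSize)
{
    size_t reserveForJumpStubs = 0;
    for (;;)
    {
        try
        {
            return CompileMethodOnce(codeSize, reserveForJumpStubs);
        }
        catch (const OutOfJumpStubRange&)
        {
            if (reserveForJumpStubs != 0)
                throw; // already retried once
            reserveForJumpStubs = EstimateJumpStubReserve(codeSize);
        }
    }
}

int main()
{
    JitWithRetry(4096);
    return 0;
}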
Diffstat (limited to 'src/vm/codeman.h')
-rw-r--r--  src/vm/codeman.h | 48
1 file changed, 22 insertions(+), 26 deletions(-)
diff --git a/src/vm/codeman.h b/src/vm/codeman.h
index afef682e2a..983e2ca555 100644
--- a/src/vm/codeman.h
+++ b/src/vm/codeman.h
@@ -365,9 +365,11 @@ struct CodeHeapRequestInfo
const BYTE * m_hiAddr; // highest address to use to satisfy our request (0 -- don't care)
size_t m_requestSize; // minimum size that must be made available
size_t m_reserveSize; // Amount that VirtualAlloc will reserve
+ size_t m_reserveForJumpStubs; // Amount to reserve for jump stubs (won't be allocated)
bool m_isDynamicDomain;
bool m_isCollectible;
-
+ bool m_throwOnOutOfMemoryWithinRange;
+
bool IsDynamicDomain() { return m_isDynamicDomain; }
void SetDynamicDomain() { m_isDynamicDomain = true; }
@@ -378,20 +380,26 @@ struct CodeHeapRequestInfo
size_t getReserveSize() { return m_reserveSize; }
void setReserveSize(size_t reserveSize) { m_reserveSize = reserveSize; }
+
+ size_t getReserveForJumpStubs() { return m_reserveForJumpStubs; }
+ void setReserveForJumpStubs(size_t size) { m_reserveForJumpStubs = size; }
+
+ bool getThrowOnOutOfMemoryWithinRange() { return m_throwOnOutOfMemoryWithinRange; }
+ void setThrowOnOutOfMemoryWithinRange(bool value) { m_throwOnOutOfMemoryWithinRange = value; }
void Init();
CodeHeapRequestInfo(MethodDesc *pMD)
: m_pMD(pMD), m_pAllocator(0),
m_loAddr(0), m_hiAddr(0),
- m_requestSize(0), m_reserveSize(0)
+ m_requestSize(0), m_reserveSize(0), m_reserveForJumpStubs(0)
{ WRAPPER_NO_CONTRACT; Init(); }
CodeHeapRequestInfo(MethodDesc *pMD, LoaderAllocator* pAllocator,
BYTE * loAddr, BYTE * hiAddr)
: m_pMD(pMD), m_pAllocator(pAllocator),
m_loAddr(loAddr), m_hiAddr(hiAddr),
- m_requestSize(0), m_reserveSize(0)
+ m_requestSize(0), m_reserveSize(0), m_reserveForJumpStubs(0)
{ WRAPPER_NO_CONTRACT; Init(); }
};
@@ -433,7 +441,7 @@ public:
// Alloc the specified number of bytes for code. Returns NULL if the request does not fit
// Space for header is reserved immediately before. It is not included in size.
- virtual void* AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment) = 0;
+ virtual void* AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment, size_t reserveForJumpStubs) = 0;
#ifdef DACCESS_COMPILE
virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags) = 0;
@@ -467,9 +475,7 @@ typedef struct _HeapList
PTR_DWORD pHdrMap; // bit array used to find the start of methods
size_t maxCodeHeapSize;// Size of the entire contiguous block of memory
- DWORD cBlocks; // Number of allocations
- bool bFull; // Heap is considered full do not use for new allocations
- bool bFullForJumpStubs; // Heap is considered full do not use for new allocations of jump stubs
+ size_t reserveForJumpStubs; // Amount of memory reserved for jump stubs in this block
#if defined(_TARGET_AMD64_)
BYTE CLRPersonalityRoutine[JUMP_ALLOCATE_SIZE]; // jump thunk to personality routine
@@ -483,18 +489,6 @@ typedef struct _HeapList
void SetNext(PTR_HeapList next)
{ hpNext = next; }
- void SetHeapFull()
- { VolatileStore(&bFull, true); }
-
- bool IsHeapFull()
- { return VolatileLoad(&bFull); }
-
- void SetHeapFullForJumpStubs()
- { VolatileStore(&bFullForJumpStubs, true); }
-
- bool IsHeapFullForJumpStubs()
- { return VolatileLoad(&bFullForJumpStubs); }
-
} HeapList;
//-----------------------------------------------------------------------------
@@ -527,7 +521,7 @@ public:
WRAPPER_NO_CONTRACT;
}
- virtual void* AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment) DAC_EMPTY_RET(NULL);
+ virtual void* AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment, size_t reserveForJumpStubs) DAC_EMPTY_RET(NULL);
#ifdef DACCESS_COMPILE
virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
@@ -1015,7 +1009,7 @@ public:
BOOL LoadJIT();
- CodeHeader* allocCode(MethodDesc* pFD, size_t blockSize, CorJitAllocMemFlag flag
+ CodeHeader* allocCode(MethodDesc* pFD, size_t blockSize, size_t reserveForJumpStubs, CorJitAllocMemFlag flag
#ifdef WIN64EXCEPTIONS
, UINT nUnwindInfos
, TADDR * pModuleBase
@@ -1025,7 +1019,8 @@ public:
EE_ILEXCEPTION* allocEHInfo(CodeHeader* pCodeHeader, unsigned numClauses, size_t * pAllocationSize);
JumpStubBlockHeader* allocJumpStubBlock(MethodDesc* pMD, DWORD numJumps,
BYTE * loAddr, BYTE * hiAddr,
- LoaderAllocator *pLoaderAllocator);
+ LoaderAllocator *pLoaderAllocator,
+ bool throwOnOutOfMemoryWithinRange);
void * allocCodeFragmentBlock(size_t blockSize, unsigned alignment, LoaderAllocator *pLoaderAllocator, StubCodeBlockKind kind);
#endif // !DACCESS_COMPILE && !CROSSGEN_COMPILE
@@ -1091,11 +1086,10 @@ private :
#ifndef DACCESS_COMPILE
#ifndef CROSSGEN_COMPILE
HeapList* NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHeapList *pADHeapList);
- HeapList* GetCodeHeap(CodeHeapRequestInfo *pInfo);
bool CanUseCodeHeap(CodeHeapRequestInfo *pInfo, HeapList *pCodeHeap);
void* allocCodeRaw(CodeHeapRequestInfo *pInfo,
size_t header, size_t blockSize, unsigned align,
- HeapList ** ppCodeHeap /* Writeback, Can be null */ );
+ HeapList ** ppCodeHeap);
DomainCodeHeapList *GetCodeHeapList(CodeHeapRequestInfo *pInfo, LoaderAllocator *pAllocator, BOOL fDynamicOnly = FALSE);
DomainCodeHeapList *CreateCodeHeapList(CodeHeapRequestInfo *pInfo);
@@ -1357,7 +1351,8 @@ public:
PCODE target,
BYTE * loAddr,
BYTE * hiAddr,
- LoaderAllocator *pLoaderAllocator = NULL);
+ LoaderAllocator *pLoaderAllocator = NULL,
+ bool throwOnOutOfMemoryWithinRange = true);
#endif
private:
@@ -1430,7 +1425,8 @@ private:
static PCODE getNextJumpStub(MethodDesc* pMD,
PCODE target,
BYTE * loAddr, BYTE * hiAddr,
- LoaderAllocator *pLoaderAllocator);
+ LoaderAllocator *pLoaderAllocator,
+ bool throwOnOutOfMemoryWithinRange);
#endif
private:
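A caller-side note on the new throwOnOutOfMemoryWithinRange parameter threaded through jumpStub, getNextJumpStub, and allocJumpStubBlock above: it defaults to true, preserving the old throwing behavior, while callers that can recover (such as the JIT path that retries compilation) pass false and test for a NULL result. The sketch below shows the intended usage shape only; GetJumpStub and TryEmitBranch are hypothetical stand-ins, not APIs from this commit.

#include <cstdint>
#include <new> // std::bad_alloc

typedef uintptr_t PCODE; // stand-in for the real PCODE typedef

// Hypothetical stand-in for the jump stub allocator entry point. When no
// stub can be placed inside [loAddr, hiAddr], it either throws or returns 0,
// depending on throwOnOutOfMemoryWithinRange.
PCODE GetJumpStub(PCODE target, uint8_t* loAddr, uint8_t* hiAddr,
                  bool throwOnOutOfMemoryWithinRange = true)
{
    (void)target; (void)loAddr; (void)hiAddr;
    PCODE stub = 0; // pretend the in-range allocation failed
    if (stub == 0 && throwOnOutOfMemoryWithinRange)
        throw std::bad_alloc(); // legacy behavior, still the default
    return stub; // recoverable callers test for 0
}

// Recoverable caller, analogous to the JIT path: request the no-throw
// behavior and fall back -- here, by telling the caller to retry.
bool TryEmitBranch(PCODE target, uint8_t* loAddr, uint8_t* hiAddr)
{
    PCODE stub = GetJumpStub(target, loAddr, hiAddr,
                             /* throwOnOutOfMemoryWithinRange */ false);
    if (stub == 0)
        return false; // no stub in range; caller recovers instead of failing hard
    // ... emit a near branch to 'stub' ...
    return true;
}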