path: root/src/utilcode
author     Koundinya Veluri <kouvel@microsoft.com>    2017-05-08 12:16:21 -0700
committer  GitHub <noreply@github.com>                2017-05-08 12:16:21 -0700
commit     8514226271bd125d62fbab552d9ebb107bd67fb1 (patch)
tree       ddf68784978643588b40e680478cdd00ecb184c0 /src/utilcode
parent     1db5bbc0b29c2801a2d548ee852424d6ea207388 (diff)
download   coreclr-8514226271bd125d62fbab552d9ebb107bd67fb1.tar.gz
           coreclr-8514226271bd125d62fbab552d9ebb107bd67fb1.tar.bz2
           coreclr-8514226271bd125d62fbab552d9ebb107bd67fb1.zip
Make CoreCLR work properly under PaX's RANDMMAP (#11382)
Issues:
- The ExecutableMemoryAllocator is used to attempt to map native images into a memory range near libcoreclr, so that its helper table can use short relative addresses for jumps to libcoreclr.
- RANDMMAP typically prevents mmap calls that pass a specific address from reserving memory at the requested address, so the executable memory allocator fails to reserve any memory. When Server GC is enabled, the large GC heap can exacerbate the issue by taking address space near libcoreclr.
- Native images are then loaded far from libcoreclr, and jump stub space needs to be allocated near the native image, but RANDMMAP typically prevents this too.
- NGenReserveForJumpStubs is intended to reserve some memory near mapped native images for jump stubs, but that reservation is done with a separate mmap call in the same way as above, and RANDMMAP typically prevents this too.
- The JIT needs to allocate memory for code that may jump/call into a native image or libcoreclr, which may require jump stubs near the code, and those cannot be allocated either. CodeHeapReserveForJumpStubs reserves space inside code heap blocks without a separate mmap call, so this works, but without that environment variable there is by default still a good chance of failing.
- See https://github.com/dotnet/coreclr/blob/56d550d4f8aec2dd40b72a182205d0a2463a1bc9/Documentation/design-docs/jump-stubs.md for more details.

Fixes #8480
- It would be ideal to fix all of the above properly, so that there would never be a need to attempt reserving memory within a specific range. Since we're running out of time for 2.0, the following simpler, temporary solution should cover most of the practical cases and may be appropriate for 2.0.
- Extended the ExecutableMemoryAllocator to reserve address space even when it cannot do so near libcoreclr.
- Had ClrVirtualAllocWithinRange use the executable memory allocator to reserve memory for jump stubs when the reservation satisfies the requested range.
- This covers a maximum of ~2 GB of executable code and should handle most of the practical cases. Once this space is exhausted under RANDMMAP, native images loaded later will fail to load, and for jitted code the environment variable above can be used.
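For context, here is a minimal standalone sketch (not CoreCLR code; ReserveNearHint is a hypothetical helper and the +/-2 GB distance is illustrative) of the behavior the message describes: an address passed to mmap without MAP_FIXED is only a hint, and under PaX RANDMMAP the kernel typically places the mapping elsewhere, so a near-allocation strategy must verify the result and keep a far reservation as a fallback rather than failing outright.

// Sketch only: shows why a hinted mmap needs verification.
#include <sys/mman.h>
#include <cstdint>
#include <cstdio>

static void* ReserveNearHint(void* hint, size_t size, size_t maxDistance)
{
    // Without MAP_FIXED the address is advisory; under RANDMMAP the kernel
    // usually ignores it and returns a randomized address instead.
    void* p = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return nullptr;

    uintptr_t a = (uintptr_t)p, b = (uintptr_t)hint;
    uintptr_t distance = a > b ? a - b : b - a;
    if (distance > maxDistance)
    {
        // Hint not honored: short relative jumps to 'hint' are impossible
        // from here. The change above keeps such far reservations anyway
        // instead of failing.
        std::fprintf(stderr, "hint ignored: wanted near %p, got %p\n", hint, p);
    }
    return p;
}

int main()
{
    void* hint = (void*)&ReserveNearHint;                  // stand-in for libcoreclr's base
    void* p = ReserveNearHint(hint, 0x10000, 0x7FFFFFFF);  // within +/-2 GB
    if (p != nullptr)
        munmap(p, 0x10000);
    return 0;
}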
Diffstat (limited to 'src/utilcode')
-rw-r--r--  src/utilcode/util.cpp | 25
1 file changed, 20 insertions(+), 5 deletions(-)
diff --git a/src/utilcode/util.cpp b/src/utilcode/util.cpp
index a8786def92..b7fca3ed9b 100644
--- a/src/utilcode/util.cpp
+++ b/src/utilcode/util.cpp
@@ -573,7 +573,9 @@ static DWORD ShouldInjectFaultInRange()
// Reserves free memory within the range [pMinAddr..pMaxAddr] using
// ClrVirtualQuery to find free memory and ClrVirtualAlloc to reserve it.
//
-// This method only supports the flAllocationType of MEM_RESERVE
+// This method only supports the flAllocationType of MEM_RESERVE, and expects that the memory
+// is being reserved for the purpose of eventually storing executable code.
+//
// Callers also should set dwSize to a multiple of sysInfo.dwAllocationGranularity (64k).
// That way they can reserve a large region and commit smaller sized pages
// from that region until it fills up.
@@ -603,6 +605,11 @@ BYTE * ClrVirtualAllocWithinRange(const BYTE *pMinAddr,
static unsigned countOfCalls = 0; // We log the number of times we call this method
countOfCalls++; // increment the call counter
+ if (dwSize == 0)
+ {
+ return nullptr;
+ }
+
//
// First let's normalize the pMinAddr and pMaxAddr values
//
@@ -618,18 +625,26 @@ BYTE * ClrVirtualAllocWithinRange(const BYTE *pMinAddr,
pMaxAddr = (BYTE *) TOP_MEMORY;
}
+ // If pMaxAddr is not greater than pMinAddr we can not make an allocation
+ if (pMaxAddr <= pMinAddr)
+ {
+ return nullptr;
+ }
+
// If pMinAddr is BOT_MEMORY and pMaxAddr is TOP_MEMORY
// then we can call ClrVirtualAlloc instead
if ((pMinAddr == (BYTE *) BOT_MEMORY) && (pMaxAddr == (BYTE *) TOP_MEMORY))
{
- return (BYTE*) ClrVirtualAlloc(NULL, dwSize, flAllocationType, flProtect);
+ return (BYTE*) ClrVirtualAlloc(nullptr, dwSize, flAllocationType, flProtect);
}
- // If pMaxAddr is not greater than pMinAddr we can not make an allocation
- if (dwSize == 0 || pMaxAddr <= pMinAddr)
+#ifdef FEATURE_PAL
+ pResult = (BYTE *)PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange(pMinAddr, pMaxAddr, dwSize);
+ if (pResult != nullptr)
{
- return NULL;
+ return pResult;
}
+#endif // FEATURE_PAL
// We will do one scan from [pMinAddr .. pMaxAddr]
// First align the tryAddr up to next 64k base address.
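The header comment in the first hunk describes a reserve-large/commit-small discipline: reserve one region whose size is a multiple of the 64K allocation granularity, then commit smaller pages from it as needed. A hedged illustration of that pattern follows, using the raw Win32 VirtualAlloc API that the Clr* wrappers sit on top of (sizes and the executable protection are illustrative, not taken from this change):

// Sketch only: the reserve-then-commit pattern the comment describes.
#include <windows.h>

int main()
{
    const SIZE_T regionSize = 64 * 1024; // multiple of dwAllocationGranularity
    const SIZE_T pageSize   = 4 * 1024;

    // Reserve address space only; no pages are committed yet.
    BYTE* region = (BYTE*)VirtualAlloc(nullptr, regionSize,
                                       MEM_RESERVE, PAGE_NOACCESS);
    if (region == nullptr)
        return 1;

    // Commit pages out of the reservation on demand until it fills up.
    for (SIZE_T offset = 0; offset < regionSize; offset += pageSize)
    {
        if (VirtualAlloc(region + offset, pageSize,
                         MEM_COMMIT, PAGE_EXECUTE_READWRITE) == nullptr)
            break; // commit failed; the reservation itself remains valid
    }

    VirtualFree(region, 0, MEM_RELEASE);
    return 0;
}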