summaryrefslogtreecommitdiff
path: root/src/pal/src/map/virtual.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/pal/src/map/virtual.cpp')
-rw-r--r--src/pal/src/map/virtual.cpp229
1 files changed, 199 insertions, 30 deletions
diff --git a/src/pal/src/map/virtual.cpp b/src/pal/src/map/virtual.cpp
index 7e00843b7a..41bd37c9b4 100644
--- a/src/pal/src/map/virtual.cpp
+++ b/src/pal/src/map/virtual.cpp
@@ -18,15 +18,19 @@ Abstract:
--*/
+#include "pal/dbgmsg.h"
+
+SET_DEFAULT_DEBUG_CHANNEL(VIRTUAL); // some headers have code with asserts, so do this first
+
#include "pal/thread.hpp"
#include "pal/cs.hpp"
#include "pal/malloc.hpp"
#include "pal/file.hpp"
#include "pal/seh.hpp"
-#include "pal/dbgmsg.h"
#include "pal/virtual.h"
#include "pal/map.h"
#include "pal/init.h"
+#include "pal/utils.h"
#include "common.h"
#include <sys/types.h>
@@ -43,8 +47,6 @@ Abstract:
using namespace CorUnix;
-SET_DEFAULT_DEBUG_CHANNEL(VIRTUAL);
-
CRITICAL_SECTION virtual_critsec;
// The first node in our list of allocated blocks.
@@ -93,6 +95,7 @@ namespace VirtualMemoryLogging
Decommit = 0x40,
Release = 0x50,
Reset = 0x60,
+ ReserveFromExecutableMemoryAllocatorWithinRange = 0x70
};
// Indicates that the attempted operation has failed
@@ -884,8 +887,13 @@ static LPVOID VIRTUALReserveMemory(
// First, figure out where we're trying to reserve the memory and
// how much we need. On most systems, requests to mmap must be
- // page-aligned and at multiples of the page size.
- StartBoundary = (UINT_PTR)lpAddress & ~BOUNDARY_64K;
+ // page-aligned and at multiples of the page size. Unlike on Windows, on
+ // Unix, the allocation granularity is the page size, so the memory size to
+ // reserve is not aligned to 64 KB. Nor should the start boundary need
+ // to be aligned down to 64 KB, but it is expected that there are other
+ // components that rely on this alignment when providing a specific address
+ // (note that mmap itself does not make any such guarantees).
+ StartBoundary = (UINT_PTR)ALIGN_DOWN(lpAddress, VIRTUAL_64KB);
/* Add the sizes, and round down to the nearest page boundary. */
MemSize = ( ((UINT_PTR)lpAddress + dwSize + VIRTUAL_PAGE_MASK) & ~VIRTUAL_PAGE_MASK ) -
StartBoundary;
@@ -894,7 +902,14 @@ static LPVOID VIRTUALReserveMemory(
// try to get memory from the executable memory allocator to satisfy the request.
if (((flAllocationType & MEM_RESERVE_EXECUTABLE) != 0) && (lpAddress == NULL))
{
- pRetVal = g_executableMemoryAllocator.AllocateMemory(MemSize);
+ // Alignment to a 64 KB granularity should not be necessary (alignment to page size should be sufficient), but see
+ // ExecutableMemoryAllocator::AllocateMemory() for the reason why it is done
+ SIZE_T reservationSize = ALIGN_UP(MemSize, VIRTUAL_64KB);
+ pRetVal = g_executableMemoryAllocator.AllocateMemory(reservationSize);
+ if (pRetVal != nullptr)
+ {
+ MemSize = reservationSize;
+ }
}
if (pRetVal == NULL)
@@ -1227,6 +1242,72 @@ done:
/*++
Function:
+ PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange
+
+ This function attempts to allocate the requested amount of memory in the specified address range, from the executable memory
+ allocator. If unable to do so, the function returns nullptr and does not set the last error.
+
+ lpBeginAddress - Inclusive beginning of range
+ lpEndAddress - Exclusive end of range
+ dwSize - Number of bytes to allocate
+--*/
+LPVOID
+PALAPI
+PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange(
+ IN LPCVOID lpBeginAddress,
+ IN LPCVOID lpEndAddress,
+ IN SIZE_T dwSize)
+{
+#ifdef BIT64
+ PERF_ENTRY(PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange);
+ ENTRY(
+ "PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange(lpBeginAddress = %p, lpEndAddress = %p, dwSize = %Iu)\n",
+ lpBeginAddress,
+ lpEndAddress,
+ dwSize);
+
+ _ASSERTE(lpBeginAddress <= lpEndAddress);
+
+ // Alignment to a 64 KB granularity should not be necessary (alignment to page size should be sufficient), but see
+ // ExecutableMemoryAllocator::AllocateMemory() for the reason why it is done
+ SIZE_T reservationSize = ALIGN_UP(dwSize, VIRTUAL_64KB);
+
+ CPalThread *currentThread = InternalGetCurrentThread();
+ InternalEnterCriticalSection(currentThread, &virtual_critsec);
+
+ void *address = g_executableMemoryAllocator.AllocateMemoryWithinRange(lpBeginAddress, lpEndAddress, reservationSize);
+ if (address != nullptr)
+ {
+ _ASSERTE(IS_ALIGNED(address, VIRTUAL_PAGE_SIZE));
+ if (!VIRTUALStoreAllocationInfo((UINT_PTR)address, reservationSize, MEM_RESERVE | MEM_RESERVE_EXECUTABLE, PAGE_NOACCESS))
+ {
+ ASSERT("Unable to store the structure in the list.\n");
+ munmap(address, reservationSize);
+ address = nullptr;
+ }
+ }
+
+ LogVaOperation(
+ VirtualMemoryLogging::VirtualOperation::ReserveFromExecutableMemoryAllocatorWithinRange,
+ nullptr,
+ dwSize,
+ MEM_RESERVE | MEM_RESERVE_EXECUTABLE,
+ PAGE_NOACCESS,
+ address,
+ TRUE);
+
+ InternalLeaveCriticalSection(currentThread, &virtual_critsec);
+
+ LOGEXIT("PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange returning %p\n", address);
+ PERF_EXIT(PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange);
+ return address;
+#else // !BIT64
+ return nullptr;
+#endif // BIT64
+}
+
+/*++
+Function:
VirtualAlloc
Note:
@@ -1982,11 +2063,15 @@ Function :
--*/
void* ReserveMemoryFromExecutableAllocator(CPalThread* pThread, SIZE_T allocationSize)
{
+#ifdef BIT64
InternalEnterCriticalSection(pThread, &virtual_critsec);
void* mem = g_executableMemoryAllocator.AllocateMemory(allocationSize);
InternalLeaveCriticalSection(pThread, &virtual_critsec);
return mem;
+#else // !BIT64
+ return nullptr;
+#endif // BIT64
}
/*++
@@ -2024,14 +2109,14 @@ Function:
void ExecutableMemoryAllocator::TryReserveInitialMemory()
{
CPalThread* pthrCurrent = InternalGetCurrentThread();
- int32_t sizeOfAllocation = MaxExecutableMemorySize;
- int32_t startAddressIncrement;
- UINT_PTR startAddress;
+ int32_t sizeOfAllocation = MaxExecutableMemorySizeNearCoreClr;
+ int32_t preferredStartAddressIncrement;
+ UINT_PTR preferredStartAddress;
UINT_PTR coreclrLoadAddress;
const int32_t MemoryProbingIncrement = 128 * 1024 * 1024;
// Try to find and reserve an available region of virtual memory that is located
- // within 2GB range (defined by the MaxExecutableMemorySize constant) from the
+ // within 2GB range (defined by the MaxExecutableMemorySizeNearCoreClr constant) from the
// location of the coreclr library.
// Potentially, as a possible future improvement, we can get precise information
// about available memory ranges by parsing data from '/proc/self/maps'.
@@ -2045,40 +2130,69 @@ void ExecutableMemoryAllocator::TryReserveInitialMemory()
// (thus avoiding reserving memory below 4GB; besides some operating systems do not allow that).
// If libcoreclr is loaded at high addresses then try to reserve memory below its location.
coreclrLoadAddress = (UINT_PTR)PAL_GetSymbolModuleBase((void*)VirtualAlloc);
- if ((coreclrLoadAddress < 0xFFFFFFFF) || ((coreclrLoadAddress - MaxExecutableMemorySize) < 0xFFFFFFFF))
+ if ((coreclrLoadAddress < 0xFFFFFFFF) || ((coreclrLoadAddress - MaxExecutableMemorySizeNearCoreClr) < 0xFFFFFFFF))
{
// Try to allocate above the location of libcoreclr
- startAddress = coreclrLoadAddress + CoreClrLibrarySize;
- startAddressIncrement = MemoryProbingIncrement;
+ preferredStartAddress = coreclrLoadAddress + CoreClrLibrarySize;
+ preferredStartAddressIncrement = MemoryProbingIncrement;
}
else
{
// Try to allocate below the location of libcoreclr
- startAddress = coreclrLoadAddress - MaxExecutableMemorySize;
- startAddressIncrement = 0;
+ preferredStartAddress = coreclrLoadAddress - MaxExecutableMemorySizeNearCoreClr;
+ preferredStartAddressIncrement = 0;
}
// Do actual memory reservation.
do
{
- m_startAddress = ReserveVirtualMemory(pthrCurrent, (void*)startAddress, sizeOfAllocation);
- if (m_startAddress != NULL)
+ m_startAddress = ReserveVirtualMemory(pthrCurrent, (void*)preferredStartAddress, sizeOfAllocation);
+ if (m_startAddress != nullptr)
{
- // Memory has been successfully reserved.
- m_totalSizeOfReservedMemory = sizeOfAllocation;
-
- // Randomize the location at which we start allocating from the reserved memory range.
- int32_t randomOffset = GenerateRandomStartOffset();
- m_nextFreeAddress = (void*)(((UINT_PTR)m_startAddress) + randomOffset);
- m_remainingReservedMemory = sizeOfAllocation - randomOffset;
break;
}
// Try to allocate a smaller region
sizeOfAllocation -= MemoryProbingIncrement;
- startAddress += startAddressIncrement;
+ preferredStartAddress += preferredStartAddressIncrement;
} while (sizeOfAllocation >= MemoryProbingIncrement);
+
+ if (m_startAddress == nullptr)
+ {
+ // We were not able to reserve any memory near libcoreclr. Try to reserve approximately 2 GB of address space somewhere
+ // anyway:
+ // - This sets aside address space that can be used for executable code, such that jumps/calls between such code may
+ // continue to use short relative addresses instead of long absolute addresses that would currently require jump
+ // stubs.
+ // - The inability to allocate memory in a specific range for jump stubs is an unrecoverable problem. This reservation
+ // would mitigate such issues that can become prevalent depending on which security features are enabled and to what
+ // extent, such as in particular, PaX's RANDMMAP:
+ // - https://en.wikibooks.org/wiki/Grsecurity/Appendix/Grsecurity_and_PaX_Configuration_Options
+ // - Jump stubs for executable code residing in this region can request memory from this allocator
+ // - Native images can be loaded into this address space, including any jump stubs that are required for its helper
+ // table. This satisfies the vast majority of practical cases where the total amount of loaded native image memory
+ // does not exceed approximately 2 GB.
+ // - The code heap allocator for the JIT can allocate from this address space. Beyond this reservation, one can use
+ // the COMPlus_CodeHeapReserveForJumpStubs environment variable to reserve space for jump stubs.
+ sizeOfAllocation = MaxExecutableMemorySize;
+ m_startAddress = ReserveVirtualMemory(pthrCurrent, nullptr, sizeOfAllocation);
+ if (m_startAddress == nullptr)
+ {
+ return;
+ }
+ }
+
+ // Memory has been successfully reserved.
+ m_totalSizeOfReservedMemory = sizeOfAllocation;
+
+ // Randomize the location at which we start allocating from the reserved memory range. Alignment to a 64 KB granularity
+ // should not be necessary, but see AllocateMemory() for the reason why it is done.
+ int32_t randomOffset = GenerateRandomStartOffset();
+ m_nextFreeAddress = ALIGN_UP((void*)(((UINT_PTR)m_startAddress) + randomOffset), VIRTUAL_64KB);
+ _ASSERTE(sizeOfAllocation >= (UINT_PTR)m_nextFreeAddress - (UINT_PTR)m_startAddress);
+ m_remainingReservedMemory =
+ ALIGN_DOWN(sizeOfAllocation - ((UINT_PTR)m_nextFreeAddress - (UINT_PTR)m_startAddress), VIRTUAL_64KB);
}
/*++
@@ -2086,7 +2200,7 @@ Function:
ExecutableMemoryAllocator::AllocateMemory
This function attempts to allocate the requested amount of memory from its reserved virtual
- address space. The function will return NULL if the allocation request cannot
+ address space. The function will return null if the allocation request cannot
be satisfied by the memory that is currently available in the allocator.
Note: This function MUST be called with the virtual_critsec lock held.
@@ -2094,10 +2208,15 @@ Function:
--*/
void* ExecutableMemoryAllocator::AllocateMemory(SIZE_T allocationSize)
{
- void* allocatedMemory = NULL;
+#ifdef BIT64
+ void* allocatedMemory = nullptr;
- // Allocation size must be in multiples of the virtual page size.
- _ASSERTE((allocationSize & VIRTUAL_PAGE_MASK) == 0);
+ // Alignment to a 64 KB granularity should not be necessary (alignment to page size should be sufficient), but
+ // VIRTUALReserveMemory() aligns down the specified address to a 64 KB granularity, and as long as that is necessary, the
+ // reservation size here must be aligned to a 64 KB granularity to guarantee that all returned addresses are also aligned to
+ // a 64 KB granularity. Otherwise, attempting to reserve memory starting from an unaligned address returned by this function
+ // would fail in VIRTUALReserveMemory.
+ _ASSERTE(IS_ALIGNED(allocationSize, VIRTUAL_64KB));
// The code below assumes that the caller owns the virtual_critsec lock.
// So the calculations are not done in thread-safe manner.
@@ -2106,10 +2225,60 @@ void* ExecutableMemoryAllocator::AllocateMemory(SIZE_T allocationSize)
allocatedMemory = m_nextFreeAddress;
m_nextFreeAddress = (void*)(((UINT_PTR)m_nextFreeAddress) + allocationSize);
m_remainingReservedMemory -= allocationSize;
-
}
return allocatedMemory;
+#else // !BIT64
+ return nullptr;
+#endif // BIT64
+}
+
+/*++
+Function:
+ AllocateMemory
+
+ This function attempts to allocate the requested amount of memory from its reserved virtual
+ address space, if memory is available within the specified range. The function will return
+ null if the allocation request cannot be satisfied by the memory that is currently available in
+ the allocator.
+
+ Note: This function MUST be called with the virtual_critsec lock held.
+--*/
+void *ExecutableMemoryAllocator::AllocateMemoryWithinRange(const void *beginAddress, const void *endAddress, SIZE_T allocationSize)
+{
+#ifdef BIT64
+ _ASSERTE(beginAddress <= endAddress);
+
+ // Alignment to a 64 KB granularity should not be necessary (alignment to page size should be sufficient), but see
+ // AllocateMemory() for the reason why it is necessary
+ _ASSERTE(IS_ALIGNED(allocationSize, VIRTUAL_64KB));
+
+ // The code below assumes that the caller owns the virtual_critsec lock.
+ // So the calculations are not done in thread-safe manner.
+
+ if (allocationSize == 0 || allocationSize > m_remainingReservedMemory)
+ {
+ return nullptr;
+ }
+
+ void *address = m_nextFreeAddress;
+ if (address < beginAddress)
+ {
+ return nullptr;
+ }
+
+ void *nextFreeAddress = (void *)((UINT_PTR)address + allocationSize);
+ if (nextFreeAddress > endAddress)
+ {
+ return nullptr;
+ }
+
+ m_nextFreeAddress = nextFreeAddress;
+ m_remainingReservedMemory -= allocationSize;
+ return address;
+#else // !BIT64
+ return nullptr;
+#endif // BIT64
}
/*++