author     Jiyoung Yun <jy910.yun@samsung.com>  2017-06-13 18:47:36 +0900
committer  Jiyoung Yun <jy910.yun@samsung.com>  2017-06-13 18:47:36 +0900
commit     61d6a817e39d3bae0f47dbc09838d51db22a5d30 (patch)
tree       cb37caa1784bc738b976273335d6ed04a7cc80b0 /src/pal/src
parent     5b975f8233e8c8d17b215372f89ca713b45d6a0b (diff)
Imported Upstream version 2.0.0.11992 (ref: upstream/2.0.0.11992)
Diffstat (limited to 'src/pal/src')
-rw-r--r--  src/pal/src/CMakeLists.txt             |   4
-rw-r--r--  src/pal/src/config.h.in                |   3
-rw-r--r--  src/pal/src/configure.cmake            |  20
-rw-r--r--  src/pal/src/file/file.cpp              |   6
-rw-r--r--  src/pal/src/include/pal/context.h      |  10
-rw-r--r--  src/pal/src/include/pal/virtual.h      |  20
-rw-r--r--  src/pal/src/map/map.cpp                |  21
-rw-r--r--  src/pal/src/map/virtual.cpp            | 229
-rw-r--r--  src/pal/src/synchmgr/synchmanager.cpp  |  12
-rw-r--r--  src/pal/src/thread/process.cpp         |   4
-rw-r--r--  src/pal/src/thread/threadsusp.cpp      |  12
11 files changed, 288 insertions, 53 deletions
diff --git a/src/pal/src/CMakeLists.txt b/src/pal/src/CMakeLists.txt
index 145c2c9ed9..b8a9fe9e46 100644
--- a/src/pal/src/CMakeLists.txt
+++ b/src/pal/src/CMakeLists.txt
@@ -93,10 +93,10 @@ elseif(PAL_CMAKE_PLATFORM_ARCH_I386)
set(PAL_ARCH_SOURCES_DIR i386)
endif()
-if(CMAKE_SYSTEM_NAME STREQUAL Linux AND NOT CLR_CMAKE_PLATFORM_ALPINE_LINUX)
+if(PAL_CMAKE_PLATFORM_ARCH_AMD64 AND CMAKE_SYSTEM_NAME STREQUAL Linux AND NOT CLR_CMAKE_PLATFORM_ALPINE_LINUX)
# Currently the _xstate is not available on Alpine Linux
add_definitions(-DXSTATE_SUPPORTED)
-endif(CMAKE_SYSTEM_NAME STREQUAL Linux AND NOT CLR_CMAKE_PLATFORM_ALPINE_LINUX)
+endif(PAL_CMAKE_PLATFORM_ARCH_AMD64 AND CMAKE_SYSTEM_NAME STREQUAL Linux AND NOT CLR_CMAKE_PLATFORM_ALPINE_LINUX)
if(CLR_CMAKE_PLATFORM_ALPINE_LINUX)
# Setting RLIMIT_NOFILE breaks debugging of coreclr on Alpine Linux for some reason
diff --git a/src/pal/src/config.h.in b/src/pal/src/config.h.in
index 7f37f42222..03513a1264 100644
--- a/src/pal/src/config.h.in
+++ b/src/pal/src/config.h.in
@@ -61,11 +61,14 @@
#cmakedefine01 HAS_SYSV_SEMAPHORES
#cmakedefine01 HAS_PTHREAD_MUTEXES
#cmakedefine01 HAVE_TTRACE
+#cmakedefine01 HAVE_PIPE2
#cmakedefine01 HAVE_SCHED_GETAFFINITY
#cmakedefine HAVE_UNW_GET_SAVE_LOC
#cmakedefine HAVE_UNW_GET_ACCESSORS
#cmakedefine01 HAVE_XSWDEV
#cmakedefine01 HAVE_XSW_USAGE
+#cmakedefine01 HAVE_PUBLIC_XSTATE_STRUCT
+#cmakedefine01 HAVE_PR_SET_PTRACER
#cmakedefine01 HAVE_STAT_TIMESPEC
#cmakedefine01 HAVE_STAT_NSEC
diff --git a/src/pal/src/configure.cmake b/src/pal/src/configure.cmake
index b5b98d5b2d..2f17f6b08c 100644
--- a/src/pal/src/configure.cmake
+++ b/src/pal/src/configure.cmake
@@ -99,6 +99,7 @@ check_function_exists(directio HAVE_DIRECTIO)
check_function_exists(semget HAS_SYSV_SEMAPHORES)
check_function_exists(pthread_mutex_init HAS_PTHREAD_MUTEXES)
check_function_exists(ttrace HAVE_TTRACE)
+check_function_exists(pipe2 HAVE_PIPE2)
set(CMAKE_REQUIRED_LIBRARIES unwind unwind-generic)
check_cxx_source_compiles("
#include <libunwind.h>
@@ -1022,6 +1023,25 @@ int main(int argc, char **argv)
return 0;
}" HAVE_XSW_USAGE)
+check_cxx_source_compiles("
+#include <signal.h>
+
+int main(int argc, char **argv)
+{
+ struct _xstate xstate;
+ struct _fpx_sw_bytes bytes;
+ return 0;
+}" HAVE_PUBLIC_XSTATE_STRUCT)
+
+check_cxx_source_compiles("
+#include <sys/prctl.h>
+
+int main(int argc, char **argv)
+{
+ int flag = (int)PR_SET_PTRACER;
+ return 0;
+}" HAVE_PR_SET_PTRACER)
+
set(CMAKE_REQUIRED_LIBRARIES pthread)
check_cxx_source_compiles("
#include <errno.h>
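For reference, the new checks are recorded with #cmakedefine01 in config.h.in, so the generated config.h always defines each macro as either 0 or 1 and call sites can test it with #if; the older #cmakedefine entries (HAVE_UNW_GET_SAVE_LOC, HAVE_UNW_GET_ACCESSORS) are instead left undefined when their check fails and must be tested with #ifdef. A small illustrative sketch of what the generated header might contain and how it is consumed (the 0/1 values below are hypothetical):

    /* Hypothetical excerpt of the generated config.h on a system with pipe2 and PR_SET_PTRACER
       but without the public _xstate struct: */
    #define HAVE_PIPE2 1
    #define HAVE_PUBLIC_XSTATE_STRUCT 0
    #define HAVE_PR_SET_PTRACER 1
    /* #undef HAVE_UNW_GET_SAVE_LOC */      /* plain #cmakedefine: simply absent when the check fails */

    /* Consumption pattern: */
    #if HAVE_PIPE2                          /* valid even when the feature is missing (expands to 0) */
        /* use pipe2() */
    #endif
    #ifdef HAVE_UNW_GET_SAVE_LOC            /* plain #cmakedefine entries need #ifdef */
        /* use unw_get_save_loc() */
    #endif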
diff --git a/src/pal/src/file/file.cpp b/src/pal/src/file/file.cpp
index a4ad20db32..feec65531c 100644
--- a/src/pal/src/file/file.cpp
+++ b/src/pal/src/file/file.cpp
@@ -4056,14 +4056,14 @@ CorUnix::InternalCreatePipe(
/* enable close-on-exec for both pipes; if one gets passed to CreateProcess
it will be "uncloseonexeced" in order to be inherited */
- if(-1 == fcntl(readWritePipeDes[0],F_SETFD,1))
+ if(-1 == fcntl(readWritePipeDes[0],F_SETFD,FD_CLOEXEC))
{
ASSERT("can't set close-on-exec flag; fcntl() failed. errno is %d "
"(%s)\n", errno, strerror(errno));
palError = ERROR_INTERNAL_ERROR;
goto InternalCreatePipeExit;
}
- if(-1 == fcntl(readWritePipeDes[1],F_SETFD,1))
+ if(-1 == fcntl(readWritePipeDes[1],F_SETFD,FD_CLOEXEC))
{
ASSERT("can't set close-on-exec flag; fcntl() failed. errno is %d "
"(%s)\n", errno, strerror(errno));
@@ -4564,7 +4564,7 @@ static HANDLE init_std_handle(HANDLE * pStd, FILE *stream)
/* duplicate the FILE *, so that we can fclose() in FILECloseHandle without
closing the original */
- new_fd = dup(fileno(stream));
+ new_fd = fcntl(fileno(stream), F_DUPFD_CLOEXEC, 0); // dup, but with CLOEXEC
if(-1 == new_fd)
{
ERROR("dup() failed; errno is %d (%s)\n", errno, strerror(errno));
diff --git a/src/pal/src/include/pal/context.h b/src/pal/src/include/pal/context.h
index db6d69579a..2c86a03d69 100644
--- a/src/pal/src/include/pal/context.h
+++ b/src/pal/src/include/pal/context.h
@@ -39,6 +39,16 @@ typedef ucontext_t native_context_t;
#else // HAVE_UCONTEXT_T
#error Native context type is not known on this platform!
#endif // HAVE_UCONTEXT_T
+
+#if defined(XSTATE_SUPPORTED) && !HAVE_PUBLIC_XSTATE_STRUCT
+namespace asm_sigcontext
+{
+#include <asm/sigcontext.h>
+};
+using asm_sigcontext::_fpx_sw_bytes;
+using asm_sigcontext::_xstate;
+#endif // defined(XSTATE_SUPPORTED) && !HAVE_PUBLIC_XSTATE_STRUCT
+
#else // !HAVE_MACH_EXCEPTIONS
#include <mach/kern_return.h>
#include <mach/mach_port.h>
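The context.h change is a workaround for Linux systems whose signal.h does not publicly expose _xstate and _fpx_sw_bytes (HAVE_PUBLIC_XSTATE_STRUCT is 0): the kernel UAPI header asm/sigcontext.h is included inside a namespace so its declarations cannot clash with anything already visible at global scope, and only the two required types are re-exported with using-declarations. A compilable, purely illustrative sketch of the pattern (the struct bodies below are stand-ins, not the real kernel layouts):

    namespace asm_sigcontext_demo
    {
        /* imagine: #include <asm/sigcontext.h> -- everything it declares lands in this namespace */
        struct _fpx_sw_bytes { unsigned int magic1; /* real fields elided */ };
        struct _xstate       { /* real fields elided */ };
    }
    /* Re-export only what the surrounding code actually needs. */
    using asm_sigcontext_demo::_fpx_sw_bytes;
    using asm_sigcontext_demo::_xstate;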
diff --git a/src/pal/src/include/pal/virtual.h b/src/pal/src/include/pal/virtual.h
index 31d225fc04..36eaf81e3a 100644
--- a/src/pal/src/include/pal/virtual.h
+++ b/src/pal/src/include/pal/virtual.h
@@ -60,7 +60,7 @@ enum VIRTUAL_CONSTANTS
VIRTUAL_PAGE_SIZE = 0x1000,
VIRTUAL_PAGE_MASK = VIRTUAL_PAGE_SIZE - 1,
- BOUNDARY_64K = 0xffff
+ VIRTUAL_64KB = 0x10000
};
/*++
@@ -130,11 +130,22 @@ public:
AllocateMemory
This function attempts to allocate the requested amount of memory from its reserved virtual
- address space. The function will return NULL if the allocation request cannot
+ address space. The function will return null if the allocation request cannot
be satisfied by the memory that is currently available in the allocator.
--*/
void* AllocateMemory(SIZE_T allocationSize);
+ /*++
+ Function:
+ AllocateMemory
+
+ This function attempts to allocate the requested amount of memory from its reserved virtual
+ address space, if memory is available within the specified range. The function will return
+ null if the allocation request cannot be satisfied by the memory that is currently available in
+ the allocator.
+ --*/
+ void *AllocateMemoryWithinRange(const void *beginAddress, const void *endAddress, SIZE_T allocationSize);
+
private:
/*++
Function:
@@ -160,12 +171,13 @@ private:
// that can be used to calculate an approximate location of the memory that
// is in 2GB range from the coreclr library. In addition, having precise size of libcoreclr
// is not necessary for the calculations.
- const int32_t CoreClrLibrarySize = 100 * 1024 * 1024;
+ static const int32_t CoreClrLibrarySize = 100 * 1024 * 1024;
// This constant represent the max size of the virtual memory that this allocator
// will try to reserve during initialization. We want all JIT-ed code and the
// entire libcoreclr to be located in a 2GB range.
- const int32_t MaxExecutableMemorySize = 0x7FFF0000 - CoreClrLibrarySize;
+ static const int32_t MaxExecutableMemorySize = 0x7FFF0000;
+ static const int32_t MaxExecutableMemorySizeNearCoreClr = MaxExecutableMemorySize - CoreClrLibrarySize;
// Start address of the reserved virtual address space
void* m_startAddress;
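Several later hunks align sizes and addresses to the new VIRTUAL_64KB constant (0x10000) with the ALIGN_UP/ALIGN_DOWN/IS_ALIGNED helpers from pal/utils.h. A self-contained sketch of the power-of-two alignment arithmetic those helpers are assumed to perform (the definitions below are illustrative, not the PAL's own):

    #include <cassert>
    #include <cstdint>

    /* Illustrative power-of-two alignment helpers. */
    constexpr uint64_t AlignDown(uint64_t value, uint64_t alignment) { return value & ~(alignment - 1); }
    constexpr uint64_t AlignUp(uint64_t value, uint64_t alignment)   { return AlignDown(value + alignment - 1, alignment); }
    constexpr bool     IsAligned(uint64_t value, uint64_t alignment) { return (value & (alignment - 1)) == 0; }

    int main()
    {
        const uint64_t VIRTUAL_64KB = 0x10000;
        assert(AlignUp(0x12345, VIRTUAL_64KB)   == 0x20000);  /* size rounded up to the next 64 KB   */
        assert(AlignDown(0x12345, VIRTUAL_64KB) == 0x10000);  /* start address rounded down to 64 KB */
        assert(IsAligned(0x7FFF0000, VIRTUAL_64KB));          /* MaxExecutableMemorySize is 64 KB aligned */
        return 0;
    }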
diff --git a/src/pal/src/map/map.cpp b/src/pal/src/map/map.cpp
index 5fdb6fda38..b8ffc84db4 100644
--- a/src/pal/src/map/map.cpp
+++ b/src/pal/src/map/map.cpp
@@ -246,7 +246,7 @@ FileMappingInitializationRoutine(
pProcessLocalData->UnixFd = InternalOpen(
pImmutableData->szFileName,
- MAPProtectionToFileOpenFlags(pImmutableData->flProtect)
+ MAPProtectionToFileOpenFlags(pImmutableData->flProtect) | O_CLOEXEC
);
if (-1 == pProcessLocalData->UnixFd)
@@ -510,7 +510,7 @@ CorUnix::InternalCreateFileMapping(
#if HAVE_MMAP_DEV_ZERO
- UnixFd = InternalOpen(pImmutableData->szFileName, O_RDWR);
+ UnixFd = InternalOpen(pImmutableData->szFileName, O_RDWR | O_CLOEXEC);
if ( -1 == UnixFd )
{
ERROR( "Unable to open the file.\n");
@@ -587,7 +587,7 @@ CorUnix::InternalCreateFileMapping(
// information, though...
//
- UnixFd = dup(pFileLocalData->unix_fd);
+ UnixFd = fcntl(pFileLocalData->unix_fd, F_DUPFD_CLOEXEC, 0); // dup, but with CLOEXEC
if (-1 == UnixFd)
{
ERROR( "Unable to duplicate the Unix file descriptor!\n" );
@@ -2440,20 +2440,21 @@ void * MAPMapPEFile(HANDLE hFile)
// We're going to start adding mappings to the mapping list, so take the critical section
InternalEnterCriticalSection(pThread, &mapping_critsec);
-#if !defined(_AMD64_)
- loadedBase = mmap((void*)preferredBase, virtualSize, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
-#else // defined(_AMD64_)
+#ifdef BIT64
// First try to reserve virtual memory using ExecutableAllcator. This allows all PE images to be
// near each other and close to the coreclr library which also allows the runtime to generate
- // more efficient code (by avoiding usage of jump stubs).
- loadedBase = ReserveMemoryFromExecutableAllocator(pThread, ALIGN_UP(virtualSize, GetVirtualPageSize()));
+ // more efficient code (by avoiding usage of jump stubs). Alignment to a 64 KB granularity should
+ // not be necessary (alignment to page size should be sufficient), but see
+ // ExecutableMemoryAllocator::AllocateMemory() for the reason why it is done.
+ loadedBase = ReserveMemoryFromExecutableAllocator(pThread, ALIGN_UP(virtualSize, VIRTUAL_64KB));
+#endif // BIT64
+
if (loadedBase == NULL)
{
// MAC64 requires we pass MAP_SHARED (or MAP_PRIVATE) flags - otherwise, the call is failed.
// Refer to mmap documentation at http://www.manpagez.com/man/2/mmap/ for details.
- loadedBase = mmap((void*)preferredBase, virtualSize, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
+ loadedBase = mmap(NULL, virtualSize, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
}
-#endif // !defined(_AMD64_)
if (MAP_FAILED == loadedBase)
{
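On 64-bit targets the hunk above now asks the executable allocator first (rounding the image size up to 64 KB) and only falls back to a plain anonymous mmap when that fails; the fallback also drops the preferred base and reserves with PROT_NONE, so the region is claimed but not yet accessible. A minimal reserve-then-commit sketch of that mmap idiom (helper names are illustrative):

    #include <sys/mman.h>
    #include <cstddef>

    /* Reserve address space without making it usable yet. */
    static void* ReserveRegion(size_t size)
    {
        void* p = mmap(nullptr, size, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
        return (p == MAP_FAILED) ? nullptr : p;   /* any access to the range faults until committed */
    }

    /* Later, make a sub-range usable (the PE mapping code re-maps sections with the right protections). */
    static bool CommitRegion(void* address, size_t size)
    {
        return mprotect(address, size, PROT_READ | PROT_WRITE) == 0;
    }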
diff --git a/src/pal/src/map/virtual.cpp b/src/pal/src/map/virtual.cpp
index 7e00843b7a..41bd37c9b4 100644
--- a/src/pal/src/map/virtual.cpp
+++ b/src/pal/src/map/virtual.cpp
@@ -18,15 +18,19 @@ Abstract:
--*/
+#include "pal/dbgmsg.h"
+
+SET_DEFAULT_DEBUG_CHANNEL(VIRTUAL); // some headers have code with asserts, so do this first
+
#include "pal/thread.hpp"
#include "pal/cs.hpp"
#include "pal/malloc.hpp"
#include "pal/file.hpp"
#include "pal/seh.hpp"
-#include "pal/dbgmsg.h"
#include "pal/virtual.h"
#include "pal/map.h"
#include "pal/init.h"
+#include "pal/utils.h"
#include "common.h"
#include <sys/types.h>
@@ -43,8 +47,6 @@ Abstract:
using namespace CorUnix;
-SET_DEFAULT_DEBUG_CHANNEL(VIRTUAL);
-
CRITICAL_SECTION virtual_critsec;
// The first node in our list of allocated blocks.
@@ -93,6 +95,7 @@ namespace VirtualMemoryLogging
Decommit = 0x40,
Release = 0x50,
Reset = 0x60,
+ ReserveFromExecutableMemoryAllocatorWithinRange = 0x70
};
// Indicates that the attempted operation has failed
@@ -884,8 +887,13 @@ static LPVOID VIRTUALReserveMemory(
// First, figure out where we're trying to reserve the memory and
// how much we need. On most systems, requests to mmap must be
- // page-aligned and at multiples of the page size.
- StartBoundary = (UINT_PTR)lpAddress & ~BOUNDARY_64K;
+ // page-aligned and at multiples of the page size. Unlike on Windows, on
+ // Unix, the allocation granularity is the page size, so the memory size to
+ // reserve is not aligned to 64 KB. Nor should the start boundary need to
+ // be aligned down to 64 KB, but it is expected that there are other
+ // components that rely on this alignment when providing a specific address
+ // (note that mmap itself does not make any such guarantees).
+ StartBoundary = (UINT_PTR)ALIGN_DOWN(lpAddress, VIRTUAL_64KB);
/* Add the sizes, and round down to the nearest page boundary. */
MemSize = ( ((UINT_PTR)lpAddress + dwSize + VIRTUAL_PAGE_MASK) & ~VIRTUAL_PAGE_MASK ) -
StartBoundary;
@@ -894,7 +902,14 @@ static LPVOID VIRTUALReserveMemory(
// try to get memory from the executable memory allocator to satisfy the request.
if (((flAllocationType & MEM_RESERVE_EXECUTABLE) != 0) && (lpAddress == NULL))
{
- pRetVal = g_executableMemoryAllocator.AllocateMemory(MemSize);
+ // Alignment to a 64 KB granularity should not be necessary (alignment to page size should be sufficient), but see
+ // ExecutableMemoryAllocator::AllocateMemory() for the reason why it is done
+ SIZE_T reservationSize = ALIGN_UP(MemSize, VIRTUAL_64KB);
+ pRetVal = g_executableMemoryAllocator.AllocateMemory(reservationSize);
+ if (pRetVal != nullptr)
+ {
+ MemSize = reservationSize;
+ }
}
if (pRetVal == NULL)
@@ -1227,6 +1242,72 @@ done:
/*++
Function:
+ PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange
+
+ This function attempts to allocate the requested amount of memory in the specified address range, from the executable memory
+ allocator. If unable to do so, the function returns nullptr and does not set the last error.
+
+ lpBeginAddress - Inclusive beginning of range
+ lpEndAddress - Exclusive end of range
+ dwSize - Number of bytes to allocate
+--*/
+LPVOID
+PALAPI
+PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange(
+ IN LPCVOID lpBeginAddress,
+ IN LPCVOID lpEndAddress,
+ IN SIZE_T dwSize)
+{
+#ifdef BIT64
+ PERF_ENTRY(PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange);
+ ENTRY(
+ "PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange(lpBeginAddress = %p, lpEndAddress = %p, dwSize = %Iu)\n",
+ lpBeginAddress,
+ lpEndAddress,
+ dwSize);
+
+ _ASSERTE(lpBeginAddress <= lpEndAddress);
+
+ // Alignment to a 64 KB granularity should not be necessary (alignment to page size should be sufficient), but see
+ // ExecutableMemoryAllocator::AllocateMemory() for the reason why it is done
+ SIZE_T reservationSize = ALIGN_UP(dwSize, VIRTUAL_64KB);
+
+ CPalThread *currentThread = InternalGetCurrentThread();
+ InternalEnterCriticalSection(currentThread, &virtual_critsec);
+
+ void *address = g_executableMemoryAllocator.AllocateMemoryWithinRange(lpBeginAddress, lpEndAddress, reservationSize);
+ if (address != nullptr)
+ {
+ _ASSERTE(IS_ALIGNED(address, VIRTUAL_PAGE_SIZE));
+ if (!VIRTUALStoreAllocationInfo((UINT_PTR)address, reservationSize, MEM_RESERVE | MEM_RESERVE_EXECUTABLE, PAGE_NOACCESS))
+ {
+ ASSERT("Unable to store the structure in the list.\n");
+ munmap(address, reservationSize);
+ address = nullptr;
+ }
+ }
+
+ LogVaOperation(
+ VirtualMemoryLogging::VirtualOperation::ReserveFromExecutableMemoryAllocatorWithinRange,
+ nullptr,
+ dwSize,
+ MEM_RESERVE | MEM_RESERVE_EXECUTABLE,
+ PAGE_NOACCESS,
+ address,
+ TRUE);
+
+ InternalLeaveCriticalSection(currentThread, &virtual_critsec);
+
+ LOGEXIT("PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange returning %p\n", address);
+ PERF_EXIT(PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange);
+ return address;
+#else // !BIT64
+ return nullptr;
+#endif // BIT64
+}
+
+/*++
+Function:
VirtualAlloc
Note:
@@ -1982,11 +2063,15 @@ Function :
--*/
void* ReserveMemoryFromExecutableAllocator(CPalThread* pThread, SIZE_T allocationSize)
{
+#ifdef BIT64
InternalEnterCriticalSection(pThread, &virtual_critsec);
void* mem = g_executableMemoryAllocator.AllocateMemory(allocationSize);
InternalLeaveCriticalSection(pThread, &virtual_critsec);
return mem;
+#else // !BIT64
+ return nullptr;
+#endif // BIT64
}
/*++
@@ -2024,14 +2109,14 @@ Function:
void ExecutableMemoryAllocator::TryReserveInitialMemory()
{
CPalThread* pthrCurrent = InternalGetCurrentThread();
- int32_t sizeOfAllocation = MaxExecutableMemorySize;
- int32_t startAddressIncrement;
- UINT_PTR startAddress;
+ int32_t sizeOfAllocation = MaxExecutableMemorySizeNearCoreClr;
+ int32_t preferredStartAddressIncrement;
+ UINT_PTR preferredStartAddress;
UINT_PTR coreclrLoadAddress;
const int32_t MemoryProbingIncrement = 128 * 1024 * 1024;
// Try to find and reserve an available region of virtual memory that is located
- // within 2GB range (defined by the MaxExecutableMemorySize constant) from the
+ // within 2GB range (defined by the MaxExecutableMemorySizeNearCoreClr constant) from the
// location of the coreclr library.
// Potentially, as a possible future improvement, we can get precise information
// about available memory ranges by parsing data from '/proc/self/maps'.
@@ -2045,40 +2130,69 @@ void ExecutableMemoryAllocator::TryReserveInitialMemory()
// (thus avoiding reserving memory below 4GB; besides some operating systems do not allow that).
// If libcoreclr is loaded at high addresses then try to reserve memory below its location.
coreclrLoadAddress = (UINT_PTR)PAL_GetSymbolModuleBase((void*)VirtualAlloc);
- if ((coreclrLoadAddress < 0xFFFFFFFF) || ((coreclrLoadAddress - MaxExecutableMemorySize) < 0xFFFFFFFF))
+ if ((coreclrLoadAddress < 0xFFFFFFFF) || ((coreclrLoadAddress - MaxExecutableMemorySizeNearCoreClr) < 0xFFFFFFFF))
{
// Try to allocate above the location of libcoreclr
- startAddress = coreclrLoadAddress + CoreClrLibrarySize;
- startAddressIncrement = MemoryProbingIncrement;
+ preferredStartAddress = coreclrLoadAddress + CoreClrLibrarySize;
+ preferredStartAddressIncrement = MemoryProbingIncrement;
}
else
{
// Try to allocate below the location of libcoreclr
- startAddress = coreclrLoadAddress - MaxExecutableMemorySize;
- startAddressIncrement = 0;
+ preferredStartAddress = coreclrLoadAddress - MaxExecutableMemorySizeNearCoreClr;
+ preferredStartAddressIncrement = 0;
}
// Do actual memory reservation.
do
{
- m_startAddress = ReserveVirtualMemory(pthrCurrent, (void*)startAddress, sizeOfAllocation);
- if (m_startAddress != NULL)
+ m_startAddress = ReserveVirtualMemory(pthrCurrent, (void*)preferredStartAddress, sizeOfAllocation);
+ if (m_startAddress != nullptr)
{
- // Memory has been successfully reserved.
- m_totalSizeOfReservedMemory = sizeOfAllocation;
-
- // Randomize the location at which we start allocating from the reserved memory range.
- int32_t randomOffset = GenerateRandomStartOffset();
- m_nextFreeAddress = (void*)(((UINT_PTR)m_startAddress) + randomOffset);
- m_remainingReservedMemory = sizeOfAllocation - randomOffset;
break;
}
// Try to allocate a smaller region
sizeOfAllocation -= MemoryProbingIncrement;
- startAddress += startAddressIncrement;
+ preferredStartAddress += preferredStartAddressIncrement;
} while (sizeOfAllocation >= MemoryProbingIncrement);
+
+ if (m_startAddress == nullptr)
+ {
+ // We were not able to reserve any memory near libcoreclr. Try to reserve approximately 2 GB of address space somewhere
+ // anyway:
+ // - This sets aside address space that can be used for executable code, such that jumps/calls between such code may
+ // continue to use short relative addresses instead of long absolute addresses that would currently require jump
+ // stubs.
+ // - The inability to allocate memory in a specific range for jump stubs is an unrecoverable problem. This reservation
+ // would mitigate such issues that can become prevalent depending on which security features are enabled and to what
+ // extent, such as in particular, PaX's RANDMMAP:
+ // - https://en.wikibooks.org/wiki/Grsecurity/Appendix/Grsecurity_and_PaX_Configuration_Options
+ // - Jump stubs for executable code residing in this region can request memory from this allocator
+ // - Native images can be loaded into this address space, including any jump stubs that are required for its helper
+ // table. This satisfies the vast majority of practical cases where the total amount of loaded native image memory
+ // does not exceed approximately 2 GB.
+ // - The code heap allocator for the JIT can allocate from this address space. Beyond this reservation, one can use
+ // the COMPlus_CodeHeapReserveForJumpStubs environment variable to reserve space for jump stubs.
+ sizeOfAllocation = MaxExecutableMemorySize;
+ m_startAddress = ReserveVirtualMemory(pthrCurrent, nullptr, sizeOfAllocation);
+ if (m_startAddress == nullptr)
+ {
+ return;
+ }
+ }
+
+ // Memory has been successfully reserved.
+ m_totalSizeOfReservedMemory = sizeOfAllocation;
+
+ // Randomize the location at which we start allocating from the reserved memory range. Alignment to a 64 KB granularity
+ // should not be necessary, but see AllocateMemory() for the reason why it is done.
+ int32_t randomOffset = GenerateRandomStartOffset();
+ m_nextFreeAddress = ALIGN_UP((void*)(((UINT_PTR)m_startAddress) + randomOffset), VIRTUAL_64KB);
+ _ASSERTE(sizeOfAllocation >= (UINT_PTR)m_nextFreeAddress - (UINT_PTR)m_startAddress);
+ m_remainingReservedMemory =
+ ALIGN_DOWN(sizeOfAllocation - ((UINT_PTR)m_nextFreeAddress - (UINT_PTR)m_startAddress), VIRTUAL_64KB);
}
/*++
@@ -2086,7 +2200,7 @@ Function:
ExecutableMemoryAllocator::AllocateMemory
This function attempts to allocate the requested amount of memory from its reserved virtual
- address space. The function will return NULL if the allocation request cannot
+ address space. The function will return null if the allocation request cannot
be satisfied by the memory that is currently available in the allocator.
Note: This function MUST be called with the virtual_critsec lock held.
@@ -2094,10 +2208,15 @@ Function:
--*/
void* ExecutableMemoryAllocator::AllocateMemory(SIZE_T allocationSize)
{
- void* allocatedMemory = NULL;
+#ifdef BIT64
+ void* allocatedMemory = nullptr;
- // Allocation size must be in multiples of the virtual page size.
- _ASSERTE((allocationSize & VIRTUAL_PAGE_MASK) == 0);
+ // Alignment to a 64 KB granularity should not be necessary (alignment to page size should be sufficient), but
+ // VIRTUALReserveMemory() aligns down the specified address to a 64 KB granularity, and as long as that is necessary, the
+ // reservation size here must be aligned to a 64 KB granularity to guarantee that all returned addresses are also aligned to
+ // a 64 KB granularity. Otherwise, attempting to reserve memory starting from an unaligned address returned by this function
+ // would fail in VIRTUALReserveMemory.
+ _ASSERTE(IS_ALIGNED(allocationSize, VIRTUAL_64KB));
// The code below assumes that the caller owns the virtual_critsec lock.
// So the calculations are not done in thread-safe manner.
@@ -2106,10 +2225,60 @@ void* ExecutableMemoryAllocator::AllocateMemory(SIZE_T allocationSize)
allocatedMemory = m_nextFreeAddress;
m_nextFreeAddress = (void*)(((UINT_PTR)m_nextFreeAddress) + allocationSize);
m_remainingReservedMemory -= allocationSize;
-
}
return allocatedMemory;
+#else // !BIT64
+ return nullptr;
+#endif // BIT64
+}
+
+/*++
+Function:
+ AllocateMemory
+
+ This function attempts to allocate the requested amount of memory from its reserved virtual
+ address space, if memory is available within the specified range. The function will return
+ null if the allocation request cannot be satisfied by the memory that is currently available in
+ the allocator.
+
+ Note: This function MUST be called with the virtual_critsec lock held.
+--*/
+void *ExecutableMemoryAllocator::AllocateMemoryWithinRange(const void *beginAddress, const void *endAddress, SIZE_T allocationSize)
+{
+#ifdef BIT64
+ _ASSERTE(beginAddress <= endAddress);
+
+ // Alignment to a 64 KB granularity should not be necessary (alignment to page size should be sufficient), but see
+ // AllocateMemory() for the reason why it is necessary
+ _ASSERTE(IS_ALIGNED(allocationSize, VIRTUAL_64KB));
+
+ // The code below assumes that the caller owns the virtual_critsec lock.
+ // So the calculations are not done in thread-safe manner.
+
+ if (allocationSize == 0 || allocationSize > m_remainingReservedMemory)
+ {
+ return nullptr;
+ }
+
+ void *address = m_nextFreeAddress;
+ if (address < beginAddress)
+ {
+ return nullptr;
+ }
+
+ void *nextFreeAddress = (void *)((UINT_PTR)address + allocationSize);
+ if (nextFreeAddress > endAddress)
+ {
+ return nullptr;
+ }
+
+ m_nextFreeAddress = nextFreeAddress;
+ m_remainingReservedMemory -= allocationSize;
+ return address;
+#else // !BIT64
+ return nullptr;
+#endif // BIT64
}
/*++
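AllocateMemoryWithinRange above is a bump-pointer carve-out from the single region reserved at startup: it can only succeed when the current free pointer already lies inside [beginAddress, endAddress) and enough reserved space remains, so it never searches for or maps new memory. A simplified, self-contained model of that logic (state and locking are reduced; the real allocator also enforces 64 KB-aligned sizes and requires virtual_critsec to be held):

    #include <cstddef>
    #include <cstdint>

    struct BumpAllocator   /* illustrative stand-in for ExecutableMemoryAllocator's relevant state */
    {
        uintptr_t nextFree;    /* m_nextFreeAddress */
        size_t    remaining;   /* m_remainingReservedMemory */

        void* AllocateWithinRange(uintptr_t begin, uintptr_t end, size_t size)
        {
            if (size == 0 || size > remaining)
                return nullptr;                        /* not enough reserved space left */
            if (nextFree < begin)
                return nullptr;                        /* free pointer is below the requested range */
            if (nextFree + size > end)
                return nullptr;                        /* allocation would end past the (exclusive) range end */
            void* result = reinterpret_cast<void*>(nextFree);
            nextFree  += size;                         /* bump */
            remaining -= size;
            return result;
        }
    };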
diff --git a/src/pal/src/synchmgr/synchmanager.cpp b/src/pal/src/synchmgr/synchmanager.cpp
index d836a177bb..73b5644dbd 100644
--- a/src/pal/src/synchmgr/synchmanager.cpp
+++ b/src/pal/src/synchmgr/synchmanager.cpp
@@ -3525,12 +3525,22 @@ namespace CorUnix
}
#else // !CORECLR
int rgiPipe[] = { -1, -1 };
- if (pipe(rgiPipe) == -1)
+ int pipeRv =
+#if HAVE_PIPE2
+ pipe2(rgiPipe, O_CLOEXEC);
+#else
+ pipe(rgiPipe);
+#endif // HAVE_PIPE2
+ if (pipeRv == -1)
{
ERROR("Unable to create the process pipe\n");
fRet = false;
goto CPP_exit;
}
+#if !HAVE_PIPE2
+ fcntl(rgiPipe[0], F_SETFD, FD_CLOEXEC); // make pipe non-inheritable, if possible
+ fcntl(rgiPipe[1], F_SETFD, FD_CLOEXEC);
+#endif // !HAVE_PIPE2
#endif // !CORECLR
#if HAVE_KQUEUE && !HAVE_BROKEN_FIFO_KEVENT
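This hunk and the threadsusp.cpp hunk below apply the same pattern: use pipe2(O_CLOEXEC) where configure detected it, so the descriptors are close-on-exec from the moment they exist, and otherwise fall back to pipe() followed by best-effort fcntl(F_SETFD, FD_CLOEXEC) calls. A hedged, self-contained sketch of that pattern (the helper is illustrative; the PAL writes it inline at each call site):

    #include <fcntl.h>
    #include <unistd.h>
    #include "config.h"   /* defines HAVE_PIPE2 as 0 or 1 */

    static int CreatePipeCloexec(int fds[2])
    {
    #if HAVE_PIPE2
        return pipe2(fds, O_CLOEXEC);   /* atomic: a concurrent fork can never inherit the fds */
    #else
        if (pipe(fds) == -1)
            return -1;
        /* best effort: a fork between pipe() and fcntl() could still inherit them,
           hence "if possible" in the comments above */
        fcntl(fds[0], F_SETFD, FD_CLOEXEC);
        fcntl(fds[1], F_SETFD, FD_CLOEXEC);
        return 0;
    #endif
    }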
diff --git a/src/pal/src/thread/process.cpp b/src/pal/src/thread/process.cpp
index 2a93d3c57d..6db9bf6f51 100644
--- a/src/pal/src/thread/process.cpp
+++ b/src/pal/src/thread/process.cpp
@@ -2981,7 +2981,7 @@ PROCAbort()
// Do any shutdown cleanup before aborting or creating a core dump
PROCNotifyProcessShutdown();
-#if HAVE_PRCTL_H
+#if HAVE_PRCTL_H && HAVE_PR_SET_PTRACER
// If enabled, launch the create minidump utility and wait until it completes
if (g_argvCreateDump[0] != nullptr)
{
@@ -3018,7 +3018,7 @@ PROCAbort()
}
}
}
-#endif // HAVE_PRCTL_H
+#endif // HAVE_PRCTL_H && HAVE_PR_SET_PTRACER
// Abort the process after waiting for the core dump to complete
abort();
}
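The createdump launch path is now additionally gated on HAVE_PR_SET_PTRACER, since PR_SET_PTRACER is a Linux-specific (Yama) constant that other systems with sys/prctl.h may not define; on kernels that enforce Yama's ptrace_scope, the crashing process has to explicitly allow the dump helper to ptrace it. A hedged sketch of that grant, assuming the documented prctl(2) usage (the helper name and surrounding launch logic are illustrative, not the PAL's exact code):

    #include <sys/prctl.h>
    #include <unistd.h>

    #if defined(PR_SET_PTRACER)
    /* Allow one specific child (e.g. a just-forked createdump process) to ptrace us,
       even when Yama's ptrace_scope would normally refuse non-ancestor tracers. */
    static void AllowChildToTrace(pid_t childPid)
    {
        prctl(PR_SET_PTRACER, childPid, 0, 0, 0);
    }
    #endif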
diff --git a/src/pal/src/thread/threadsusp.cpp b/src/pal/src/thread/threadsusp.cpp
index c7787bef68..f8a435c022 100644
--- a/src/pal/src/thread/threadsusp.cpp
+++ b/src/pal/src/thread/threadsusp.cpp
@@ -74,11 +74,21 @@ CThreadSuspensionInfo::InternalSuspendNewThreadFromData(
ReleaseSuspensionLock(pThread);
int pipe_descs[2];
- if (pipe(pipe_descs) == -1)
+ int pipeRv =
+#if HAVE_PIPE2
+ pipe2(pipe_descs, O_CLOEXEC);
+#else
+ pipe(pipe_descs);
+#endif // HAVE_PIPE2
+ if (pipeRv == -1)
{
ERROR("pipe() failed! error is %d (%s)\n", errno, strerror(errno));
return ERROR_NOT_ENOUGH_MEMORY;
}
+#if !HAVE_PIPE2
+ fcntl(pipe_descs[0], F_SETFD, FD_CLOEXEC); // make pipe non-inheritable, if possible
+ fcntl(pipe_descs[1], F_SETFD, FD_CLOEXEC);
+#endif // !HAVE_PIPE2
// [0] is the read end of the pipe, and [1] is the write end.
pThread->suspensionInfo.SetBlockingPipe(pipe_descs[1]);