summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorMukul Sabharwal <mjsabby@gmail.com>2019-03-28 21:33:24 -0700
committerMukul Sabharwal <mjsabby@gmail.com>2019-04-08 10:27:30 -0700
commitd33f73f69051d2861454081bb3211615413d8ed0 (patch)
tree34636ea9773dae699607c357013075f75850b1a1 /src
parent0d581c79f0b40bd43ad3eb62574bfe78a665fe80 (diff)
downloadcoreclr-d33f73f69051d2861454081bb3211615413d8ed0.tar.gz
coreclr-d33f73f69051d2861454081bb3211615413d8ed0.tar.bz2
coreclr-d33f73f69051d2861454081bb3211615413d8ed0.zip
Add Large pages support in GC
Diffstat (limited to 'src')
-rw-r--r--src/gc/CMakeLists.txt3
-rw-r--r--src/gc/env/gcenv.os.h7
-rw-r--r--src/gc/gc.cpp36
-rw-r--r--src/gc/gcconfig.h1
-rw-r--r--src/gc/gcpriv.h4
-rw-r--r--src/gc/sample/CMakeLists.txt12
-rw-r--r--src/gc/unix/config.h.in1
-rw-r--r--src/gc/unix/configure.cmake9
-rw-r--r--src/gc/unix/gcenv.unix.cpp38
-rw-r--r--src/gc/windows/gcenv.windows.cpp63
-rw-r--r--src/inc/clrconfigvalues.h1
-rw-r--r--src/pal/inc/pal.h1
-rw-r--r--src/vm/gcenv.os.cpp69
13 files changed, 231 insertions, 14 deletions
diff --git a/src/gc/CMakeLists.txt b/src/gc/CMakeLists.txt
index d92834bd5c..1e59ebaa26 100644
--- a/src/gc/CMakeLists.txt
+++ b/src/gc/CMakeLists.txt
@@ -84,7 +84,8 @@ if(WIN32)
set (GC_LINK_LIBRARIES
${STATIC_MT_CRT_LIB}
${STATIC_MT_VCRT_LIB}
- kernel32.lib)
+ kernel32.lib
+ advapi32.lib)
else()
set (GC_LINK_LIBRARIES)
endif(WIN32)
diff --git a/src/gc/env/gcenv.os.h b/src/gc/env/gcenv.os.h
index 4f86cd75ea..05dccf7a6d 100644
--- a/src/gc/env/gcenv.os.h
+++ b/src/gc/env/gcenv.os.h
@@ -275,6 +275,13 @@ public:
// true if it has succeeded, false if it has failed
static bool VirtualCommit(void *address, size_t size, uint16_t node = NUMA_NODE_UNDEFINED);
+ // Reserve and Commit virtual memory range for Large Pages
+ // Parameters:
+ // size - size of the virtual memory range
+ // Return:
+ // Address of the allocated memory
+ static void* VirtualReserveAndCommitLargePages(size_t size);
+
// Decomit virtual memory range.
// Parameters:
// address - starting virtual address
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index 0dc7e36982..9d909f173d 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -2403,6 +2403,7 @@ void qsort1(uint8_t** low, uint8_t** high, unsigned int depth);
#endif //USE_INTROSORT
void* virtual_alloc (size_t size);
+void* virtual_alloc (size_t size, bool use_large_pages_p);
void virtual_free (void* add, size_t size);
/* per heap static initialization */
@@ -2818,6 +2819,7 @@ GCSpinLock gc_heap::gc_lock;
size_t gc_heap::eph_gen_starts_size = 0;
heap_segment* gc_heap::segment_standby_list;
+size_t gc_heap::use_large_pages_p = 0;
size_t gc_heap::last_gc_index = 0;
#ifdef SEG_MAPPING_TABLE
size_t gc_heap::min_segment_size = 0;
@@ -4263,7 +4265,7 @@ typedef struct
initial_memory_details memory_details;
-BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_heaps)
+BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_heaps, bool use_large_pages_p)
{
BOOL reserve_success = FALSE;
@@ -4304,7 +4306,7 @@ BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_h
size_t requestedMemory = memory_details.block_count * (normal_size + large_size);
- uint8_t* allatonce_block = (uint8_t*)virtual_alloc (requestedMemory);
+ uint8_t* allatonce_block = (uint8_t*)virtual_alloc (requestedMemory, use_large_pages_p);
if (allatonce_block)
{
g_gc_lowest_address = allatonce_block;
@@ -4324,10 +4326,10 @@ BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_h
// try to allocate 2 blocks
uint8_t* b1 = 0;
uint8_t* b2 = 0;
- b1 = (uint8_t*)virtual_alloc (memory_details.block_count * normal_size);
+ b1 = (uint8_t*)virtual_alloc (memory_details.block_count * normal_size, use_large_pages_p);
if (b1)
{
- b2 = (uint8_t*)virtual_alloc (memory_details.block_count * large_size);
+ b2 = (uint8_t*)virtual_alloc (memory_details.block_count * large_size, use_large_pages_p);
if (b2)
{
memory_details.allocation_pattern = initial_memory_details::TWO_STAGE;
@@ -4360,7 +4362,7 @@ BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_h
memory_details.block_size_normal :
memory_details.block_size_large);
current_block->memory_base =
- (uint8_t*)virtual_alloc (block_size);
+ (uint8_t*)virtual_alloc (block_size, use_large_pages_p);
if (current_block->memory_base == 0)
{
// Free the blocks that we've allocated so far
@@ -4469,6 +4471,11 @@ heap_segment* get_initial_segment (size_t size, int h_number)
void* virtual_alloc (size_t size)
{
+ return virtual_alloc(size, false);
+}
+
+void* virtual_alloc (size_t size, bool use_large_pages_p)
+{
size_t requested_size = size;
if ((gc_heap::reserved_memory_limit - gc_heap::reserved_memory) < requested_size)
@@ -4488,7 +4495,8 @@ void* virtual_alloc (size_t size)
flags = VirtualReserveFlags::WriteWatch;
}
#endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
- void* prgmem = GCToOSInterface::VirtualReserve (requested_size, card_size * card_word_width, flags);
+
+ void* prgmem = use_large_pages_p ? GCToOSInterface::VirtualReserveAndCommitLargePages(requested_size) : GCToOSInterface::VirtualReserve(requested_size, card_size * card_word_width, flags);
void *aligned_mem = prgmem;
// We don't want (prgmem + size) to be right at the end of the address space
@@ -5466,9 +5474,10 @@ bool gc_heap::virtual_commit (void* address, size_t size, int h_number, bool* ha
}
// If it's a valid heap number it means it's commiting for memory on the GC heap.
- bool commit_succeeded_p = ((h_number >= 0) ?
- virtual_alloc_commit_for_heap (address, size, h_number) :
- GCToOSInterface::VirtualCommit(address, size));
+ // In addition if large pages is enabled, we set commit_succeeded_p to true because memory is already committed.
+ bool commit_succeeded_p = ((h_number >= 0) ? (use_large_pages_p ? true :
+ virtual_alloc_commit_for_heap (address, size, h_number)) :
+ GCToOSInterface::VirtualCommit(address, size));
if (!commit_succeeded_p && heap_hard_limit)
{
@@ -9219,7 +9228,7 @@ heap_segment* gc_heap::make_heap_segment (uint8_t* new_pages, size_t size, int h
heap_segment_mem (new_segment) = start;
heap_segment_used (new_segment) = start;
heap_segment_reserved (new_segment) = new_pages + size;
- heap_segment_committed (new_segment) = new_pages + initial_commit;
+ heap_segment_committed (new_segment) = (use_large_pages_p ? heap_segment_reserved(new_segment) : (new_pages + initial_commit));
init_heap_segment (new_segment);
dprintf (2, ("Creating heap segment %Ix", (size_t)new_segment));
return new_segment;
@@ -9310,6 +9319,8 @@ void gc_heap::reset_heap_segment_pages (heap_segment* seg)
void gc_heap::decommit_heap_segment_pages (heap_segment* seg,
size_t extra_space)
{
+ if (use_large_pages_p)
+ return;
uint8_t* page_start = align_on_page (heap_segment_allocated(seg));
size_t size = heap_segment_committed (seg) - page_start;
extra_space = align_on_page (extra_space);
@@ -10019,12 +10030,15 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
block_count = 1;
#endif //MULTIPLE_HEAPS
+ use_large_pages_p = false;
+
if (heap_hard_limit)
{
check_commit_cs.Initialize();
+ use_large_pages_p = GCConfig::GetGCLargePages();
}
- if (!reserve_initial_memory(segment_size,heap_size,block_count))
+ if (!reserve_initial_memory(segment_size,heap_size,block_count,use_large_pages_p))
return E_OUTOFMEMORY;
#ifdef CARD_BUNDLE
diff --git a/src/gc/gcconfig.h b/src/gc/gcconfig.h
index 95252adfc2..ea3a89eb8c 100644
--- a/src/gc/gcconfig.h
+++ b/src/gc/gcconfig.h
@@ -74,6 +74,7 @@ public:
"Specifies the name of the GC config log file") \
BOOL_CONFIG(GCNumaAware, "GCNumaAware", true, "Enables numa allocations in the GC") \
BOOL_CONFIG(GCCpuGroup, "GCCpuGroup", false, "Enables CPU groups in the GC") \
+ BOOL_CONFIG(GCLargePages, "GCLargePages", false, "Enables using Large Pages in the GC") \
INT_CONFIG(HeapVerifyLevel, "HeapVerify", HEAPVERIFY_NONE, \
"When set verifies the integrity of the managed heap on entry and exit of each GC") \
INT_CONFIG(LOHCompactionMode, "GCLOHCompact", 0, "Specifies the LOH compaction mode") \
diff --git a/src/gc/gcpriv.h b/src/gc/gcpriv.h
index 9f863e6bb0..8909d2973d 100644
--- a/src/gc/gcpriv.h
+++ b/src/gc/gcpriv.h
@@ -3145,6 +3145,10 @@ public:
PER_HEAP_ISOLATED
size_t current_total_committed_gc_own;
+ // This is if large pages should be used.
+ PER_HEAP_ISOLATED
+ size_t use_large_pages_p;
+
PER_HEAP_ISOLATED
size_t last_gc_index;
diff --git a/src/gc/sample/CMakeLists.txt b/src/gc/sample/CMakeLists.txt
index 5a02d0262e..6eb4b27f22 100644
--- a/src/gc/sample/CMakeLists.txt
+++ b/src/gc/sample/CMakeLists.txt
@@ -25,6 +25,14 @@ set(SOURCES
)
if(WIN32)
+ set (GC_LINK_LIBRARIES
+ ${STATIC_MT_CRT_LIB}
+ ${STATIC_MT_VCRT_LIB}
+ kernel32.lib
+ advapi32.lib)
+endif(WIN32)
+
+if(WIN32)
list(APPEND SOURCES
../windows/gcenv.windows.cpp)
add_definitions(-DUNICODE=1)
@@ -36,3 +44,7 @@ endif()
_add_executable(gcsample
${SOURCES}
)
+
+if(WIN32)
+ target_link_libraries(gcsample ${GC_LINK_LIBRARIES})
+endif() \ No newline at end of file
diff --git a/src/gc/unix/config.h.in b/src/gc/unix/config.h.in
index f43709a393..99866cdadc 100644
--- a/src/gc/unix/config.h.in
+++ b/src/gc/unix/config.h.in
@@ -9,6 +9,7 @@
#cmakedefine01 HAVE_SYS_MMAN_H
#cmakedefine01 HAVE_PTHREAD_THREADID_NP
#cmakedefine01 HAVE_PTHREAD_GETTHREADID_NP
+#cmakedefine01 HAVE_MAP_HUGETLB
#cmakedefine01 HAVE_SCHED_GETCPU
#cmakedefine01 HAVE_NUMA_H
#cmakedefine01 HAVE_VM_ALLOCATE
diff --git a/src/gc/unix/configure.cmake b/src/gc/unix/configure.cmake
index 7eb9053bca..2e31766612 100644
--- a/src/gc/unix/configure.cmake
+++ b/src/gc/unix/configure.cmake
@@ -24,6 +24,15 @@ check_cxx_source_compiles("
}
" HAVE_PTHREAD_GETTHREADID_NP)
+check_cxx_source_compiles("
+ #include <sys/mman.h>
+
+ int main()
+ {
+ return MAP_HUGETLB;
+ }
+ " HAVE_MAP_HUGETLB)
+
check_cxx_source_runs("
#include <sched.h>
diff --git a/src/gc/unix/gcenv.unix.cpp b/src/gc/unix/gcenv.unix.cpp
index 07706d7227..c71d211e01 100644
--- a/src/gc/unix/gcenv.unix.cpp
+++ b/src/gc/unix/gcenv.unix.cpp
@@ -379,7 +379,7 @@ void GCToOSInterface::YieldThread(uint32_t switchCount)
// flags - flags to control special settings like write watching
// Return:
// Starting virtual address of the reserved range
-void* GCToOSInterface::VirtualReserve(size_t size, size_t alignment, uint32_t flags)
+static void* VirtualReserveInner(size_t size, size_t alignment, uint32_t flags, uint32_t hugePagesFlag = 0)
{
assert(!(flags & VirtualReserveFlags::WriteWatch) && "WriteWatch not supported on Unix");
if (alignment == 0)
@@ -388,7 +388,7 @@ void* GCToOSInterface::VirtualReserve(size_t size, size_t alignment, uint32_t fl
}
size_t alignedSize = size + (alignment - OS_PAGE_SIZE);
- void * pRetVal = mmap(nullptr, alignedSize, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
+ void * pRetVal = mmap(nullptr, alignedSize, PROT_NONE, MAP_ANON | MAP_PRIVATE | hugePagesFlag, -1, 0);
if (pRetVal != NULL)
{
@@ -413,6 +413,18 @@ void* GCToOSInterface::VirtualReserve(size_t size, size_t alignment, uint32_t fl
return pRetVal;
}
+// Reserve virtual memory range.
+// Parameters:
+// size - size of the virtual memory range
+// alignment - requested memory alignment, 0 means no specific alignment requested
+// flags - flags to control special settings like write watching
+// Return:
+// Starting virtual address of the reserved range
+void* GCToOSInterface::VirtualReserve(size_t size, size_t alignment, uint32_t flags)
+{
+ return VirtualReserveInner(size, alignment, flags);
+}
+
// Release virtual memory range previously reserved using VirtualReserve
// Parameters:
// address - starting virtual address
@@ -426,6 +438,28 @@ bool GCToOSInterface::VirtualRelease(void* address, size_t size)
return (ret == 0);
}
+// Reserve and commit virtual memory range for large pages.
+// Parameters:
+//  size - size of the virtual memory range
+// Return:
+//  Starting virtual address of the reserved and committed range
+void* GCToOSInterface::VirtualReserveAndCommitLargePages(size_t size)
+{
+#if HAVE_MAP_HUGETLB
+ uint32_t largePagesFlag = MAP_HUGETLB;
+#else
+ uint32_t largePagesFlag = 0;
+#endif
+
+ void* pRetVal = VirtualReserveInner(size, OS_PAGE_SIZE, 0, largePagesFlag);
+ if (VirtualCommit(pRetVal, size, NUMA_NODE_UNDEFINED))
+ {
+ return pRetVal;
+ }
+
+ return nullptr;
+}
+
// Commit virtual memory range. It must be part of a range reserved using VirtualReserve.
// Parameters:
// address - starting virtual address
diff --git a/src/gc/windows/gcenv.windows.cpp b/src/gc/windows/gcenv.windows.cpp
index fa13e544f5..86bd7038c0 100644
--- a/src/gc/windows/gcenv.windows.cpp
+++ b/src/gc/windows/gcenv.windows.cpp
@@ -27,6 +27,8 @@ static size_t g_RestrictedPhysicalMemoryLimit = (size_t)UINTPTR_MAX;
// memory on the machine/in the container, we need to restrict by the VM.
static bool g_UseRestrictedVirtualMemory = false;
+static bool g_SeLockMemoryPrivilegeAcquired = false;
+
static AffinitySet g_processAffinitySet;
typedef BOOL (WINAPI *PIS_PROCESS_IN_JOB)(HANDLE processHandle, HANDLE jobHandle, BOOL* result);
@@ -114,6 +116,42 @@ DWORD LCM(DWORD u, DWORD v)
}
#endif
+bool InitLargePagesPrivilege()
+{
+ TOKEN_PRIVILEGES tp;
+ LUID luid;
+ if (!LookupPrivilegeValueW(nullptr, SE_LOCK_MEMORY_NAME, &luid))
+ {
+ return false;
+ }
+
+ tp.PrivilegeCount = 1;
+ tp.Privileges[0].Luid = luid;
+ tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
+
+ HANDLE token;
+ if (!OpenProcessToken(::GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, &token))
+ {
+ return false;
+ }
+
+ BOOL retVal = AdjustTokenPrivileges(token, FALSE, &tp, 0, nullptr, 0);
+ DWORD gls = GetLastError();
+ CloseHandle(token);
+
+ if (!retVal)
+ {
+ return false;
+ }
+
+ if (gls != 0)
+ {
+ return false;
+ }
+
+ return true;
+}
+
bool InitCPUGroupInfoArray()
{
#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_))
@@ -699,6 +737,31 @@ bool GCToOSInterface::VirtualRelease(void* address, size_t size)
return !!::VirtualFree(address, 0, MEM_RELEASE);
}
+// Reserve and commit virtual memory range for large pages.
+// Parameters:
+//  size - size of the virtual memory range
+// Return:
+//  Starting virtual address of the reserved and committed range
+void* GCToOSInterface::VirtualReserveAndCommitLargePages(size_t size)
+{
+ void* pRetVal = nullptr;
+
+ if (!g_SeLockMemoryPrivilegeAcquired)
+ {
+ if (!InitLargePagesPrivilege())
+ {
+ return nullptr;
+ }
+
+ g_SeLockMemoryPrivilegeAcquired = true;
+ }
+
+ SIZE_T largePageMinimum = GetLargePageMinimum();
+ size = (size + (largePageMinimum - 1)) & ~(largePageMinimum - 1);
+
+ return ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES, PAGE_READWRITE);
+}
+
// Commit virtual memory range. It must be part of a range reserved using VirtualReserve.
// Parameters:
// address - starting virtual address
diff --git a/src/inc/clrconfigvalues.h b/src/inc/clrconfigvalues.h
index c8f7461d87..441ae8e44b 100644
--- a/src/inc/clrconfigvalues.h
+++ b/src/inc/clrconfigvalues.h
@@ -330,6 +330,7 @@ RETAIL_CONFIG_STRING_INFO(EXTERNAL_GCName, W("GCName"), "")
RETAIL_CONFIG_DWORD_INFO_DIRECT_ACCESS(EXTERNAL_GCHeapHardLimit, W("GCHeapHardLimit"), "Specifies the maximum commit size for the GC heap")
RETAIL_CONFIG_DWORD_INFO_DIRECT_ACCESS(EXTERNAL_GCHeapHardLimitPercent, W("GCHeapHardLimitPercent"), "Specifies the GC heap usage as a percentage of the total memory")
RETAIL_CONFIG_STRING_INFO(EXTERNAL_GCHeapAffinitizeRanges, W("GCHeapAffinitizeRanges"), "Specifies list of processors for Server GC threads. The format is a comma separated list of processor numbers or ranges of processor numbers. Example: 1,3,5,7-9,12")
+RETAIL_CONFIG_DWORD_INFO_DIRECT_ACCESS(EXTERNAL_GCLargePages, W("GCLargePages"), "Specifies whether large pages should be used when a heap hard limit is set")
///
/// IBC
diff --git a/src/pal/inc/pal.h b/src/pal/inc/pal.h
index 66d83f1c42..0c9b5a70b1 100644
--- a/src/pal/inc/pal.h
+++ b/src/pal/inc/pal.h
@@ -2558,6 +2558,7 @@ SetErrorMode(
#define MEM_MAPPED 0x40000
#define MEM_TOP_DOWN 0x100000
#define MEM_WRITE_WATCH 0x200000
+#define MEM_LARGE_PAGES 0x20000000
#define MEM_RESERVE_EXECUTABLE 0x40000000 // reserve memory using executable memory allocator
PALIMPORT
diff --git a/src/vm/gcenv.os.cpp b/src/vm/gcenv.os.cpp
index 909319889e..a56215a207 100644
--- a/src/vm/gcenv.os.cpp
+++ b/src/vm/gcenv.os.cpp
@@ -55,6 +55,48 @@ public:
uint16_t GetCombinedValue() { return m_groupProc; }
};
+#if !defined(FEATURE_PAL)
+
+static bool g_SeLockMemoryPrivilegeAcquired = false;
+
+bool InitLargePagesPrivilege()
+{
+ TOKEN_PRIVILEGES tp;
+ LUID luid;
+ if (!LookupPrivilegeValueW(nullptr, SE_LOCK_MEMORY_NAME, &luid))
+ {
+ return false;
+ }
+
+ tp.PrivilegeCount = 1;
+ tp.Privileges[0].Luid = luid;
+ tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
+
+ HANDLE token;
+ if (!OpenProcessToken(::GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, &token))
+ {
+ return false;
+ }
+
+ BOOL retVal = AdjustTokenPrivileges(token, FALSE, &tp, 0, nullptr, 0);
+ DWORD gls = GetLastError();
+ CloseHandle(token);
+
+ if (!retVal)
+ {
+ return false;
+ }
+
+ if (gls != 0)
+ {
+ return false;
+ }
+
+ return true;
+}
+
+#endif // !FEATURE_PAL
+
// Initialize the interface implementation
// Return:
// true if it has succeeded, false if it has failed
@@ -271,6 +313,33 @@ bool GCToOSInterface::VirtualRelease(void* address, size_t size)
return !!::ClrVirtualFree(address, 0, MEM_RELEASE);
}
+// Reserve and commit virtual memory range for large pages.
+// Parameters:
+//  size - size of the virtual memory range
+// Return:
+//  Starting virtual address of the reserved and committed range
+void* GCToOSInterface::VirtualReserveAndCommitLargePages(size_t size)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#if !defined(FEATURE_PAL)
+ if (!g_SeLockMemoryPrivilegeAcquired)
+ {
+ if (!InitLargePagesPrivilege())
+ {
+ return nullptr;
+ }
+
+ g_SeLockMemoryPrivilegeAcquired = true;
+ }
+
+ SIZE_T largePageMinimum = GetLargePageMinimum();
+ size = (size + (largePageMinimum - 1)) & ~(largePageMinimum - 1);
+#endif
+
+ return ::ClrVirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES, PAGE_READWRITE);
+}
+
// Commit virtual memory range. It must be part of a range reserved using VirtualReserve.
// Parameters:
// address - starting virtual address