diff options
50 files changed, 1396 insertions, 849 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt index f55b54c498..f6e0987ebe 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -585,6 +585,10 @@ if(FEATURE_STANDALONE_GC) endif(CLR_CMAKE_PLATFORM_UNIX) endif(FEATURE_STANDALONE_GC) +if(FEATURE_STANDALONE_GC_ONLY) + add_definitions(-DFEATURE_STANDALONE_GC_ONLY) +endif(FEATURE_STANDALONE_GC_ONLY) + if (CLR_CMAKE_PLATFORM_UNIX) include_directories("src/pal/inc") include_directories("src/pal/inc/rt") @@ -76,6 +76,7 @@ set __BuildTypeDebug=0 set __BuildTypeChecked=0 set __BuildTypeRelease=0 set __BuildStandaloneGC="-DFEATURE_STANDALONE_GC=0" +set __BuildStandaloneGCOnly="-DFEATURE_STANDALONE_GC_ONLY=0" set __PgoInstrument=0 set __IbcTuning= @@ -139,7 +140,12 @@ if /i "%1" == "usenmakemakefiles" (set __NMakeMakefiles=1&set __ConfigureOnly= if /i "%1" == "pgoinstrument" (set __PgoInstrument=1&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop) if /i "%1" == "ibcinstrument" (set __IbcTuning=/Tuning&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop) if /i "%1" == "toolset_dir" (set __ToolsetDir=%2&set __PassThroughArgs=%__PassThroughArgs% %2&set processedArgs=!processedArgs! %1 %2&shift&shift&goto Arg_Loop) -if /i "%1" == "buildstandalonegc" (set __BuildStandaloneGC="-DFEATURE_STANDALONE_GC=1"&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop) +if /i "%1" == "buildstandalonegc" ( + set __BuildStandaloneGC="-DFEATURE_STANDALONE_GC=1" + set __BuildStandaloneGCOnly="-DFEATURE_STANDALONE_GC_ONLY=1" + set processedArgs=!processedArgs! %1 + shift&goto Arg_Loop +) @REM The following can be deleted once the CI system that passes it is updated to not pass it. if /i "%1" == "altjitcrossgen" (set processedArgs=!processedArgs! 
%1&shift&goto Arg_Loop) @@ -808,7 +808,7 @@ while :; do fi ;; buildstandalonegc) - __cmakeargs="-DFEATURE_STANDALONE_GC=1" + __cmakeargs="-DFEATURE_STANDALONE_GC=1 -DFEATURE_STANDALONE_GC_ONLY=1" ;; msbuildonunsupportedplatform) __msbuildonunsupportedplatform=1 diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index b761b6a82d..9359580468 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -149,7 +149,9 @@ add_subdirectory(utilcode) add_subdirectory(gcinfo) add_subdirectory(coreclr) add_subdirectory(jit) -add_subdirectory(gc) +if(FEATURE_STANDALONE_GC) + add_subdirectory(gc) +endif(FEATURE_STANDALONE_GC) add_subdirectory(vm) add_subdirectory(md) add_subdirectory(debug) diff --git a/src/debug/ee/debugger.cpp b/src/debug/ee/debugger.cpp index ae698e2234..f8ebfc56fe 100644 --- a/src/debug/ee/debugger.cpp +++ b/src/debug/ee/debugger.cpp @@ -15573,7 +15573,9 @@ HRESULT Debugger::SetReference(void *objectRefAddress, // fixup the handle. OBJECTHANDLE h = vmObjectHandle.GetRawPtr(); OBJECTREF src = *((OBJECTREF*)&newReference); - HndAssignHandle(h, src); + + IGCHandleManager* mgr = GCHandleUtilities::GetGCHandleManager(); + mgr->StoreObjectInHandle(h, OBJECTREFToObject(src)); } return S_OK; diff --git a/src/dlls/mscordac/CMakeLists.txt b/src/dlls/mscordac/CMakeLists.txt index afe5bea7d0..82582f4def 100644 --- a/src/dlls/mscordac/CMakeLists.txt +++ b/src/dlls/mscordac/CMakeLists.txt @@ -89,7 +89,6 @@ set(COREDAC_LIBRARIES strongname_dac utilcode_dac unwinder_dac - gc_dac ${END_LIBRARY_GROUP} # End group of libraries that have circular references ) diff --git a/src/dlls/mscoree/coreclr/CMakeLists.txt b/src/dlls/mscoree/coreclr/CMakeLists.txt index 7a4617fc52..81aaad45f0 100644 --- a/src/dlls/mscoree/coreclr/CMakeLists.txt +++ b/src/dlls/mscoree/coreclr/CMakeLists.txt @@ -73,6 +73,10 @@ if(FEATURE_MERGE_JIT_AND_ENGINE) set(CLRJIT_STATIC clrjit_static) endif(FEATURE_MERGE_JIT_AND_ENGINE) +if(FEATURE_STANDALONE_GC_ONLY) + set(STANDALONE_GC gc_standalone) 
+endif(FEATURE_STANDALONE_GC_ONLY) + # IMPORTANT! Please do not rearrange the order of the libraries. The linker on Linux is # order dependent and changing the order can result in undefined symbols in the shared # library. @@ -83,7 +87,7 @@ set(CORECLR_LIBRARIES debug-pal ${LIB_UNWINDER} cee_wks - gc_wks + ${STANDALONE_GC} ${END_LIBRARY_GROUP} # End group of libraries that have circular references mdcompiler_wks mdruntime_wks diff --git a/src/dlls/mscoree/mscorwks_unixexports.src b/src/dlls/mscoree/mscorwks_unixexports.src index 271ff8ae7c..73ffc0c1c5 100644 --- a/src/dlls/mscoree/mscorwks_unixexports.src +++ b/src/dlls/mscoree/mscorwks_unixexports.src @@ -104,3 +104,6 @@ GlobalMemoryStatusEx VirtualQuery WideCharToMultiByte WriteFile + +; Function for initializing a standalone GC +InitializeGarbageCollector diff --git a/src/gc/CMakeLists.txt b/src/gc/CMakeLists.txt index 59c18ffd87..4de3f4e412 100644 --- a/src/gc/CMakeLists.txt +++ b/src/gc/CMakeLists.txt @@ -1,24 +1,17 @@ set(CMAKE_INCLUDE_CURRENT_DIR ON) -include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}) +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}) include_directories(BEFORE ${CLR_DIR}/src/vm) include_directories(BEFORE ${CLR_DIR}/src/vm/${ARCH_SOURCES_DIR}) +add_definitions(-DBUILD_AS_STANDALONE) + if(CLR_CMAKE_PLATFORM_UNIX) add_compile_options(-fPIC) endif(CLR_CMAKE_PLATFORM_UNIX) -if(CMAKE_CONFIGURATION_TYPES) - foreach (Config DEBUG CHECKED) - set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS $<$<CONFIG:${Config}>:WRITE_BARRIER_CHECK=1>) - endforeach (Config) -else() - if(UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG OR UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED) - add_definitions(-DWRITE_BARRIER_CHECK=1) - endif(UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG OR UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED) -endif(CMAKE_CONFIGURATION_TYPES) - -set( GC_SOURCES_DAC_AND_WKS_COMMON +set( GC_SOURCES + gcconfig.cpp gccommon.cpp gcscan.cpp gcsvr.cpp @@ -27,28 +20,18 @@ set( 
GC_SOURCES_DAC_AND_WKS_COMMON handletablecore.cpp handletablescan.cpp objecthandle.cpp - softwarewritewatch.cpp) - -set( GC_SOURCES_WKS - ${GC_SOURCES_DAC_AND_WKS_COMMON} + softwarewritewatch.cpp gchandletable.cpp gceesvr.cpp gceewks.cpp handletablecache.cpp) -set( GC_SOURCES_DAC - ${GC_SOURCES_DAC_AND_WKS_COMMON}) - -if(FEATURE_STANDALONE_GC) - if(NOT CLR_CMAKE_PLATFORM_UNIX) - set ( GC_SOURCES_WKS - ${GC_SOURCES_WKS} - windows/gcenv.windows.cpp) - endif(NOT CLR_CMAKE_PLATFORM_UNIX) -endif(FEATURE_STANDALONE_GC) +if(NOT CLR_CMAKE_PLATFORM_UNIX) +set ( GC_SOURCES + ${GC_SOURCES} + windows/gcenv.windows.cpp) +endif(NOT CLR_CMAKE_PLATFORM_UNIX) -convert_to_absolute_path(GC_SOURCES_WKS ${GC_SOURCES_WKS}) -convert_to_absolute_path(GC_SOURCES_DAC ${GC_SOURCES_DAC}) +convert_to_absolute_path(GC_SOURCES ${GC_SOURCES}) -add_subdirectory(wks) -add_subdirectory(dac) +add_library_clr(gc_standalone STATIC ${GC_SOURCES}) diff --git a/src/gc/dac/CMakeLists.txt b/src/gc/dac/CMakeLists.txt deleted file mode 100644 index 1f1c9ebe5c..0000000000 --- a/src/gc/dac/CMakeLists.txt +++ /dev/null @@ -1,2 +0,0 @@ -include(${CLR_DIR}/dac.cmake) -add_library_clr(gc_dac STATIC ${GC_SOURCES_DAC}) diff --git a/src/gc/env/gcenv.ee.h b/src/gc/env/gcenv.ee.h index aa00d19780..bb335c6834 100644 --- a/src/gc/env/gcenv.ee.h +++ b/src/gc/env/gcenv.ee.h @@ -74,6 +74,10 @@ public: static bool ForceFullGCToBeBlocking(); static bool EagerFinalized(Object* obj); static MethodTable* GetFreeObjectMethodTable(); + static bool GetBooleanConfigValue(const char* key, bool* value); + static bool GetIntConfigValue(const char* key, int64_t* value); + static bool GetStringConfigValue(const char* key, const char** value); + static void FreeStringConfigValue(const char* key); }; #endif // __GCENV_EE_H__ diff --git a/src/gc/env/gcenv.os.h b/src/gc/env/gcenv.os.h index d3e40ac4ff..8c533222ef 100644 --- a/src/gc/env/gcenv.os.h +++ b/src/gc/env/gcenv.os.h @@ -158,6 +158,15 @@ public: // flags - flags to control special 
settings like write watching // Return: // Starting virtual address of the reserved range + // Notes: + // Previous uses of this API aligned the `size` parameter to the platform + // allocation granularity. This is not required by POSIX or Windows. Windows will + // round the size up to the nearest page boundary. POSIX does not specify what is done, + // but Linux probably also rounds up. If an implementation of GCToOSInterface needs to + // align to the allocation granularity, it will do so in its implementation. + // + // Windows guarantees that the returned mapping will be aligned to the allocation + // granularity. static void* VirtualReserve(size_t size, size_t alignment, uint32_t flags); // Release virtual memory range previously reserved using VirtualReserve @@ -357,6 +366,12 @@ public: // Return: // Time stamp in milliseconds static uint32_t GetLowPrecisionTimeStamp(); + + // Gets the total number of processors on the machine, not taking + // into account current process affinity. 
+ // Return: + // Number of processors on the machine + static uint32_t GetTotalProcessorCount(); }; #endif // __GCENV_OS_H__ diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp index 653f379b66..962e09b445 100644 --- a/src/gc/gc.cpp +++ b/src/gc/gc.cpp @@ -21,7 +21,6 @@ #define USE_INTROSORT - #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE) BOOL bgc_heap_walk_for_etw_p = FALSE; #endif //BACKGROUND_GC && FEATURE_EVENT_TRACE @@ -176,7 +175,7 @@ size_t GetHighPrecisionTimeStamp() GCStatistics g_GCStatistics; GCStatistics g_LastGCStatistics; -TCHAR* GCStatistics::logFileName = NULL; +char* GCStatistics::logFileName = NULL; FILE* GCStatistics::logFile = NULL; void GCStatistics::AddGCStats(const gc_mechanisms& settings, size_t timeInMSec) @@ -749,7 +748,7 @@ public: if (color == join_struct.lock_color) { respin: - int spin_count = 4096 * g_SystemInfo.dwNumberOfProcessors; + int spin_count = 4096 * g_num_processors; for (int j = 0; j < spin_count; j++) { if (color != join_struct.lock_color) @@ -850,7 +849,7 @@ respin: if (!join_struct.wait_done) { respin: - int spin_count = 2 * 4096 * g_SystemInfo.dwNumberOfProcessors; + int spin_count = 2 * 4096 * g_num_processors; for (int j = 0; j < spin_count; j++) { if (join_struct.wait_done) @@ -1043,7 +1042,7 @@ class exclusive_sync public: void init() { - spin_count = 32 * (g_SystemInfo.dwNumberOfProcessors - 1); + spin_count = 32 * (g_num_processors - 1); rwp_object = 0; needs_checking = 0; for (int i = 0; i < max_pending_allocs; i++) @@ -1424,15 +1423,6 @@ int mark_time, plan_time, sweep_time, reloc_time, compact_time; #endif // MULTIPLE_HEAPS -#ifdef TRACE_GC - -int print_level = DEFAULT_GC_PRN_LVL; //level of detail of the debug trace -BOOL trace_gc = FALSE; -int gc_trace_fac = 0; -hlet* hlet::bindings = 0; - -#endif //TRACE_GC - void reset_memory (uint8_t* o, size_t sizeo); #ifdef WRITE_WATCH @@ -1508,7 +1498,7 @@ void WaitLongerNoInstru (int i) // if we're waiting for gc to finish, we should block immediately if 
(!g_TrapReturningThreads) { - if (g_SystemInfo.dwNumberOfProcessors > 1) + if (g_num_processors > 1) { YieldProcessor(); // indicate to the processor that we are spining if (i & 0x01f) @@ -1580,12 +1570,12 @@ retry: { if ((++i & 7) && !IsGCInProgress()) { - if (g_SystemInfo.dwNumberOfProcessors > 1) + if (g_num_processors > 1) { #ifndef MULTIPLE_HEAPS - int spin_count = 1024 * g_SystemInfo.dwNumberOfProcessors; + int spin_count = 1024 * g_num_processors; #else //!MULTIPLE_HEAPS - int spin_count = 32 * g_SystemInfo.dwNumberOfProcessors; + int spin_count = 32 * g_num_processors; #endif //!MULTIPLE_HEAPS for (int j = 0; j < spin_count; j++) { @@ -1696,7 +1686,7 @@ void WaitLonger (int i #ifdef SYNCHRONIZATION_STATS (spin_lock->num_switch_thread_w)++; #endif //SYNCHRONIZATION_STATS - if (g_SystemInfo.dwNumberOfProcessors > 1) + if (g_num_processors > 1) { YieldProcessor(); // indicate to the processor that we are spining if (i & 0x01f) @@ -1741,12 +1731,12 @@ retry: { if ((++i & 7) && !gc_heap::gc_started) { - if (g_SystemInfo.dwNumberOfProcessors > 1) + if (g_num_processors > 1) { #ifndef MULTIPLE_HEAPS - int spin_count = 1024 * g_SystemInfo.dwNumberOfProcessors; + int spin_count = 1024 * g_num_processors; #else //!MULTIPLE_HEAPS - int spin_count = 32 * g_SystemInfo.dwNumberOfProcessors; + int spin_count = 32 * g_num_processors; #endif //!MULTIPLE_HEAPS for (int j = 0; j < spin_count; j++) { @@ -3790,7 +3780,7 @@ public: _ASSERTE(pMT->SanityCheck()); bool noRangeChecks = - (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_NO_RANGE_CHECKS) == EEConfig::HEAPVERIFY_NO_RANGE_CHECKS; + (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_NO_RANGE_CHECKS) == GCConfig::HEAPVERIFY_NO_RANGE_CHECKS; BOOL fSmallObjectHeapPtr = FALSE, fLargeObjectHeapPtr = FALSE; if (!noRangeChecks) @@ -3814,7 +3804,7 @@ public: #endif // FEATURE_64BIT_ALIGNMENT #ifdef VERIFY_HEAP - if (bDeep && (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)) + if (bDeep && 
(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)) g_theGCHeap->ValidateObjectMember(this); #endif if (fSmallObjectHeapPtr) @@ -3905,7 +3895,7 @@ public: //This introduces a bug in the free list management. //((void**) this)[-1] = 0; // clear the sync block, assert (*numComponentsPtr >= 0); - if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC) + if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) memset (((uint8_t*)this)+sizeof(ArrayBase), 0xcc, *numComponentsPtr); #endif //VERIFY_HEAP } @@ -4388,12 +4378,12 @@ static size_t get_valid_segment_size (BOOL large_seg=FALSE) if (!large_seg) { initial_seg_size = INITIAL_ALLOC; - seg_size = g_pConfig->GetSegmentSize(); + seg_size = static_cast<size_t>(GCConfig::GetSegmentSize()); } else { initial_seg_size = LHEAP_ALLOC; - seg_size = g_pConfig->GetSegmentSize() / 2; + seg_size = static_cast<size_t>(GCConfig::GetSegmentSize()) / 2; } #ifdef MULTIPLE_HEAPS @@ -4401,9 +4391,9 @@ static size_t get_valid_segment_size (BOOL large_seg=FALSE) if (!large_seg) #endif // BIT64 { - if (g_SystemInfo.dwNumberOfProcessors > 4) + if (g_num_processors > 4) initial_seg_size /= 2; - if (g_SystemInfo.dwNumberOfProcessors > 8) + if (g_num_processors > 8) initial_seg_size /= 2; } #endif //MULTIPLE_HEAPS @@ -5334,7 +5324,7 @@ void gc_heap::gc_thread_function () } else { - int spin_count = 32 * (g_SystemInfo.dwNumberOfProcessors - 1); + int spin_count = 32 * (g_num_processors - 1); // wait until RestartEE has progressed to a stage where we can restart user threads while (!gc_heap::internal_gc_done && !GCHeap::SafeToRestartManagedThreads()) @@ -5670,7 +5660,7 @@ void gc_mechanisms::first_init() #ifdef BACKGROUND_GC pause_mode = gc_heap::gc_can_use_concurrent ? 
pause_interactive : pause_batch; #ifdef _DEBUG - int debug_pause_mode = g_pConfig->GetGCLatencyMode(); + int debug_pause_mode = static_cast<int>(GCConfig::GetLatencyMode()); if (debug_pause_mode >= 0) { assert (debug_pause_mode <= pause_sustained_low_latency); @@ -7009,9 +6999,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end) // it is impossible for alloc_size to overflow due bounds on each of // its components. size_t alloc_size = sizeof (uint8_t)*(sizeof(card_table_info) + cs + bs + cb + wws + st + ms); - size_t alloc_size_aligned = Align (alloc_size, g_SystemInfo.dwAllocationGranularity-1); - - uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size_aligned, 0, virtual_reserve_flags); + uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size, 0, virtual_reserve_flags); if (!mem) return 0; @@ -7025,7 +7013,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end) if (!GCToOSInterface::VirtualCommit (mem, commit_size)) { dprintf (2, ("Card table commit failed")); - GCToOSInterface::VirtualRelease (mem, alloc_size_aligned); + GCToOSInterface::VirtualRelease (mem, alloc_size); return 0; } @@ -7035,7 +7023,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end) card_table_lowest_address (ct) = start; card_table_highest_address (ct) = end; card_table_brick_table (ct) = (short*)((uint8_t*)ct + cs); - card_table_size (ct) = alloc_size_aligned; + card_table_size (ct) = alloc_size; card_table_next (ct) = 0; #ifdef CARD_BUNDLE @@ -7216,11 +7204,10 @@ int gc_heap::grow_brick_card_tables (uint8_t* start, // it is impossible for alloc_size to overflow due bounds on each of // its components. 
size_t alloc_size = sizeof (uint8_t)*(sizeof(card_table_info) + cs + bs + cb + wws + st + ms); - size_t alloc_size_aligned = Align (alloc_size, g_SystemInfo.dwAllocationGranularity-1); dprintf (GC_TABLE_LOG, ("card table: %Id; brick table: %Id; card bundle: %Id; sw ww table: %Id; seg table: %Id; mark array: %Id", cs, bs, cb, wws, st, ms)); - uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size_aligned, 0, virtual_reserve_flags); + uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size, 0, virtual_reserve_flags); if (!mem) { @@ -7417,7 +7404,7 @@ fail: #endif //delete (uint32_t*)((uint8_t*)ct - sizeof(card_table_info)); - if (!GCToOSInterface::VirtualRelease (mem, alloc_size_aligned)) + if (!GCToOSInterface::VirtualRelease (mem, alloc_size)) { dprintf (GC_TABLE_LOG, ("GCToOSInterface::VirtualRelease failed")); assert (!"release failed"); @@ -9343,7 +9330,7 @@ void gc_heap::rearrange_large_heap_segments() while (seg) { heap_segment* next_seg = heap_segment_next (seg); - delete_heap_segment (seg, (g_pConfig->GetGCRetainVM() != 0)); + delete_heap_segment (seg, GCConfig::GetRetainVM()); seg = next_seg; } freeable_large_heap_segment = 0; @@ -9386,7 +9373,7 @@ void gc_heap::rearrange_heap_segments(BOOL compacting) assert (prev_seg); assert (seg != ephemeral_heap_segment); heap_segment_next (prev_seg) = next_seg; - delete_heap_segment (seg, (g_pConfig->GetGCRetainVM() != 0)); + delete_heap_segment (seg, GCConfig::GetRetainVM()); dprintf (2, ("Deleting heap segment %Ix", (size_t)seg)); } @@ -9789,28 +9776,20 @@ void gc_heap::adjust_ephemeral_limits () } #if defined(TRACE_GC) || defined(GC_CONFIG_DRIVEN) -FILE* CreateLogFile(const CLRConfig::ConfigStringInfo & info, BOOL is_config) +FILE* CreateLogFile(const GCConfigStringHolder& temp_logfile_name, bool is_config) { FILE* logFile; - TCHAR * temp_logfile_name = NULL; - CLRConfig::GetConfigValue(info, &temp_logfile_name); - TCHAR logfile_name[MAX_LONGPATH+1]; - if (temp_logfile_name != 0) + if 
(!temp_logfile_name.Get()) { - _tcscpy(logfile_name, temp_logfile_name); + return nullptr; } - size_t logfile_name_len = _tcslen(logfile_name); - TCHAR* szPid = logfile_name + logfile_name_len; - size_t remaining_space = MAX_LONGPATH + 1 - logfile_name_len; - - _stprintf_s(szPid, remaining_space, _T(".%d%s"), GCToOSInterface::GetCurrentProcessId(), (is_config ? _T(".config.log") : _T(".log"))); - - logFile = _tfopen(logfile_name, _T("wb")); - - delete temp_logfile_name; - + char logfile_name[MAX_LONGPATH+1]; + uint32_t pid = GCToOSInterface::GetCurrentProcessId(); + const char* suffix = is_config ? ".config.log" : ".log"; + _snprintf_s(logfile_name, MAX_LONGPATH+1, _TRUNCATE, "%s.%d%s", temp_logfile_name.Get(), pid, suffix); + logFile = fopen(logfile_name, "wb"); return logFile; } #endif //TRACE_GC || GC_CONFIG_DRIVEN @@ -9823,18 +9802,17 @@ HRESULT gc_heap::initialize_gc (size_t segment_size, ) { #ifdef TRACE_GC - int log_last_gcs = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_GCLogEnabled); - if (log_last_gcs) + if (GCConfig::GetLogEnabled()) { - gc_log = CreateLogFile(CLRConfig::UNSUPPORTED_GCLogFile, FALSE); + gc_log = CreateLogFile(GCConfig::GetLogFile(), false); if (gc_log == NULL) return E_FAIL; // GCLogFileSize in MBs. 
- gc_log_file_size = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_GCLogFileSize); + gc_log_file_size = static_cast<size_t>(GCConfig::GetLogFileSize()); - if (gc_log_file_size > 500) + if (gc_log_file_size <= 0 || gc_log_file_size > 500) { fclose (gc_log); return E_FAIL; @@ -9855,10 +9833,9 @@ HRESULT gc_heap::initialize_gc (size_t segment_size, #endif // TRACE_GC #ifdef GC_CONFIG_DRIVEN - gc_config_log_on = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_GCConfigLogEnabled); - if (gc_config_log_on) + if (GCConfig::GetConfigLogEnabled()) { - gc_config_log = CreateLogFile(CLRConfig::UNSUPPORTED_GCConfigLogFile, TRUE); + gc_config_log = CreateLogFile(GCConfig::GetConfigLogFile(), true); if (gc_config_log == NULL) return E_FAIL; @@ -9870,7 +9847,7 @@ HRESULT gc_heap::initialize_gc (size_t segment_size, return E_FAIL; } - compact_ratio = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_GCCompactRatio); + compact_ratio = static_cast<int>(GCConfig::GetCompactRatio()); // h# | GC | gen | C | EX | NF | BF | ML | DM || PreS | PostS | Merge | Conv | Pre | Post | PrPo | PreP | PostP | cprintf (("%2s | %6s | %1s | %1s | %2s | %2s | %2s | %2s | %2s || %5s | %5s | %5s | %5s | %5s | %5s | %5s | %5s | %5s |", @@ -9897,10 +9874,15 @@ HRESULT gc_heap::initialize_gc (size_t segment_size, #endif //GC_CONFIG_DRIVEN #ifdef GC_STATS - GCStatistics::logFileName = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_GCMixLog); - if (GCStatistics::logFileName != NULL) + GCConfigStringHolder logFileName = GCConfig::GetMixLogFile(); + if (logFileName.Get() != nullptr) { - GCStatistics::logFile = _tfopen(GCStatistics::logFileName, _T("a")); + GCStatistics::logFileName = _strdup(logFileName.Get()); + GCStatistics::logFile = fopen(GCStatistics::logFileName, "a"); + if (!GCStatistics::logFile) + { + return E_FAIL; + } } #endif // GC_STATS @@ -9909,7 +9891,7 @@ HRESULT gc_heap::initialize_gc (size_t segment_size, #ifdef WRITE_WATCH hardware_write_watch_api_supported(); #ifdef BACKGROUND_GC - if 
(can_use_write_watch_for_gc_heap() && g_pConfig->GetGCconcurrent() != 0) + if (can_use_write_watch_for_gc_heap() && GCConfig::GetConcurrentGC()) { gc_can_use_concurrent = true; #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP @@ -10009,11 +9991,6 @@ HRESULT gc_heap::initialize_gc (size_t segment_size, #endif //MULTIPLE_HEAPS -#ifdef TRACE_GC - print_level = g_pConfig->GetGCprnLvl(); - gc_trace_fac = g_pConfig->GetGCtraceFac(); -#endif //TRACE_GC - if (!init_semi_shared()) { hres = E_FAIL; @@ -10095,14 +10072,14 @@ gc_heap::init_semi_shared() should_expand_in_full_gc = FALSE; #ifdef FEATURE_LOH_COMPACTION - loh_compaction_always_p = (g_pConfig->GetGCLOHCompactionMode() != 0); + loh_compaction_always_p = GCConfig::GetLOHCompactionMode() != 0; loh_compaction_mode = loh_compaction_default; #endif //FEATURE_LOH_COMPACTION #ifdef BACKGROUND_GC memset (ephemeral_fgc_counts, 0, sizeof (ephemeral_fgc_counts)); - bgc_alloc_spin_count = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_BGCSpinCount); - bgc_alloc_spin = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_BGCSpin); + bgc_alloc_spin_count = static_cast<uint32_t>(GCConfig::GetBGCSpinCount()); + bgc_alloc_spin = static_cast<uint32_t>(GCConfig::GetBGCSpin()); { int number_bgc_threads = 1; @@ -10267,9 +10244,9 @@ retry: { while (gc_done_event_lock >= 0) { - if (g_SystemInfo.dwNumberOfProcessors > 1) + if (g_num_processors > 1) { - int spin_count = 32 * g_SystemInfo.dwNumberOfProcessors; + int spin_count = 32 * g_num_processors; for (int j = 0; j < spin_count; j++) { if (gc_done_event_lock < 0) @@ -11534,7 +11511,7 @@ void gc_heap::handle_oom (int heap_num, oom_reason reason, size_t alloc_size, // Break early - before the more_space_lock is release so no other threads // could have allocated on the same heap when OOM happened. 
- if (g_pConfig->IsGCBreakOnOOMEnabled()) + if (GCConfig::GetBreakOnOOM()) { GCToOSInterface::DebugBreak(); } @@ -11927,7 +11904,7 @@ void gc_heap::bgc_loh_alloc_clr (uint8_t* alloc_start, #ifdef VERIFY_HEAP // since we filled in 0xcc for free object when we verify heap, // we need to make sure we clear those bytes. - if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC) + if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { if (size_to_clear < saved_size_to_clear) { @@ -13136,7 +13113,7 @@ int gc_heap::try_allocate_more_space (alloc_context* acontext, size_t size, #ifdef SYNCHRONIZATION_STATS good_suspension++; #endif //SYNCHRONIZATION_STATS - BOOL fStress = (g_pConfig->GetGCStressLevel() & EEConfig::GCSTRESS_TRANSITION) != 0; + BOOL fStress = (g_pConfig->GetGCStressLevel() & GCConfig::GCSTRESS_TRANSITION) != 0; if (!fStress) { //Rendez vous early (MP scaling issue) @@ -14484,6 +14461,8 @@ int gc_heap::joined_generation_to_condemn (BOOL should_evaluate_elevation, // We can only do Concurrent GC Stress if the caller did not explicitly ask for all // generations to be collected, + // [LOCALGC TODO] STRESS_HEAP is not defined for a standalone GC so there are multiple + // things that need to be fixed in this code block. if (n_original != max_generation && g_pConfig->GetGCStressLevel() && gc_can_use_concurrent) { @@ -15543,7 +15522,6 @@ void gc_heap::gc1() } descr_generations (FALSE); - descr_card_table(); verify_soh_segment_list(); @@ -15577,7 +15555,7 @@ void gc_heap::gc1() // value. If we ever allow randomly adjusting this as the process runs, // we cannot call it this way as joins need to match - we must have the same // value for all heaps like we do with bgc_heap_walk_for_etw_p. 
- || (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC) + || (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) #endif #if defined(FEATURE_EVENT_TRACE) && defined(BACKGROUND_GC) || (bgc_heap_walk_for_etw_p && settings.concurrent) @@ -15639,7 +15617,7 @@ void gc_heap::gc1() #endif //BACKGROUND_GC #ifdef VERIFY_HEAP - if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC) + if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) verify_heap (FALSE); #endif // VERIFY_HEAP @@ -16594,14 +16572,6 @@ int gc_heap::garbage_collect (int n) if (gc_t_join.joined()) #endif //MULTIPLE_HEAPS { -#ifdef TRACE_GC - int gc_count = (int)dd_collection_count (dynamic_data_of (0)); - if (gc_count >= g_pConfig->GetGCtraceStart()) - trace_gc = 1; - if (gc_count >= g_pConfig->GetGCtraceEnd()) - trace_gc = 0; -#endif //TRACE_GC - #ifdef MULTIPLE_HEAPS #if !defined(SEG_MAPPING_TABLE) && !defined(FEATURE_BASICFREEZE) //delete old slots from the segment table @@ -16748,12 +16718,12 @@ int gc_heap::garbage_collect (int n) // descr_card_table(); #ifdef VERIFY_HEAP - if ((g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC) && - !(g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_POST_GC_ONLY)) + if ((GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) && + !(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_POST_GC_ONLY)) { verify_heap (TRUE); } - if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_BARRIERCHECK) + if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_BARRIERCHECK) checkGCWriteBarrier(); #endif // VERIFY_HEAP @@ -17125,7 +17095,7 @@ uint8_t* gc_heap::find_object (uint8_t* interior, uint8_t* low) heap_segment* seg = find_segment_per_heap (interior, FALSE); if (seg #ifdef FEATURE_CONSERVATIVE_GC - && (!g_pConfig->GetGCConservative() || interior <= heap_segment_allocated(seg)) + && (GCConfig::GetConservativeGC() || interior <= heap_segment_allocated(seg)) #endif ) { @@ -17133,7 +17103,7 @@ uint8_t* gc_heap::find_object 
(uint8_t* interior, uint8_t* low) // we don't have brick entry for it, and we may incorrectly treat it as on large object heap. int align_const = get_alignment_constant (heap_segment_read_only_p (seg) #ifdef FEATURE_CONSERVATIVE_GC - || (g_pConfig->GetGCConservative() && !heap_segment_loh_p (seg)) + || (GCConfig::GetConservativeGC() && !heap_segment_loh_p (seg)) #endif ); //int align_const = get_alignment_constant (heap_segment_read_only_p (seg)); @@ -18603,7 +18573,7 @@ void gc_heap::background_promote (Object** ppObject, ScanContext* sc, uint32_t f #ifdef FEATURE_CONSERVATIVE_GC // For conservative GC, a value on stack may point to middle of a free object. // In this case, we don't need to promote the pointer. - if (g_pConfig->GetGCConservative() && ((CObjectHeader*)o)->IsFree()) + if (GCConfig::GetConservativeGC() && ((CObjectHeader*)o)->IsFree()) { return; } @@ -23714,7 +23684,7 @@ void gc_heap::relocate_survivor_helper (uint8_t* plug, uint8_t* plug_end) void gc_heap::verify_pins_with_post_plug_info (const char* msg) { #if defined (_DEBUG) && defined (VERIFY_HEAP) - if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC) + if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { if (!verify_pinned_queue_p) return; @@ -26596,7 +26566,7 @@ void gc_heap::background_promote_callback (Object** ppObject, ScanContext* sc, #ifdef FEATURE_CONSERVATIVE_GC // For conservative GC, a value on stack may point to middle of a free object. // In this case, we don't need to promote the pointer. 
- if (g_pConfig->GetGCConservative() && ((CObjectHeader*)o)->IsFree()) + if (GCConfig::GetConservativeGC() && ((CObjectHeader*)o)->IsFree()) { return; } @@ -29192,7 +29162,7 @@ gc_heap::realloc_plugs (generation* consing_gen, heap_segment* seg, void gc_heap::verify_no_pins (uint8_t* start, uint8_t* end) { #ifdef VERIFY_HEAP - if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC) + if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { BOOL contains_pinned_plugs = FALSE; size_t mi = 0; @@ -30259,7 +30229,7 @@ BOOL gc_heap::decide_on_compacting (int condemned_gen_number, #endif // GC_STATS #endif //STRESS_HEAP - if (g_pConfig->GetGCForceCompact()) + if (GCConfig::GetForceCompact()) should_compact = TRUE; if ((condemned_gen_number == max_generation) && last_gc_before_oom) @@ -30596,7 +30566,7 @@ CObjectHeader* gc_heap::allocate_large_object (size_t jsize, int64_t& alloc_byte if (jsize >= maxObjectSize) { - if (g_pConfig->IsGCBreakOnOOMEnabled()) + if (GCConfig::GetBreakOnOOM()) { GCToOSInterface::DebugBreak(); } @@ -30822,8 +30792,8 @@ void gc_heap::set_mem_verify (uint8_t* start, uint8_t* end, uint8_t b) #ifdef VERIFY_HEAP if (end > start) { - if ((g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC) && - !(g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_NO_MEM_FILL)) + if ((GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) && + !(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_NO_MEM_FILL)) { dprintf (3, ("setting mem to %c [%Ix, [%Ix", b, start, end)); memset (start, b, (end - start)); @@ -31182,7 +31152,7 @@ void gc_heap::background_ephemeral_sweep() // the following line is temporary. 
heap_segment_saved_bg_allocated (ephemeral_heap_segment) = plug_end; #ifdef VERIFY_HEAP - if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC) + if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { make_unused_array (plug_end, (end - plug_end)); } @@ -31982,36 +31952,6 @@ void gc_heap::descr_segment (heap_segment* seg ) #endif // TRACE_GC } -void gc_heap::descr_card_table () -{ -#ifdef TRACE_GC - if (trace_gc && (print_level >= 4)) - { - ptrdiff_t min = -1; - dprintf(3,("Card Table set at: ")); - for (size_t i = card_of (lowest_address); i < card_of (highest_address); i++) - { - if (card_set_p (i)) - { - if (min == -1) - { - min = i; - } - } - else - { - if (! ((min == -1))) - { - dprintf (3,("[%Ix %Ix[, ", - (size_t)card_address (min), (size_t)card_address (i))); - min = -1; - } - } - } - } -#endif //TRACE_GC -} - void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context) { #ifdef MULTIPLE_HEAPS @@ -32395,7 +32335,7 @@ BOOL gc_heap::bgc_mark_array_range (heap_segment* seg, void gc_heap::bgc_verify_mark_array_cleared (heap_segment* seg) { #if defined (VERIFY_HEAP) && defined (MARK_ARRAY) - if (recursive_gc_sync::background_running_p() && g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC) + if (recursive_gc_sync::background_running_p() && GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { uint8_t* range_beg = 0; uint8_t* range_end = 0; @@ -32579,7 +32519,7 @@ void gc_heap::verify_mark_array_cleared (heap_segment* seg) void gc_heap::verify_mark_array_cleared () { #if defined (VERIFY_HEAP) && defined (MARK_ARRAY) - if (recursive_gc_sync::background_running_p() && g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC) + if (recursive_gc_sync::background_running_p() && GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { generation* gen = generation_of (max_generation); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); @@ -32609,7 +32549,7 @@ void gc_heap::verify_mark_array_cleared () 
void gc_heap::verify_seg_end_mark_array_cleared() { #if defined (VERIFY_HEAP) && defined (MARK_ARRAY) - if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC) + if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { generation* gen = generation_of (max_generation); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); @@ -32671,7 +32611,7 @@ void gc_heap::verify_seg_end_mark_array_cleared() void gc_heap::verify_soh_segment_list() { #ifdef VERIFY_HEAP - if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC) + if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) { generation* gen = generation_of (max_generation); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); @@ -32857,7 +32797,7 @@ gc_heap::verify_free_lists () void gc_heap::verify_heap (BOOL begin_gc_p) { - int heap_verify_level = g_pConfig->GetHeapVerifyLevel(); + int heap_verify_level = static_cast<int>(GCConfig::GetHeapVerifyLevel()); size_t last_valid_brick = 0; BOOL bCurrentBrickInvalid = FALSE; BOOL large_brick_p = TRUE; @@ -32919,7 +32859,7 @@ gc_heap::verify_heap (BOOL begin_gc_p) if (!settings.concurrent) #endif //BACKGROUND_GC { - if (!(heap_verify_level & EEConfig::HEAPVERIFY_NO_MEM_FILL)) + if (!(heap_verify_level & GCConfig::HEAPVERIFY_NO_MEM_FILL)) { //uninit the unused portions of segments. 
generation* gen1 = large_object_generation; @@ -33199,7 +33139,7 @@ gc_heap::verify_heap (BOOL begin_gc_p) #endif //BACKGROUND_GC BOOL deep_verify_obj = can_verify_deep; - if ((heap_verify_level & EEConfig::HEAPVERIFY_DEEP_ON_COMPACT) && !settings.compaction) + if ((heap_verify_level & GCConfig::HEAPVERIFY_DEEP_ON_COMPACT) && !settings.compaction) deep_verify_obj = FALSE; ((CObjectHeader*)curr_object)->ValidateHeap((Object*)curr_object, deep_verify_obj); @@ -33492,7 +33432,10 @@ HRESULT GCHeap::Initialize () return E_FAIL; } + g_gc_pFreeObjectMethodTable = GCToEEInterface::GetFreeObjectMethodTable(); + g_num_processors = GCToOSInterface::GetTotalProcessorCount(); + assert(g_num_processors != 0); //Initialize the static members. #ifdef TRACE_GC @@ -33507,10 +33450,10 @@ HRESULT GCHeap::Initialize () gc_heap::min_segment_size = min (seg_size, large_seg_size); #ifdef MULTIPLE_HEAPS - if (g_pConfig->GetGCNoAffinitize()) + if (GCConfig::GetNoAffinitize()) gc_heap::gc_thread_no_affinitize_p = true; - uint32_t nhp_from_config = g_pConfig->GetGCHeapCount(); + uint32_t nhp_from_config = static_cast<uint32_t>(GCConfig::GetHeapCount()); // GetGCProcessCpuCount only returns up to 64 procs. uint32_t nhp_from_process = CPUGroupInfo::CanEnableGCCPUGroups() ? 
CPUGroupInfo::GetNumActiveProcessors(): @@ -33533,7 +33476,7 @@ HRESULT GCHeap::Initialize () gc_heap::mem_one_percent = gc_heap::total_physical_mem / 100; #ifndef MULTIPLE_HEAPS - gc_heap::mem_one_percent /= g_SystemInfo.dwNumberOfProcessors; + gc_heap::mem_one_percent /= g_num_processors; #endif //!MULTIPLE_HEAPS // We should only use this if we are in the "many process" mode which really is only applicable @@ -33545,7 +33488,7 @@ HRESULT GCHeap::Initialize () int available_mem_th = 10; if (gc_heap::total_physical_mem >= ((uint64_t)80 * 1024 * 1024 * 1024)) { - int adjusted_available_mem_th = 3 + (int)((float)47 / (float)(g_SystemInfo.dwNumberOfProcessors)); + int adjusted_available_mem_th = 3 + (int)((float)47 / (float)(g_num_processors)); available_mem_th = min (available_mem_th, adjusted_available_mem_th); } @@ -33813,7 +33756,7 @@ void GCHeap::Promote(Object** ppObject, ScanContext* sc, uint32_t flags) #ifdef FEATURE_CONSERVATIVE_GC // For conservative GC, a value on stack may point to middle of a free object. // In this case, we don't need to promote the pointer. - if (g_pConfig->GetGCConservative() + if (GCConfig::GetConservativeGC() && ((CObjectHeader*)o)->IsFree()) { return; @@ -34086,14 +34029,7 @@ bool GCHeap::StressHeap(gc_alloc_context * context) str->SetMethodTable (g_pStringClass); str->SetStringLength (strLen); -#if CHECK_APP_DOMAIN_LEAKS - if (g_pConfig->AppDomainLeaks() && str->SetAppDomainNoThrow()) - { -#endif - HndAssignHandle(m_StressObjs[i], ObjectToOBJECTREF(str)); -#if CHECK_APP_DOMAIN_LEAKS - } -#endif + HndAssignHandle(m_StressObjs[i], ObjectToOBJECTREF(str)); } i = (i + 1) % NUM_HEAP_STRESS_OBJS; if (i == m_CurStressObj) break; @@ -35510,7 +35446,7 @@ size_t GCHeap::GetValidSegmentSize(bool large_seg) // Get the max gen0 heap size, making sure it conforms. 
size_t GCHeap::GetValidGen0MaxSize(size_t seg_size) { - size_t gen0size = g_pConfig->GetGCgen0size(); + size_t gen0size = static_cast<size_t>(GCConfig::GetGen0Size()); if ((gen0size == 0) || !g_theGCHeap->IsValidGen0MaxSize(gen0size)) { @@ -35716,7 +35652,7 @@ bool CFinalize::Initialize() { ASSERT (m_Array); STRESS_LOG_OOM_STACK(sizeof(Object*[100])); - if (g_pConfig->IsGCBreakOnOOMEnabled()) + if (GCConfig::GetBreakOnOOM()) { GCToOSInterface::DebugBreak(); } @@ -35825,7 +35761,7 @@ CFinalize::RegisterForFinalization (int gen, Object* obj, size_t size) ((CObjectHeader*)obj)->SetFree(size); } STRESS_LOG_OOM_STACK(0); - if (g_pConfig->IsGCBreakOnOOMEnabled()) + if (GCConfig::GetBreakOnOOM()) { GCToOSInterface::DebugBreak(); } @@ -36506,7 +36442,7 @@ void deleteGCShadow() // Called at startup and right after a GC, get a snapshot of the GC Heap void initGCShadow() { - if (!(g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_BARRIERCHECK)) + if (!(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_BARRIERCHECK)) return; size_t len = g_gc_highest_address - g_gc_lowest_address; diff --git a/src/gc/gc.h b/src/gc/gc.h index 07ae6c916c..822fd42a54 100644 --- a/src/gc/gc.h +++ b/src/gc/gc.h @@ -27,16 +27,18 @@ Module Name: #include "gcinterface.h" #include "env/gcenv.os.h" -#include "env/gcenv.ee.h" -#ifdef FEATURE_STANDALONE_GC +#ifdef BUILD_AS_STANDALONE #include "gcenv.ee.standalone.inl" // GCStress does not currently work with Standalone GC #ifdef STRESS_HEAP #undef STRESS_HEAP #endif // STRESS_HEAP -#endif // FEATURE_STANDALONE_GC +#else +#include "env/gcenv.ee.h" +#endif // BUILD_AS_STANDALONE +#include "gcconfig.h" /* * Promotion Function Prototypes @@ -114,6 +116,7 @@ extern "C" uint8_t* g_gc_highest_address; extern "C" GCHeapType g_gc_heap_type; extern "C" uint32_t g_max_generation; extern "C" MethodTable* g_gc_pFreeObjectMethodTable; +extern "C" uint32_t g_num_processors; ::IGCHandleManager* CreateGCHandleManager(); diff --git a/src/gc/gccommon.cpp 
b/src/gc/gccommon.cpp index 4950809cda..932f4a2c33 100644 --- a/src/gc/gccommon.cpp +++ b/src/gc/gccommon.cpp @@ -17,9 +17,9 @@ IGCHeapInternal* g_theGCHeap; IGCHandleManager* g_theGCHandleManager; -#ifdef FEATURE_STANDALONE_GC +#ifdef BUILD_AS_STANDALONE IGCToCLR* g_theGCToCLR; -#endif // FEATURE_STANDALONE_GC +#endif // BUILD_AS_STANDALONE #ifdef GC_CONFIG_DRIVEN size_t gc_global_mechanisms[MAX_GLOBAL_GC_MECHANISMS_COUNT]; @@ -44,6 +44,7 @@ uint8_t* g_gc_highest_address = 0; GCHeapType g_gc_heap_type = GC_HEAP_INVALID; uint32_t g_max_generation = max_generation; MethodTable* g_gc_pFreeObjectMethodTable = nullptr; +uint32_t g_num_processors = 0; #ifdef GC_CONFIG_DRIVEN void record_global_mechanism (int mech_index) @@ -113,26 +114,6 @@ void record_changed_seg (uint8_t* start, uint8_t* end, } } -// The runtime needs to know whether we're using workstation or server GC -// long before the GCHeap is created. -void InitializeHeapType(bool bServerHeap) -{ - LIMITED_METHOD_CONTRACT; -#ifdef FEATURE_SVR_GC - g_gc_heap_type = bServerHeap ? 
GC_HEAP_SVR : GC_HEAP_WKS; -#ifdef WRITE_BARRIER_CHECK - if (g_gc_heap_type == GC_HEAP_SVR) - { - g_GCShadow = 0; - g_GCShadowEnd = 0; - } -#endif // WRITE_BARRIER_CHECK -#else // FEATURE_SVR_GC - UNREFERENCED_PARAMETER(bServerHeap); - CONSISTENCY_CHECK(bServerHeap == false); -#endif // FEATURE_SVR_GC -} - namespace WKS { extern void PopulateDacVars(GcDacVars* dacVars); @@ -143,7 +124,30 @@ namespace SVR extern void PopulateDacVars(GcDacVars* dacVars); } -bool InitializeGarbageCollector(IGCToCLR* clrToGC, IGCHeap** gcHeap, IGCHandleManager** gcHandleManager, GcDacVars* gcDacVars) +//------------------------------------------------------------------ +// Externally-facing GC symbols, used to initialize the GC +// ----------------------------------------------------------------- + +#ifdef _MSC_VER +#define DLLEXPORT __declspec(dllexport) +#else +#define DLLEXPORT __attribute__ ((visibility ("default"))) +#endif // _MSC_VER + +#ifdef BUILD_AS_STANDALONE +#define GC_API extern "C" DLLEXPORT +#else +#define GC_API extern "C" +#endif // BUILD_AS_STANDALONE + +GC_API +bool +InitializeGarbageCollector( + /* In */ IGCToCLR* clrToGC, + /* Out */ IGCHeap** gcHeap, + /* Out */ IGCHandleManager** gcHandleManager, + /* Out */ GcDacVars* gcDacVars + ) { LIMITED_METHOD_CONTRACT; @@ -153,6 +157,18 @@ bool InitializeGarbageCollector(IGCToCLR* clrToGC, IGCHeap** gcHeap, IGCHandleMa assert(gcHeap != nullptr); assert(gcHandleManager != nullptr); +#ifdef BUILD_AS_STANDALONE + assert(clrToGC != nullptr); + g_theGCToCLR = clrToGC; +#else + UNREFERENCED_PARAMETER(clrToGC); + assert(clrToGC == nullptr); +#endif + + // Initialize GCConfig before anything else - initialization of our + // various components may want to query the current configuration. 
+ GCConfig::Initialize(); + IGCHandleManager* handleManager = CreateGCHandleManager(); if (handleManager == nullptr) { @@ -160,19 +176,25 @@ bool InitializeGarbageCollector(IGCToCLR* clrToGC, IGCHeap** gcHeap, IGCHandleMa } #ifdef FEATURE_SVR_GC - assert(g_gc_heap_type != GC_HEAP_INVALID); - - if (g_gc_heap_type == GC_HEAP_SVR) + if (GCConfig::GetServerGC()) { +#ifdef WRITE_BARRIER_CHECK + g_GCShadow = 0; + g_GCShadowEnd = 0; +#endif // WRITE_BARRIER_CHECK + + g_gc_heap_type = GC_HEAP_SVR; heap = SVR::CreateGCHeap(); SVR::PopulateDacVars(gcDacVars); } else { + g_gc_heap_type = GC_HEAP_WKS; heap = WKS::CreateGCHeap(); WKS::PopulateDacVars(gcDacVars); } #else + g_gc_heap_type = GC_HEAP_WKS; heap = WKS::CreateGCHeap(); WKS::PopulateDacVars(gcDacVars); #endif @@ -183,15 +205,6 @@ bool InitializeGarbageCollector(IGCToCLR* clrToGC, IGCHeap** gcHeap, IGCHandleMa } g_theGCHeap = heap; - -#ifdef FEATURE_STANDALONE_GC - assert(clrToGC != nullptr); - g_theGCToCLR = clrToGC; -#else - UNREFERENCED_PARAMETER(clrToGC); - assert(clrToGC == nullptr); -#endif - *gcHandleManager = handleManager; *gcHeap = heap; return true; diff --git a/src/gc/gcconfig.cpp b/src/gc/gcconfig.cpp new file mode 100644 index 0000000000..d84a5a5801 --- /dev/null +++ b/src/gc/gcconfig.cpp @@ -0,0 +1,48 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. 
+ +#include "common.h" +#include "gcenv.h" +#include "gc.h" + +#define BOOL_CONFIG(name, key, default, unused_doc) \ + bool GCConfig::Get##name() { return s_##name; } \ + bool GCConfig::s_##name = default; + +#define INT_CONFIG(name, key, default, unused_doc) \ + int64_t GCConfig::Get##name() { return s_##name; } \ + int64_t GCConfig::s_##name = default; + +// String configs are not cached because 1) they are rare and +// not on hot paths and 2) they involve transfers of ownership +// of EE-allocated strings, which is potentially complicated. +#define STRING_CONFIG(name, key, unused_doc) \ + GCConfigStringHolder GCConfig::Get##name() \ + { \ + const char* resultStr = nullptr; \ + GCToEEInterface::GetStringConfigValue(key, &resultStr); \ + return GCConfigStringHolder(resultStr); \ + } + +GC_CONFIGURATION_KEYS + +#undef BOOL_CONFIG +#undef INT_CONFIG +#undef STRING_CONFIG + +void GCConfig::Initialize() +{ +#define BOOL_CONFIG(name, key, default, unused_doc) \ + GCToEEInterface::GetBooleanConfigValue(key, &s_##name); + +#define INT_CONFIG(name, key, default, unused_doc) \ + GCToEEInterface::GetIntConfigValue(key, &s_##name); + +#define STRING_CONFIG(unused_name, unused_key, unused_doc) + +GC_CONFIGURATION_KEYS + +#undef BOOL_CONFIG +#undef INT_CONFIG +} diff --git a/src/gc/gcconfig.h b/src/gc/gcconfig.h new file mode 100644 index 0000000000..3a95857430 --- /dev/null +++ b/src/gc/gcconfig.h @@ -0,0 +1,137 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +#ifndef __GCCONFIG_H__ +#define __GCCONFIG_H__ + +// gcconfig.h - GC configuration management and retrieval. +// +// This file and the GCConfig class are designed to be the primary entry point +// for querying configuration information from within the GC. + +// GCConfigStringHolder is a wrapper around a configuration string obtained +// from the EE. 
Such strings must be disposed using GCToEEInterface::FreeStringConfigValue, +// so this class ensures that is done correctly. +// +// The name is unfortunately a little long, but "ConfigStringHolder" is already taken by the +// EE's config mechanism. +class GCConfigStringHolder +{ +private: + const char* m_str; + +public: + // Constructs a new GCConfigStringHolder around a string obtained from + // GCToEEInterface::GetStringConfigValue. + explicit GCConfigStringHolder(const char* str) + : m_str(str) {} + + // No copy operators - this type cannot be copied. + GCConfigStringHolder(const GCConfigStringHolder&) = delete; + GCConfigStringHolder& operator=(const GCConfigStringHolder&) = delete; + + // This type is returned by-value by string config functions, so it + // requires a move constructor. + GCConfigStringHolder(GCConfigStringHolder&&) = default; + + // Frees a string config value by delegating to GCToEEInterface::FreeStringConfigValue. + ~GCConfigStringHolder() + { + if (m_str) + { + GCToEEInterface::FreeStringConfigValue(m_str); + } + + m_str = nullptr; + } + + // Retrieves the wrapped config string. + const char* Get() const { return m_str; } +}; + +// Each one of these keys produces a method on GCConfig with the name "Get{name}", where {name} +// is the first parameter of the *_CONFIG macros below. 
+#define GC_CONFIGURATION_KEYS \ + BOOL_CONFIG(ServerGC, "gcServer", false, "Whether we should be using Server GC") \ + BOOL_CONFIG(ConcurrentGC, "gcConcurrent", true, "Whether we should be using Concurrent GC") \ + BOOL_CONFIG(ConservativeGC, "gcConservative", false, "Enables/Disables conservative GC") \ + BOOL_CONFIG(ForceCompact, "gcForceCompact", false, \ + "When set to true, always do compacting GC") \ + BOOL_CONFIG(RetainVM, "GCRetainVM", false, \ + "When set we put the segments that should be deleted on a standby list (instead of " \ + "releasing them back to the OS) which will be considered to satisfy new segment requests"\ + " (note that the same thing can be specified via API which is the supported way)") \ + BOOL_CONFIG(StressMix, "GCStressMix", false, \ + "Specifies whether the GC mix mode is enabled or not") \ + BOOL_CONFIG(BreakOnOOM, "GCBreakOnOOM", false, \ + "Does a DebugBreak at the soonest time we detect an OOM") \ + BOOL_CONFIG(NoAffinitize, "GCNoAffinitize", false, \ + "If set, do not affinitize server GC threads") \ + BOOL_CONFIG(LogEnabled, "GCLogEnabled", false, \ + "Specifies if you want to turn on logging in GC") \ + BOOL_CONFIG(ConfigLogEnabled, "GCConfigLogEnabled", false, \ + "Specifies the name of the GC config log file") \ + INT_CONFIG(HeapVerifyLevel, "HeapVerify", HEAPVERIFY_NONE, \ + "When set verifies the integrity of the managed heap on entry and exit of each GC") \ + INT_CONFIG(LOHCompactionMode, "GCLOHCompact", 0, "Specifies the LOH compaction mode") \ + INT_CONFIG(BGCSpinCount, "BGCSpinCount", 140, "Specifies the bgc spin count") \ + INT_CONFIG(BGCSpin, "BGCSpin", 2, "Specifies the bgc spin time") \ + INT_CONFIG(HeapCount, "GCHeapCount", 0, "Specifies the number of server GC heaps") \ + INT_CONFIG(Gen0Size, "GCgen0size", 0, "Specifies the smallest gen0 size") \ + INT_CONFIG(SegmentSize, "GCSegmentSize", 0, "Specifies the managed heap segment size") \ + INT_CONFIG(LatencyMode, "GCLatencyMode", -1, \ + "Specifies the GC latency 
mode - batch, interactive or low latency (note that the same " \ "thing can be specified via API which is the supported way") \ INT_CONFIG(LogFileSize, "GCLogFileSize", 0, "Specifies the GC log file size") \ INT_CONFIG(CompactRatio, "GCCompactRatio", 0, \ "Specifies the ratio compacting GCs vs sweeping") \ STRING_CONFIG(LogFile, "GCLogFile", "Specifies the name of the GC log file") \ STRING_CONFIG(ConfigLogFile, "GCConfigLogFile", \ "Specifies the name of the GC config log file") \ STRING_CONFIG(MixLogFile, "GCMixLog", \ "Specifies the name of the log file for GC mix statistics") + +// This class is responsible for retrieving configuration information +// for how the GC should operate. +class GCConfig +{ +#define BOOL_CONFIG(name, unused_key, unused_default, unused_doc) \ + public: static bool Get##name(); \ + private: static bool s_##name; +#define INT_CONFIG(name, unused_key, unused_default, unused_doc) \ + public: static int64_t Get##name(); \ + private: static int64_t s_##name; +#define STRING_CONFIG(name, unused_key, unused_doc) \ + public: static GCConfigStringHolder Get##name(); +GC_CONFIGURATION_KEYS +#undef BOOL_CONFIG +#undef INT_CONFIG +#undef STRING_CONFIG + +public: +// Flags that may inhabit the number returned for the HeapVerifyLevel config option. +// Keep this in sync with vm\eeconfig.h if this ever changes. +enum HeapVerifyFlags { + HEAPVERIFY_NONE = 0, + HEAPVERIFY_GC = 1, // Verify the heap at beginning and end of GC + HEAPVERIFY_BARRIERCHECK = 2, // Verify the brick table + HEAPVERIFY_SYNCBLK = 4, // Verify sync block scanning + + // the following options can be used to mitigate some of the overhead introduced + // by heap verification. some options might cause heap verification to be less + // effective depending on the scenario.
+ + HEAPVERIFY_NO_RANGE_CHECKS = 0x10, // Excludes checking if an OBJECTREF is within the bounds of the managed heap + HEAPVERIFY_NO_MEM_FILL = 0x20, // Excludes filling unused segment portions with fill pattern + HEAPVERIFY_POST_GC_ONLY = 0x40, // Performs heap verification post-GCs only (instead of before and after each GC) + HEAPVERIFY_DEEP_ON_COMPACT = 0x80 // Performs deep object verification only on compacting GCs. +}; + +// Initializes the GCConfig subsystem. Must be called before accessing any +// configuration information. +static void Initialize(); + +}; + +#endif // __GCCONFIG_H__ diff --git a/src/gc/gcenv.ee.standalone.inl b/src/gc/gcenv.ee.standalone.inl index f6954fc476..642d150976 100644 --- a/src/gc/gcenv.ee.standalone.inl +++ b/src/gc/gcenv.ee.standalone.inl @@ -5,12 +5,17 @@ #ifndef __GCTOENV_EE_STANDALONE_INL__ #define __GCTOENV_EE_STANDALONE_INL__ -#include "env/gcenv.ee.h" +#include "gcinterface.h" // The singular interface instance. All calls in GCToEEInterface // will be fowarded to this interface instance. extern IGCToCLR* g_theGCToCLR; +namespace +{ + +#include "env/gcenv.ee.h" + // A note about this: // In general, we don't want to pretend to be smarter than the compiler // and force it to inline things.
However, inlining is here is required @@ -236,6 +241,33 @@ ALWAYS_INLINE MethodTable* GCToEEInterface::GetFreeObjectMethodTable() assert(g_theGCToCLR != nullptr); return g_theGCToCLR->GetFreeObjectMethodTable(); } + +ALWAYS_INLINE bool GCToEEInterface::GetBooleanConfigValue(const char* key, bool* value) +{ + assert(g_theGCToCLR != nullptr); + return g_theGCToCLR->GetBooleanConfigValue(key, value); +} + +ALWAYS_INLINE bool GCToEEInterface::GetIntConfigValue(const char* key, int64_t* value) +{ + assert(g_theGCToCLR != nullptr); + return g_theGCToCLR->GetIntConfigValue(key, value); +} + +ALWAYS_INLINE bool GCToEEInterface::GetStringConfigValue(const char* key, const char** value) +{ + assert(g_theGCToCLR != nullptr); + return g_theGCToCLR->GetStringConfigValue(key, value); +} + +ALWAYS_INLINE void GCToEEInterface::FreeStringConfigValue(const char* value) +{ + assert(g_theGCToCLR != nullptr); + g_theGCToCLR->FreeStringConfigValue(value); +} + #undef ALWAYS_INLINE +} // anonymous namespace + #endif // __GCTOENV_EE_STANDALONE_INL__ diff --git a/src/gc/gchandletable.cpp b/src/gc/gchandletable.cpp index 52fede6299..63f2f79711 100644 --- a/src/gc/gchandletable.cpp +++ b/src/gc/gchandletable.cpp @@ -56,6 +56,18 @@ OBJECTHANDLE GCHandleStore::CreateDependentHandle(Object* primary, Object* secon return handle; } +void GCHandleStore::RelocateAsyncPinnedHandles(IGCHandleStore* pTarget) +{ + // assumption - the IGCHandleStore is an instance of GCHandleStore + GCHandleStore* other = static_cast<GCHandleStore*>(pTarget); + ::Ref_RelocateAsyncPinHandles(&_underlyingBucket, &other->_underlyingBucket); +} + +bool GCHandleStore::EnumerateAsyncPinnedHandles(async_pin_enum_fn callback, void* context) +{ + return !!::Ref_HandleAsyncPinHandles(callback, context); +} + GCHandleStore::~GCHandleStore() { ::Ref_DestroyHandleTableBucket(&_underlyingBucket); @@ -147,6 +159,16 @@ bool GCHandleManager::StoreObjectInHandleIfNull(OBJECTHANDLE handle, Object* obj return 
!!::HndFirstAssignHandle(handle, ObjectToOBJECTREF(object)); } +void GCHandleManager::SetDependentHandleSecondary(OBJECTHANDLE handle, Object* object) +{ + ::SetDependentHandleSecondary(handle, ObjectToOBJECTREF(object)); +} + +Object* GCHandleManager::GetDependentHandleSecondary(OBJECTHANDLE handle) +{ + return OBJECTREFToObject(::GetDependentHandleSecondary(handle)); +} + Object* GCHandleManager::InterlockedCompareExchangeObjectInHandle(OBJECTHANDLE handle, Object* object, Object* comparandObject) { return (Object*)::HndInterlockedCompareExchangeHandle(handle, ObjectToOBJECTREF(object), ObjectToOBJECTREF(comparandObject)); diff --git a/src/gc/gchandletableimpl.h b/src/gc/gchandletableimpl.h index 01c1c130ed..4be346fb28 100644 --- a/src/gc/gchandletableimpl.h +++ b/src/gc/gchandletableimpl.h @@ -23,6 +23,10 @@ public: virtual OBJECTHANDLE CreateDependentHandle(Object* primary, Object* secondary); + virtual void RelocateAsyncPinnedHandles(IGCHandleStore* pTarget); + + virtual bool EnumerateAsyncPinnedHandles(async_pin_enum_fn callback, void* context); + virtual ~GCHandleStore(); HandleTableBucket _underlyingBucket; @@ -59,6 +63,10 @@ public: virtual bool StoreObjectInHandleIfNull(OBJECTHANDLE handle, Object* object); + virtual void SetDependentHandleSecondary(OBJECTHANDLE handle, Object* object); + + virtual Object* GetDependentHandleSecondary(OBJECTHANDLE handle); + virtual Object* InterlockedCompareExchangeObjectInHandle(OBJECTHANDLE handle, Object* object, Object* comparandObject); }; diff --git a/src/gc/gcinterface.ee.h b/src/gc/gcinterface.ee.h index 7b868e780e..32dcc039ef 100644 --- a/src/gc/gcinterface.ee.h +++ b/src/gc/gcinterface.ee.h @@ -9,16 +9,16 @@ // of the execution engine. Everything that the GC does that requires the EE // to be informed or that requires EE action must go through this interface. 
// -// When FEATURE_STANDALONE_GC is defined, this class is named IGCToCLR and is +// When BUILD_AS_STANDALONE is defined, this class is named IGCToCLR and is // an abstract class. The EE will provide a class that fulfills this interface, -// and the GC will dispatch virtually on it to call into the EE. When FEATURE_STANDALONE_GC +// and the GC will dispatch virtually on it to call into the EE. When BUILD_AS_STANDALONE // is not defined, this class is named GCToEEInterface and the GC will dispatch statically on it. class IGCToCLR { public: // Suspends the EE for the given reason. virtual void SuspendEE(SUSPEND_REASON reason) = 0; - + // Resumes all paused threads, with a boolean indicating // if the EE is being restarted because a GC is complete. virtual @@ -166,6 +166,22 @@ public: // field to see how many bytes to skip before the next object on a heap segment begins. virtual MethodTable* GetFreeObjectMethodTable() = 0; + + // Asks the EE for the value of a given configuration key. If the EE does not know or does not + // have a value for the requested config key, false is returned and the value of the passed-in + // pointer is undefined. Otherwise, true is returned and the config key's value is written to + // the passed-in pointer. + virtual + bool GetBooleanConfigValue(const char* key, bool* value) = 0; + + virtual + bool GetIntConfigValue(const char* key, int64_t* value) = 0; + + virtual + bool GetStringConfigValue(const char* key, const char** value) = 0; + + virtual + void FreeStringConfigValue(const char* value) = 0; }; #endif // _GCINTERFACE_EE_H_ diff --git a/src/gc/gcinterface.h b/src/gc/gcinterface.h index 552a8caec8..aefa84b99b 100644 --- a/src/gc/gcinterface.h +++ b/src/gc/gcinterface.h @@ -171,16 +171,19 @@ class Object; class IGCHeap; class IGCHandleManager; -// Initializes the garbage collector. Should only be called -// once, during EE startup. Returns true if the initialization -// was successful, false otherwise.
-bool InitializeGarbageCollector(IGCToCLR* clrToGC, IGCHeap** gcHeap, IGCHandleManager** gcHandleTable, GcDacVars* gcDacVars); - -// The runtime needs to know whether we're using workstation or server GC -// long before the GCHeap is created. This function sets the type of -// heap that will be created, before InitializeGarbageCollector is called -// and the heap is actually recated. -void InitializeHeapType(bool bServerHeap); +// The function that initializes the garbage collector. +// Should only be called once: here, during EE startup. +// Returns true if the initialization was successful, false otherwise. +typedef bool (*InitializeGarbageCollectorFunction)( + /* In */ IGCToCLR*, + /* Out */ IGCHeap**, + /* Out */ IGCHandleManager**, + /* Out */ GcDacVars* +); + +// The name of the function that initializes the garbage collector, +// to be used as an argument to GetProcAddress. +#define INITIALIZE_GC_FUNCTION_NAME "InitializeGarbageCollector" #ifdef WRITE_BARRIER_CHECK //always defined, but should be 0 in Server GC @@ -390,6 +393,7 @@ typedef void (* record_surv_fn)(uint8_t* begin, uint8_t* end, ptrdiff_t reloc, v typedef void (* fq_walk_fn)(bool, void*); typedef void (* fq_scan_fn)(Object** ppObject, ScanContext *pSC, uint32_t dwFlags); typedef void (* handle_scan_fn)(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, bool isDependent); +typedef bool (* async_pin_enum_fn)(Object* object, void* context); // Opaque type for tracking object pointers #ifndef DACCESS_COMPILE @@ -417,6 +421,10 @@ public: virtual OBJECTHANDLE CreateDependentHandle(Object* primary, Object* secondary) = 0; + virtual void RelocateAsyncPinnedHandles(IGCHandleStore* pTarget) = 0; + + virtual bool EnumerateAsyncPinnedHandles(async_pin_enum_fn callback, void* context) = 0; + virtual ~IGCHandleStore() {}; }; @@ -449,6 +457,10 @@ public: virtual bool StoreObjectInHandleIfNull(OBJECTHANDLE handle, Object* object) = 0; + virtual void SetDependentHandleSecondary(OBJECTHANDLE
handle, Object* object) = 0; + + virtual Object* GetDependentHandleSecondary(OBJECTHANDLE handle) = 0; + virtual Object* InterlockedCompareExchangeObjectInHandle(OBJECTHANDLE handle, Object* object, Object* comparandObject) = 0; }; diff --git a/src/gc/gcpriv.h b/src/gc/gcpriv.h index 9f098ebe3b..08fedbbde3 100644 --- a/src/gc/gcpriv.h +++ b/src/gc/gcpriv.h @@ -234,46 +234,6 @@ const int policy_compact = 1; const int policy_expand = 2; #ifdef TRACE_GC - - -extern int print_level; -extern BOOL trace_gc; -extern int gc_trace_fac; - - -class hlet -{ - static hlet* bindings; - int prev_val; - int* pval; - hlet* prev_let; -public: - hlet (int& place, int value) - { - prev_val = place; - pval = &place; - place = value; - prev_let = bindings; - bindings = this; - } - ~hlet () - { - *pval = prev_val; - bindings = prev_let; - } -}; - - -#define let(p,v) hlet __x = hlet (p, v); - -#else //TRACE_GC - -#define gc_count -1 -#define let(s,v) - -#endif //TRACE_GC - -#ifdef TRACE_GC #define SEG_REUSE_LOG_0 7 #define SEG_REUSE_LOG_1 (SEG_REUSE_LOG_0 + 1) #define DT_LOG_0 (SEG_REUSE_LOG_1 + 1) @@ -299,15 +259,12 @@ void GCLog (const char *fmt, ... ); //#define dprintf(l,x) {if ((l==GTC_LOG) || (l <= 1)) {GCLog x;}} //#define dprintf(l,x) {if (trace_gc && ((l <= print_level) || (l==GTC_LOG))) {GCLog x;}} //#define dprintf(l,x) {if (l==GTC_LOG) {printf ("\n");printf x ; fflush(stdout);}} -#else //SIMPLE_DPRINTF +#else -// The GCTrace output goes to stdout by default but can get sent to the stress log or the logfile if the -// reg key GCTraceFacility is set. THe stress log can only take a format string and 4 numbers or -// string literals. -#define dprintf(l,x) {if (trace_gc && (l<=print_level)) { \ - if ( !gc_trace_fac) {printf ("\n");printf x ; fflush(stdout);} \ - else if ( gc_trace_fac == 2) {LogSpewAlways x;LogSpewAlways ("\n");} \ - else if ( gc_trace_fac == 1) {STRESS_LOG_VA(x);}}} +// Nobody used the logging mechanism that used to be here. 
If we find ourselves +// wanting to inspect GC logs on unmodified builds, we can use this define here +// to do so. +#define dprintf(l, x) #endif //SIMPLE_DPRINTF @@ -602,7 +559,7 @@ struct GCStatistics : public StatisticsBase { // initialized to the contents of COMPlus_GcMixLog, or NULL, if not present - static TCHAR* logFileName; + static char* logFileName; static FILE* logFile; // number of times we executed a background GC, a foreground GC, or a diff --git a/src/gc/handletable.cpp b/src/gc/handletable.cpp index 05137e4d68..da50483a88 100644 --- a/src/gc/handletable.cpp +++ b/src/gc/handletable.cpp @@ -1286,9 +1286,9 @@ uint32_t HndCountAllHandles(BOOL fUseLocks) return uCount; } -#ifndef FEATURE_REDHAWK -BOOL Ref_HandleAsyncPinHandles() +BOOL Ref_HandleAsyncPinHandles(async_pin_enum_fn asyncPinCallback, void* context) { +#ifndef FEATURE_REDHAWK CONTRACTL { NOTHROW; @@ -1297,22 +1297,27 @@ BOOL Ref_HandleAsyncPinHandles() } CONTRACTL_END; + AsyncPinCallbackContext callbackCtx(asyncPinCallback, context); HandleTableBucket *pBucket = g_HandleTableMap.pBuckets[0]; BOOL result = FALSE; int limit = getNumberOfSlots(); for (int n = 0; n < limit; n ++ ) { - if (TableHandleAsyncPinHandles(Table(pBucket->pTable[n]))) + if (TableHandleAsyncPinHandles(Table(pBucket->pTable[n]), callbackCtx)) { result = TRUE; } } return result; +#else + return true; +#endif // !FEATURE_REDHAWK } void Ref_RelocateAsyncPinHandles(HandleTableBucket *pSource, HandleTableBucket *pTarget) { +#ifndef FEATURE_REDHAWK CONTRACTL { NOTHROW; @@ -1325,8 +1330,8 @@ void Ref_RelocateAsyncPinHandles(HandleTableBucket *pSource, HandleTableBucket { TableRelocateAsyncPinHandles(Table(pSource->pTable[n]), Table(pTarget->pTable[n])); } -} #endif // !FEATURE_REDHAWK +} /*--------------------------------------------------------------------------*/ diff --git a/src/gc/handletablecache.cpp b/src/gc/handletablecache.cpp index aaf3370bd6..498e688677 100644 --- a/src/gc/handletablecache.cpp +++ 
b/src/gc/handletablecache.cpp @@ -57,7 +57,7 @@ void SpinUntil(void *pCond, BOOL fNonZero) #endif //_DEBUG // on MP machines, allow ourselves some spin time before sleeping - uint32_t uNonSleepSpins = 8 * (g_SystemInfo.dwNumberOfProcessors - 1); + static uint32_t uNonSleepSpins = 8 * (GCToOSInterface::GetCurrentProcessCpuCount() - 1); // spin until the specificed condition is met while ((*(uintptr_t *)pCond != 0) != (fNonZero != 0)) diff --git a/src/gc/handletablecore.cpp b/src/gc/handletablecore.cpp index 228b8bfa09..4548237eda 100644 --- a/src/gc/handletablecore.cpp +++ b/src/gc/handletablecore.cpp @@ -516,14 +516,14 @@ BOOL SegmentInitialize(TableSegment *pSegment, HandleTable *pTable) #ifndef FEATURE_REDHAWK // todo: implement SafeInt // Prefast overflow sanity check the addition - if (!ClrSafeInt<uint32_t>::addition(dwCommit, g_SystemInfo.dwPageSize, dwCommit)) + if (!ClrSafeInt<uint32_t>::addition(dwCommit, OS_PAGE_SIZE, dwCommit)) { return FALSE; } #endif // !FEATURE_REDHAWK // Round down to the dwPageSize - dwCommit &= ~(g_SystemInfo.dwPageSize - 1); + dwCommit &= ~(OS_PAGE_SIZE - 1); // commit the header if (!GCToOSInterface::VirtualCommit(pSegment, dwCommit)) @@ -908,7 +908,7 @@ void SegmentCompactAsyncPinHandles(TableSegment *pSegment, TableSegment **ppWork // Mark AsyncPinHandles ready to be cleaned when the marker job is processed -BOOL SegmentHandleAsyncPinHandles (TableSegment *pSegment) +BOOL SegmentHandleAsyncPinHandles (TableSegment *pSegment, const AsyncPinCallbackContext &callbackCtx) { CONTRACTL { @@ -945,11 +945,10 @@ BOOL SegmentHandleAsyncPinHandles (TableSegment *pSegment) _UNCHECKED_OBJECTREF value = *pValue; if (!HndIsNullOrDestroyedHandle(value)) { - _ASSERTE (value->GetMethodTable() == g_pOverlappedDataClass); - OVERLAPPEDDATAREF overlapped = (OVERLAPPEDDATAREF)(ObjectToOBJECTREF((Object*)value)); - if (overlapped->GetAppDomainId() != DefaultADID && overlapped->HasCompleted()) + // calls back into the VM using the callback given to + // 
Ref_HandleAsyncPinHandles + if (callbackCtx.Invoke((Object*)value)) { - overlapped->HandleAsyncPinHandle(); result = TRUE; } } @@ -1024,7 +1023,7 @@ bool SegmentRelocateAsyncPinHandles (TableSegment *pSegment, HandleTable *pTarge // We will queue a marker Overlapped to io completion port. We use the marker // to make sure that all iocompletion jobs before this marker have been processed. // After that we can free the async pinned handles. -BOOL TableHandleAsyncPinHandles(HandleTable *pTable) +BOOL TableHandleAsyncPinHandles(HandleTable *pTable, const AsyncPinCallbackContext &callbackCtx) { CONTRACTL { @@ -1043,7 +1042,7 @@ BOOL TableHandleAsyncPinHandles(HandleTable *pTable) while (pSegment) { - if (SegmentHandleAsyncPinHandles (pSegment)) + if (SegmentHandleAsyncPinHandles (pSegment, callbackCtx)) { result = TRUE; } @@ -1444,7 +1443,7 @@ uint32_t SegmentInsertBlockFromFreeListWorker(TableSegment *pSegment, uint32_t u void * pvCommit = pSegment->rgValue + (uCommitLine * HANDLE_HANDLES_PER_BLOCK); // we should commit one more page of handles - uint32_t dwCommit = g_SystemInfo.dwPageSize; + uint32_t dwCommit = OS_PAGE_SIZE; // commit the memory if (!GCToOSInterface::VirtualCommit(pvCommit, dwCommit)) @@ -1809,7 +1808,7 @@ BOOL DoesSegmentNeedsToTrimExcessPages(TableSegment *pSegment) if (uEmptyLine < uDecommitLine) { // derive some useful info about the page size - uintptr_t dwPageRound = (uintptr_t)g_SystemInfo.dwPageSize - 1; + uintptr_t dwPageRound = (uintptr_t)OS_PAGE_SIZE - 1; uintptr_t dwPageMask = ~dwPageRound; // compute the address corresponding to the empty line @@ -1853,7 +1852,7 @@ void SegmentTrimExcessPages(TableSegment *pSegment) if (uEmptyLine < uDecommitLine) { // derive some useful info about the page size - uintptr_t dwPageRound = (uintptr_t)g_SystemInfo.dwPageSize - 1; + uintptr_t dwPageRound = (uintptr_t)OS_PAGE_SIZE - 1; uintptr_t dwPageMask = ~dwPageRound; // compute the address corresponding to the empty line @@ -1875,7 +1874,7 @@ void 
SegmentTrimExcessPages(TableSegment *pSegment) pSegment->bCommitLine = (uint8_t)((dwLo - (size_t)pSegment->rgValue) / HANDLE_BYTES_PER_BLOCK); // compute the address for the new decommit line - size_t dwDecommitAddr = dwLo - g_SystemInfo.dwPageSize; + size_t dwDecommitAddr = dwLo - OS_PAGE_SIZE; // assume a decommit line of zero until we know otheriwse uDecommitLine = 0; diff --git a/src/gc/handletablepriv.h b/src/gc/handletablepriv.h index 59c08ca744..cda1cb08aa 100644 --- a/src/gc/handletablepriv.h +++ b/src/gc/handletablepriv.h @@ -341,6 +341,37 @@ struct HandleTypeCache int32_t lFreeIndex; }; +/* + * Async pin EE callback context, used to call back tot he EE when enumerating + * over async pinned handles. + */ +class AsyncPinCallbackContext +{ +private: + async_pin_enum_fn m_callback; + void* m_context; + +public: + /* + * Constructs a new AsyncPinCallbackContext from a callback and a context, + * which will be passed to the callback as its second parameter every time + * it is invoked. + */ + AsyncPinCallbackContext(async_pin_enum_fn callback, void* context) + : m_callback(callback), m_context(context) + {} + + /* + * Invokes the callback with the given argument, returning the callback's + * result.' + */ + bool Invoke(Object* argument) const + { + assert(m_callback != nullptr); + return m_callback(argument, m_context); + } +}; + /*---------------------------------------------------------------------------*/ @@ -759,7 +790,7 @@ void SegmentFree(TableSegment *pSegment); * Mark ready for all non-pending OverlappedData that get moved to default domain. 
* */ -BOOL TableHandleAsyncPinHandles(HandleTable *pTable); +BOOL TableHandleAsyncPinHandles(HandleTable *pTable, const AsyncPinCallbackContext& callbackCtx); /* * TableRelocateAsyncPinHandles diff --git a/src/gc/objecthandle.cpp b/src/gc/objecthandle.cpp index 7df915fb72..838947dba1 100644 --- a/src/gc/objecthandle.cpp +++ b/src/gc/objecthandle.cpp @@ -563,10 +563,10 @@ int getNumberOfSlots() return 1; #ifdef FEATURE_REDHAWK - return g_SystemInfo.dwNumberOfProcessors; + return GCToOSInterface::GetCurrentProcessCpuCount(); #else return (CPUGroupInfo::CanEnableGCCPUGroups() ? CPUGroupInfo::GetNumActiveProcessors() : - g_SystemInfo.dwNumberOfProcessors); + GCToOSInterface::GetCurrentProcessCpuCount()); #endif } diff --git a/src/gc/objecthandle.h b/src/gc/objecthandle.h index b3e4b58a1c..6ae75b45e9 100644 --- a/src/gc/objecthandle.h +++ b/src/gc/objecthandle.h @@ -87,7 +87,7 @@ bool Ref_Initialize(); void Ref_Shutdown(); HandleTableBucket* Ref_CreateHandleTableBucket(void* context); bool Ref_InitializeHandleTableBucket(HandleTableBucket* bucket, void* context); -BOOL Ref_HandleAsyncPinHandles(); +BOOL Ref_HandleAsyncPinHandles(async_pin_enum_fn callback, void* context); void Ref_RelocateAsyncPinHandles(HandleTableBucket *pSource, HandleTableBucket *pTarget); void Ref_RemoveHandleTableBucket(HandleTableBucket *pBucket); void Ref_DestroyHandleTableBucket(HandleTableBucket *pBucket); diff --git a/src/gc/sample/CMakeLists.txt b/src/gc/sample/CMakeLists.txt index 5fe7887963..42f097a6e3 100644 --- a/src/gc/sample/CMakeLists.txt +++ b/src/gc/sample/CMakeLists.txt @@ -8,6 +8,7 @@ include_directories(../env) set(SOURCES GCSample.cpp gcenv.ee.cpp + ../gcconfig.cpp ../gccommon.cpp ../gceewks.cpp ../gchandletable.cpp diff --git a/src/gc/sample/GCSample.cpp b/src/gc/sample/GCSample.cpp index 0a771b7e91..43cb23878e 100644 --- a/src/gc/sample/GCSample.cpp +++ b/src/gc/sample/GCSample.cpp @@ -107,6 +107,8 @@ void WriteBarrier(Object ** dst, Object * ref) ErectWriteBarrier(dst, ref); 
} +extern "C" bool InitializeGarbageCollector(IGCToCLR* clrToGC, IGCHeap** gcHeap, IGCHandleManager** gcHandleManager, GcDacVars* gcDacVars); + int __cdecl main(int argc, char* argv[]) { // diff --git a/src/gc/sample/gcenv.ee.cpp b/src/gc/sample/gcenv.ee.cpp index fa6efbf2d6..03d960819a 100644 --- a/src/gc/sample/gcenv.ee.cpp +++ b/src/gc/sample/gcenv.ee.cpp @@ -286,64 +286,39 @@ bool GCToEEInterface::EagerFinalized(Object* obj) return false; } -MethodTable* GCToEEInterface::GetFreeObjectMethodTable() +bool GCToEEInterface::GetBooleanConfigValue(const char* key, bool* value) { - return g_pFreeObjectMethodTable; + return false; } -bool IsGCSpecialThread() +bool GCToEEInterface::GetIntConfigValue(const char* key, int64_t* value) { - // TODO: Implement for background GC return false; } -bool IsGCThread() +bool GCToEEInterface::GetStringConfigValue(const char* key, const char** value) { return false; } -void SwitchToWriteWatchBarrier() +void GCToEEInterface::FreeStringConfigValue(const char *value) { -} -void SwitchToNonWriteWatchBarrier() -{ } -void LogSpewAlways(const char * /*fmt*/, ...) 
+MethodTable* GCToEEInterface::GetFreeObjectMethodTable() { + return g_pFreeObjectMethodTable; } -uint32_t CLRConfig::GetConfigValue(ConfigDWORDInfo eType) +bool IsGCSpecialThread() { - switch (eType) - { - case UNSUPPORTED_BGCSpinCount: - return 140; - - case UNSUPPORTED_BGCSpin: - return 2; - - case UNSUPPORTED_GCLogEnabled: - case UNSUPPORTED_GCLogFile: - case UNSUPPORTED_GCLogFileSize: - case EXTERNAL_GCStressStart: - case INTERNAL_GCStressStartAtJit: - case INTERNAL_DbgDACSkipVerifyDlls: - return 0; - - case Config_COUNT: - default: -#ifdef _MSC_VER -#pragma warning(suppress:4127) // Constant conditional expression in ASSERT below -#endif - ASSERT(!"Unknown config value type"); - return 0; - } + // TODO: Implement for background GC + return false; } -HRESULT CLRConfig::GetConfigValue(ConfigStringInfo /*eType*/, TCHAR * * outVal) +bool IsGCThread() { - *outVal = NULL; - return 0; + return false; } + diff --git a/src/gc/sample/gcenv.h b/src/gc/sample/gcenv.h index 4505f1af30..14f60d8c6e 100644 --- a/src/gc/sample/gcenv.h +++ b/src/gc/sample/gcenv.h @@ -4,9 +4,9 @@ // The sample is to be kept simple, so building the sample // in tandem with a standalone GC is currently not supported. 
-#ifdef FEATURE_STANDALONE_GC -#undef FEATURE_STANDALONE_GC -#endif // FEATURE_STANDALONE_GC +#ifdef BUILD_AS_STANDALONE +#undef BUILD_AS_STANDALONE +#endif // BUILD_AS_STANDALONE #if defined(_DEBUG) #ifndef _DEBUG_IMPL diff --git a/src/gc/unix/gcenv.unix.cpp b/src/gc/unix/gcenv.unix.cpp index bca0dfedf2..eafd141fd5 100644 --- a/src/gc/unix/gcenv.unix.cpp +++ b/src/gc/unix/gcenv.unix.cpp @@ -32,10 +32,6 @@ static_assert(sizeof(uint64_t) == 8, "unsigned long isn't 8 bytes"); #include "gcenv.base.h" #include "gcenv.os.h" -#ifndef FEATURE_STANDALONE_GC - #error "A GC-private implementation of GCToOSInterface should only be used with FEATURE_STANDALONE_GC" -#endif // FEATURE_STANDALONE_GC - #if HAVE_SYS_TIME_H #include <sys/time.h> #else @@ -714,6 +710,18 @@ bool GCToOSInterface::CreateThread(GCThreadFunction function, void* param, GCThr return (st == 0); } +// Gets the total number of processors on the machine, not taking +// into account current process affinity. +// Return: +// Number of processors on the machine +uint32_t GCToOSInterface::GetTotalProcessorCount() +{ + // Calculated in GCToOSInterface::Initialize using + // sysconf(_SC_NPROCESSORS_ONLN) + return g_logicalCpuCount; +} + + // Initialize the critical section void CLRCriticalSection::Initialize() { diff --git a/src/gc/windows/gcenv.windows.cpp b/src/gc/windows/gcenv.windows.cpp index 3749f06a68..c543b0413a 100644 --- a/src/gc/windows/gcenv.windows.cpp +++ b/src/gc/windows/gcenv.windows.cpp @@ -603,6 +603,15 @@ bool GCToOSInterface::CreateThread(GCThreadFunction function, void* param, GCThr return true; } +// Gets the total number of processors on the machine, not taking +// into account current process affinity. 
+// Return: +// Number of processors on the machine +uint32_t GCToOSInterface::GetTotalProcessorCount() +{ + return g_SystemInfo.dwNumberOfProcessors; +} + // Initialize the critical section void CLRCriticalSection::Initialize() { diff --git a/src/gc/wks/CMakeLists.txt b/src/gc/wks/CMakeLists.txt deleted file mode 100644 index fcb95a385e..0000000000 --- a/src/gc/wks/CMakeLists.txt +++ /dev/null @@ -1 +0,0 @@ -add_library_clr(gc_wks STATIC ${GC_SOURCES_WKS}) diff --git a/src/inc/clrconfigvalues.h b/src/inc/clrconfigvalues.h index c4722bc44a..8a21a9d8fd 100644 --- a/src/inc/clrconfigvalues.h +++ b/src/inc/clrconfigvalues.h @@ -325,7 +325,6 @@ RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_GCLogFileSize, W("GCLogFileSize"), 0, "Spec RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_GCCompactRatio, W("GCCompactRatio"), 0, "Specifies the ratio compacting GCs vs sweeping ") RETAIL_CONFIG_DWORD_INFO_DIRECT_ACCESS(EXTERNAL_GCPollType, W("GCPollType"), "") RETAIL_CONFIG_STRING_INFO_EX(EXTERNAL_NewGCCalc, W("NewGCCalc"), "", CLRConfig::REGUTIL_default) -RETAIL_CONFIG_DWORD_INFO_DIRECT_ACCESS(UNSUPPORTED_GCprnLvl, W("GCprnLvl"), "Specifies the maximum level of GC logging") RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_GCRetainVM, W("GCRetainVM"), 0, "When set we put the segments that should be deleted on a standby list (instead of releasing them back to the OS) which will be considered to satisfy new segment requests (note that the same thing can be specified via API which is the supported way)") RETAIL_CONFIG_DWORD_INFO_DIRECT_ACCESS(UNSUPPORTED_GCSegmentSize, W("GCSegmentSize"), "Specifies the managed heap segment size") RETAIL_CONFIG_DWORD_INFO_DIRECT_ACCESS(UNSUPPORTED_GCLOHCompact, W("GCLOHCompact"), "Specifies the LOH compaction mode") @@ -334,9 +333,6 @@ RETAIL_CONFIG_DWORD_INFO_EX(EXTERNAL_GCStress, W("GCStress"), 0, "trigger GCs at CONFIG_DWORD_INFO_EX(INTERNAL_GcStressOnDirectCalls, W("GcStressOnDirectCalls"), 0, "whether to trigger a GC on direct calls", CLRConfig::REGUTIL_default) 
RETAIL_CONFIG_DWORD_INFO(EXTERNAL_GCStressStart, W("GCStressStart"), 0, "start GCStress after N stress GCs have been attempted") RETAIL_CONFIG_DWORD_INFO(INTERNAL_GCStressStartAtJit, W("GCStressStartAtJit"), 0, "start GCStress after N items are jitted") -RETAIL_CONFIG_DWORD_INFO_DIRECT_ACCESS(UNSUPPORTED_GCtraceEnd, W("GCtraceEnd"), "Specifies the index of the GC when the logging should end") -CONFIG_DWORD_INFO_DIRECT_ACCESS(INTERNAL_GCtraceFacility, W("GCtraceFacility"), "Specifies where to log to (this allows you to log to console, the stress log or a normal CLR log (good when you need to correlate the GC activities with other CLR activities)") -RETAIL_CONFIG_DWORD_INFO_DIRECT_ACCESS(UNSUPPORTED_GCtraceStart, W("GCtraceStart"), "Specifies the index of the GC when the logging should start") RETAIL_CONFIG_DWORD_INFO_DIRECT_ACCESS(EXTERNAL_gcTrimCommitOnLowMemory, W("gcTrimCommitOnLowMemory"), "When set we trim the committed space more aggressively for the ephemeral seg. This is used for running many instances of server processes where they want to keep as little memory committed as possible") RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_BGCSpinCount, W("BGCSpinCount"), 140, "Specifies the bgc spin count") RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_BGCSpin, W("BGCSpin"), 2, "Specifies the bgc spin time") @@ -346,6 +342,8 @@ RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_GCNumaAware, W("GCNumaAware"), 1, "Specifie RETAIL_CONFIG_DWORD_INFO(EXTERNAL_GCCpuGroup, W("GCCpuGroup"), 0, "Specifies if to enable GC to support CPU groups") RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_GCHeapCount, W("GCHeapCount"), 0, "") RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_GCNoAffinitize, W("GCNoAffinitize"), 0, "") +RETAIL_CONFIG_DWORD_INFO(EXTERNAL_GCUseStandalone, W("GCUseStandalone"), 0, "") +RETAIL_CONFIG_STRING_INFO(EXTERNAL_GCStandaloneLocation, W("GCStandaloneLocation"), "") // // IBC diff --git a/src/vm/CMakeLists.txt b/src/vm/CMakeLists.txt index 3895f710b0..4e6a523ba2 100644 --- a/src/vm/CMakeLists.txt +++ 
b/src/vm/CMakeLists.txt @@ -13,11 +13,15 @@ add_definitions(-D_UNICODE) if(CMAKE_CONFIGURATION_TYPES) # multi-configuration generator? foreach (Config DEBUG CHECKED) - set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS $<$<CONFIG:${Config}>:WRITE_BARRIER_CHECK=1>) + if(NOT FEATURE_STANDALONE_GC_ONLY) + set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS $<$<CONFIG:${Config}>:WRITE_BARRIER_CHECK=1>) + endif(NOT FEATURE_STANDALONE_GC_ONLY) endforeach (Config) else() if(UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG OR UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED) - add_definitions(-DWRITE_BARRIER_CHECK=1) + if(NOT FEATURE_STANDALONE_GC_ONLY) + add_definitions(-DWRITE_BARRIER_CHECK=1) + endif(NOT FEATURE_STANDALONE_GC_ONLY) endif(UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG OR UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED) endif(CMAKE_CONFIGURATION_TYPES) @@ -117,6 +121,18 @@ set(VM_SOURCES_DAC_AND_WKS_COMMON zapsig.cpp ) +set( GC_SOURCES_DAC_AND_WKS_COMMON + ../gc/gcconfig.cpp + ../gc/gccommon.cpp + ../gc/gcscan.cpp + ../gc/gcsvr.cpp + ../gc/gcwks.cpp + ../gc/handletable.cpp + ../gc/handletablecore.cpp + ../gc/handletablescan.cpp + ../gc/objecthandle.cpp + ../gc/softwarewritewatch.cpp) + if(FEATURE_READYTORUN) list(APPEND VM_SOURCES_DAC_AND_WKS_COMMON readytoruninfo.cpp @@ -129,6 +145,9 @@ set(VM_SOURCES_DAC threaddebugblockinginfo.cpp ) +set(GC_SOURCES_DAC + ${GC_SOURCES_DAC_AND_WKS_COMMON}) + set(VM_SOURCES_WKS ${VM_SOURCES_DAC_AND_WKS_COMMON} appdomainnative.cpp @@ -177,7 +196,9 @@ set(VM_SOURCES_WKS finalizerthread.cpp frameworkexceptionloader.cpp gccover.cpp - gcenv.ee.cpp + gcenv.ee.static.cpp + gcenv.ee.common.cpp + gcenv.os.cpp gchelpers.cpp genmeth.cpp hosting.cpp @@ -239,12 +260,25 @@ set(VM_SOURCES_WKS ${VM_SOURCES_GDBJIT} ) +set(GC_SOURCES_WKS + ${GC_SOURCES_DAC_AND_WKS_COMMON} + ../gc/gchandletable.cpp + ../gc/gceesvr.cpp + ../gc/gceewks.cpp + ../gc/handletablecache.cpp) + if(FEATURE_EVENT_TRACE) list(APPEND VM_SOURCES_WKS eventtrace.cpp ) 
endif(FEATURE_EVENT_TRACE) +if(FEATURE_STANDALONE_GC) + list(APPEND VM_SOURCES_WKS + gcenv.ee.standalone.cpp + ) +endif(FEATURE_STANDALONE_GC) + if(NOT FEATURE_STANDALONE_GC) list(APPEND VM_SOURCES_WKS gcenv.os.cpp @@ -472,6 +506,22 @@ list(APPEND VM_SOURCES_DAC ${VM_SOURCES_DAC_AND_WKS_ARCH} ) +# The default option for FEATURE_STANDALONE_GC builds a standalone +# and non-standalone GC, linking the non-standalone GC into coreclr.dll. +# For testing purposes, FEATURE_STANDALONE_GC_ONLY instead only builds and +# links the non-standalone GC into coreclr.dll. +if (NOT FEATURE_STANDALONE_GC_ONLY) + list(APPEND VM_SOURCES_WKS + ${GC_SOURCES_WKS} + ) +endif(NOT FEATURE_STANDALONE_GC_ONLY) + +# The DAC does need GC sources in order to link correctly, even if +# it's not used. +list(APPEND VM_SOURCES_DAC + ${GC_SOURCES_DAC} +) + convert_to_absolute_path(VM_SOURCES_WKS ${VM_SOURCES_WKS}) convert_to_absolute_path(VM_SOURCES_WKS_ARCH_ASM ${VM_SOURCES_WKS_ARCH_ASM}) convert_to_absolute_path(VM_SOURCES_DAC ${VM_SOURCES_DAC}) diff --git a/src/vm/appdomain.cpp b/src/vm/appdomain.cpp index 7468f0b4e2..56b5aa1256 100644 --- a/src/vm/appdomain.cpp +++ b/src/vm/appdomain.cpp @@ -9084,18 +9084,16 @@ void AppDomain::HandleAsyncPinHandles() } CONTRACTL_END; - // TODO: Temporarily casting stuff here until Ref_RelocateAsyncPinHandles is moved to the interface. - HandleTableBucket *pBucket = (HandleTableBucket*)m_handleStore; + IGCHandleStore *pBucket = m_handleStore; // IO completion port picks IO job using FIFO. Here is how we know which AsyncPinHandle can be freed. // 1. We mark all non-pending AsyncPinHandle with READYTOCLEAN. // 2. We queue a dump Overlapped to the IO completion as a marker. // 3. When the Overlapped is picked up by completion port, we wait until all previous IO jobs are processed. // 4. Then we can delete all AsyncPinHandle marked with READYTOCLEAN. 
- HandleTableBucket *pBucketInDefault = (HandleTableBucket*)SystemDomain::System()->DefaultDomain()->m_handleStore; + IGCHandleStore *pBucketInDefault = SystemDomain::System()->DefaultDomain()->m_handleStore; - // TODO: When this function is moved to the interface it will take void*s - Ref_RelocateAsyncPinHandles(pBucket, pBucketInDefault); + pBucket->RelocateAsyncPinnedHandles(pBucketInDefault); OverlappedDataObject::RequestCleanup(); } diff --git a/src/vm/ceemain.cpp b/src/vm/ceemain.cpp index de6059a6ec..ec16bdd153 100644 --- a/src/vm/ceemain.cpp +++ b/src/vm/ceemain.cpp @@ -478,8 +478,8 @@ void InitializeStartupFlags() g_IGCconcurrent = 0; - InitializeHeapType((flags & STARTUP_SERVER_GC) != 0); g_heap_type = (flags & STARTUP_SERVER_GC) == 0 ? GC_HEAP_WKS : GC_HEAP_SVR; + g_IGCHoardVM = (flags & STARTUP_HOARD_GC_VM) == 0 ? 0 : 1; } #endif // CROSSGEN_COMPILE @@ -2440,6 +2440,101 @@ BOOL ExecuteDLL_ReturnOrThrow(HRESULT hr, BOOL fFromThunk) // Initialize the Garbage Collector // +// Prototype for the function that initialzes the garbage collector. +// Should only be called once: here, during EE startup. +// Returns true if the initialization was successful, false otherwise. +// +// When using a standalone GC, this function is loaded dynamically using +// GetProcAddress. +extern "C" bool InitializeGarbageCollector(IGCToCLR* clrToGC, IGCHeap** gcHeap, IGCHandleManager** gcHandleManager, GcDacVars* gcDacVars); + +#ifdef FEATURE_STANDALONE_GC + +void LoadGarbageCollector() +{ + CONTRACTL { + THROWS; + GC_NOTRIGGER; + MODE_ANY; + } CONTRACTL_END; + + TCHAR *standaloneGc = nullptr; + CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_GCStandaloneLocation, &standaloneGc); + HMODULE hMod; + if (!standaloneGc) + { +#ifdef FEATURE_STANDALONE_GC_ONLY + // if the user has set GCUseStandalone but has not given us a standalone location, + // try and load the initialization symbol from the current module. 
+ hMod = GetModuleInst(); +#else + ThrowHR(E_FAIL); +#endif // FEATURE_STANDALONE_GC_ONLY + } + else + { + hMod = CLRLoadLibrary(standaloneGc); + } + + if (!hMod) + { + ThrowHR(E_FAIL); + } + + InitializeGarbageCollectorFunction igcf = (InitializeGarbageCollectorFunction)GetProcAddress(hMod, INITIALIZE_GC_FUNCTION_NAME); + if (!igcf) + { + ThrowHR(E_FAIL); + } + + // at this point we are committing to using the standalone GC + // given to us. + IGCToCLR* gcToClr = new (nothrow) standalone::GCToEEInterface(); + if (!gcToClr) + { + ThrowOutOfMemory(); + } + + IGCHandleManager *pGcHandleManager; + IGCHeap *pGCHeap; + if (!igcf(gcToClr, &pGCHeap, &pGcHandleManager, &g_gc_dac_vars)) + { + ThrowOutOfMemory(); + } + + assert(pGCHeap != nullptr); + assert(pGcHandleManager != nullptr); + g_pGCHeap = pGCHeap; + g_pGCHandleManager = pGcHandleManager; + g_gcDacGlobals = &g_gc_dac_vars; +} + +#endif // FEATURE_STANDALONE_GC + +void LoadStaticGarbageCollector() +{ + CONTRACTL{ + THROWS; + GC_TRIGGERS; + MODE_ANY; + } CONTRACTL_END; + + IGCHandleManager *pGcHandleManager; + IGCHeap *pGCHeap; + + if (!InitializeGarbageCollector(nullptr, &pGCHeap, &pGcHandleManager, &g_gc_dac_vars)) + { + ThrowOutOfMemory(); + } + + assert(pGCHeap != nullptr); + assert(pGcHandleManager != nullptr); + g_pGCHeap = pGCHeap; + g_pGCHandleManager = pGcHandleManager; + g_gcDacGlobals = &g_gc_dac_vars; +} + + void InitializeGarbageCollector() { CONTRACTL{ @@ -2463,25 +2558,19 @@ void InitializeGarbageCollector() g_pFreeObjectMethodTable->SetComponentSize(1); #ifdef FEATURE_STANDALONE_GC - IGCToCLR* gcToClr = new (nothrow) GCToEEInterface(); - if (!gcToClr) - ThrowOutOfMemory(); -#else - IGCToCLR* gcToClr = nullptr; -#endif - - IGCHandleManager *pGcHandleManager; - - IGCHeap *pGCHeap; - if (!InitializeGarbageCollector(gcToClr, &pGCHeap, &pGcHandleManager, &g_gc_dac_vars)) + if (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_GCUseStandalone) +#ifdef FEATURE_STANDALONE_GC_ONLY + || true +#endif // 
FEATURE_STANDALONE_GC_ONLY + ) { - ThrowOutOfMemory(); + LoadGarbageCollector(); + } + else +#endif // FEATURE_STANDALONE_GC + { + LoadStaticGarbageCollector(); } - - assert(pGCHeap != nullptr); - g_pGCHeap = pGCHeap; - g_pGCHandleManager = pGcHandleManager; - g_gcDacGlobals = &g_gc_dac_vars; // Apparently the Windows linker removes global variables if they are never // read from, which is a problem for g_gcDacGlobals since it's expected that diff --git a/src/vm/comdependenthandle.cpp b/src/vm/comdependenthandle.cpp index 4763e4833a..b021865d53 100644 --- a/src/vm/comdependenthandle.cpp +++ b/src/vm/comdependenthandle.cpp @@ -73,8 +73,9 @@ FCIMPL2(Object*, DependentHandle::nGetPrimaryAndSecondary, OBJECTHANDLE handle, OBJECTREF primary = ObjectFromHandle(handle); + IGCHandleManager *mgr = GCHandleUtilities::GetGCHandleManager(); // Secondary is tracked only if primary is non-null - *outSecondary = (primary != NULL) ? OBJECTREFToObject(GetDependentHandleSecondary(handle)) : NULL; + *outSecondary = (primary != NULL) ? 
mgr->GetDependentHandleSecondary(handle) : NULL; return OBJECTREFToObject(primary); } @@ -86,8 +87,8 @@ FCIMPL2(VOID, DependentHandle::nSetPrimary, OBJECTHANDLE handle, Object *_primar _ASSERTE(handle != NULL); - OBJECTREF primary(_primary); - StoreObjectInHandle(handle, primary); + IGCHandleManager *mgr = GCHandleUtilities::GetGCHandleManager(); + mgr->StoreObjectInHandle(handle, _primary); } FCIMPLEND @@ -97,7 +98,7 @@ FCIMPL2(VOID, DependentHandle::nSetSecondary, OBJECTHANDLE handle, Object *_seco _ASSERTE(handle != NULL); - OBJECTREF secondary(_secondary); - SetDependentHandleSecondary(handle, secondary); + IGCHandleManager *mgr = GCHandleUtilities::GetGCHandleManager(); + mgr->SetDependentHandleSecondary(handle, _secondary); } FCIMPLEND diff --git a/src/vm/eeconfig.cpp b/src/vm/eeconfig.cpp index 2ec6d39cdd..05cdd0aa6c 100644 --- a/src/vm/eeconfig.cpp +++ b/src/vm/eeconfig.cpp @@ -178,14 +178,6 @@ HRESULT EEConfig::Init() iGCHeapVerify = 0; // Heap Verification OFF by default #endif -#ifdef _DEBUG // TRACE_GC - iGCtraceStart = INT_MAX; // Set to huge value so GCtrace is off by default - iGCtraceEnd = INT_MAX; - iGCtraceFac = 0; - iGCprnLvl = DEFAULT_GC_PRN_LVL; - -#endif - #if defined(STRESS_HEAP) || defined(_DEBUG) iGCStress = 0; #endif diff --git a/src/vm/eeconfig.h b/src/vm/eeconfig.h index 1ec4460fd8..a12a94c73e 100644 --- a/src/vm/eeconfig.h +++ b/src/vm/eeconfig.h @@ -616,15 +616,6 @@ public: GCStressFlags GetGCStressLevel() const { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GCStressFlags(iGCStress); } #endif -#ifdef _DEBUG // TRACE_GC - - int GetGCtraceStart() const {LIMITED_METHOD_CONTRACT; return iGCtraceStart; } - int GetGCtraceEnd () const {LIMITED_METHOD_CONTRACT; return iGCtraceEnd; } - int GetGCtraceFac () const {LIMITED_METHOD_CONTRACT; return iGCtraceFac; } - int GetGCprnLvl () const {LIMITED_METHOD_CONTRACT; return iGCprnLvl; } - -#endif - #ifdef STRESS_HEAP bool IsGCStressMix () const {LIMITED_METHOD_CONTRACT; return iGCStressMix != 0;} @@ 
-977,15 +968,6 @@ private: //---------------------------------------------------------------- int iGCHeapVerify; #endif -#ifdef _DEBUG // TRACE_GC - - int iGCtraceStart; - int iGCtraceEnd; - int iGCtraceFac; - int iGCprnLvl; - -#endif - #if defined(STRESS_HEAP) || defined(_DEBUG) int iGCStress; #endif diff --git a/src/vm/gcenv.ee.common.cpp b/src/vm/gcenv.ee.common.cpp new file mode 100644 index 0000000000..ca7349091f --- /dev/null +++ b/src/vm/gcenv.ee.common.cpp @@ -0,0 +1,394 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +#include "common.h" +#include "gcenv.h" + +#if defined(WIN64EXCEPTIONS) + +struct FindFirstInterruptiblePointState +{ + unsigned offs; + unsigned endOffs; + unsigned returnOffs; +}; + +bool FindFirstInterruptiblePointStateCB( + UINT32 startOffset, + UINT32 stopOffset, + LPVOID hCallback) +{ + FindFirstInterruptiblePointState* pState = (FindFirstInterruptiblePointState*)hCallback; + + _ASSERTE(startOffset < stopOffset); + _ASSERTE(pState->offs < pState->endOffs); + + if (stopOffset <= pState->offs) + { + // The range ends before the requested offset. + return false; + } + + // The offset is in the range. + if (startOffset <= pState->offs && + pState->offs < stopOffset) + { + pState->returnOffs = pState->offs; + return true; + } + + // The range is completely after the desired offset. We use the range start offset, if + // it comes before the given endOffs. We assume that the callback is called with ranges + // in increasing order, so earlier ones are reported before later ones. That is, if we + // get to this case, it will be the closest interruptible range after the requested + // offset. 
+ + _ASSERTE(pState->offs < startOffset); + if (startOffset < pState->endOffs) + { + pState->returnOffs = startOffset; + return true; + } + + return false; +} + +// Find the first interruptible point in the range [offs .. endOffs) (the beginning of the range is inclusive, +// the end is exclusive). Return -1 if no such point exists. +unsigned FindFirstInterruptiblePoint(CrawlFrame* pCF, unsigned offs, unsigned endOffs) +{ +#ifdef USE_GC_INFO_DECODER + GCInfoToken gcInfoToken = pCF->GetGCInfoToken(); + GcInfoDecoder gcInfoDecoder(gcInfoToken, DECODE_FOR_RANGES_CALLBACK); + + FindFirstInterruptiblePointState state; + state.offs = offs; + state.endOffs = endOffs; + state.returnOffs = -1; + + gcInfoDecoder.EnumerateInterruptibleRanges(&FindFirstInterruptiblePointStateCB, &state); + + return state.returnOffs; +#else + PORTABILITY_ASSERT("FindFirstInterruptiblePoint"); + return -1; +#endif // USE_GC_INFO_DECODER +} + +#endif // WIN64EXCEPTIONS + +//----------------------------------------------------------------------------- +// Determine whether we should report the generic parameter context +// +// This is meant to detect the situation where a ThreadAbortException is raised +// in the prolog of a managed method, before the location for the generics +// context has been initialized; when such a TAE is raised, we are open to a +// race with the GC (e.g. while creating the managed object for the TAE). +// The GC would cause a stack walk, and if we report the stack location for +// the generic param context at this time we'd crash. +// The long term solution is to avoid raising TAEs in any non-GC safe points, +// and to additionally ensure that we do not expose the runtime to TAE +// starvation. 
+inline bool SafeToReportGenericParamContext(CrawlFrame* pCF) +{ + LIMITED_METHOD_CONTRACT; + if (!pCF->IsFrameless() || !(pCF->IsActiveFrame() || pCF->IsInterrupted())) + { + return true; + } + +#ifndef USE_GC_INFO_DECODER + + ICodeManager * pEECM = pCF->GetCodeManager(); + if (pEECM != NULL && pEECM->IsInPrologOrEpilog(pCF->GetRelOffset(), pCF->GetGCInfoToken(), NULL)) + { + return false; + } + +#else // USE_GC_INFO_DECODER + + GcInfoDecoder gcInfoDecoder(pCF->GetGCInfoToken(), + DECODE_PROLOG_LENGTH); + UINT32 prologLength = gcInfoDecoder.GetPrologSize(); + if (pCF->GetRelOffset() < prologLength) + { + return false; + } + +#endif // USE_GC_INFO_DECODER + + return true; +} + +/* + * GcEnumObject() + * + * This is the JIT compiler (or any remote code manager) + * GC enumeration callback + */ + +void GcEnumObject(LPVOID pData, OBJECTREF *pObj, uint32_t flags) +{ + Object ** ppObj = (Object **)pObj; + GCCONTEXT * pCtx = (GCCONTEXT *) pData; + + // Since we may be asynchronously walking another thread's stack, + // check (frequently) for stack-buffer-overrun corruptions after + // any long operation + if (pCtx->cf != NULL) + pCtx->cf->CheckGSCookies(); + + // + // Sanity check that the flags contain only these three values + // + assert((flags & ~(GC_CALL_INTERIOR|GC_CALL_PINNED|GC_CALL_CHECK_APP_DOMAIN)) == 0); + + // for interior pointers, we optimize the case in which + // it points into the current threads stack area + // + if (flags & GC_CALL_INTERIOR) + PromoteCarefully(pCtx->f, ppObj, pCtx->sc, flags); + else + (pCtx->f)(ppObj, pCtx->sc, flags); +} + +//----------------------------------------------------------------------------- +void GcReportLoaderAllocator(promote_func* fn, ScanContext* sc, LoaderAllocator *pLoaderAllocator) +{ + CONTRACTL + { + NOTHROW; + GC_NOTRIGGER; + SO_TOLERANT; + MODE_COOPERATIVE; + } + CONTRACTL_END; + + if (pLoaderAllocator != NULL && pLoaderAllocator->IsCollectible()) + { + Object *refCollectionObject = 
OBJECTREFToObject(pLoaderAllocator->GetExposedObject()); + +#ifdef _DEBUG + Object *oldObj = refCollectionObject; +#endif + + _ASSERTE(refCollectionObject != NULL); + fn(&refCollectionObject, sc, CHECK_APP_DOMAIN); + + // We are reporting the location of a local variable, assert it doesn't change. + _ASSERTE(oldObj == refCollectionObject); + } +} + +//----------------------------------------------------------------------------- +StackWalkAction GcStackCrawlCallBack(CrawlFrame* pCF, VOID* pData) +{ + // + // KEEP IN SYNC WITH DacStackReferenceWalker::Callback in debug\daccess\daccess.cpp + // + + Frame *pFrame; + GCCONTEXT *gcctx = (GCCONTEXT*) pData; + +#if CHECK_APP_DOMAIN_LEAKS + gcctx->sc->pCurrentDomain = pCF->GetAppDomain(); +#endif //CHECK_APP_DOMAIN_LEAKS + +#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING + if (g_fEnableARM) + { + gcctx->sc->pCurrentDomain = pCF->GetAppDomain(); + } +#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING + + MethodDesc *pMD = pCF->GetFunction(); + +#ifdef GC_PROFILING + gcctx->sc->pMD = pMD; +#endif //GC_PROFILING + + // Clear it on exit so that we never have a stale CrawlFrame + ResetPointerHolder<CrawlFrame*> rph(&gcctx->cf); + // put it somewhere so that GcEnumObject can get to it. + gcctx->cf = pCF; + + bool fReportGCReferences = true; +#if defined(WIN64EXCEPTIONS) + // We may have unwound this crawlFrame and thus, shouldn't report the invalid + // references it may contain. 
+ fReportGCReferences = pCF->ShouldCrawlframeReportGCReferences(); +#endif // defined(WIN64EXCEPTIONS) + + if (fReportGCReferences) + { + if (pCF->IsFrameless()) + { + ICodeManager * pCM = pCF->GetCodeManager(); + _ASSERTE(pCM != NULL); + + unsigned flags = pCF->GetCodeManagerFlags(); + + #ifdef _TARGET_X86_ + STRESS_LOG3(LF_GCROOTS, LL_INFO1000, "Scanning Frameless method %pM EIP = %p &EIP = %p\n", + pMD, GetControlPC(pCF->GetRegisterSet()), pCF->GetRegisterSet()->PCTAddr); + #else + STRESS_LOG2(LF_GCROOTS, LL_INFO1000, "Scanning Frameless method %pM ControlPC = %p\n", + pMD, GetControlPC(pCF->GetRegisterSet())); + #endif + + _ASSERTE(pMD != 0); + + #ifdef _DEBUG + LOG((LF_GCROOTS, LL_INFO1000, "Scanning Frame for method %s:%s\n", + pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName)); + #endif // _DEBUG + + DWORD relOffsetOverride = NO_OVERRIDE_OFFSET; +#if defined(WIN64EXCEPTIONS) && defined(USE_GC_INFO_DECODER) + if (pCF->ShouldParentToFuncletUseUnwindTargetLocationForGCReporting()) + { + GCInfoToken gcInfoToken = pCF->GetGCInfoToken(); + GcInfoDecoder _gcInfoDecoder( + gcInfoToken, + DECODE_CODE_LENGTH + ); + + if(_gcInfoDecoder.WantsReportOnlyLeaf()) + { + // We're in a special case of unwinding from a funclet, and resuming execution in + // another catch funclet associated with same parent function. We need to report roots. + // Reporting at the original throw site gives incorrect liveness information. We choose to + // report the liveness information at the first interruptible instruction of the catch funclet + // that we are going to execute. We also only report stack slots, since no registers can be + // live at the first instruction of a handler, except the catch object, which the VM protects + // specially. If the catch funclet has not interruptible point, we fall back and just report + // what we used to: at the original throw instruction. This might lead to bad GC behavior + // if the liveness is not correct. 
+ const EE_ILEXCEPTION_CLAUSE& ehClauseForCatch = pCF->GetEHClauseForCatch(); + relOffsetOverride = FindFirstInterruptiblePoint(pCF, ehClauseForCatch.HandlerStartPC, + ehClauseForCatch.HandlerEndPC); + _ASSERTE(relOffsetOverride != NO_OVERRIDE_OFFSET); + + STRESS_LOG3(LF_GCROOTS, LL_INFO1000, "Setting override offset = %u for method %pM ControlPC = %p\n", + relOffsetOverride, pMD, GetControlPC(pCF->GetRegisterSet())); + } + + } +#endif // WIN64EXCEPTIONS && USE_GC_INFO_DECODER + + pCM->EnumGcRefs(pCF->GetRegisterSet(), + pCF->GetCodeInfo(), + flags, + GcEnumObject, + pData, + relOffsetOverride); + + } + else + { + Frame * pFrame = pCF->GetFrame(); + + STRESS_LOG3(LF_GCROOTS, LL_INFO1000, + "Scanning ExplicitFrame %p AssocMethod = %pM frameVTable = %pV\n", + pFrame, pFrame->GetFunction(), *((void**) pFrame)); + pFrame->GcScanRoots( gcctx->f, gcctx->sc); + } + } + + + // If we're executing a LCG dynamic method then we must promote the associated resolver to ensure it + // doesn't get collected and yank the method code out from under us). + + // Be careful to only promote the reference -- we can also be called to relocate the reference and + // that can lead to all sorts of problems since we could be racing for the relocation with the long + // weak handle we recover the reference from. Promoting the reference is enough, the handle in the + // reference will be relocated properly as long as we keep it alive till the end of the collection + // as long as the reference is actually maintained by the long weak handle. 
+ if (pMD && gcctx->sc->promotion) + { + BOOL fMaybeCollectibleMethod = TRUE; + + // If this is a frameless method then the jitmanager can answer the question of whether + // or not this is LCG simply by looking at the heap where the code lives, however there + // is also the prestub case where we need to explicitly look at the MD for stuff that isn't + // ngen'd + if (pCF->IsFrameless()) + { + fMaybeCollectibleMethod = ExecutionManager::IsCollectibleMethod(pCF->GetMethodToken()); + } + + if (fMaybeCollectibleMethod && pMD->IsLCGMethod()) + { + Object *refResolver = OBJECTREFToObject(pMD->AsDynamicMethodDesc()->GetLCGMethodResolver()->GetManagedResolver()); +#ifdef _DEBUG + Object *oldObj = refResolver; +#endif + _ASSERTE(refResolver != NULL); + (*gcctx->f)(&refResolver, gcctx->sc, CHECK_APP_DOMAIN); + _ASSERTE(!pMD->IsSharedByGenericInstantiations()); + + // We are reporting the location of a local variable, assert it doesn't change. + _ASSERTE(oldObj == refResolver); + } + else + { + if (fMaybeCollectibleMethod) + { + GcReportLoaderAllocator(gcctx->f, gcctx->sc, pMD->GetLoaderAllocator()); + } + + if (fReportGCReferences) + { + GenericParamContextType paramContextType = GENERIC_PARAM_CONTEXT_NONE; + + if (pCF->IsFrameless()) + { + // We need to grab the Context Type here because there are cases where the MethodDesc + // is shared, and thus indicates there should be an instantion argument, but the JIT + // was still allowed to optimize it away and we won't grab it below because we're not + // reporting any references from this frame. 
+ paramContextType = pCF->GetCodeManager()->GetParamContextType(pCF->GetRegisterSet(), pCF->GetCodeInfo()); + } + else + { + if (pMD->RequiresInstMethodDescArg()) + paramContextType = GENERIC_PARAM_CONTEXT_METHODDESC; + else if (pMD->RequiresInstMethodTableArg()) + paramContextType = GENERIC_PARAM_CONTEXT_METHODTABLE; + } + + if (SafeToReportGenericParamContext(pCF)) + { + // Handle the case where the method is a static shared generic method and we need to keep the type + // of the generic parameters alive + if (paramContextType == GENERIC_PARAM_CONTEXT_METHODDESC) + { + MethodDesc *pMDReal = dac_cast<PTR_MethodDesc>(pCF->GetParamTypeArg()); + _ASSERTE((pMDReal != NULL) || !pCF->IsFrameless()); + if (pMDReal != NULL) + { + GcReportLoaderAllocator(gcctx->f, gcctx->sc, pMDReal->GetLoaderAllocator()); + } + } + else if (paramContextType == GENERIC_PARAM_CONTEXT_METHODTABLE) + { + MethodTable *pMTReal = dac_cast<PTR_MethodTable>(pCF->GetParamTypeArg()); + _ASSERTE((pMTReal != NULL) || !pCF->IsFrameless()); + if (pMTReal != NULL) + { + GcReportLoaderAllocator(gcctx->f, gcctx->sc, pMTReal->GetLoaderAllocator()); + } + } + } + } + } + } + + // Since we may be asynchronously walking another thread's stack, + // check (frequently) for stack-buffer-overrun corruptions after + // any long operation + pCF->CheckGSCookies(); + + return SWA_CONTINUE; +}
\ No newline at end of file diff --git a/src/vm/gcenv.ee.cpp b/src/vm/gcenv.ee.cpp index 63c4ffea10..38c8ef0340 100644 --- a/src/vm/gcenv.ee.cpp +++ b/src/vm/gcenv.ee.cpp @@ -11,33 +11,6 @@ * */ -#include "common.h" - -#include "gcenv.h" - -#ifdef FEATURE_STANDALONE_GC -#include "gcenv.ee.h" -#else -#include "../gc/env/gcenv.ee.h" -#endif // FEATURE_STANDALONE_GC - -#include "threadsuspend.h" - -#ifdef FEATURE_COMINTEROP -#include "runtimecallablewrapper.h" -#include "rcwwalker.h" -#include "comcallablewrapper.h" -#endif // FEATURE_COMINTEROP - -// the method table for the WeakReference class -extern MethodTable* pWeakReferenceMT; - -// The canonical method table for WeakReference<T> -extern MethodTable* pWeakReferenceOfTCanonMT; - -// Finalizes a weak reference directly. -extern void FinalizeWeakReference(Object* obj); - void GCToEEInterface::SuspendEE(SUSPEND_REASON reason) { WRAPPER_NO_CONTRACT; @@ -57,394 +30,6 @@ void GCToEEInterface::RestartEE(bool bFinishedGC) ThreadSuspend::RestartEE(bFinishedGC, TRUE); } -/* - * GcEnumObject() - * - * This is the JIT compiler (or any remote code manager) - * GC enumeration callback - */ - -void GcEnumObject(LPVOID pData, OBJECTREF *pObj, uint32_t flags) -{ - Object ** ppObj = (Object **)pObj; - GCCONTEXT * pCtx = (GCCONTEXT *) pData; - - // Since we may be asynchronously walking another thread's stack, - // check (frequently) for stack-buffer-overrun corruptions after - // any long operation - if (pCtx->cf != NULL) - pCtx->cf->CheckGSCookies(); - - // - // Sanity check that the flags contain only these three values - // - assert((flags & ~(GC_CALL_INTERIOR|GC_CALL_PINNED|GC_CALL_CHECK_APP_DOMAIN)) == 0); - - // for interior pointers, we optimize the case in which - // it points into the current threads stack area - // - if (flags & GC_CALL_INTERIOR) - PromoteCarefully(pCtx->f, ppObj, pCtx->sc, flags); - else - (pCtx->f)(ppObj, pCtx->sc, flags); -} - 
-//----------------------------------------------------------------------------- -void GcReportLoaderAllocator(promote_func* fn, ScanContext* sc, LoaderAllocator *pLoaderAllocator) -{ - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - SO_TOLERANT; - MODE_COOPERATIVE; - } - CONTRACTL_END; - - if (pLoaderAllocator != NULL && pLoaderAllocator->IsCollectible()) - { - Object *refCollectionObject = OBJECTREFToObject(pLoaderAllocator->GetExposedObject()); - -#ifdef _DEBUG - Object *oldObj = refCollectionObject; -#endif - - _ASSERTE(refCollectionObject != NULL); - fn(&refCollectionObject, sc, CHECK_APP_DOMAIN); - - // We are reporting the location of a local variable, assert it doesn't change. - _ASSERTE(oldObj == refCollectionObject); - } -} - -//----------------------------------------------------------------------------- -// Determine whether we should report the generic parameter context -// -// This is meant to detect the situation where a ThreadAbortException is raised -// in the prolog of a managed method, before the location for the generics -// context has been initialized; when such a TAE is raised, we are open to a -// race with the GC (e.g. while creating the managed object for the TAE). -// The GC would cause a stack walk, and if we report the stack location for -// the generic param context at this time we'd crash. -// The long term solution is to avoid raising TAEs in any non-GC safe points, -// and to additionally ensure that we do not expose the runtime to TAE -// starvation. 
-inline bool SafeToReportGenericParamContext(CrawlFrame* pCF) -{ - LIMITED_METHOD_CONTRACT; - if (!pCF->IsFrameless() || !(pCF->IsActiveFrame() || pCF->IsInterrupted())) - { - return true; - } - -#ifndef USE_GC_INFO_DECODER - - ICodeManager * pEECM = pCF->GetCodeManager(); - if (pEECM != NULL && pEECM->IsInPrologOrEpilog(pCF->GetRelOffset(), pCF->GetGCInfoToken(), NULL)) - { - return false; - } - -#else // USE_GC_INFO_DECODER - - GcInfoDecoder gcInfoDecoder(pCF->GetGCInfoToken(), - DECODE_PROLOG_LENGTH); - UINT32 prologLength = gcInfoDecoder.GetPrologSize(); - if (pCF->GetRelOffset() < prologLength) - { - return false; - } - -#endif // USE_GC_INFO_DECODER - - return true; -} - -#if defined(WIN64EXCEPTIONS) - -struct FindFirstInterruptiblePointState -{ - unsigned offs; - unsigned endOffs; - unsigned returnOffs; -}; - -bool FindFirstInterruptiblePointStateCB( - UINT32 startOffset, - UINT32 stopOffset, - LPVOID hCallback) -{ - FindFirstInterruptiblePointState* pState = (FindFirstInterruptiblePointState*)hCallback; - - _ASSERTE(startOffset < stopOffset); - _ASSERTE(pState->offs < pState->endOffs); - - if (stopOffset <= pState->offs) - { - // The range ends before the requested offset. - return false; - } - - // The offset is in the range. - if (startOffset <= pState->offs && - pState->offs < stopOffset) - { - pState->returnOffs = pState->offs; - return true; - } - - // The range is completely after the desired offset. We use the range start offset, if - // it comes before the given endOffs. We assume that the callback is called with ranges - // in increasing order, so earlier ones are reported before later ones. That is, if we - // get to this case, it will be the closest interruptible range after the requested - // offset. - - _ASSERTE(pState->offs < startOffset); - if (startOffset < pState->endOffs) - { - pState->returnOffs = startOffset; - return true; - } - - return false; -} - -// Find the first interruptible point in the range [offs .. 
endOffs) (the beginning of the range is inclusive, -// the end is exclusive). Return -1 if no such point exists. -unsigned FindFirstInterruptiblePoint(CrawlFrame* pCF, unsigned offs, unsigned endOffs) -{ -#ifdef USE_GC_INFO_DECODER - GCInfoToken gcInfoToken = pCF->GetGCInfoToken(); - GcInfoDecoder gcInfoDecoder(gcInfoToken, DECODE_FOR_RANGES_CALLBACK); - - FindFirstInterruptiblePointState state; - state.offs = offs; - state.endOffs = endOffs; - state.returnOffs = -1; - - gcInfoDecoder.EnumerateInterruptibleRanges(&FindFirstInterruptiblePointStateCB, &state); - - return state.returnOffs; -#else - PORTABILITY_ASSERT("FindFirstInterruptiblePoint"); - return -1; -#endif // USE_GC_INFO_DECODER -} - -#endif // WIN64EXCEPTIONS - -//----------------------------------------------------------------------------- -StackWalkAction GcStackCrawlCallBack(CrawlFrame* pCF, VOID* pData) -{ - // - // KEEP IN SYNC WITH DacStackReferenceWalker::Callback in debug\daccess\daccess.cpp - // - - Frame *pFrame; - GCCONTEXT *gcctx = (GCCONTEXT*) pData; - -#if CHECK_APP_DOMAIN_LEAKS - gcctx->sc->pCurrentDomain = pCF->GetAppDomain(); -#endif //CHECK_APP_DOMAIN_LEAKS - -#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING - if (g_fEnableARM) - { - gcctx->sc->pCurrentDomain = pCF->GetAppDomain(); - } -#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING - - MethodDesc *pMD = pCF->GetFunction(); - -#ifdef GC_PROFILING - gcctx->sc->pMD = pMD; -#endif //GC_PROFILING - - // Clear it on exit so that we never have a stale CrawlFrame - ResetPointerHolder<CrawlFrame*> rph(&gcctx->cf); - // put it somewhere so that GcEnumObject can get to it. - gcctx->cf = pCF; - - bool fReportGCReferences = true; -#if defined(WIN64EXCEPTIONS) - // We may have unwound this crawlFrame and thus, shouldn't report the invalid - // references it may contain. 
- fReportGCReferences = pCF->ShouldCrawlframeReportGCReferences(); -#endif // defined(WIN64EXCEPTIONS) - - if (fReportGCReferences) - { - if (pCF->IsFrameless()) - { - ICodeManager * pCM = pCF->GetCodeManager(); - _ASSERTE(pCM != NULL); - - unsigned flags = pCF->GetCodeManagerFlags(); - - #ifdef _TARGET_X86_ - STRESS_LOG3(LF_GCROOTS, LL_INFO1000, "Scanning Frameless method %pM EIP = %p &EIP = %p\n", - pMD, GetControlPC(pCF->GetRegisterSet()), pCF->GetRegisterSet()->PCTAddr); - #else - STRESS_LOG2(LF_GCROOTS, LL_INFO1000, "Scanning Frameless method %pM ControlPC = %p\n", - pMD, GetControlPC(pCF->GetRegisterSet())); - #endif - - _ASSERTE(pMD != 0); - - #ifdef _DEBUG - LOG((LF_GCROOTS, LL_INFO1000, "Scanning Frame for method %s:%s\n", - pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName)); - #endif // _DEBUG - - DWORD relOffsetOverride = NO_OVERRIDE_OFFSET; -#if defined(WIN64EXCEPTIONS) && defined(USE_GC_INFO_DECODER) - if (pCF->ShouldParentToFuncletUseUnwindTargetLocationForGCReporting()) - { - GCInfoToken gcInfoToken = pCF->GetGCInfoToken(); - GcInfoDecoder _gcInfoDecoder( - gcInfoToken, - DECODE_CODE_LENGTH - ); - - if(_gcInfoDecoder.WantsReportOnlyLeaf()) - { - // We're in a special case of unwinding from a funclet, and resuming execution in - // another catch funclet associated with same parent function. We need to report roots. - // Reporting at the original throw site gives incorrect liveness information. We choose to - // report the liveness information at the first interruptible instruction of the catch funclet - // that we are going to execute. We also only report stack slots, since no registers can be - // live at the first instruction of a handler, except the catch object, which the VM protects - // specially. If the catch funclet has not interruptible point, we fall back and just report - // what we used to: at the original throw instruction. This might lead to bad GC behavior - // if the liveness is not correct. 
- const EE_ILEXCEPTION_CLAUSE& ehClauseForCatch = pCF->GetEHClauseForCatch(); - relOffsetOverride = FindFirstInterruptiblePoint(pCF, ehClauseForCatch.HandlerStartPC, - ehClauseForCatch.HandlerEndPC); - _ASSERTE(relOffsetOverride != NO_OVERRIDE_OFFSET); - - STRESS_LOG3(LF_GCROOTS, LL_INFO1000, "Setting override offset = %u for method %pM ControlPC = %p\n", - relOffsetOverride, pMD, GetControlPC(pCF->GetRegisterSet())); - } - - } -#endif // WIN64EXCEPTIONS && USE_GC_INFO_DECODER - - pCM->EnumGcRefs(pCF->GetRegisterSet(), - pCF->GetCodeInfo(), - flags, - GcEnumObject, - pData, - relOffsetOverride); - - } - else - { - Frame * pFrame = pCF->GetFrame(); - - STRESS_LOG3(LF_GCROOTS, LL_INFO1000, - "Scanning ExplicitFrame %p AssocMethod = %pM frameVTable = %pV\n", - pFrame, pFrame->GetFunction(), *((void**) pFrame)); - pFrame->GcScanRoots( gcctx->f, gcctx->sc); - } - } - - - // If we're executing a LCG dynamic method then we must promote the associated resolver to ensure it - // doesn't get collected and yank the method code out from under us). - - // Be careful to only promote the reference -- we can also be called to relocate the reference and - // that can lead to all sorts of problems since we could be racing for the relocation with the long - // weak handle we recover the reference from. Promoting the reference is enough, the handle in the - // reference will be relocated properly as long as we keep it alive till the end of the collection - // as long as the reference is actually maintained by the long weak handle. 
- if (pMD && gcctx->sc->promotion) - { - BOOL fMaybeCollectibleMethod = TRUE; - - // If this is a frameless method then the jitmanager can answer the question of whether - // or not this is LCG simply by looking at the heap where the code lives, however there - // is also the prestub case where we need to explicitly look at the MD for stuff that isn't - // ngen'd - if (pCF->IsFrameless()) - { - fMaybeCollectibleMethod = ExecutionManager::IsCollectibleMethod(pCF->GetMethodToken()); - } - - if (fMaybeCollectibleMethod && pMD->IsLCGMethod()) - { - Object *refResolver = OBJECTREFToObject(pMD->AsDynamicMethodDesc()->GetLCGMethodResolver()->GetManagedResolver()); -#ifdef _DEBUG - Object *oldObj = refResolver; -#endif - _ASSERTE(refResolver != NULL); - (*gcctx->f)(&refResolver, gcctx->sc, CHECK_APP_DOMAIN); - _ASSERTE(!pMD->IsSharedByGenericInstantiations()); - - // We are reporting the location of a local variable, assert it doesn't change. - _ASSERTE(oldObj == refResolver); - } - else - { - if (fMaybeCollectibleMethod) - { - GcReportLoaderAllocator(gcctx->f, gcctx->sc, pMD->GetLoaderAllocator()); - } - - if (fReportGCReferences) - { - GenericParamContextType paramContextType = GENERIC_PARAM_CONTEXT_NONE; - - if (pCF->IsFrameless()) - { - // We need to grab the Context Type here because there are cases where the MethodDesc - // is shared, and thus indicates there should be an instantion argument, but the JIT - // was still allowed to optimize it away and we won't grab it below because we're not - // reporting any references from this frame. 
- paramContextType = pCF->GetCodeManager()->GetParamContextType(pCF->GetRegisterSet(), pCF->GetCodeInfo()); - } - else - { - if (pMD->RequiresInstMethodDescArg()) - paramContextType = GENERIC_PARAM_CONTEXT_METHODDESC; - else if (pMD->RequiresInstMethodTableArg()) - paramContextType = GENERIC_PARAM_CONTEXT_METHODTABLE; - } - - if (SafeToReportGenericParamContext(pCF)) - { - // Handle the case where the method is a static shared generic method and we need to keep the type - // of the generic parameters alive - if (paramContextType == GENERIC_PARAM_CONTEXT_METHODDESC) - { - MethodDesc *pMDReal = dac_cast<PTR_MethodDesc>(pCF->GetParamTypeArg()); - _ASSERTE((pMDReal != NULL) || !pCF->IsFrameless()); - if (pMDReal != NULL) - { - GcReportLoaderAllocator(gcctx->f, gcctx->sc, pMDReal->GetLoaderAllocator()); - } - } - else if (paramContextType == GENERIC_PARAM_CONTEXT_METHODTABLE) - { - MethodTable *pMTReal = dac_cast<PTR_MethodTable>(pCF->GetParamTypeArg()); - _ASSERTE((pMTReal != NULL) || !pCF->IsFrameless()); - if (pMTReal != NULL) - { - GcReportLoaderAllocator(gcctx->f, gcctx->sc, pMTReal->GetLoaderAllocator()); - } - } - } - } - } - } - - // Since we may be asynchronously walking another thread's stack, - // check (frequently) for stack-buffer-overrun corruptions after - // any long operation - pCF->CheckGSCookies(); - - return SWA_CONTINUE; -} - VOID GCToEEInterface::SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2) { CONTRACTL @@ -1408,3 +993,125 @@ MethodTable* GCToEEInterface::GetFreeObjectMethodTable() assert(g_pFreeObjectMethodTable != nullptr); return g_pFreeObjectMethodTable; } + +// These are arbitrary, we shouldn't ever be having confrig keys or values +// longer than these lengths. 
+const size_t MaxConfigKeyLength = 255; +const size_t MaxConfigValueLength = 255; + +bool GCToEEInterface::GetBooleanConfigValue(const char* key, bool* value) +{ + CONTRACTL { + NOTHROW; + GC_NOTRIGGER; + } CONTRACTL_END; + + // these configuration values are given to us via startup flags. + if (strcmp(key, "gcServer") == 0) + { + *value = g_heap_type == GC_HEAP_SVR; + return true; + } + + if (strcmp(key, "gcConcurrent") == 0) + { + *value = g_IGCconcurrent != 0; + return true; + } + + if (strcmp(key, "GCRetainVM") == 0) + { + *value = !!g_pConfig->GetGCRetainVM(); + return true; + } + + WCHAR configKey[MaxConfigKeyLength]; + if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0) + { + // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.) + return false; + } + + // otherwise, ask the config subsystem. + if (CLRConfig::IsConfigOptionSpecified(configKey)) + { + CLRConfig::ConfigDWORDInfo info { configKey , 0, CLRConfig::EEConfig_default }; + *value = CLRConfig::GetConfigValue(info) != 0; + return true; + } + + return false; +} + +bool GCToEEInterface::GetIntConfigValue(const char* key, int64_t* value) +{ + CONTRACTL { + NOTHROW; + GC_NOTRIGGER; + } CONTRACTL_END; + + WCHAR configKey[MaxConfigKeyLength]; + if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0) + { + // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.) 
+ return false; + } + + if (CLRConfig::IsConfigOptionSpecified(configKey)) + { + CLRConfig::ConfigDWORDInfo info { configKey , 0, CLRConfig::EEConfig_default }; + *value = CLRConfig::GetConfigValue(info); + return true; + } + + return false; +} + +bool GCToEEInterface::GetStringConfigValue(const char* key, const char** value) +{ + CONTRACTL { + NOTHROW; + GC_NOTRIGGER; + } CONTRACTL_END; + + WCHAR configKey[MaxConfigKeyLength]; + if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0) + { + // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.) + return false; + } + + CLRConfig::ConfigStringInfo info { configKey, CLRConfig::EEConfig_default }; + LPWSTR out = CLRConfig::GetConfigValue(info); + if (!out) + { + // config not found + return false; + } + + // not allocated on the stack since it escapes this function + AStringHolder configResult = new (nothrow) char[MaxConfigValueLength]; + if (!configResult) + { + CLRConfig::FreeConfigString(out); + return false; + } + + if (WideCharToMultiByte(CP_ACP, 0, out, -1 /* out is null-terminated */, + configResult.GetValue(), MaxConfigKeyLength, nullptr, nullptr) == 0) + { + // this should only happen if the config subsystem gives us a string that's not valid + // unicode. 
+ CLRConfig::FreeConfigString(out); + return false; + } + + *value = configResult.Extract(); + CLRConfig::FreeConfigString(out); + return true; +} + +void GCToEEInterface::FreeStringConfigValue(const char* value) +{ + delete [] value; +} diff --git a/src/vm/gcenv.ee.h b/src/vm/gcenv.ee.h index 9f7df14d22..6b02f5cfa1 100644 --- a/src/vm/gcenv.ee.h +++ b/src/vm/gcenv.ee.h @@ -9,6 +9,9 @@ #ifdef FEATURE_STANDALONE_GC +namespace standalone +{ + class GCToEEInterface : public IGCToCLR { public: GCToEEInterface() = default; @@ -49,8 +52,14 @@ public: bool ForceFullGCToBeBlocking(); bool EagerFinalized(Object* obj); MethodTable* GetFreeObjectMethodTable(); + bool GetBooleanConfigValue(const char* key, bool* value); + bool GetIntConfigValue(const char* key, int64_t* value); + bool GetStringConfigValue(const char* key, const char** value); + void FreeStringConfigValue(const char* value); }; +} // namespace standalone + #endif // FEATURE_STANDALONE_GC #endif // _GCENV_EE_H_ diff --git a/src/vm/gcenv.ee.standalone.cpp b/src/vm/gcenv.ee.standalone.cpp new file mode 100644 index 0000000000..5ba2aca812 --- /dev/null +++ b/src/vm/gcenv.ee.standalone.cpp @@ -0,0 +1,30 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +#include "common.h" +#include "gcenv.h" +#include "gcenv.ee.h" +#include "threadsuspend.h" + +#ifdef FEATURE_COMINTEROP +#include "runtimecallablewrapper.h" +#include "rcwwalker.h" +#include "comcallablewrapper.h" +#endif // FEATURE_COMINTEROP + +// the method table for the WeakReference class +extern MethodTable* pWeakReferenceMT; + +// The canonical method table for WeakReference<T> +extern MethodTable* pWeakReferenceOfTCanonMT; + +// Finalizes a weak reference directly. +extern void FinalizeWeakReference(Object* obj); + +namespace standalone +{ + +#include "gcenv.ee.cpp" + +} // namespace standalone
\ No newline at end of file diff --git a/src/vm/gcenv.ee.static.cpp b/src/vm/gcenv.ee.static.cpp new file mode 100644 index 0000000000..240e325a9e --- /dev/null +++ b/src/vm/gcenv.ee.static.cpp @@ -0,0 +1,25 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +#include "common.h" +#include "gcenv.h" +#include "../gc/env/gcenv.ee.h" +#include "threadsuspend.h" + +#ifdef FEATURE_COMINTEROP +#include "runtimecallablewrapper.h" +#include "rcwwalker.h" +#include "comcallablewrapper.h" +#endif // FEATURE_COMINTEROP + +// the method table for the WeakReference class +extern MethodTable* pWeakReferenceMT; + +// The canonical method table for WeakReference<T> +extern MethodTable* pWeakReferenceOfTCanonMT; + +// Finalizes a weak reference directly. +extern void FinalizeWeakReference(Object* obj); + +#include "gcenv.ee.cpp"
\ No newline at end of file diff --git a/src/vm/gcenv.os.cpp b/src/vm/gcenv.os.cpp index 77be88c96d..abacc3c76d 100644 --- a/src/vm/gcenv.os.cpp +++ b/src/vm/gcenv.os.cpp @@ -171,13 +171,18 @@ void* GCToOSInterface::VirtualReserve(size_t size, size_t alignment, uint32_t fl LIMITED_METHOD_CONTRACT; DWORD memFlags = (flags & VirtualReserveFlags::WriteWatch) ? (MEM_RESERVE | MEM_WRITE_WATCH) : MEM_RESERVE; + + // This is not strictly necessary for a correctness standpoint. Windows already guarantees + // allocation granularity alignment when using MEM_RESERVE, so aligning the size here has no effect. + // However, ClrVirtualAlloc does expect the size to be aligned to the allocation granularity. + size_t aligned_size = (size + g_SystemInfo.dwAllocationGranularity - 1) & ~static_cast<size_t>(g_SystemInfo.dwAllocationGranularity - 1); if (alignment == 0) { - return ::ClrVirtualAlloc(0, size, memFlags, PAGE_READWRITE); + return ::ClrVirtualAlloc(0, aligned_size, memFlags, PAGE_READWRITE); } else { - return ::ClrVirtualAllocAligned(0, size, memFlags, PAGE_READWRITE, alignment); + return ::ClrVirtualAllocAligned(0, aligned_size, memFlags, PAGE_READWRITE, alignment); } } @@ -669,6 +674,13 @@ bool GCToOSInterface::CreateThread(GCThreadFunction function, void* param, GCThr return true; } +uint32_t GCToOSInterface::GetTotalProcessorCount() +{ + LIMITED_METHOD_CONTRACT; + + return g_SystemInfo.dwNumberOfProcessors; +} + // Initialize the critical section void CLRCriticalSection::Initialize() { diff --git a/src/vm/nativeoverlapped.cpp b/src/vm/nativeoverlapped.cpp index 3a77c52dae..027af3164f 100644 --- a/src/vm/nativeoverlapped.cpp +++ b/src/vm/nativeoverlapped.cpp @@ -211,6 +211,33 @@ FCIMPL1(OverlappedDataObject*, GetOverlappedFromNative, LPOVERLAPPED lpOverlappe } FCIMPLEND +namespace +{ + +// Sets up an enumeration of all async pinned handles, such that all enumerated +// async pinned handles are processed by calling HandleAsyncPinHandle on the +// underlying overlapped 
instance. +BOOL HandleAsyncPinHandles() +{ + auto callback = [](Object* value, void*) + { + _ASSERTE (value->GetMethodTable() == g_pOverlappedDataClass); + OVERLAPPEDDATAREF overlapped = (OVERLAPPEDDATAREF)(ObjectToOBJECTREF(value)); + if (overlapped->GetAppDomainId() != DefaultADID && overlapped->HasCompleted()) + { + overlapped->HandleAsyncPinHandle(); + return true; + } + + return false; + }; + + IGCHandleManager* mgr = GCHandleUtilities::GetGCHandleManager(); + return mgr->GetGlobalHandleStore()->EnumerateAsyncPinnedHandles(callback, nullptr); +} + +} // anonymous namespace + void OverlappedDataObject::FreeAsyncPinHandles() { CONTRACTL @@ -262,7 +289,7 @@ void OverlappedDataObject::StartCleanup() if (FastInterlockExchange((LONG*)&s_CleanupInProgress, TRUE) == FALSE) { { - BOOL HasJob = Ref_HandleAsyncPinHandles(); + BOOL HasJob = HandleAsyncPinHandles(); if (!HasJob) { s_CleanupInProgress = FALSE; @@ -292,7 +319,7 @@ void OverlappedDataObject::FinishCleanup(bool wasDrained) GCX_COOP(); s_CleanupFreeHandle = TRUE; - Ref_HandleAsyncPinHandles(); + HandleAsyncPinHandles(); s_CleanupFreeHandle = FALSE; s_CleanupInProgress = FALSE; |