summaryrefslogtreecommitdiff
path: root/src/gc
diff options
context:
space:
mode:
authorSean Gillespie <segilles@microsoft.com>2017-06-02 12:08:18 -0700
committerGitHub <noreply@github.com>2017-06-02 12:08:18 -0700
commit450237b03534fca21f08cd295289b0ba275629e3 (patch)
tree247653b4a53614e618cac66a4a85247b6b2f1adb /src/gc
parentadaaa2e65127660166b7d2a3454e154f61f8218f (diff)
parent0597a34b570d359d34a03c74e1007025c43ae7e8 (diff)
downloadcoreclr-450237b03534fca21f08cd295289b0ba275629e3.tar.gz
coreclr-450237b03534fca21f08cd295289b0ba275629e3.tar.bz2
coreclr-450237b03534fca21f08cd295289b0ba275629e3.zip
Merge pull request #11699 from swgillespie/local-gc-branch-merge
Integration from dev/local-gc into master
Diffstat (limited to 'src/gc')
-rw-r--r--src/gc/CMakeLists.txt43
-rw-r--r--src/gc/dac/CMakeLists.txt2
-rw-r--r--src/gc/env/gcenv.ee.h4
-rw-r--r--src/gc/env/gcenv.os.h15
-rw-r--r--src/gc/gc.cpp260
-rw-r--r--src/gc/gc.h9
-rw-r--r--src/gc/gccommon.cpp83
-rw-r--r--src/gc/gcconfig.cpp48
-rw-r--r--src/gc/gcconfig.h137
-rw-r--r--src/gc/gcenv.ee.standalone.inl34
-rw-r--r--src/gc/gchandletable.cpp22
-rw-r--r--src/gc/gchandletableimpl.h8
-rw-r--r--src/gc/gcinterface.ee.h22
-rw-r--r--src/gc/gcinterface.h32
-rw-r--r--src/gc/gcpriv.h55
-rw-r--r--src/gc/handletable.cpp13
-rw-r--r--src/gc/handletablecache.cpp2
-rw-r--r--src/gc/handletablecore.cpp25
-rw-r--r--src/gc/handletablepriv.h33
-rw-r--r--src/gc/objecthandle.cpp4
-rw-r--r--src/gc/objecthandle.h2
-rw-r--r--src/gc/sample/CMakeLists.txt1
-rw-r--r--src/gc/sample/GCSample.cpp2
-rw-r--r--src/gc/sample/gcenv.ee.cpp51
-rw-r--r--src/gc/sample/gcenv.h6
-rw-r--r--src/gc/unix/gcenv.unix.cpp16
-rw-r--r--src/gc/windows/gcenv.windows.cpp9
-rw-r--r--src/gc/wks/CMakeLists.txt1
28 files changed, 576 insertions, 363 deletions
diff --git a/src/gc/CMakeLists.txt b/src/gc/CMakeLists.txt
index 59c18ffd87..4de3f4e412 100644
--- a/src/gc/CMakeLists.txt
+++ b/src/gc/CMakeLists.txt
@@ -1,24 +1,17 @@
set(CMAKE_INCLUDE_CURRENT_DIR ON)
-include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR})
+include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR})
include_directories(BEFORE ${CLR_DIR}/src/vm)
include_directories(BEFORE ${CLR_DIR}/src/vm/${ARCH_SOURCES_DIR})
+add_definitions(-DBUILD_AS_STANDALONE)
+
if(CLR_CMAKE_PLATFORM_UNIX)
add_compile_options(-fPIC)
endif(CLR_CMAKE_PLATFORM_UNIX)
-if(CMAKE_CONFIGURATION_TYPES)
- foreach (Config DEBUG CHECKED)
- set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS $<$<CONFIG:${Config}>:WRITE_BARRIER_CHECK=1>)
- endforeach (Config)
-else()
- if(UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG OR UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED)
- add_definitions(-DWRITE_BARRIER_CHECK=1)
- endif(UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG OR UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED)
-endif(CMAKE_CONFIGURATION_TYPES)
-
-set( GC_SOURCES_DAC_AND_WKS_COMMON
+set( GC_SOURCES
+ gcconfig.cpp
gccommon.cpp
gcscan.cpp
gcsvr.cpp
@@ -27,28 +20,18 @@ set( GC_SOURCES_DAC_AND_WKS_COMMON
handletablecore.cpp
handletablescan.cpp
objecthandle.cpp
- softwarewritewatch.cpp)
-
-set( GC_SOURCES_WKS
- ${GC_SOURCES_DAC_AND_WKS_COMMON}
+ softwarewritewatch.cpp
gchandletable.cpp
gceesvr.cpp
gceewks.cpp
handletablecache.cpp)
-set( GC_SOURCES_DAC
- ${GC_SOURCES_DAC_AND_WKS_COMMON})
-
-if(FEATURE_STANDALONE_GC)
- if(NOT CLR_CMAKE_PLATFORM_UNIX)
- set ( GC_SOURCES_WKS
- ${GC_SOURCES_WKS}
- windows/gcenv.windows.cpp)
- endif(NOT CLR_CMAKE_PLATFORM_UNIX)
-endif(FEATURE_STANDALONE_GC)
+if(NOT CLR_CMAKE_PLATFORM_UNIX)
+set ( GC_SOURCES
+ ${GC_SOURCES}
+ windows/gcenv.windows.cpp)
+endif(NOT CLR_CMAKE_PLATFORM_UNIX)
-convert_to_absolute_path(GC_SOURCES_WKS ${GC_SOURCES_WKS})
-convert_to_absolute_path(GC_SOURCES_DAC ${GC_SOURCES_DAC})
+convert_to_absolute_path(GC_SOURCES ${GC_SOURCES})
-add_subdirectory(wks)
-add_subdirectory(dac)
+add_library_clr(gc_standalone STATIC ${GC_SOURCES})
diff --git a/src/gc/dac/CMakeLists.txt b/src/gc/dac/CMakeLists.txt
deleted file mode 100644
index 1f1c9ebe5c..0000000000
--- a/src/gc/dac/CMakeLists.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-include(${CLR_DIR}/dac.cmake)
-add_library_clr(gc_dac STATIC ${GC_SOURCES_DAC})
diff --git a/src/gc/env/gcenv.ee.h b/src/gc/env/gcenv.ee.h
index aa00d19780..bb335c6834 100644
--- a/src/gc/env/gcenv.ee.h
+++ b/src/gc/env/gcenv.ee.h
@@ -74,6 +74,10 @@ public:
static bool ForceFullGCToBeBlocking();
static bool EagerFinalized(Object* obj);
static MethodTable* GetFreeObjectMethodTable();
+ static bool GetBooleanConfigValue(const char* key, bool* value);
+ static bool GetIntConfigValue(const char* key, int64_t* value);
+ static bool GetStringConfigValue(const char* key, const char** value);
+ static void FreeStringConfigValue(const char* key);
};
#endif // __GCENV_EE_H__
diff --git a/src/gc/env/gcenv.os.h b/src/gc/env/gcenv.os.h
index d3e40ac4ff..8c533222ef 100644
--- a/src/gc/env/gcenv.os.h
+++ b/src/gc/env/gcenv.os.h
@@ -158,6 +158,15 @@ public:
// flags - flags to control special settings like write watching
// Return:
// Starting virtual address of the reserved range
+ // Notes:
+ // Previous uses of this API aligned the `size` parameter to the platform
+ // allocation granularity. This is not required by POSIX or Windows. Windows will
+ // round the size up to the nearest page boundary. POSIX does not specify what is done,
+ // but Linux probably also rounds up. If an implementation of GCToOSInterface needs to
+ // align to the allocation granularity, it will do so in its implementation.
+ //
+ // Windows guarantees that the returned mapping will be aligned to the allocation
+ // granularity.
static void* VirtualReserve(size_t size, size_t alignment, uint32_t flags);
// Release virtual memory range previously reserved using VirtualReserve
@@ -357,6 +366,12 @@ public:
// Return:
// Time stamp in milliseconds
static uint32_t GetLowPrecisionTimeStamp();
+
+ // Gets the total number of processors on the machine, not taking
+ // into account current process affinity.
+ // Return:
+ // Number of processors on the machine
+ static uint32_t GetTotalProcessorCount();
};
#endif // __GCENV_OS_H__
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index 653f379b66..962e09b445 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -21,7 +21,6 @@
#define USE_INTROSORT
-
#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
BOOL bgc_heap_walk_for_etw_p = FALSE;
#endif //BACKGROUND_GC && FEATURE_EVENT_TRACE
@@ -176,7 +175,7 @@ size_t GetHighPrecisionTimeStamp()
GCStatistics g_GCStatistics;
GCStatistics g_LastGCStatistics;
-TCHAR* GCStatistics::logFileName = NULL;
+char* GCStatistics::logFileName = NULL;
FILE* GCStatistics::logFile = NULL;
void GCStatistics::AddGCStats(const gc_mechanisms& settings, size_t timeInMSec)
@@ -749,7 +748,7 @@ public:
if (color == join_struct.lock_color)
{
respin:
- int spin_count = 4096 * g_SystemInfo.dwNumberOfProcessors;
+ int spin_count = 4096 * g_num_processors;
for (int j = 0; j < spin_count; j++)
{
if (color != join_struct.lock_color)
@@ -850,7 +849,7 @@ respin:
if (!join_struct.wait_done)
{
respin:
- int spin_count = 2 * 4096 * g_SystemInfo.dwNumberOfProcessors;
+ int spin_count = 2 * 4096 * g_num_processors;
for (int j = 0; j < spin_count; j++)
{
if (join_struct.wait_done)
@@ -1043,7 +1042,7 @@ class exclusive_sync
public:
void init()
{
- spin_count = 32 * (g_SystemInfo.dwNumberOfProcessors - 1);
+ spin_count = 32 * (g_num_processors - 1);
rwp_object = 0;
needs_checking = 0;
for (int i = 0; i < max_pending_allocs; i++)
@@ -1424,15 +1423,6 @@ int mark_time, plan_time, sweep_time, reloc_time, compact_time;
#endif // MULTIPLE_HEAPS
-#ifdef TRACE_GC
-
-int print_level = DEFAULT_GC_PRN_LVL; //level of detail of the debug trace
-BOOL trace_gc = FALSE;
-int gc_trace_fac = 0;
-hlet* hlet::bindings = 0;
-
-#endif //TRACE_GC
-
void reset_memory (uint8_t* o, size_t sizeo);
#ifdef WRITE_WATCH
@@ -1508,7 +1498,7 @@ void WaitLongerNoInstru (int i)
// if we're waiting for gc to finish, we should block immediately
if (!g_TrapReturningThreads)
{
- if (g_SystemInfo.dwNumberOfProcessors > 1)
+ if (g_num_processors > 1)
{
YieldProcessor(); // indicate to the processor that we are spining
if (i & 0x01f)
@@ -1580,12 +1570,12 @@ retry:
{
if ((++i & 7) && !IsGCInProgress())
{
- if (g_SystemInfo.dwNumberOfProcessors > 1)
+ if (g_num_processors > 1)
{
#ifndef MULTIPLE_HEAPS
- int spin_count = 1024 * g_SystemInfo.dwNumberOfProcessors;
+ int spin_count = 1024 * g_num_processors;
#else //!MULTIPLE_HEAPS
- int spin_count = 32 * g_SystemInfo.dwNumberOfProcessors;
+ int spin_count = 32 * g_num_processors;
#endif //!MULTIPLE_HEAPS
for (int j = 0; j < spin_count; j++)
{
@@ -1696,7 +1686,7 @@ void WaitLonger (int i
#ifdef SYNCHRONIZATION_STATS
(spin_lock->num_switch_thread_w)++;
#endif //SYNCHRONIZATION_STATS
- if (g_SystemInfo.dwNumberOfProcessors > 1)
+ if (g_num_processors > 1)
{
YieldProcessor(); // indicate to the processor that we are spining
if (i & 0x01f)
@@ -1741,12 +1731,12 @@ retry:
{
if ((++i & 7) && !gc_heap::gc_started)
{
- if (g_SystemInfo.dwNumberOfProcessors > 1)
+ if (g_num_processors > 1)
{
#ifndef MULTIPLE_HEAPS
- int spin_count = 1024 * g_SystemInfo.dwNumberOfProcessors;
+ int spin_count = 1024 * g_num_processors;
#else //!MULTIPLE_HEAPS
- int spin_count = 32 * g_SystemInfo.dwNumberOfProcessors;
+ int spin_count = 32 * g_num_processors;
#endif //!MULTIPLE_HEAPS
for (int j = 0; j < spin_count; j++)
{
@@ -3790,7 +3780,7 @@ public:
_ASSERTE(pMT->SanityCheck());
bool noRangeChecks =
- (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_NO_RANGE_CHECKS) == EEConfig::HEAPVERIFY_NO_RANGE_CHECKS;
+ (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_NO_RANGE_CHECKS) == GCConfig::HEAPVERIFY_NO_RANGE_CHECKS;
BOOL fSmallObjectHeapPtr = FALSE, fLargeObjectHeapPtr = FALSE;
if (!noRangeChecks)
@@ -3814,7 +3804,7 @@ public:
#endif // FEATURE_64BIT_ALIGNMENT
#ifdef VERIFY_HEAP
- if (bDeep && (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC))
+ if (bDeep && (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC))
g_theGCHeap->ValidateObjectMember(this);
#endif
if (fSmallObjectHeapPtr)
@@ -3905,7 +3895,7 @@ public:
//This introduces a bug in the free list management.
//((void**) this)[-1] = 0; // clear the sync block,
assert (*numComponentsPtr >= 0);
- if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)
+ if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
memset (((uint8_t*)this)+sizeof(ArrayBase), 0xcc, *numComponentsPtr);
#endif //VERIFY_HEAP
}
@@ -4388,12 +4378,12 @@ static size_t get_valid_segment_size (BOOL large_seg=FALSE)
if (!large_seg)
{
initial_seg_size = INITIAL_ALLOC;
- seg_size = g_pConfig->GetSegmentSize();
+ seg_size = static_cast<size_t>(GCConfig::GetSegmentSize());
}
else
{
initial_seg_size = LHEAP_ALLOC;
- seg_size = g_pConfig->GetSegmentSize() / 2;
+ seg_size = static_cast<size_t>(GCConfig::GetSegmentSize()) / 2;
}
#ifdef MULTIPLE_HEAPS
@@ -4401,9 +4391,9 @@ static size_t get_valid_segment_size (BOOL large_seg=FALSE)
if (!large_seg)
#endif // BIT64
{
- if (g_SystemInfo.dwNumberOfProcessors > 4)
+ if (g_num_processors > 4)
initial_seg_size /= 2;
- if (g_SystemInfo.dwNumberOfProcessors > 8)
+ if (g_num_processors > 8)
initial_seg_size /= 2;
}
#endif //MULTIPLE_HEAPS
@@ -5334,7 +5324,7 @@ void gc_heap::gc_thread_function ()
}
else
{
- int spin_count = 32 * (g_SystemInfo.dwNumberOfProcessors - 1);
+ int spin_count = 32 * (g_num_processors - 1);
// wait until RestartEE has progressed to a stage where we can restart user threads
while (!gc_heap::internal_gc_done && !GCHeap::SafeToRestartManagedThreads())
@@ -5670,7 +5660,7 @@ void gc_mechanisms::first_init()
#ifdef BACKGROUND_GC
pause_mode = gc_heap::gc_can_use_concurrent ? pause_interactive : pause_batch;
#ifdef _DEBUG
- int debug_pause_mode = g_pConfig->GetGCLatencyMode();
+ int debug_pause_mode = static_cast<int>(GCConfig::GetLatencyMode());
if (debug_pause_mode >= 0)
{
assert (debug_pause_mode <= pause_sustained_low_latency);
@@ -7009,9 +6999,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
// it is impossible for alloc_size to overflow due bounds on each of
// its components.
size_t alloc_size = sizeof (uint8_t)*(sizeof(card_table_info) + cs + bs + cb + wws + st + ms);
- size_t alloc_size_aligned = Align (alloc_size, g_SystemInfo.dwAllocationGranularity-1);
-
- uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size_aligned, 0, virtual_reserve_flags);
+ uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size, 0, virtual_reserve_flags);
if (!mem)
return 0;
@@ -7025,7 +7013,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
if (!GCToOSInterface::VirtualCommit (mem, commit_size))
{
dprintf (2, ("Card table commit failed"));
- GCToOSInterface::VirtualRelease (mem, alloc_size_aligned);
+ GCToOSInterface::VirtualRelease (mem, alloc_size);
return 0;
}
@@ -7035,7 +7023,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
card_table_lowest_address (ct) = start;
card_table_highest_address (ct) = end;
card_table_brick_table (ct) = (short*)((uint8_t*)ct + cs);
- card_table_size (ct) = alloc_size_aligned;
+ card_table_size (ct) = alloc_size;
card_table_next (ct) = 0;
#ifdef CARD_BUNDLE
@@ -7216,11 +7204,10 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
// it is impossible for alloc_size to overflow due bounds on each of
// its components.
size_t alloc_size = sizeof (uint8_t)*(sizeof(card_table_info) + cs + bs + cb + wws + st + ms);
- size_t alloc_size_aligned = Align (alloc_size, g_SystemInfo.dwAllocationGranularity-1);
dprintf (GC_TABLE_LOG, ("card table: %Id; brick table: %Id; card bundle: %Id; sw ww table: %Id; seg table: %Id; mark array: %Id",
cs, bs, cb, wws, st, ms));
- uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size_aligned, 0, virtual_reserve_flags);
+ uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size, 0, virtual_reserve_flags);
if (!mem)
{
@@ -7417,7 +7404,7 @@ fail:
#endif
//delete (uint32_t*)((uint8_t*)ct - sizeof(card_table_info));
- if (!GCToOSInterface::VirtualRelease (mem, alloc_size_aligned))
+ if (!GCToOSInterface::VirtualRelease (mem, alloc_size))
{
dprintf (GC_TABLE_LOG, ("GCToOSInterface::VirtualRelease failed"));
assert (!"release failed");
@@ -9343,7 +9330,7 @@ void gc_heap::rearrange_large_heap_segments()
while (seg)
{
heap_segment* next_seg = heap_segment_next (seg);
- delete_heap_segment (seg, (g_pConfig->GetGCRetainVM() != 0));
+ delete_heap_segment (seg, GCConfig::GetRetainVM());
seg = next_seg;
}
freeable_large_heap_segment = 0;
@@ -9386,7 +9373,7 @@ void gc_heap::rearrange_heap_segments(BOOL compacting)
assert (prev_seg);
assert (seg != ephemeral_heap_segment);
heap_segment_next (prev_seg) = next_seg;
- delete_heap_segment (seg, (g_pConfig->GetGCRetainVM() != 0));
+ delete_heap_segment (seg, GCConfig::GetRetainVM());
dprintf (2, ("Deleting heap segment %Ix", (size_t)seg));
}
@@ -9789,28 +9776,20 @@ void gc_heap::adjust_ephemeral_limits ()
}
#if defined(TRACE_GC) || defined(GC_CONFIG_DRIVEN)
-FILE* CreateLogFile(const CLRConfig::ConfigStringInfo & info, BOOL is_config)
+FILE* CreateLogFile(const GCConfigStringHolder& temp_logfile_name, bool is_config)
{
FILE* logFile;
- TCHAR * temp_logfile_name = NULL;
- CLRConfig::GetConfigValue(info, &temp_logfile_name);
- TCHAR logfile_name[MAX_LONGPATH+1];
- if (temp_logfile_name != 0)
+ if (!temp_logfile_name.Get())
{
- _tcscpy(logfile_name, temp_logfile_name);
+ return nullptr;
}
- size_t logfile_name_len = _tcslen(logfile_name);
- TCHAR* szPid = logfile_name + logfile_name_len;
- size_t remaining_space = MAX_LONGPATH + 1 - logfile_name_len;
-
- _stprintf_s(szPid, remaining_space, _T(".%d%s"), GCToOSInterface::GetCurrentProcessId(), (is_config ? _T(".config.log") : _T(".log")));
-
- logFile = _tfopen(logfile_name, _T("wb"));
-
- delete temp_logfile_name;
-
+ char logfile_name[MAX_LONGPATH+1];
+ uint32_t pid = GCToOSInterface::GetCurrentProcessId();
+ const char* suffix = is_config ? ".config.log" : ".log";
+ _snprintf_s(logfile_name, MAX_LONGPATH+1, _TRUNCATE, "%s.%d%s", temp_logfile_name.Get(), pid, suffix);
+ logFile = fopen(logfile_name, "wb");
return logFile;
}
#endif //TRACE_GC || GC_CONFIG_DRIVEN
@@ -9823,18 +9802,17 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
)
{
#ifdef TRACE_GC
- int log_last_gcs = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_GCLogEnabled);
- if (log_last_gcs)
+ if (GCConfig::GetLogEnabled())
{
- gc_log = CreateLogFile(CLRConfig::UNSUPPORTED_GCLogFile, FALSE);
+ gc_log = CreateLogFile(GCConfig::GetLogFile(), false);
if (gc_log == NULL)
return E_FAIL;
// GCLogFileSize in MBs.
- gc_log_file_size = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_GCLogFileSize);
+ gc_log_file_size = static_cast<size_t>(GCConfig::GetLogFileSize());
- if (gc_log_file_size > 500)
+ if (gc_log_file_size <= 0 || gc_log_file_size > 500)
{
fclose (gc_log);
return E_FAIL;
@@ -9855,10 +9833,9 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
#endif // TRACE_GC
#ifdef GC_CONFIG_DRIVEN
- gc_config_log_on = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_GCConfigLogEnabled);
- if (gc_config_log_on)
+ if (GCConfig::GetConfigLogEnabled())
{
- gc_config_log = CreateLogFile(CLRConfig::UNSUPPORTED_GCConfigLogFile, TRUE);
+ gc_config_log = CreateLogFile(GCConfig::GetConfigLogFile(), true);
if (gc_config_log == NULL)
return E_FAIL;
@@ -9870,7 +9847,7 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
return E_FAIL;
}
- compact_ratio = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_GCCompactRatio);
+ compact_ratio = static_cast<int>(GCConfig::GetCompactRatio());
// h# | GC | gen | C | EX | NF | BF | ML | DM || PreS | PostS | Merge | Conv | Pre | Post | PrPo | PreP | PostP |
cprintf (("%2s | %6s | %1s | %1s | %2s | %2s | %2s | %2s | %2s || %5s | %5s | %5s | %5s | %5s | %5s | %5s | %5s | %5s |",
@@ -9897,10 +9874,15 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
#endif //GC_CONFIG_DRIVEN
#ifdef GC_STATS
- GCStatistics::logFileName = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_GCMixLog);
- if (GCStatistics::logFileName != NULL)
+ GCConfigStringHolder logFileName = GCConfig::GetMixLogFile();
+ if (logFileName.Get() != nullptr)
{
- GCStatistics::logFile = _tfopen(GCStatistics::logFileName, _T("a"));
+ GCStatistics::logFileName = _strdup(logFileName.Get());
+ GCStatistics::logFile = fopen(GCStatistics::logFileName, "a");
+ if (!GCStatistics::logFile)
+ {
+ return E_FAIL;
+ }
}
#endif // GC_STATS
@@ -9909,7 +9891,7 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
#ifdef WRITE_WATCH
hardware_write_watch_api_supported();
#ifdef BACKGROUND_GC
- if (can_use_write_watch_for_gc_heap() && g_pConfig->GetGCconcurrent() != 0)
+ if (can_use_write_watch_for_gc_heap() && GCConfig::GetConcurrentGC())
{
gc_can_use_concurrent = true;
#ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
@@ -10009,11 +9991,6 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
#endif //MULTIPLE_HEAPS
-#ifdef TRACE_GC
- print_level = g_pConfig->GetGCprnLvl();
- gc_trace_fac = g_pConfig->GetGCtraceFac();
-#endif //TRACE_GC
-
if (!init_semi_shared())
{
hres = E_FAIL;
@@ -10095,14 +10072,14 @@ gc_heap::init_semi_shared()
should_expand_in_full_gc = FALSE;
#ifdef FEATURE_LOH_COMPACTION
- loh_compaction_always_p = (g_pConfig->GetGCLOHCompactionMode() != 0);
+ loh_compaction_always_p = GCConfig::GetLOHCompactionMode() != 0;
loh_compaction_mode = loh_compaction_default;
#endif //FEATURE_LOH_COMPACTION
#ifdef BACKGROUND_GC
memset (ephemeral_fgc_counts, 0, sizeof (ephemeral_fgc_counts));
- bgc_alloc_spin_count = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_BGCSpinCount);
- bgc_alloc_spin = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_BGCSpin);
+ bgc_alloc_spin_count = static_cast<uint32_t>(GCConfig::GetBGCSpinCount());
+ bgc_alloc_spin = static_cast<uint32_t>(GCConfig::GetBGCSpin());
{
int number_bgc_threads = 1;
@@ -10267,9 +10244,9 @@ retry:
{
while (gc_done_event_lock >= 0)
{
- if (g_SystemInfo.dwNumberOfProcessors > 1)
+ if (g_num_processors > 1)
{
- int spin_count = 32 * g_SystemInfo.dwNumberOfProcessors;
+ int spin_count = 32 * g_num_processors;
for (int j = 0; j < spin_count; j++)
{
if (gc_done_event_lock < 0)
@@ -11534,7 +11511,7 @@ void gc_heap::handle_oom (int heap_num, oom_reason reason, size_t alloc_size,
// Break early - before the more_space_lock is release so no other threads
// could have allocated on the same heap when OOM happened.
- if (g_pConfig->IsGCBreakOnOOMEnabled())
+ if (GCConfig::GetBreakOnOOM())
{
GCToOSInterface::DebugBreak();
}
@@ -11927,7 +11904,7 @@ void gc_heap::bgc_loh_alloc_clr (uint8_t* alloc_start,
#ifdef VERIFY_HEAP
// since we filled in 0xcc for free object when we verify heap,
// we need to make sure we clear those bytes.
- if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)
+ if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
{
if (size_to_clear < saved_size_to_clear)
{
@@ -13136,7 +13113,7 @@ int gc_heap::try_allocate_more_space (alloc_context* acontext, size_t size,
#ifdef SYNCHRONIZATION_STATS
good_suspension++;
#endif //SYNCHRONIZATION_STATS
- BOOL fStress = (g_pConfig->GetGCStressLevel() & EEConfig::GCSTRESS_TRANSITION) != 0;
+ BOOL fStress = (g_pConfig->GetGCStressLevel() & GCConfig::GCSTRESS_TRANSITION) != 0;
if (!fStress)
{
//Rendez vous early (MP scaling issue)
@@ -14484,6 +14461,8 @@ int gc_heap::joined_generation_to_condemn (BOOL should_evaluate_elevation,
// We can only do Concurrent GC Stress if the caller did not explicitly ask for all
// generations to be collected,
+ // [LOCALGC TODO] STRESS_HEAP is not defined for a standalone GC so there are multiple
+ // things that need to be fixed in this code block.
if (n_original != max_generation &&
g_pConfig->GetGCStressLevel() && gc_can_use_concurrent)
{
@@ -15543,7 +15522,6 @@ void gc_heap::gc1()
}
descr_generations (FALSE);
- descr_card_table();
verify_soh_segment_list();
@@ -15577,7 +15555,7 @@ void gc_heap::gc1()
// value. If we ever allow randomly adjusting this as the process runs,
// we cannot call it this way as joins need to match - we must have the same
// value for all heaps like we do with bgc_heap_walk_for_etw_p.
- || (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)
+ || (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
#endif
#if defined(FEATURE_EVENT_TRACE) && defined(BACKGROUND_GC)
|| (bgc_heap_walk_for_etw_p && settings.concurrent)
@@ -15639,7 +15617,7 @@ void gc_heap::gc1()
#endif //BACKGROUND_GC
#ifdef VERIFY_HEAP
- if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)
+ if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
verify_heap (FALSE);
#endif // VERIFY_HEAP
@@ -16594,14 +16572,6 @@ int gc_heap::garbage_collect (int n)
if (gc_t_join.joined())
#endif //MULTIPLE_HEAPS
{
-#ifdef TRACE_GC
- int gc_count = (int)dd_collection_count (dynamic_data_of (0));
- if (gc_count >= g_pConfig->GetGCtraceStart())
- trace_gc = 1;
- if (gc_count >= g_pConfig->GetGCtraceEnd())
- trace_gc = 0;
-#endif //TRACE_GC
-
#ifdef MULTIPLE_HEAPS
#if !defined(SEG_MAPPING_TABLE) && !defined(FEATURE_BASICFREEZE)
//delete old slots from the segment table
@@ -16748,12 +16718,12 @@ int gc_heap::garbage_collect (int n)
// descr_card_table();
#ifdef VERIFY_HEAP
- if ((g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC) &&
- !(g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_POST_GC_ONLY))
+ if ((GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) &&
+ !(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_POST_GC_ONLY))
{
verify_heap (TRUE);
}
- if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_BARRIERCHECK)
+ if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_BARRIERCHECK)
checkGCWriteBarrier();
#endif // VERIFY_HEAP
@@ -17125,7 +17095,7 @@ uint8_t* gc_heap::find_object (uint8_t* interior, uint8_t* low)
heap_segment* seg = find_segment_per_heap (interior, FALSE);
if (seg
#ifdef FEATURE_CONSERVATIVE_GC
- && (!g_pConfig->GetGCConservative() || interior <= heap_segment_allocated(seg))
+ && (GCConfig::GetConservativeGC() || interior <= heap_segment_allocated(seg))
#endif
)
{
@@ -17133,7 +17103,7 @@ uint8_t* gc_heap::find_object (uint8_t* interior, uint8_t* low)
// we don't have brick entry for it, and we may incorrectly treat it as on large object heap.
int align_const = get_alignment_constant (heap_segment_read_only_p (seg)
#ifdef FEATURE_CONSERVATIVE_GC
- || (g_pConfig->GetGCConservative() && !heap_segment_loh_p (seg))
+ || (GCConfig::GetConservativeGC() && !heap_segment_loh_p (seg))
#endif
);
//int align_const = get_alignment_constant (heap_segment_read_only_p (seg));
@@ -18603,7 +18573,7 @@ void gc_heap::background_promote (Object** ppObject, ScanContext* sc, uint32_t f
#ifdef FEATURE_CONSERVATIVE_GC
// For conservative GC, a value on stack may point to middle of a free object.
// In this case, we don't need to promote the pointer.
- if (g_pConfig->GetGCConservative() && ((CObjectHeader*)o)->IsFree())
+ if (GCConfig::GetConservativeGC() && ((CObjectHeader*)o)->IsFree())
{
return;
}
@@ -23714,7 +23684,7 @@ void gc_heap::relocate_survivor_helper (uint8_t* plug, uint8_t* plug_end)
void gc_heap::verify_pins_with_post_plug_info (const char* msg)
{
#if defined (_DEBUG) && defined (VERIFY_HEAP)
- if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)
+ if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
{
if (!verify_pinned_queue_p)
return;
@@ -26596,7 +26566,7 @@ void gc_heap::background_promote_callback (Object** ppObject, ScanContext* sc,
#ifdef FEATURE_CONSERVATIVE_GC
// For conservative GC, a value on stack may point to middle of a free object.
// In this case, we don't need to promote the pointer.
- if (g_pConfig->GetGCConservative() && ((CObjectHeader*)o)->IsFree())
+ if (GCConfig::GetConservativeGC() && ((CObjectHeader*)o)->IsFree())
{
return;
}
@@ -29192,7 +29162,7 @@ gc_heap::realloc_plugs (generation* consing_gen, heap_segment* seg,
void gc_heap::verify_no_pins (uint8_t* start, uint8_t* end)
{
#ifdef VERIFY_HEAP
- if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)
+ if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
{
BOOL contains_pinned_plugs = FALSE;
size_t mi = 0;
@@ -30259,7 +30229,7 @@ BOOL gc_heap::decide_on_compacting (int condemned_gen_number,
#endif // GC_STATS
#endif //STRESS_HEAP
- if (g_pConfig->GetGCForceCompact())
+ if (GCConfig::GetForceCompact())
should_compact = TRUE;
if ((condemned_gen_number == max_generation) && last_gc_before_oom)
@@ -30596,7 +30566,7 @@ CObjectHeader* gc_heap::allocate_large_object (size_t jsize, int64_t& alloc_byte
if (jsize >= maxObjectSize)
{
- if (g_pConfig->IsGCBreakOnOOMEnabled())
+ if (GCConfig::GetBreakOnOOM())
{
GCToOSInterface::DebugBreak();
}
@@ -30822,8 +30792,8 @@ void gc_heap::set_mem_verify (uint8_t* start, uint8_t* end, uint8_t b)
#ifdef VERIFY_HEAP
if (end > start)
{
- if ((g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC) &&
- !(g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_NO_MEM_FILL))
+ if ((GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) &&
+ !(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_NO_MEM_FILL))
{
dprintf (3, ("setting mem to %c [%Ix, [%Ix", b, start, end));
memset (start, b, (end - start));
@@ -31182,7 +31152,7 @@ void gc_heap::background_ephemeral_sweep()
// the following line is temporary.
heap_segment_saved_bg_allocated (ephemeral_heap_segment) = plug_end;
#ifdef VERIFY_HEAP
- if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)
+ if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
{
make_unused_array (plug_end, (end - plug_end));
}
@@ -31982,36 +31952,6 @@ void gc_heap::descr_segment (heap_segment* seg )
#endif // TRACE_GC
}
-void gc_heap::descr_card_table ()
-{
-#ifdef TRACE_GC
- if (trace_gc && (print_level >= 4))
- {
- ptrdiff_t min = -1;
- dprintf(3,("Card Table set at: "));
- for (size_t i = card_of (lowest_address); i < card_of (highest_address); i++)
- {
- if (card_set_p (i))
- {
- if (min == -1)
- {
- min = i;
- }
- }
- else
- {
- if (! ((min == -1)))
- {
- dprintf (3,("[%Ix %Ix[, ",
- (size_t)card_address (min), (size_t)card_address (i)));
- min = -1;
- }
- }
- }
- }
-#endif //TRACE_GC
-}
-
void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context)
{
#ifdef MULTIPLE_HEAPS
@@ -32395,7 +32335,7 @@ BOOL gc_heap::bgc_mark_array_range (heap_segment* seg,
void gc_heap::bgc_verify_mark_array_cleared (heap_segment* seg)
{
#if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
- if (recursive_gc_sync::background_running_p() && g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)
+ if (recursive_gc_sync::background_running_p() && GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
{
uint8_t* range_beg = 0;
uint8_t* range_end = 0;
@@ -32579,7 +32519,7 @@ void gc_heap::verify_mark_array_cleared (heap_segment* seg)
void gc_heap::verify_mark_array_cleared ()
{
#if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
- if (recursive_gc_sync::background_running_p() && g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)
+ if (recursive_gc_sync::background_running_p() && GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
{
generation* gen = generation_of (max_generation);
heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
@@ -32609,7 +32549,7 @@ void gc_heap::verify_mark_array_cleared ()
void gc_heap::verify_seg_end_mark_array_cleared()
{
#if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
- if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)
+ if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
{
generation* gen = generation_of (max_generation);
heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
@@ -32671,7 +32611,7 @@ void gc_heap::verify_seg_end_mark_array_cleared()
void gc_heap::verify_soh_segment_list()
{
#ifdef VERIFY_HEAP
- if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)
+ if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
{
generation* gen = generation_of (max_generation);
heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
@@ -32857,7 +32797,7 @@ gc_heap::verify_free_lists ()
void
gc_heap::verify_heap (BOOL begin_gc_p)
{
- int heap_verify_level = g_pConfig->GetHeapVerifyLevel();
+ int heap_verify_level = static_cast<int>(GCConfig::GetHeapVerifyLevel());
size_t last_valid_brick = 0;
BOOL bCurrentBrickInvalid = FALSE;
BOOL large_brick_p = TRUE;
@@ -32919,7 +32859,7 @@ gc_heap::verify_heap (BOOL begin_gc_p)
if (!settings.concurrent)
#endif //BACKGROUND_GC
{
- if (!(heap_verify_level & EEConfig::HEAPVERIFY_NO_MEM_FILL))
+ if (!(heap_verify_level & GCConfig::HEAPVERIFY_NO_MEM_FILL))
{
//uninit the unused portions of segments.
generation* gen1 = large_object_generation;
@@ -33199,7 +33139,7 @@ gc_heap::verify_heap (BOOL begin_gc_p)
#endif //BACKGROUND_GC
BOOL deep_verify_obj = can_verify_deep;
- if ((heap_verify_level & EEConfig::HEAPVERIFY_DEEP_ON_COMPACT) && !settings.compaction)
+ if ((heap_verify_level & GCConfig::HEAPVERIFY_DEEP_ON_COMPACT) && !settings.compaction)
deep_verify_obj = FALSE;
((CObjectHeader*)curr_object)->ValidateHeap((Object*)curr_object, deep_verify_obj);
@@ -33492,7 +33432,10 @@ HRESULT GCHeap::Initialize ()
return E_FAIL;
}
+
g_gc_pFreeObjectMethodTable = GCToEEInterface::GetFreeObjectMethodTable();
+ g_num_processors = GCToOSInterface::GetTotalProcessorCount();
+ assert(g_num_processors != 0);
//Initialize the static members.
#ifdef TRACE_GC
@@ -33507,10 +33450,10 @@ HRESULT GCHeap::Initialize ()
gc_heap::min_segment_size = min (seg_size, large_seg_size);
#ifdef MULTIPLE_HEAPS
- if (g_pConfig->GetGCNoAffinitize())
+ if (GCConfig::GetNoAffinitize())
gc_heap::gc_thread_no_affinitize_p = true;
- uint32_t nhp_from_config = g_pConfig->GetGCHeapCount();
+ uint32_t nhp_from_config = static_cast<uint32_t>(GCConfig::GetHeapCount());
// GetGCProcessCpuCount only returns up to 64 procs.
uint32_t nhp_from_process = CPUGroupInfo::CanEnableGCCPUGroups() ?
CPUGroupInfo::GetNumActiveProcessors():
@@ -33533,7 +33476,7 @@ HRESULT GCHeap::Initialize ()
gc_heap::mem_one_percent = gc_heap::total_physical_mem / 100;
#ifndef MULTIPLE_HEAPS
- gc_heap::mem_one_percent /= g_SystemInfo.dwNumberOfProcessors;
+ gc_heap::mem_one_percent /= g_num_processors;
#endif //!MULTIPLE_HEAPS
// We should only use this if we are in the "many process" mode which really is only applicable
@@ -33545,7 +33488,7 @@ HRESULT GCHeap::Initialize ()
int available_mem_th = 10;
if (gc_heap::total_physical_mem >= ((uint64_t)80 * 1024 * 1024 * 1024))
{
- int adjusted_available_mem_th = 3 + (int)((float)47 / (float)(g_SystemInfo.dwNumberOfProcessors));
+ int adjusted_available_mem_th = 3 + (int)((float)47 / (float)(g_num_processors));
available_mem_th = min (available_mem_th, adjusted_available_mem_th);
}
@@ -33813,7 +33756,7 @@ void GCHeap::Promote(Object** ppObject, ScanContext* sc, uint32_t flags)
#ifdef FEATURE_CONSERVATIVE_GC
// For conservative GC, a value on stack may point to middle of a free object.
// In this case, we don't need to promote the pointer.
- if (g_pConfig->GetGCConservative()
+ if (GCConfig::GetConservativeGC()
&& ((CObjectHeader*)o)->IsFree())
{
return;
@@ -34086,14 +34029,7 @@ bool GCHeap::StressHeap(gc_alloc_context * context)
str->SetMethodTable (g_pStringClass);
str->SetStringLength (strLen);
-#if CHECK_APP_DOMAIN_LEAKS
- if (g_pConfig->AppDomainLeaks() && str->SetAppDomainNoThrow())
- {
-#endif
- HndAssignHandle(m_StressObjs[i], ObjectToOBJECTREF(str));
-#if CHECK_APP_DOMAIN_LEAKS
- }
-#endif
+ HndAssignHandle(m_StressObjs[i], ObjectToOBJECTREF(str));
}
i = (i + 1) % NUM_HEAP_STRESS_OBJS;
if (i == m_CurStressObj) break;
@@ -35510,7 +35446,7 @@ size_t GCHeap::GetValidSegmentSize(bool large_seg)
// Get the max gen0 heap size, making sure it conforms.
size_t GCHeap::GetValidGen0MaxSize(size_t seg_size)
{
- size_t gen0size = g_pConfig->GetGCgen0size();
+ size_t gen0size = static_cast<size_t>(GCConfig::GetGen0Size());
if ((gen0size == 0) || !g_theGCHeap->IsValidGen0MaxSize(gen0size))
{
@@ -35716,7 +35652,7 @@ bool CFinalize::Initialize()
{
ASSERT (m_Array);
STRESS_LOG_OOM_STACK(sizeof(Object*[100]));
- if (g_pConfig->IsGCBreakOnOOMEnabled())
+ if (GCConfig::GetBreakOnOOM())
{
GCToOSInterface::DebugBreak();
}
@@ -35825,7 +35761,7 @@ CFinalize::RegisterForFinalization (int gen, Object* obj, size_t size)
((CObjectHeader*)obj)->SetFree(size);
}
STRESS_LOG_OOM_STACK(0);
- if (g_pConfig->IsGCBreakOnOOMEnabled())
+ if (GCConfig::GetBreakOnOOM())
{
GCToOSInterface::DebugBreak();
}
@@ -36506,7 +36442,7 @@ void deleteGCShadow()
// Called at startup and right after a GC, get a snapshot of the GC Heap
void initGCShadow()
{
- if (!(g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_BARRIERCHECK))
+ if (!(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_BARRIERCHECK))
return;
size_t len = g_gc_highest_address - g_gc_lowest_address;
diff --git a/src/gc/gc.h b/src/gc/gc.h
index 07ae6c916c..822fd42a54 100644
--- a/src/gc/gc.h
+++ b/src/gc/gc.h
@@ -27,16 +27,18 @@ Module Name:
#include "gcinterface.h"
#include "env/gcenv.os.h"
-#include "env/gcenv.ee.h"
-#ifdef FEATURE_STANDALONE_GC
+#ifdef BUILD_AS_STANDALONE
#include "gcenv.ee.standalone.inl"
// GCStress does not currently work with Standalone GC
#ifdef STRESS_HEAP
#undef STRESS_HEAP
#endif // STRESS_HEAP
-#endif // FEATURE_STANDALONE_GC
+#else
+#include "env/gcenv.ee.h"
+#endif // BUILD_AS_STANDALONE
+#include "gcconfig.h"
/*
* Promotion Function Prototypes
@@ -114,6 +116,7 @@ extern "C" uint8_t* g_gc_highest_address;
extern "C" GCHeapType g_gc_heap_type;
extern "C" uint32_t g_max_generation;
extern "C" MethodTable* g_gc_pFreeObjectMethodTable;
+extern "C" uint32_t g_num_processors;
::IGCHandleManager* CreateGCHandleManager();
diff --git a/src/gc/gccommon.cpp b/src/gc/gccommon.cpp
index 4950809cda..932f4a2c33 100644
--- a/src/gc/gccommon.cpp
+++ b/src/gc/gccommon.cpp
@@ -17,9 +17,9 @@
IGCHeapInternal* g_theGCHeap;
IGCHandleManager* g_theGCHandleManager;
-#ifdef FEATURE_STANDALONE_GC
+#ifdef BUILD_AS_STANDALONE
IGCToCLR* g_theGCToCLR;
-#endif // FEATURE_STANDALONE_GC
+#endif // BUILD_AS_STANDALONE
#ifdef GC_CONFIG_DRIVEN
size_t gc_global_mechanisms[MAX_GLOBAL_GC_MECHANISMS_COUNT];
@@ -44,6 +44,7 @@ uint8_t* g_gc_highest_address = 0;
GCHeapType g_gc_heap_type = GC_HEAP_INVALID;
uint32_t g_max_generation = max_generation;
MethodTable* g_gc_pFreeObjectMethodTable = nullptr;
+uint32_t g_num_processors = 0;
#ifdef GC_CONFIG_DRIVEN
void record_global_mechanism (int mech_index)
@@ -113,26 +114,6 @@ void record_changed_seg (uint8_t* start, uint8_t* end,
}
}
-// The runtime needs to know whether we're using workstation or server GC
-// long before the GCHeap is created.
-void InitializeHeapType(bool bServerHeap)
-{
- LIMITED_METHOD_CONTRACT;
-#ifdef FEATURE_SVR_GC
- g_gc_heap_type = bServerHeap ? GC_HEAP_SVR : GC_HEAP_WKS;
-#ifdef WRITE_BARRIER_CHECK
- if (g_gc_heap_type == GC_HEAP_SVR)
- {
- g_GCShadow = 0;
- g_GCShadowEnd = 0;
- }
-#endif // WRITE_BARRIER_CHECK
-#else // FEATURE_SVR_GC
- UNREFERENCED_PARAMETER(bServerHeap);
- CONSISTENCY_CHECK(bServerHeap == false);
-#endif // FEATURE_SVR_GC
-}
-
namespace WKS
{
extern void PopulateDacVars(GcDacVars* dacVars);
@@ -143,7 +124,30 @@ namespace SVR
extern void PopulateDacVars(GcDacVars* dacVars);
}
-bool InitializeGarbageCollector(IGCToCLR* clrToGC, IGCHeap** gcHeap, IGCHandleManager** gcHandleManager, GcDacVars* gcDacVars)
+//------------------------------------------------------------------
+// Externally-facing GC symbols, used to initialize the GC
+// -----------------------------------------------------------------
+
+#ifdef _MSC_VER
+#define DLLEXPORT __declspec(dllexport)
+#else
+#define DLLEXPORT __attribute__ ((visibility ("default")))
+#endif // _MSC_VER
+
+#ifdef BUILD_AS_STANDALONE
+#define GC_API extern "C" DLLEXPORT
+#else
+#define GC_API extern "C"
+#endif // BUILD_AS_STANDALONE
+
+GC_API
+bool
+InitializeGarbageCollector(
+ /* In */ IGCToCLR* clrToGC,
+ /* Out */ IGCHeap** gcHeap,
+ /* Out */ IGCHandleManager** gcHandleManager,
+ /* Out */ GcDacVars* gcDacVars
+ )
{
LIMITED_METHOD_CONTRACT;
@@ -153,6 +157,18 @@ bool InitializeGarbageCollector(IGCToCLR* clrToGC, IGCHeap** gcHeap, IGCHandleMa
assert(gcHeap != nullptr);
assert(gcHandleManager != nullptr);
+#ifdef BUILD_AS_STANDALONE
+ assert(clrToGC != nullptr);
+ g_theGCToCLR = clrToGC;
+#else
+ UNREFERENCED_PARAMETER(clrToGC);
+ assert(clrToGC == nullptr);
+#endif
+
+ // Initialize GCConfig before anything else - initialization of our
+ // various components may want to query the current configuration.
+ GCConfig::Initialize();
+
IGCHandleManager* handleManager = CreateGCHandleManager();
if (handleManager == nullptr)
{
@@ -160,19 +176,25 @@ bool InitializeGarbageCollector(IGCToCLR* clrToGC, IGCHeap** gcHeap, IGCHandleMa
}
#ifdef FEATURE_SVR_GC
- assert(g_gc_heap_type != GC_HEAP_INVALID);
-
- if (g_gc_heap_type == GC_HEAP_SVR)
+ if (GCConfig::GetServerGC())
{
+#ifdef WRITE_BARRIER_CHECK
+ g_GCShadow = 0;
+ g_GCShadowEnd = 0;
+#endif // WRITE_BARRIER_CHECK
+
+ g_gc_heap_type = GC_HEAP_SVR;
heap = SVR::CreateGCHeap();
SVR::PopulateDacVars(gcDacVars);
}
else
{
+ g_gc_heap_type = GC_HEAP_WKS;
heap = WKS::CreateGCHeap();
WKS::PopulateDacVars(gcDacVars);
}
#else
+ g_gc_heap_type = GC_HEAP_WKS;
heap = WKS::CreateGCHeap();
WKS::PopulateDacVars(gcDacVars);
#endif
@@ -183,15 +205,6 @@ bool InitializeGarbageCollector(IGCToCLR* clrToGC, IGCHeap** gcHeap, IGCHandleMa
}
g_theGCHeap = heap;
-
-#ifdef FEATURE_STANDALONE_GC
- assert(clrToGC != nullptr);
- g_theGCToCLR = clrToGC;
-#else
- UNREFERENCED_PARAMETER(clrToGC);
- assert(clrToGC == nullptr);
-#endif
-
*gcHandleManager = handleManager;
*gcHeap = heap;
return true;
diff --git a/src/gc/gcconfig.cpp b/src/gc/gcconfig.cpp
new file mode 100644
index 0000000000..d84a5a5801
--- /dev/null
+++ b/src/gc/gcconfig.cpp
@@ -0,0 +1,48 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include "common.h"
+#include "gcenv.h"
+#include "gc.h"
+
+#define BOOL_CONFIG(name, key, default, unused_doc) \
+ bool GCConfig::Get##name() { return s_##name; } \
+ bool GCConfig::s_##name = default;
+
+#define INT_CONFIG(name, key, default, unused_doc) \
+ int64_t GCConfig::Get##name() { return s_##name; } \
+ int64_t GCConfig::s_##name = default;
+
+// String configs are not cached because 1) they are rare and
+// not on hot paths and 2) they involve transfers of ownership
+// of EE-allocated strings, which is potentially complicated.
+#define STRING_CONFIG(name, key, unused_doc) \
+ GCConfigStringHolder GCConfig::Get##name() \
+ { \
+ const char* resultStr = nullptr; \
+ GCToEEInterface::GetStringConfigValue(key, &resultStr); \
+ return GCConfigStringHolder(resultStr); \
+ }
+
+GC_CONFIGURATION_KEYS
+
+#undef BOOL_CONFIG
+#undef INT_CONFIG
+#undef STRING_CONFIG
+
+void GCConfig::Initialize()
+{
+#define BOOL_CONFIG(name, key, default, unused_doc) \
+ GCToEEInterface::GetBooleanConfigValue(key, &s_##name);
+
+#define INT_CONFIG(name, key, default, unused_doc) \
+ GCToEEInterface::GetIntConfigValue(key, &s_##name);
+
+#define STRING_CONFIG(unused_name, unused_key, unused_doc)
+
+GC_CONFIGURATION_KEYS
+
+#undef BOOL_CONFIG
+#undef INT_CONFIG
+}
diff --git a/src/gc/gcconfig.h b/src/gc/gcconfig.h
new file mode 100644
index 0000000000..3a95857430
--- /dev/null
+++ b/src/gc/gcconfig.h
@@ -0,0 +1,137 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#ifndef __GCCONFIG_H__
+#define __GCCONFIG_H__
+
+// gcconfig.h - GC configuration management and retrieval.
+//
+// This file and the GCConfig class are designed to be the primary entry point
+// for querying configuration information from within the GC.
+
+// GCConfigStringHolder is a wrapper around a configuration string obtained
+// from the EE. Such strings must be disposed using GCToEEInterface::FreeStringConfigValue,
+// so this class ensures that is done correctly.
+//
+// The name is unfortunately a little long, but "ConfigStringHolder" is already taken by the
+// EE's config mechanism.
+class GCConfigStringHolder
+{
+private:
+ const char* m_str;
+
+public:
+ // Constructs a new GCConfigStringHolder around a string obtained from
+ // GCToEEInterface::GetStringConfigValue.
+ explicit GCConfigStringHolder(const char* str)
+ : m_str(str) {}
+
+ // No copy operators - this type cannot be copied.
+ GCConfigStringHolder(const GCConfigStringHolder&) = delete;
+ GCConfigStringHolder& operator=(const GCConfigStringHolder&) = delete;
+
+ // This type is returned by-value by string config functions, so it
+ // requires a move constructor.
+ GCConfigStringHolder(GCConfigStringHolder&&) = default;
+
+ // Frees a string config value by delegating to GCToEEInterface::FreeStringConfigValue.
+ ~GCConfigStringHolder()
+ {
+ if (m_str)
+ {
+ GCToEEInterface::FreeStringConfigValue(m_str);
+ }
+
+ m_str = nullptr;
+ }
+
+ // Retrieves the wrapped config string.
+ const char* Get() const { return m_str; }
+};
+
+// Each one of these keys produces a method on GCConfig with the name "Get{name}", where {name}
+// is the first parameter of the *_CONFIG macros below.
+#define GC_CONFIGURATION_KEYS \
+ BOOL_CONFIG(ServerGC, "gcServer", false, "Whether we should be using Server GC") \
+ BOOL_CONFIG(ConcurrentGC, "gcConcurrent", true, "Whether we should be using Concurrent GC") \
+ BOOL_CONFIG(ConservativeGC, "gcConservative", false, "Enables/Disables conservative GC") \
+ BOOL_CONFIG(ForceCompact, "gcForceCompact", false, \
+ "When set to true, always do compacting GC") \
+ BOOL_CONFIG(RetainVM, "GCRetainVM", false, \
+ "When set we put the segments that should be deleted on a standby list (instead of " \
+ "releasing them back to the OS) which will be considered to satisfy new segment requests"\
+ " (note that the same thing can be specified via API which is the supported way)") \
+ BOOL_CONFIG(StressMix, "GCStressMix", false, \
+ "Specifies whether the GC mix mode is enabled or not") \
+ BOOL_CONFIG(BreakOnOOM, "GCBreakOnOOM", false, \
+ "Does a DebugBreak at the soonest time we detect an OOM") \
+ BOOL_CONFIG(NoAffinitize, "GCNoAffinitize", false, \
+ "If set, do not affinitize server GC threads") \
+ BOOL_CONFIG(LogEnabled, "GCLogEnabled", false, \
+ "Specifies if you want to turn on logging in GC") \
+ BOOL_CONFIG(ConfigLogEnabled, "GCConfigLogEnabled", false, \
+ "Specifies the name of the GC config log file") \
+ INT_CONFIG(HeapVerifyLevel, "HeapVerify", HEAPVERIFY_NONE, \
+ "When set verifies the integrity of the managed heap on entry and exit of each GC") \
+ INT_CONFIG(LOHCompactionMode, "GCLOHCompact", 0, "Specifies the LOH compaction mode") \
+ INT_CONFIG(BGCSpinCount, "BGCSpinCount", 140, "Specifies the bgc spin count") \
+ INT_CONFIG(BGCSpin, "BGCSpin", 2, "Specifies the bgc spin time") \
+ INT_CONFIG(HeapCount, "GCHeapCount", 0, "Specifies the number of server GC heaps") \
+ INT_CONFIG(Gen0Size, "GCgen0size", 0, "Specifies the smallest gen0 size") \
+ INT_CONFIG(SegmentSize, "GCSegmentSize", 0, "Specifies the managed heap segment size") \
+ INT_CONFIG(LatencyMode, "GCLatencyMode", -1, \
+ "Specifies the GC latency mode - batch, interactive or low latency (note that the same " \
+ "thing can be specified via API which is the supported way") \
+ INT_CONFIG(LogFileSize, "GCLogFileSize", 0, "Specifies the GC log file size") \
+ INT_CONFIG(CompactRatio, "GCCompactRatio", 0, \
+ "Specifies the ratio compacting GCs vs sweeping") \
+ STRING_CONFIG(LogFile, "GCLogFile", "Specifies the name of the GC log file") \
+ STRING_CONFIG(ConfigLogFile, "GCConfigLogFile", \
+ "Specifies the name of the GC config log file") \
+ STRING_CONFIG(MixLogFile, "GCMixLog", \
+ "Specifies the name of the log file for GC mix statistics")
+
+// This class is responsible for retrieving configuration information
+// for how the GC should operate.
+class GCConfig
+{
+#define BOOL_CONFIG(name, unused_key, unused_default, unused_doc) \
+ public: static bool Get##name(); \
+ private: static bool s_##name;
+#define INT_CONFIG(name, unused_key, unused_default, unused_doc) \
+ public: static int64_t Get##name(); \
+ private: static int64_t s_##name;
+#define STRING_CONFIG(name, unused_key, unused_doc) \
+ public: static GCConfigStringHolder Get##name();
+GC_CONFIGURATION_KEYS
+#undef BOOL_CONFIG
+#undef INT_CONFIG
+#undef STRING_CONFIG
+
+public:
+// Flags that may inhabit the number returned for the HeapVerifyLevel config option.
+// Keep this in sync with vm\eeconfig.h if this ever changes.
+enum HeapVerifyFlags {
+ HEAPVERIFY_NONE = 0,
+ HEAPVERIFY_GC = 1, // Verify the heap at beginning and end of GC
+ HEAPVERIFY_BARRIERCHECK = 2, // Verify the brick table
+ HEAPVERIFY_SYNCBLK = 4, // Verify sync block scanning
+
+ // the following options can be used to mitigate some of the overhead introduced
+ // by heap verification. some options might cause heap verification to be less
+ // effective depending on the scenario.
+
+ HEAPVERIFY_NO_RANGE_CHECKS = 0x10, // Excludes checking if an OBJECTREF is within the bounds of the managed heap
+ HEAPVERIFY_NO_MEM_FILL = 0x20, // Excludes filling unused segment portions with fill pattern
+ HEAPVERIFY_POST_GC_ONLY = 0x40, // Performs heap verification post-GCs only (instead of before and after each GC)
+ HEAPVERIFY_DEEP_ON_COMPACT = 0x80 // Performs deep object verification only on compacting GCs.
+};
+
+// Initializes the GCConfig subsystem. Must be called before accessing any
+// configuration information.
+static void Initialize();
+
+};
+
+#endif // __GCCONFIG_H__
diff --git a/src/gc/gcenv.ee.standalone.inl b/src/gc/gcenv.ee.standalone.inl
index f6954fc476..642d150976 100644
--- a/src/gc/gcenv.ee.standalone.inl
+++ b/src/gc/gcenv.ee.standalone.inl
@@ -5,12 +5,17 @@
#ifndef __GCTOENV_EE_STANDALONE_INL__
#define __GCTOENV_EE_STANDALONE_INL__
-#include "env/gcenv.ee.h"
+#include "gcinterface.h"
// The singular interface instance. All calls in GCToEEInterface
// will be fowarded to this interface instance.
extern IGCToCLR* g_theGCToCLR;
+namespace
+{
+
+#include "env/gcenv.ee.h"
+
// A note about this:
// In general, we don't want to pretend to be smarter than the compiler
// and force it to inline things. However, inlining is here is required
@@ -236,6 +241,33 @@ ALWAYS_INLINE MethodTable* GCToEEInterface::GetFreeObjectMethodTable()
assert(g_theGCToCLR != nullptr);
return g_theGCToCLR->GetFreeObjectMethodTable();
}
+
+ALWAYS_INLINE bool GCToEEInterface::GetBooleanConfigValue(const char* key, bool* value)
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->GetBooleanConfigValue(key, value);
+}
+
+ALWAYS_INLINE bool GCToEEInterface::GetIntConfigValue(const char* key, int64_t* value)
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->GetIntConfigValue(key, value);
+}
+
+ALWAYS_INLINE bool GCToEEInterface::GetStringConfigValue(const char* key, const char** value)
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->GetStringConfigValue(key, value);
+}
+
+ALWAYS_INLINE void GCToEEInterface::FreeStringConfigValue(const char* value)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->FreeStringConfigValue(value);
+}
+
#undef ALWAYS_INLINE
+} // anonymous namespace
+
#endif // __GCTOENV_EE_STANDALONE_INL__
diff --git a/src/gc/gchandletable.cpp b/src/gc/gchandletable.cpp
index 52fede6299..63f2f79711 100644
--- a/src/gc/gchandletable.cpp
+++ b/src/gc/gchandletable.cpp
@@ -56,6 +56,18 @@ OBJECTHANDLE GCHandleStore::CreateDependentHandle(Object* primary, Object* secon
return handle;
}
+void GCHandleStore::RelocateAsyncPinnedHandles(IGCHandleStore* pTarget)
+{
+ // assumption - the IGCHandleStore is an instance of GCHandleStore
+ GCHandleStore* other = static_cast<GCHandleStore*>(pTarget);
+ ::Ref_RelocateAsyncPinHandles(&_underlyingBucket, &other->_underlyingBucket);
+}
+
+bool GCHandleStore::EnumerateAsyncPinnedHandles(async_pin_enum_fn callback, void* context)
+{
+ return !!::Ref_HandleAsyncPinHandles(callback, context);
+}
+
GCHandleStore::~GCHandleStore()
{
::Ref_DestroyHandleTableBucket(&_underlyingBucket);
@@ -147,6 +159,16 @@ bool GCHandleManager::StoreObjectInHandleIfNull(OBJECTHANDLE handle, Object* obj
return !!::HndFirstAssignHandle(handle, ObjectToOBJECTREF(object));
}
+void GCHandleManager::SetDependentHandleSecondary(OBJECTHANDLE handle, Object* object)
+{
+ ::SetDependentHandleSecondary(handle, ObjectToOBJECTREF(object));
+}
+
+Object* GCHandleManager::GetDependentHandleSecondary(OBJECTHANDLE handle)
+{
+ return OBJECTREFToObject(::GetDependentHandleSecondary(handle));
+}
+
Object* GCHandleManager::InterlockedCompareExchangeObjectInHandle(OBJECTHANDLE handle, Object* object, Object* comparandObject)
{
return (Object*)::HndInterlockedCompareExchangeHandle(handle, ObjectToOBJECTREF(object), ObjectToOBJECTREF(comparandObject));
diff --git a/src/gc/gchandletableimpl.h b/src/gc/gchandletableimpl.h
index 01c1c130ed..4be346fb28 100644
--- a/src/gc/gchandletableimpl.h
+++ b/src/gc/gchandletableimpl.h
@@ -23,6 +23,10 @@ public:
virtual OBJECTHANDLE CreateDependentHandle(Object* primary, Object* secondary);
+ virtual void RelocateAsyncPinnedHandles(IGCHandleStore* pTarget);
+
+ virtual bool EnumerateAsyncPinnedHandles(async_pin_enum_fn callback, void* context);
+
virtual ~GCHandleStore();
HandleTableBucket _underlyingBucket;
@@ -59,6 +63,10 @@ public:
virtual bool StoreObjectInHandleIfNull(OBJECTHANDLE handle, Object* object);
+ virtual void SetDependentHandleSecondary(OBJECTHANDLE handle, Object* object);
+
+ virtual Object* GetDependentHandleSecondary(OBJECTHANDLE handle);
+
virtual Object* InterlockedCompareExchangeObjectInHandle(OBJECTHANDLE handle, Object* object, Object* comparandObject);
};
diff --git a/src/gc/gcinterface.ee.h b/src/gc/gcinterface.ee.h
index 7b868e780e..32dcc039ef 100644
--- a/src/gc/gcinterface.ee.h
+++ b/src/gc/gcinterface.ee.h
@@ -9,16 +9,16 @@
// of the execution engine. Everything that the GC does that requires the EE
// to be informed or that requires EE action must go through this interface.
//
-// When FEATURE_STANDALONE_GC is defined, this class is named IGCToCLR and is
+// When BUILD_AS_STANDALONE is defined, this class is named IGCToCLR and is
// an abstract class. The EE will provide a class that fulfills this interface,
-// and the GC will dispatch virtually on it to call into the EE. When FEATURE_STANDALONE_GC
+// and the GC will dispatch virtually on it to call into the EE. When BUILD_AS_STANDALONE
// is not defined, this class is named GCToEEInterface and the GC will dispatch statically on it.
class IGCToCLR {
public:
// Suspends the EE for the given reason.
virtual
void SuspendEE(SUSPEND_REASON reason) = 0;
-
+
// Resumes all paused threads, with a boolean indicating
// if the EE is being restarted because a GC is complete.
virtual
@@ -166,6 +166,22 @@ public:
// field to see how many bytes to skip before the next object on a heap segment begins.
virtual
MethodTable* GetFreeObjectMethodTable() = 0;
+
+ // Asks the EE for the value of a given configuration key. If the EE does not know or does not
+ // have a value for the requested config key, false is returned and the value of the passed-in
+ // pointer is undefined. Otherwise, true is returned and the config key's value is written to
+ // the passed-in pointer.
+ virtual
+ bool GetBooleanConfigValue(const char* key, bool* value) = 0;
+
+ virtual
+ bool GetIntConfigValue(const char* key, int64_t* value) = 0;
+
+ virtual
+ bool GetStringConfigValue(const char* key, const char** value) = 0;
+
+ virtual
+ void FreeStringConfigValue(const char* value) = 0;
};
#endif // _GCINTERFACE_EE_H_
diff --git a/src/gc/gcinterface.h b/src/gc/gcinterface.h
index 552a8caec8..aefa84b99b 100644
--- a/src/gc/gcinterface.h
+++ b/src/gc/gcinterface.h
@@ -171,16 +171,19 @@ class Object;
class IGCHeap;
class IGCHandleManager;
-// Initializes the garbage collector. Should only be called
-// once, during EE startup. Returns true if the initialization
-// was successful, false otherwise.
-bool InitializeGarbageCollector(IGCToCLR* clrToGC, IGCHeap** gcHeap, IGCHandleManager** gcHandleTable, GcDacVars* gcDacVars);
-
-// The runtime needs to know whether we're using workstation or server GC
-// long before the GCHeap is created. This function sets the type of
-// heap that will be created, before InitializeGarbageCollector is called
-// and the heap is actually recated.
-void InitializeHeapType(bool bServerHeap);
+// The function that initializes the garbage collector.
+// Should only be called once: here, during EE startup.
+// Returns true if the initialization was successful, false otherwise.
+typedef bool (*InitializeGarbageCollectorFunction)(
+ /* In */ IGCToCLR*,
+ /* Out */ IGCHeap**,
+ /* Out */ IGCHandleManager**,
+ /* Out */ GcDacVars*
+);
+
+// The name of the function that initializes the garbage collector,
+// to be used as an argument to GetProcAddress.
+#define INITIALIZE_GC_FUNCTION_NAME "InitializeGarbageCollector"
#ifdef WRITE_BARRIER_CHECK
//always defined, but should be 0 in Server GC
@@ -390,6 +393,7 @@ typedef void (* record_surv_fn)(uint8_t* begin, uint8_t* end, ptrdiff_t reloc, v
typedef void (* fq_walk_fn)(bool, void*);
typedef void (* fq_scan_fn)(Object** ppObject, ScanContext *pSC, uint32_t dwFlags);
typedef void (* handle_scan_fn)(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, bool isDependent);
+typedef bool (* async_pin_enum_fn)(Object* object, void* context);
// Opaque type for tracking object pointers
#ifndef DACCESS_COMPILE
@@ -417,6 +421,10 @@ public:
virtual OBJECTHANDLE CreateDependentHandle(Object* primary, Object* secondary) = 0;
+ virtual void RelocateAsyncPinnedHandles(IGCHandleStore* pTarget) = 0;
+
+ virtual bool EnumerateAsyncPinnedHandles(async_pin_enum_fn callback, void* context) = 0;
+
virtual ~IGCHandleStore() {};
};
@@ -449,6 +457,10 @@ public:
virtual bool StoreObjectInHandleIfNull(OBJECTHANDLE handle, Object* object) = 0;
+ virtual void SetDependentHandleSecondary(OBJECTHANDLE handle, Object* object) = 0;
+
+ virtual Object* GetDependentHandleSecondary(OBJECTHANDLE handle) = 0;
+
virtual Object* InterlockedCompareExchangeObjectInHandle(OBJECTHANDLE handle, Object* object, Object* comparandObject) = 0;
};
diff --git a/src/gc/gcpriv.h b/src/gc/gcpriv.h
index 9f098ebe3b..08fedbbde3 100644
--- a/src/gc/gcpriv.h
+++ b/src/gc/gcpriv.h
@@ -234,46 +234,6 @@ const int policy_compact = 1;
const int policy_expand = 2;
#ifdef TRACE_GC
-
-
-extern int print_level;
-extern BOOL trace_gc;
-extern int gc_trace_fac;
-
-
-class hlet
-{
- static hlet* bindings;
- int prev_val;
- int* pval;
- hlet* prev_let;
-public:
- hlet (int& place, int value)
- {
- prev_val = place;
- pval = &place;
- place = value;
- prev_let = bindings;
- bindings = this;
- }
- ~hlet ()
- {
- *pval = prev_val;
- bindings = prev_let;
- }
-};
-
-
-#define let(p,v) hlet __x = hlet (p, v);
-
-#else //TRACE_GC
-
-#define gc_count -1
-#define let(s,v)
-
-#endif //TRACE_GC
-
-#ifdef TRACE_GC
#define SEG_REUSE_LOG_0 7
#define SEG_REUSE_LOG_1 (SEG_REUSE_LOG_0 + 1)
#define DT_LOG_0 (SEG_REUSE_LOG_1 + 1)
@@ -299,15 +259,12 @@ void GCLog (const char *fmt, ... );
//#define dprintf(l,x) {if ((l==GTC_LOG) || (l <= 1)) {GCLog x;}}
//#define dprintf(l,x) {if (trace_gc && ((l <= print_level) || (l==GTC_LOG))) {GCLog x;}}
//#define dprintf(l,x) {if (l==GTC_LOG) {printf ("\n");printf x ; fflush(stdout);}}
-#else //SIMPLE_DPRINTF
+#else
-// The GCTrace output goes to stdout by default but can get sent to the stress log or the logfile if the
-// reg key GCTraceFacility is set. THe stress log can only take a format string and 4 numbers or
-// string literals.
-#define dprintf(l,x) {if (trace_gc && (l<=print_level)) { \
- if ( !gc_trace_fac) {printf ("\n");printf x ; fflush(stdout);} \
- else if ( gc_trace_fac == 2) {LogSpewAlways x;LogSpewAlways ("\n");} \
- else if ( gc_trace_fac == 1) {STRESS_LOG_VA(x);}}}
+// Nobody used the logging mechanism that used to be here. If we find ourselves
+// wanting to inspect GC logs on unmodified builds, we can use this define here
+// to do so.
+#define dprintf(l, x)
#endif //SIMPLE_DPRINTF
@@ -602,7 +559,7 @@ struct GCStatistics
: public StatisticsBase
{
// initialized to the contents of COMPlus_GcMixLog, or NULL, if not present
- static TCHAR* logFileName;
+ static char* logFileName;
static FILE* logFile;
// number of times we executed a background GC, a foreground GC, or a
diff --git a/src/gc/handletable.cpp b/src/gc/handletable.cpp
index 05137e4d68..da50483a88 100644
--- a/src/gc/handletable.cpp
+++ b/src/gc/handletable.cpp
@@ -1286,9 +1286,9 @@ uint32_t HndCountAllHandles(BOOL fUseLocks)
return uCount;
}
-#ifndef FEATURE_REDHAWK
-BOOL Ref_HandleAsyncPinHandles()
+BOOL Ref_HandleAsyncPinHandles(async_pin_enum_fn asyncPinCallback, void* context)
{
+#ifndef FEATURE_REDHAWK
CONTRACTL
{
NOTHROW;
@@ -1297,22 +1297,27 @@ BOOL Ref_HandleAsyncPinHandles()
}
CONTRACTL_END;
+ AsyncPinCallbackContext callbackCtx(asyncPinCallback, context);
HandleTableBucket *pBucket = g_HandleTableMap.pBuckets[0];
BOOL result = FALSE;
int limit = getNumberOfSlots();
for (int n = 0; n < limit; n ++ )
{
- if (TableHandleAsyncPinHandles(Table(pBucket->pTable[n])))
+ if (TableHandleAsyncPinHandles(Table(pBucket->pTable[n]), callbackCtx))
{
result = TRUE;
}
}
return result;
+#else
+ return true;
+#endif // !FEATURE_REDHAWK
}
void Ref_RelocateAsyncPinHandles(HandleTableBucket *pSource, HandleTableBucket *pTarget)
{
+#ifndef FEATURE_REDHAWK
CONTRACTL
{
NOTHROW;
@@ -1325,8 +1330,8 @@ void Ref_RelocateAsyncPinHandles(HandleTableBucket *pSource, HandleTableBucket
{
TableRelocateAsyncPinHandles(Table(pSource->pTable[n]), Table(pTarget->pTable[n]));
}
-}
#endif // !FEATURE_REDHAWK
+}
/*--------------------------------------------------------------------------*/
diff --git a/src/gc/handletablecache.cpp b/src/gc/handletablecache.cpp
index aaf3370bd6..498e688677 100644
--- a/src/gc/handletablecache.cpp
+++ b/src/gc/handletablecache.cpp
@@ -57,7 +57,7 @@ void SpinUntil(void *pCond, BOOL fNonZero)
#endif //_DEBUG
// on MP machines, allow ourselves some spin time before sleeping
- uint32_t uNonSleepSpins = 8 * (g_SystemInfo.dwNumberOfProcessors - 1);
+ static uint32_t uNonSleepSpins = 8 * (GCToOSInterface::GetCurrentProcessCpuCount() - 1);
// spin until the specificed condition is met
while ((*(uintptr_t *)pCond != 0) != (fNonZero != 0))
diff --git a/src/gc/handletablecore.cpp b/src/gc/handletablecore.cpp
index 228b8bfa09..4548237eda 100644
--- a/src/gc/handletablecore.cpp
+++ b/src/gc/handletablecore.cpp
@@ -516,14 +516,14 @@ BOOL SegmentInitialize(TableSegment *pSegment, HandleTable *pTable)
#ifndef FEATURE_REDHAWK // todo: implement SafeInt
// Prefast overflow sanity check the addition
- if (!ClrSafeInt<uint32_t>::addition(dwCommit, g_SystemInfo.dwPageSize, dwCommit))
+ if (!ClrSafeInt<uint32_t>::addition(dwCommit, OS_PAGE_SIZE, dwCommit))
{
return FALSE;
}
#endif // !FEATURE_REDHAWK
// Round down to the dwPageSize
- dwCommit &= ~(g_SystemInfo.dwPageSize - 1);
+ dwCommit &= ~(OS_PAGE_SIZE - 1);
// commit the header
if (!GCToOSInterface::VirtualCommit(pSegment, dwCommit))
@@ -908,7 +908,7 @@ void SegmentCompactAsyncPinHandles(TableSegment *pSegment, TableSegment **ppWork
// Mark AsyncPinHandles ready to be cleaned when the marker job is processed
-BOOL SegmentHandleAsyncPinHandles (TableSegment *pSegment)
+BOOL SegmentHandleAsyncPinHandles (TableSegment *pSegment, const AsyncPinCallbackContext &callbackCtx)
{
CONTRACTL
{
@@ -945,11 +945,10 @@ BOOL SegmentHandleAsyncPinHandles (TableSegment *pSegment)
_UNCHECKED_OBJECTREF value = *pValue;
if (!HndIsNullOrDestroyedHandle(value))
{
- _ASSERTE (value->GetMethodTable() == g_pOverlappedDataClass);
- OVERLAPPEDDATAREF overlapped = (OVERLAPPEDDATAREF)(ObjectToOBJECTREF((Object*)value));
- if (overlapped->GetAppDomainId() != DefaultADID && overlapped->HasCompleted())
+ // calls back into the VM using the callback given to
+ // Ref_HandleAsyncPinHandles
+ if (callbackCtx.Invoke((Object*)value))
{
- overlapped->HandleAsyncPinHandle();
result = TRUE;
}
}
@@ -1024,7 +1023,7 @@ bool SegmentRelocateAsyncPinHandles (TableSegment *pSegment, HandleTable *pTarge
// We will queue a marker Overlapped to io completion port. We use the marker
// to make sure that all iocompletion jobs before this marker have been processed.
// After that we can free the async pinned handles.
-BOOL TableHandleAsyncPinHandles(HandleTable *pTable)
+BOOL TableHandleAsyncPinHandles(HandleTable *pTable, const AsyncPinCallbackContext &callbackCtx)
{
CONTRACTL
{
@@ -1043,7 +1042,7 @@ BOOL TableHandleAsyncPinHandles(HandleTable *pTable)
while (pSegment)
{
- if (SegmentHandleAsyncPinHandles (pSegment))
+ if (SegmentHandleAsyncPinHandles (pSegment, callbackCtx))
{
result = TRUE;
}
@@ -1444,7 +1443,7 @@ uint32_t SegmentInsertBlockFromFreeListWorker(TableSegment *pSegment, uint32_t u
void * pvCommit = pSegment->rgValue + (uCommitLine * HANDLE_HANDLES_PER_BLOCK);
// we should commit one more page of handles
- uint32_t dwCommit = g_SystemInfo.dwPageSize;
+ uint32_t dwCommit = OS_PAGE_SIZE;
// commit the memory
if (!GCToOSInterface::VirtualCommit(pvCommit, dwCommit))
@@ -1809,7 +1808,7 @@ BOOL DoesSegmentNeedsToTrimExcessPages(TableSegment *pSegment)
if (uEmptyLine < uDecommitLine)
{
// derive some useful info about the page size
- uintptr_t dwPageRound = (uintptr_t)g_SystemInfo.dwPageSize - 1;
+ uintptr_t dwPageRound = (uintptr_t)OS_PAGE_SIZE - 1;
uintptr_t dwPageMask = ~dwPageRound;
// compute the address corresponding to the empty line
@@ -1853,7 +1852,7 @@ void SegmentTrimExcessPages(TableSegment *pSegment)
if (uEmptyLine < uDecommitLine)
{
// derive some useful info about the page size
- uintptr_t dwPageRound = (uintptr_t)g_SystemInfo.dwPageSize - 1;
+ uintptr_t dwPageRound = (uintptr_t)OS_PAGE_SIZE - 1;
uintptr_t dwPageMask = ~dwPageRound;
// compute the address corresponding to the empty line
@@ -1875,7 +1874,7 @@ void SegmentTrimExcessPages(TableSegment *pSegment)
pSegment->bCommitLine = (uint8_t)((dwLo - (size_t)pSegment->rgValue) / HANDLE_BYTES_PER_BLOCK);
// compute the address for the new decommit line
- size_t dwDecommitAddr = dwLo - g_SystemInfo.dwPageSize;
+ size_t dwDecommitAddr = dwLo - OS_PAGE_SIZE;
// assume a decommit line of zero until we know otheriwse
uDecommitLine = 0;
diff --git a/src/gc/handletablepriv.h b/src/gc/handletablepriv.h
index 59c08ca744..cda1cb08aa 100644
--- a/src/gc/handletablepriv.h
+++ b/src/gc/handletablepriv.h
@@ -341,6 +341,37 @@ struct HandleTypeCache
int32_t lFreeIndex;
};
+/*
+ * Async pin EE callback context, used to call back to the EE when enumerating
+ * over async pinned handles.
+ */
+class AsyncPinCallbackContext
+{
+private:
+ async_pin_enum_fn m_callback;
+ void* m_context;
+
+public:
+ /*
+ * Constructs a new AsyncPinCallbackContext from a callback and a context,
+ * which will be passed to the callback as its second parameter every time
+ * it is invoked.
+ */
+ AsyncPinCallbackContext(async_pin_enum_fn callback, void* context)
+ : m_callback(callback), m_context(context)
+ {}
+
+ /*
+ * Invokes the callback with the given argument, returning the callback's
+ * result.
+ */
+ bool Invoke(Object* argument) const
+ {
+ assert(m_callback != nullptr);
+ return m_callback(argument, m_context);
+ }
+};
+
/*---------------------------------------------------------------------------*/
@@ -759,7 +790,7 @@ void SegmentFree(TableSegment *pSegment);
* Mark ready for all non-pending OverlappedData that get moved to default domain.
*
*/
-BOOL TableHandleAsyncPinHandles(HandleTable *pTable);
+BOOL TableHandleAsyncPinHandles(HandleTable *pTable, const AsyncPinCallbackContext& callbackCtx);
/*
* TableRelocateAsyncPinHandles
diff --git a/src/gc/objecthandle.cpp b/src/gc/objecthandle.cpp
index 7df915fb72..838947dba1 100644
--- a/src/gc/objecthandle.cpp
+++ b/src/gc/objecthandle.cpp
@@ -563,10 +563,10 @@ int getNumberOfSlots()
return 1;
#ifdef FEATURE_REDHAWK
- return g_SystemInfo.dwNumberOfProcessors;
+ return GCToOSInterface::GetCurrentProcessCpuCount();
#else
return (CPUGroupInfo::CanEnableGCCPUGroups() ? CPUGroupInfo::GetNumActiveProcessors() :
- g_SystemInfo.dwNumberOfProcessors);
+ GCToOSInterface::GetCurrentProcessCpuCount());
#endif
}
diff --git a/src/gc/objecthandle.h b/src/gc/objecthandle.h
index b3e4b58a1c..6ae75b45e9 100644
--- a/src/gc/objecthandle.h
+++ b/src/gc/objecthandle.h
@@ -87,7 +87,7 @@ bool Ref_Initialize();
void Ref_Shutdown();
HandleTableBucket* Ref_CreateHandleTableBucket(void* context);
bool Ref_InitializeHandleTableBucket(HandleTableBucket* bucket, void* context);
-BOOL Ref_HandleAsyncPinHandles();
+BOOL Ref_HandleAsyncPinHandles(async_pin_enum_fn callback, void* context);
void Ref_RelocateAsyncPinHandles(HandleTableBucket *pSource, HandleTableBucket *pTarget);
void Ref_RemoveHandleTableBucket(HandleTableBucket *pBucket);
void Ref_DestroyHandleTableBucket(HandleTableBucket *pBucket);
diff --git a/src/gc/sample/CMakeLists.txt b/src/gc/sample/CMakeLists.txt
index 5fe7887963..42f097a6e3 100644
--- a/src/gc/sample/CMakeLists.txt
+++ b/src/gc/sample/CMakeLists.txt
@@ -8,6 +8,7 @@ include_directories(../env)
set(SOURCES
GCSample.cpp
gcenv.ee.cpp
+ ../gcconfig.cpp
../gccommon.cpp
../gceewks.cpp
../gchandletable.cpp
diff --git a/src/gc/sample/GCSample.cpp b/src/gc/sample/GCSample.cpp
index 0a771b7e91..43cb23878e 100644
--- a/src/gc/sample/GCSample.cpp
+++ b/src/gc/sample/GCSample.cpp
@@ -107,6 +107,8 @@ void WriteBarrier(Object ** dst, Object * ref)
ErectWriteBarrier(dst, ref);
}
+extern "C" bool InitializeGarbageCollector(IGCToCLR* clrToGC, IGCHeap** gcHeap, IGCHandleManager** gcHandleManager, GcDacVars* gcDacVars);
+
int __cdecl main(int argc, char* argv[])
{
//
diff --git a/src/gc/sample/gcenv.ee.cpp b/src/gc/sample/gcenv.ee.cpp
index fa6efbf2d6..03d960819a 100644
--- a/src/gc/sample/gcenv.ee.cpp
+++ b/src/gc/sample/gcenv.ee.cpp
@@ -286,64 +286,39 @@ bool GCToEEInterface::EagerFinalized(Object* obj)
return false;
}
-MethodTable* GCToEEInterface::GetFreeObjectMethodTable()
+bool GCToEEInterface::GetBooleanConfigValue(const char* key, bool* value)
{
- return g_pFreeObjectMethodTable;
+ return false;
}
-bool IsGCSpecialThread()
+bool GCToEEInterface::GetIntConfigValue(const char* key, int64_t* value)
{
- // TODO: Implement for background GC
return false;
}
-bool IsGCThread()
+bool GCToEEInterface::GetStringConfigValue(const char* key, const char** value)
{
return false;
}
-void SwitchToWriteWatchBarrier()
+void GCToEEInterface::FreeStringConfigValue(const char *value)
{
-}
-void SwitchToNonWriteWatchBarrier()
-{
}
-void LogSpewAlways(const char * /*fmt*/, ...)
+MethodTable* GCToEEInterface::GetFreeObjectMethodTable()
{
+ return g_pFreeObjectMethodTable;
}
-uint32_t CLRConfig::GetConfigValue(ConfigDWORDInfo eType)
+bool IsGCSpecialThread()
{
- switch (eType)
- {
- case UNSUPPORTED_BGCSpinCount:
- return 140;
-
- case UNSUPPORTED_BGCSpin:
- return 2;
-
- case UNSUPPORTED_GCLogEnabled:
- case UNSUPPORTED_GCLogFile:
- case UNSUPPORTED_GCLogFileSize:
- case EXTERNAL_GCStressStart:
- case INTERNAL_GCStressStartAtJit:
- case INTERNAL_DbgDACSkipVerifyDlls:
- return 0;
-
- case Config_COUNT:
- default:
-#ifdef _MSC_VER
-#pragma warning(suppress:4127) // Constant conditional expression in ASSERT below
-#endif
- ASSERT(!"Unknown config value type");
- return 0;
- }
+ // TODO: Implement for background GC
+ return false;
}
-HRESULT CLRConfig::GetConfigValue(ConfigStringInfo /*eType*/, TCHAR * * outVal)
+bool IsGCThread()
{
- *outVal = NULL;
- return 0;
+ return false;
}
+
diff --git a/src/gc/sample/gcenv.h b/src/gc/sample/gcenv.h
index 4505f1af30..14f60d8c6e 100644
--- a/src/gc/sample/gcenv.h
+++ b/src/gc/sample/gcenv.h
@@ -4,9 +4,9 @@
// The sample is to be kept simple, so building the sample
// in tandem with a standalone GC is currently not supported.
-#ifdef FEATURE_STANDALONE_GC
-#undef FEATURE_STANDALONE_GC
-#endif // FEATURE_STANDALONE_GC
+#ifdef BUILD_AS_STANDALONE
+#undef BUILD_AS_STANDALONE
+#endif // BUILD_AS_STANDALONE
#if defined(_DEBUG)
#ifndef _DEBUG_IMPL
diff --git a/src/gc/unix/gcenv.unix.cpp b/src/gc/unix/gcenv.unix.cpp
index bca0dfedf2..eafd141fd5 100644
--- a/src/gc/unix/gcenv.unix.cpp
+++ b/src/gc/unix/gcenv.unix.cpp
@@ -32,10 +32,6 @@ static_assert(sizeof(uint64_t) == 8, "unsigned long isn't 8 bytes");
#include "gcenv.base.h"
#include "gcenv.os.h"
-#ifndef FEATURE_STANDALONE_GC
- #error "A GC-private implementation of GCToOSInterface should only be used with FEATURE_STANDALONE_GC"
-#endif // FEATURE_STANDALONE_GC
-
#if HAVE_SYS_TIME_H
#include <sys/time.h>
#else
@@ -714,6 +710,18 @@ bool GCToOSInterface::CreateThread(GCThreadFunction function, void* param, GCThr
return (st == 0);
}
+// Gets the total number of processors on the machine, not taking
+// into account current process affinity.
+// Return:
+// Number of processors on the machine
+uint32_t GCToOSInterface::GetTotalProcessorCount()
+{
+ // Calculated in GCToOSInterface::Initialize using
+ // sysconf(_SC_NPROCESSORS_ONLN)
+ return g_logicalCpuCount;
+}
+
+
// Initialize the critical section
void CLRCriticalSection::Initialize()
{
diff --git a/src/gc/windows/gcenv.windows.cpp b/src/gc/windows/gcenv.windows.cpp
index 3749f06a68..c543b0413a 100644
--- a/src/gc/windows/gcenv.windows.cpp
+++ b/src/gc/windows/gcenv.windows.cpp
@@ -603,6 +603,15 @@ bool GCToOSInterface::CreateThread(GCThreadFunction function, void* param, GCThr
return true;
}
+// Gets the total number of processors on the machine, not taking
+// into account current process affinity.
+// Return:
+// Number of processors on the machine
+uint32_t GCToOSInterface::GetTotalProcessorCount()
+{
+ return g_SystemInfo.dwNumberOfProcessors;
+}
+
// Initialize the critical section
void CLRCriticalSection::Initialize()
{
diff --git a/src/gc/wks/CMakeLists.txt b/src/gc/wks/CMakeLists.txt
deleted file mode 100644
index fcb95a385e..0000000000
--- a/src/gc/wks/CMakeLists.txt
+++ /dev/null
@@ -1 +0,0 @@
-add_library_clr(gc_wks STATIC ${GC_SOURCES_WKS})