Diffstat (limited to 'src/gc')
-rw-r--r--  src/gc/CMakeLists.txt                 1
-rw-r--r--  src/gc/env/gcenv.base.h              13
-rw-r--r--  src/gc/env/gcenv.ee.h                 6
-rw-r--r--  src/gc/env/gcenv.structs.h            2
-rw-r--r--  src/gc/gc.cpp                      1520
-rw-r--r--  src/gc/gc.h                          90
-rw-r--r--  src/gc/gccommon.cpp                  73
-rw-r--r--  src/gc/gcee.cpp                      25
-rw-r--r--  src/gc/gceesvr.cpp                    2
-rw-r--r--  src/gc/gceewks.cpp                    1
-rw-r--r--  src/gc/gcenv.ee.standalone.inl       29
-rw-r--r--  src/gc/gchandletable.cpp            111
-rw-r--r--  src/gc/gchandletableimpl.h           48
-rw-r--r--  src/gc/gcimpl.h                      54
-rw-r--r--  src/gc/gcinterface.dac.h            156
-rw-r--r--  src/gc/gcinterface.dacvars.def       66
-rw-r--r--  src/gc/gcinterface.ee.h              33
-rw-r--r--  src/gc/gcinterface.h                320
-rw-r--r--  src/gc/gcpriv.h                     271
-rw-r--r--  src/gc/gcscan.cpp                    17
-rw-r--r--  src/gc/gcscan.h                      12
-rw-r--r--  src/gc/handletable.cpp               22
-rw-r--r--  src/gc/handletable.h                 23
-rw-r--r--  src/gc/objecthandle.cpp              91
-rw-r--r--  src/gc/objecthandle.h               537
-rw-r--r--  src/gc/sample/CMakeLists.txt          1
-rw-r--r--  src/gc/sample/GCSample.cpp           34
-rw-r--r--  src/gc/sample/gcenv.ee.cpp           32
-rw-r--r--  src/gc/unix/CMakeLists.txt            3
-rw-r--r--  src/gc/unix/cgroup.cpp              342
-rw-r--r--  src/gc/unix/gcenv.unix.cpp           25
-rw-r--r--  src/gc/windows/gcenv.windows.cpp      3
32 files changed, 2234 insertions, 1729 deletions
diff --git a/src/gc/CMakeLists.txt b/src/gc/CMakeLists.txt
index cba1aa9778..59c18ffd87 100644
--- a/src/gc/CMakeLists.txt
+++ b/src/gc/CMakeLists.txt
@@ -31,6 +31,7 @@ set( GC_SOURCES_DAC_AND_WKS_COMMON
set( GC_SOURCES_WKS
${GC_SOURCES_DAC_AND_WKS_COMMON}
+ gchandletable.cpp
gceesvr.cpp
gceewks.cpp
handletablecache.cpp)
diff --git a/src/gc/env/gcenv.base.h b/src/gc/env/gcenv.base.h
index 9fe583f9a6..a4befca09e 100644
--- a/src/gc/env/gcenv.base.h
+++ b/src/gc/env/gcenv.base.h
@@ -37,7 +37,7 @@
// Aliases for Win32 types
//
-typedef uint32_t BOOL;
+typedef int BOOL;
typedef uint32_t DWORD;
// -----------------------------------------------------------------------------------------------------------
@@ -65,6 +65,7 @@ inline HRESULT HRESULT_FROM_WIN32(unsigned long x)
#define E_UNEXPECTED 0x8000FFFF
#define E_NOTIMPL 0x80004001
#define E_INVALIDARG 0x80070057
+#define COR_E_EXECUTIONENGINE 0x80131506
#define NOERROR 0x0
#define ERROR_TIMEOUT 1460
@@ -328,16 +329,6 @@ typedef PTR_PTR_Object PTR_OBJECTREF;
typedef PTR_Object _UNCHECKED_OBJECTREF;
typedef PTR_PTR_Object PTR_UNCHECKED_OBJECTREF;
-#ifndef DACCESS_COMPILE
-struct OBJECTHANDLE__
-{
- void* unused;
-};
-typedef struct OBJECTHANDLE__* OBJECTHANDLE;
-#else
-typedef TADDR OBJECTHANDLE;
-#endif
-
// With no object reference wrapping the following macros are very simple.
#define ObjectToOBJECTREF(_obj) (OBJECTREF)(_obj)
#define OBJECTREFToObject(_obj) (Object*)(_obj)
diff --git a/src/gc/env/gcenv.ee.h b/src/gc/env/gcenv.ee.h
index 9f7f266a89..aa00d19780 100644
--- a/src/gc/env/gcenv.ee.h
+++ b/src/gc/env/gcenv.ee.h
@@ -68,6 +68,12 @@ public:
static void StompWriteBarrier(WriteBarrierParameters* args);
static void EnableFinalization(bool foundFinalizers);
+
+ static void HandleFatalError(unsigned int exitCode);
+ static bool ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj);
+ static bool ForceFullGCToBeBlocking();
+ static bool EagerFinalized(Object* obj);
+ static MethodTable* GetFreeObjectMethodTable();
};
#endif // __GCENV_EE_H__
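
The new statics extend the theme of this patch: gc.cpp stops reaching into EE internals and routes policy questions through GCToEEInterface instead. A minimal sketch of the call-site pattern, using the fatal-error case that appears in the enque_pinned_plug hunk further down (CORINFO_EXCEPTION_GC is the exception code gc.cpp already uses):

    // Before: a macro that only compiles inside the EE.
    //   EEPOLICY_HANDLE_FATAL_ERROR(CORINFO_EXCEPTION_GC);
    // After: an interface call a standalone GC build can satisfy.
    GCToEEInterface::HandleFatalError(CORINFO_EXCEPTION_GC);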
diff --git a/src/gc/env/gcenv.structs.h b/src/gc/env/gcenv.structs.h
index 5887dd7852..bb503e36e8 100644
--- a/src/gc/env/gcenv.structs.h
+++ b/src/gc/env/gcenv.structs.h
@@ -65,7 +65,7 @@ extern "C" uint32_t __stdcall GetCurrentThreadId();
class EEThreadId
{
- uint32_t m_uiId;
+ uint64_t m_uiId;
public:
bool IsCurrentThread()
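
A plausible reading of the widened field, sketched below: Unix thread ids come from pthread_self() and need not fit in 32 bits, so the stored id grows to uint64_t while a Windows GetCurrentThreadId() value still fits after widening. Only IsCurrentThread is visible in the hunk above; the setter and the casts are assumptions for illustration.

    class EEThreadId
    {
        uint64_t m_uiId;                 // was uint32_t; wide enough for pthread-style ids
    public:
        bool IsCurrentThread()
        {
            return m_uiId == (uint64_t)GetCurrentThreadId();
        }
        void SetToCurrentThread()        // assumed companion setter
        {
            m_uiId = (uint64_t)GetCurrentThreadId();
        }
    };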
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index 66c8b6afbc..ecc13e38fd 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -53,9 +53,13 @@ BOOL bgc_heap_walk_for_etw_p = FALSE;
#define LOH_PIN_QUEUE_LENGTH 100
#define LOH_PIN_DECAY 10
-// Right now we support maximum 256 procs - meaning that we will create at most
-// 256 GC threads and 256 GC heaps.
-#define MAX_SUPPORTED_CPUS 256
+#ifdef BIT64
+// Right now we support maximum 1024 procs - meaning that we will create at most
+// that many GC threads and GC heaps.
+#define MAX_SUPPORTED_CPUS 1024
+#else
+#define MAX_SUPPORTED_CPUS 64
+#endif // BIT64
#ifdef GC_CONFIG_DRIVEN
int compact_ratio = 0;
@@ -68,6 +72,24 @@ int compact_ratio = 0;
// See comments in reset_memory.
BOOL reset_mm_p = TRUE;
+bool g_fFinalizerRunOnShutDown = false;
+
+#ifdef FEATURE_SVR_GC
+bool g_built_with_svr_gc = true;
+#else
+bool g_built_with_svr_gc = false;
+#endif // FEATURE_SVR_GC
+
+#if defined(BUILDENV_DEBUG)
+uint8_t g_build_variant = 0;
+#elif defined(BUILDENV_CHECKED)
+uint8_t g_build_variant = 1;
+#else
+uint8_t g_build_variant = 2;
+#endif // defined(BUILDENV_DEBUG)
+
+VOLATILE(int32_t) g_no_gc_lock = -1;
+
#if defined (TRACE_GC) && !defined (DACCESS_COMPILE)
const char * const allocation_state_str[] = {
"start",
@@ -93,7 +115,6 @@ const char * const allocation_state_str[] = {
};
#endif //TRACE_GC && !DACCESS_COMPILE
-
// Keep this in sync with the definition of gc_reason
#if (defined(DT_LOG) || defined(TRACE_GC)) && !defined (DACCESS_COMPILE)
static const char* const str_gc_reasons[] =
@@ -150,6 +171,7 @@ size_t GetHighPrecisionTimeStamp()
}
#endif
+
#ifdef GC_STATS
// There is a current and a prior copy of the statistics. This allows us to display deltas per reporting
// interval, as well as running totals. The 'min' and 'max' values require special treatment. They are
@@ -190,8 +212,10 @@ void GCStatistics::AddGCStats(const gc_mechanisms& settings, size_t timeInMSec)
if (is_induced (settings.reason))
cntReasons[(int)reason_induced]++;
+#ifdef STRESS_HEAP
else if (settings.stress_induced)
cntReasons[(int)reason_gcstress]++;
+#endif // STRESS_HEAP
else
cntReasons[(int)settings.reason]++;
@@ -1458,7 +1482,11 @@ inline bool can_use_write_watch_for_gc_heap()
inline bool can_use_write_watch_for_card_table()
{
+#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+ return true;
+#else
return can_use_hardware_write_watch();
+#endif
}
#else
@@ -2161,18 +2189,6 @@ size_t logcount (size_t word)
return count;
}
-//n!=0
-int log2(unsigned int n)
-{
- int pos = 0;
- if (n >= 1<<16) { n >>= 16; pos += 16; }
- if (n >= 1<< 8) { n >>= 8; pos += 8; }
- if (n >= 1<< 4) { n >>= 4; pos += 4; }
- if (n >= 1<< 2) { n >>= 2; pos += 2; }
- if (n >= 1<< 1) { pos += 1; }
- return pos;
-}
-
#ifndef DACCESS_COMPILE
void stomp_write_barrier_resize(bool is_runtime_suspended, bool requires_upper_bounds_check)
@@ -2181,15 +2197,22 @@ void stomp_write_barrier_resize(bool is_runtime_suspended, bool requires_upper_b
args.operation = WriteBarrierOp::StompResize;
args.is_runtime_suspended = is_runtime_suspended;
args.requires_upper_bounds_check = requires_upper_bounds_check;
+
args.card_table = g_gc_card_table;
+#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+ args.card_bundle_table = g_gc_card_bundle_table;
+#endif
+
args.lowest_address = g_gc_lowest_address;
args.highest_address = g_gc_highest_address;
+
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
if (SoftwareWriteWatch::IsEnabledForGCHeap())
{
args.write_watch_table = g_gc_sw_ww_table;
}
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
+
GCToEEInterface::StompWriteBarrier(&args);
}
@@ -2210,6 +2233,11 @@ void stomp_write_barrier_initialize()
args.is_runtime_suspended = true;
args.requires_upper_bounds_check = false;
args.card_table = g_gc_card_table;
+
+#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+ args.card_bundle_table = g_gc_card_bundle_table;
+#endif
+
args.lowest_address = g_gc_lowest_address;
args.highest_address = g_gc_highest_address;
args.ephemeral_low = reinterpret_cast<uint8_t*>(1);
@@ -2257,8 +2285,8 @@ void virtual_free (void* add, size_t size);
/* per heap static initialization */
#ifdef MARK_ARRAY
#ifndef MULTIPLE_HEAPS
-SPTR_IMPL_NS(uint32_t, WKS, gc_heap, mark_array);
-#endif //!MULTIPLE_HEAPS
+uint32_t* gc_heap::mark_array;
+#endif //MULTIPLE_HEAPS
#endif //MARK_ARRAY
#ifdef MARK_LIST
@@ -2292,8 +2320,9 @@ CLREvent gc_heap::gc_start_event;
bool gc_heap::gc_thread_no_affinitize_p = false;
-SVAL_IMPL_NS(int, SVR, gc_heap, n_heaps);
-SPTR_IMPL_NS(PTR_gc_heap, SVR, gc_heap, g_heaps);
+int gc_heap::n_heaps;
+
+gc_heap** gc_heap::g_heaps;
size_t* gc_heap::g_promoted;
@@ -2384,11 +2413,7 @@ size_t gc_heap::ephemeral_fgc_counts[max_generation];
BOOL gc_heap::alloc_wait_event_p = FALSE;
-#if defined (DACCESS_COMPILE) && !defined (MULTIPLE_HEAPS)
-SVAL_IMPL_NS_INIT(gc_heap::c_gc_state, WKS, gc_heap, current_c_gc_state, c_gc_state_free);
-#else
-VOLATILE(gc_heap::c_gc_state) gc_heap::current_c_gc_state = c_gc_state_free;
-#endif //DACCESS_COMPILE && !MULTIPLE_HEAPS
+VOLATILE(c_gc_state) gc_heap::current_c_gc_state = c_gc_state_free;
#endif //BACKGROUND_GC
@@ -2409,14 +2434,14 @@ BOOL gc_heap::elevation_requested = FALSE;
BOOL gc_heap::last_gc_before_oom = FALSE;
#ifdef BACKGROUND_GC
-SPTR_IMPL_NS_INIT(uint8_t, WKS, gc_heap, background_saved_lowest_address, 0);
-SPTR_IMPL_NS_INIT(uint8_t, WKS, gc_heap, background_saved_highest_address, 0);
-SPTR_IMPL_NS_INIT(uint8_t, WKS, gc_heap, next_sweep_obj, 0);
+uint8_t* gc_heap::background_saved_lowest_address = 0;
+uint8_t* gc_heap::background_saved_highest_address = 0;
+uint8_t* gc_heap::next_sweep_obj = 0;
uint8_t* gc_heap::current_sweep_pos = 0;
exclusive_sync* gc_heap::bgc_alloc_lock;
#endif //BACKGROUND_GC
-SVAL_IMPL_NS(oom_history, WKS, gc_heap, oom_info);
+oom_history gc_heap::oom_info;
fgm_history gc_heap::fgm_result;
@@ -2467,7 +2492,7 @@ size_t gc_heap::allocation_running_time;
size_t gc_heap::allocation_running_amount;
-SPTR_IMPL_NS_INIT(heap_segment, WKS, gc_heap, ephemeral_heap_segment, 0);
+heap_segment* gc_heap::ephemeral_heap_segment = 0;
BOOL gc_heap::blocking_collection = FALSE;
@@ -2542,8 +2567,9 @@ uint8_t* gc_heap::background_min_soh_overflow_address =0;
uint8_t* gc_heap::background_max_soh_overflow_address =0;
-SPTR_IMPL_NS_INIT(heap_segment, WKS, gc_heap, saved_sweep_ephemeral_seg, 0);
-SPTR_IMPL_NS_INIT(uint8_t, WKS, gc_heap, saved_sweep_ephemeral_start, 0);
+heap_segment* gc_heap::saved_sweep_ephemeral_seg = 0;
+
+uint8_t* gc_heap::saved_sweep_ephemeral_start = 0;
heap_segment* gc_heap::saved_overflow_ephemeral_seg = 0;
@@ -2619,9 +2645,11 @@ size_t gc_heap::total_ephemeral_size = 0;
size_t gc_heap::internal_root_array_length = initial_internal_roots;
-SPTR_IMPL_NS_INIT(PTR_uint8_t, WKS, gc_heap, internal_root_array, 0);
-SVAL_IMPL_NS_INIT(size_t, WKS, gc_heap, internal_root_array_index, 0);
-SVAL_IMPL_NS_INIT(BOOL, WKS, gc_heap, heap_analyze_success, TRUE);
+uint8_t** gc_heap::internal_root_array = 0;
+
+size_t gc_heap::internal_root_array_index = 0;
+
+BOOL gc_heap::heap_analyze_success = TRUE;
uint8_t* gc_heap::current_obj = 0;
size_t gc_heap::current_obj_size = 0;
@@ -2681,24 +2709,6 @@ BOOL gc_heap::heap_analyze_enabled = FALSE;
#ifndef MULTIPLE_HEAPS
-#ifndef DACCESS_COMPILE
-extern "C" {
-#endif //!DACCESS_COMPILE
-GARY_IMPL(generation, generation_table,NUMBERGENERATIONS+1);
-#ifdef GC_CONFIG_DRIVEN
-GARY_IMPL(size_t, interesting_data_per_heap, max_idp_count);
-GARY_IMPL(size_t, compact_reasons_per_heap, max_compact_reasons_count);
-GARY_IMPL(size_t, expand_mechanisms_per_heap, max_expand_mechanisms_count);
-GARY_IMPL(size_t, interesting_mechanism_bits_per_heap, max_gc_mechanism_bits_count);
-#endif //GC_CONFIG_DRIVEN
-#ifndef DACCESS_COMPILE
-}
-#endif //!DACCESS_COMPILE
-
-#endif //MULTIPLE_HEAPS
-
-#ifndef MULTIPLE_HEAPS
-
alloc_list gc_heap::loh_alloc_list [NUM_LOH_ALIST-1];
alloc_list gc_heap::gen2_alloc_list[NUM_GEN2_ALIST-1];
@@ -2706,7 +2716,7 @@ dynamic_data gc_heap::dynamic_data_table [NUMBERGENERATIONS+1];
gc_history_per_heap gc_heap::gc_data_per_heap;
size_t gc_heap::maxgen_pinned_compact_before_advance = 0;
-SPTR_IMPL_NS_INIT(uint8_t, WKS, gc_heap, alloc_allocated, 0);
+uint8_t* gc_heap::alloc_allocated = 0;
size_t gc_heap::allocation_quantum = CLR_SIZE;
@@ -2724,6 +2734,7 @@ unsigned int gc_heap::num_low_msl_acquire = 0;
size_t gc_heap::alloc_contexts_used = 0;
size_t gc_heap::soh_allocation_no_gc = 0;
size_t gc_heap::loh_allocation_no_gc = 0;
+bool gc_heap::no_gc_oom_p = false;
heap_segment* gc_heap::saved_loh_segment_no_gc = 0;
#endif //MULTIPLE_HEAPS
@@ -2737,9 +2748,19 @@ int gc_heap::gen0_must_clear_bricks = 0;
#endif //FFIND_OBJECT
#ifdef FEATURE_PREMORTEM_FINALIZATION
-SPTR_IMPL_NS_INIT(CFinalize, WKS, gc_heap, finalize_queue, 0);
+CFinalize* gc_heap::finalize_queue = 0;
#endif // FEATURE_PREMORTEM_FINALIZATION
+generation gc_heap::generation_table [NUMBERGENERATIONS + 1];
+
+size_t gc_heap::interesting_data_per_heap[max_idp_count];
+
+size_t gc_heap::compact_reasons_per_heap[max_compact_reasons_count];
+
+size_t gc_heap::expand_mechanisms_per_heap[max_expand_mechanisms_count];
+
+size_t gc_heap::interesting_mechanism_bits_per_heap[max_gc_mechanism_bits_count];
+
#endif // MULTIPLE_HEAPS
/* end of per heap static initialization */
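
All of the SPTR_IMPL/SVAL_IMPL/GARY_IMPL removals above share one theme: the per-heap statics become ordinary C++ definitions, and the debugger (DAC) instead learns about them through the new gcinterface.dac.h and gcinterface.dacvars.def files in the diffstat. Roughly, and with illustrative member names rather than the exact definition:

    // Sketch: instead of macro-generated DAC-visible symbols, the GC exposes a
    // plain struct of pointers to its interesting statics, which the DAC reads
    // out of process. The real member list lives in gcinterface.dacvars.def.
    struct GcDacVars
    {
        int*       n_heaps;                 // illustrative member
        void**     g_heaps;                 // illustrative member
        uint8_t**  ephemeral_heap_segment;  // illustrative member
    };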
@@ -3697,8 +3718,6 @@ heap_segment* seg_mapping_table_segment_of (uint8_t* o)
if (seg)
{
- // Can't assert this when it's callled by everyone (it's true when it's called by mark cards).
- //assert (in_range_for_segment (o, seg));
if (in_range_for_segment (o, seg))
{
dprintf (2, ("obj %Ix belongs to segment %Ix(-%Ix)", o, (uint8_t*)heap_segment_mem(seg), (uint8_t*)heap_segment_reserved(seg)));
@@ -3735,7 +3754,6 @@ heap_segment* seg_mapping_table_segment_of (uint8_t* o)
#endif //SEG_MAPPING_TABLE
size_t gcard_of ( uint8_t*);
-void gset_card (size_t card);
#define memref(i) *(uint8_t**)(i)
@@ -3873,10 +3891,10 @@ public:
{
assert (size >= free_object_base_size);
- assert (g_pFreeObjectMethodTable->GetBaseSize() == free_object_base_size);
- assert (g_pFreeObjectMethodTable->RawGetComponentSize() == 1);
+ assert (g_gc_pFreeObjectMethodTable->GetBaseSize() == free_object_base_size);
+ assert (g_gc_pFreeObjectMethodTable->RawGetComponentSize() == 1);
- RawSetMethodTable( g_pFreeObjectMethodTable );
+ RawSetMethodTable( g_gc_pFreeObjectMethodTable );
size_t* numComponentsPtr = (size_t*) &((uint8_t*) this)[ArrayBase::GetOffsetOfNumComponents()];
*numComponentsPtr = size - free_object_base_size;
@@ -3901,7 +3919,7 @@ public:
BOOL IsFree () const
{
- return (GetMethodTable() == g_pFreeObjectMethodTable);
+ return (GetMethodTable() == g_gc_pFreeObjectMethodTable);
}
#ifdef FEATURE_STRUCTALIGN
@@ -4897,12 +4915,12 @@ class heap_select
static unsigned n_sniff_buffers;
static unsigned cur_sniff_index;
- static uint8_t proc_no_to_heap_no[MAX_SUPPORTED_CPUS];
- static uint8_t heap_no_to_proc_no[MAX_SUPPORTED_CPUS];
- static uint8_t heap_no_to_numa_node[MAX_SUPPORTED_CPUS];
- static uint8_t heap_no_to_cpu_group[MAX_SUPPORTED_CPUS];
- static uint8_t heap_no_to_group_proc[MAX_SUPPORTED_CPUS];
- static uint8_t numa_node_to_heap_map[MAX_SUPPORTED_CPUS+4];
+ static uint16_t proc_no_to_heap_no[MAX_SUPPORTED_CPUS];
+ static uint16_t heap_no_to_proc_no[MAX_SUPPORTED_CPUS];
+ static uint16_t heap_no_to_numa_node[MAX_SUPPORTED_CPUS];
+ static uint16_t heap_no_to_cpu_group[MAX_SUPPORTED_CPUS];
+ static uint16_t heap_no_to_group_proc[MAX_SUPPORTED_CPUS];
+ static uint16_t numa_node_to_heap_map[MAX_SUPPORTED_CPUS+4];
static int access_time(uint8_t *sniff_buffer, int heap_number, unsigned sniff_index, unsigned n_sniff_buffers)
{
@@ -4944,7 +4962,7 @@ public:
//can not enable gc numa aware, force all heaps to be in
//one numa node by filling the array with all 0s
if (!NumaNodeInfo::CanEnableGCNumaAware())
- memset(heap_no_to_numa_node, 0, MAX_SUPPORTED_CPUS);
+ memset(heap_no_to_numa_node, 0, sizeof (heap_no_to_numa_node));
return TRUE;
}
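
The memset fix matters because the element type just changed from uint8_t to uint16_t: a byte count equal to the element count now clears only half the array. A minimal illustration (1024 is the new 64-bit MAX_SUPPORTED_CPUS):

    uint16_t heap_no_to_numa_node[1024];
    memset(heap_no_to_numa_node, 0, 1024);                          // bug: clears only 512 entries
    memset(heap_no_to_numa_node, 0, sizeof(heap_no_to_numa_node));  // clears all 1024 entries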
@@ -4954,10 +4972,10 @@ public:
if (GCToOSInterface::CanGetCurrentProcessorNumber())
{
uint32_t proc_no = GCToOSInterface::GetCurrentProcessorNumber() % gc_heap::n_heaps;
- // We can safely cast heap_number to a BYTE 'cause GetCurrentProcessCpuCount
+ // We can safely cast heap_number to a uint16_t 'cause GetCurrentProcessCpuCount
// only returns up to MAX_SUPPORTED_CPUS procs right now. We only ever create at most
// MAX_SUPPORTED_CPUS GC threads.
- proc_no_to_heap_no[proc_no] = (uint8_t)heap_number;
+ proc_no_to_heap_no[proc_no] = (uint16_t)heap_number;
}
}
@@ -5020,42 +5038,42 @@ public:
return GCToOSInterface::CanGetCurrentProcessorNumber();
}
- static uint8_t find_proc_no_from_heap_no(int heap_number)
+ static uint16_t find_proc_no_from_heap_no(int heap_number)
{
return heap_no_to_proc_no[heap_number];
}
- static void set_proc_no_for_heap(int heap_number, uint8_t proc_no)
+ static void set_proc_no_for_heap(int heap_number, uint16_t proc_no)
{
heap_no_to_proc_no[heap_number] = proc_no;
}
- static uint8_t find_numa_node_from_heap_no(int heap_number)
+ static uint16_t find_numa_node_from_heap_no(int heap_number)
{
return heap_no_to_numa_node[heap_number];
}
- static void set_numa_node_for_heap(int heap_number, uint8_t numa_node)
+ static void set_numa_node_for_heap(int heap_number, uint16_t numa_node)
{
heap_no_to_numa_node[heap_number] = numa_node;
}
- static uint8_t find_cpu_group_from_heap_no(int heap_number)
+ static uint16_t find_cpu_group_from_heap_no(int heap_number)
{
return heap_no_to_cpu_group[heap_number];
}
- static void set_cpu_group_for_heap(int heap_number, uint8_t group_number)
+ static void set_cpu_group_for_heap(int heap_number, uint16_t group_number)
{
heap_no_to_cpu_group[heap_number] = group_number;
}
- static uint8_t find_group_proc_from_heap_no(int heap_number)
+ static uint16_t find_group_proc_from_heap_no(int heap_number)
{
return heap_no_to_group_proc[heap_number];
}
- static void set_group_proc_for_heap(int heap_number, uint8_t group_proc)
+ static void set_group_proc_for_heap(int heap_number, uint16_t group_proc)
{
heap_no_to_group_proc[heap_number] = group_proc;
}
@@ -5070,15 +5088,15 @@ public:
for (int i=1; i < nheaps; i++)
{
if (heap_no_to_numa_node[i] != heap_no_to_numa_node[i-1])
- numa_node_to_heap_map[node_index++] = (uint8_t)i;
+ numa_node_to_heap_map[node_index++] = (uint16_t)i;
}
- numa_node_to_heap_map[node_index] = (uint8_t)nheaps; //mark the end with nheaps
+ numa_node_to_heap_map[node_index] = (uint16_t)nheaps; //mark the end with nheaps
}
static void get_heap_range_for_heap(int hn, int* start, int* end)
{ // 1-tier/no numa case: heap_no_to_numa_node[] all zeros,
// and treated as in one node. thus: start=0, end=n_heaps
- uint8_t numa_node = heap_no_to_numa_node[hn];
+ uint16_t numa_node = heap_no_to_numa_node[hn];
*start = (int)numa_node_to_heap_map[numa_node];
*end = (int)(numa_node_to_heap_map[numa_node+1]);
}
@@ -5086,12 +5104,12 @@ public:
uint8_t* heap_select::sniff_buffer;
unsigned heap_select::n_sniff_buffers;
unsigned heap_select::cur_sniff_index;
-uint8_t heap_select::proc_no_to_heap_no[MAX_SUPPORTED_CPUS];
-uint8_t heap_select::heap_no_to_proc_no[MAX_SUPPORTED_CPUS];
-uint8_t heap_select::heap_no_to_numa_node[MAX_SUPPORTED_CPUS];
-uint8_t heap_select::heap_no_to_cpu_group[MAX_SUPPORTED_CPUS];
-uint8_t heap_select::heap_no_to_group_proc[MAX_SUPPORTED_CPUS];
-uint8_t heap_select::numa_node_to_heap_map[MAX_SUPPORTED_CPUS+4];
+uint16_t heap_select::proc_no_to_heap_no[MAX_SUPPORTED_CPUS];
+uint16_t heap_select::heap_no_to_proc_no[MAX_SUPPORTED_CPUS];
+uint16_t heap_select::heap_no_to_numa_node[MAX_SUPPORTED_CPUS];
+uint16_t heap_select::heap_no_to_cpu_group[MAX_SUPPORTED_CPUS];
+uint16_t heap_select::heap_no_to_group_proc[MAX_SUPPORTED_CPUS];
+uint16_t heap_select::numa_node_to_heap_map[MAX_SUPPORTED_CPUS+4];
BOOL gc_heap::create_thread_support (unsigned number_of_heaps)
{
@@ -5150,8 +5168,8 @@ void set_thread_group_affinity_for_heap(int heap_number, GCThreadAffinity* affin
dprintf(3, ("using processor group %d, mask %Ix for heap %d\n", gn, mask, heap_number));
affinity->Processor = gpn;
affinity->Group = gn;
- heap_select::set_cpu_group_for_heap(heap_number, (uint8_t)gn);
- heap_select::set_group_proc_for_heap(heap_number, (uint8_t)gpn);
+ heap_select::set_cpu_group_for_heap(heap_number, gn);
+ heap_select::set_group_proc_for_heap(heap_number, gpn);
if (NumaNodeInfo::CanEnableGCNumaAware())
{
PROCESSOR_NUMBER proc_no;
@@ -5161,11 +5179,11 @@ void set_thread_group_affinity_for_heap(int heap_number, GCThreadAffinity* affin
uint16_t node_no = 0;
if (NumaNodeInfo::GetNumaProcessorNodeEx(&proc_no, &node_no))
- heap_select::set_numa_node_for_heap(heap_number, (uint8_t)node_no);
+ heap_select::set_numa_node_for_heap(heap_number, node_no);
}
else
{ // no numa setting, each cpu group is treated as a node
- heap_select::set_numa_node_for_heap(heap_number, (uint8_t)gn);
+ heap_select::set_numa_node_for_heap(heap_number, gn);
}
return;
}
@@ -5202,7 +5220,7 @@ void set_thread_affinity_mask_for_heap(int heap_number, GCThreadAffinity* affini
proc_no.Reserved = 0;
if (NumaNodeInfo::GetNumaProcessorNodeEx(&proc_no, &node_no))
{
- heap_select::set_numa_node_for_heap(heap_number, (uint8_t)node_no);
+ heap_select::set_numa_node_for_heap(heap_number, node_no);
}
}
return;
@@ -5226,9 +5244,9 @@ bool gc_heap::create_gc_thread ()
#if !defined(FEATURE_PAL)
if (!gc_thread_no_affinitize_p)
{
- //We are about to set affinity for GC threads, it is a good place to setup NUMA and
- //CPU groups, because the process mask, processor number, group number are all
- //readyly available.
+ // We are about to set affinity for GC threads. It is a good place to set up NUMA and
+ // CPU groups because the process mask, processor number, and group number are all
+ // readily available.
if (CPUGroupInfo::CanEnableGCCPUGroups())
set_thread_group_affinity_for_heap(heap_number, &affinity);
else
@@ -5342,7 +5360,9 @@ bool virtual_alloc_commit_for_heap(void* addr, size_t size, int h_number)
#if defined(MULTIPLE_HEAPS) && !defined(FEATURE_REDHAWK) && !defined(FEATURE_PAL)
// Currently there is no way for us to specify the numa node to allocate on via hosting interfaces to
// a host. This will need to be added later.
+#if !defined(FEATURE_CORECLR)
if (!CLRMemoryHosted())
+#endif
{
if (NumaNodeInfo::CanEnableGCNumaAware())
{
@@ -5569,7 +5589,7 @@ public:
// We should think about whether it's really necessary to have to copy back the pre plug
// info since it was already copied during compacting plugs. But if a plug doesn't move
- // by < 3 ptr size, it means we'd have to recover pre plug info.
+ // by >= 3 ptr size (the size of gap_reloc_pair), it means we'd have to recover pre plug info.
void recover_plug_info()
{
if (saved_pre_p)
@@ -5714,18 +5734,13 @@ void gc_mechanisms::record (gc_history_global* history)
//as opposed to concurrent heap verification
void gc_heap::fix_youngest_allocation_area (BOOL for_gc_p)
{
- assert (alloc_allocated);
- alloc_context* acontext = generation_alloc_context (youngest_generation);
- dprintf (3, ("generation 0 alloc context: ptr: %Ix, limit %Ix",
- (size_t)acontext->alloc_ptr, (size_t)acontext->alloc_limit));
- fix_allocation_context (acontext, for_gc_p, get_alignment_constant (TRUE));
- if (for_gc_p)
- {
- acontext->alloc_ptr = alloc_allocated;
- acontext->alloc_limit = acontext->alloc_ptr;
- }
- heap_segment_allocated (ephemeral_heap_segment) =
- alloc_allocated;
+ UNREFERENCED_PARAMETER(for_gc_p);
+
+ // The gen 0 alloc context is never used for allocation in the allocator path. It's
+ // still used in the allocation path during GCs.
+ assert (generation_allocation_pointer (youngest_generation) == nullptr);
+ assert (generation_allocation_limit (youngest_generation) == nullptr);
+ heap_segment_allocated (ephemeral_heap_segment) = alloc_allocated;
}
void gc_heap::fix_large_allocation_area (BOOL for_gc_p)
@@ -5791,9 +5806,10 @@ void gc_heap::fix_allocation_context (alloc_context* acontext, BOOL for_gc_p,
alloc_contexts_used ++;
}
-
if (for_gc_p)
{
+ // We need to update the alloc_bytes to reflect the portion that we have not used
+ acontext->alloc_bytes -= (acontext->alloc_limit - acontext->alloc_ptr);
acontext->alloc_ptr = 0;
acontext->alloc_limit = acontext->alloc_ptr;
}
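
The new subtraction keeps per-context allocation statistics honest across a GC. A worked example with assumed numbers: if adjust_limit_clr handed a thread an 8192-byte quantum (alloc_bytes += 8192) but the thread had only advanced alloc_ptr by 6000 bytes when the GC fixed the context, then roughly alloc_limit - alloc_ptr = 2192 bytes remain unused (ignoring the min-object slack the allocator keeps at the end) and are subtracted here, so alloc_bytes reflects what was actually allocated.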
@@ -5830,12 +5846,6 @@ void void_allocation (gc_alloc_context* acontext, void*)
void gc_heap::repair_allocation_contexts (BOOL repair_p)
{
GCToEEInterface::GcEnumAllocContexts (repair_p ? repair_allocation : void_allocation, NULL);
-
- alloc_context* acontext = generation_alloc_context (youngest_generation);
- if (repair_p)
- repair_allocation (acontext, NULL);
- else
- void_allocation (acontext, NULL);
}
struct fix_alloc_context_args
@@ -5847,7 +5857,7 @@ struct fix_alloc_context_args
void fix_alloc_context(gc_alloc_context* acontext, void* param)
{
fix_alloc_context_args* args = (fix_alloc_context_args*)param;
- g_theGCHeap->FixAllocContext(acontext, FALSE, (void*)(size_t)(args->for_gc_p), args->heap);
+ g_theGCHeap->FixAllocContext(acontext, false, (void*)(size_t)(args->for_gc_p), args->heap);
}
void gc_heap::fix_allocation_contexts(BOOL for_gc_p)
@@ -5855,8 +5865,8 @@ void gc_heap::fix_allocation_contexts(BOOL for_gc_p)
fix_alloc_context_args args;
args.for_gc_p = for_gc_p;
args.heap = __this;
- GCToEEInterface::GcEnumAllocContexts(fix_alloc_context, &args);
+ GCToEEInterface::GcEnumAllocContexts(fix_alloc_context, &args);
fix_youngest_allocation_area(for_gc_p);
fix_large_allocation_area(for_gc_p);
}
@@ -6289,6 +6299,150 @@ void gc_heap::make_c_mark_list (uint8_t** arr)
}
#endif //BACKGROUND_GC
+
+#ifdef CARD_BUNDLE
+
+// The card bundle keeps track of groups of card words.
+static const size_t card_bundle_word_width = 32;
+
+// Number of card words per card bundle bit, sized so that one 32-bit card bundle
+// word covers exactly one OS page of the card table.
+static const size_t card_bundle_size = (size_t)(OS_PAGE_SIZE / (sizeof(uint32_t)*card_bundle_word_width));
+
+inline
+size_t card_bundle_word (size_t cardb)
+{
+ return cardb / card_bundle_word_width;
+}
+
+inline
+uint32_t card_bundle_bit (size_t cardb)
+{
+ return (uint32_t)(cardb % card_bundle_word_width);
+}
+
+size_t align_cardw_on_bundle (size_t cardw)
+{
+ return ((size_t)(cardw + card_bundle_size - 1) & ~(card_bundle_size - 1 ));
+}
+
+// Get the card bundle representing a card word
+size_t cardw_card_bundle (size_t cardw)
+{
+ return cardw / card_bundle_size;
+}
+
+// Get the first card word in a card bundle
+size_t card_bundle_cardw (size_t cardb)
+{
+ return cardb * card_bundle_size;
+}
+
+// Clear the specified card bundle
+void gc_heap::card_bundle_clear (size_t cardb)
+{
+ card_bundle_table [card_bundle_word (cardb)] &= ~(1 << card_bundle_bit (cardb));
+ dprintf (1,("Cleared card bundle %Ix [%Ix, %Ix[", cardb, (size_t)card_bundle_cardw (cardb),
+ (size_t)card_bundle_cardw (cardb+1)));
+}
+
+void gc_heap::card_bundle_set (size_t cardb)
+{
+ if (!card_bundle_set_p (cardb))
+ {
+ card_bundle_table [card_bundle_word (cardb)] |= (1 << card_bundle_bit (cardb));
+ }
+}
+
+// Set the card bundle bits between start_cardb and end_cardb
+void gc_heap::card_bundles_set (size_t start_cardb, size_t end_cardb)
+{
+ if (start_cardb == end_cardb)
+ {
+ card_bundle_set(start_cardb);
+ return;
+ }
+
+ size_t start_word = card_bundle_word (start_cardb);
+ size_t end_word = card_bundle_word (end_cardb);
+
+ if (start_word < end_word)
+ {
+ // Set the partial words
+ card_bundle_table [start_word] |= highbits (~0u, card_bundle_bit (start_cardb));
+
+ if (card_bundle_bit (end_cardb))
+ card_bundle_table [end_word] |= lowbits (~0u, card_bundle_bit (end_cardb));
+
+ // Set the full words
+ for (size_t i = start_word + 1; i < end_word; i++)
+ card_bundle_table [i] = ~0u;
+ }
+ else
+ {
+ card_bundle_table [start_word] |= (highbits (~0u, card_bundle_bit (start_cardb)) &
+ lowbits (~0u, card_bundle_bit (end_cardb)));
+ }
+}
+
+// Indicates whether the specified bundle is set.
+BOOL gc_heap::card_bundle_set_p (size_t cardb)
+{
+ return (card_bundle_table[card_bundle_word(cardb)] & (1 << card_bundle_bit (cardb)));
+}
+
+// Returns the size (in bytes) of a card bundle representing the region from 'from' to 'end'
+size_t size_card_bundle_of (uint8_t* from, uint8_t* end)
+{
+ // Number of heap bytes represented by a card bundle word
+ size_t cbw_span = card_size * card_word_width * card_bundle_size * card_bundle_word_width;
+
+ // Align the start of the region down
+ from = (uint8_t*)((size_t)from & ~(cbw_span - 1));
+
+ // Align the end of the region up
+ end = (uint8_t*)((size_t)(end + (cbw_span - 1)) & ~(cbw_span - 1));
+
+ // Make sure they're really aligned
+ assert (((size_t)from & (cbw_span - 1)) == 0);
+ assert (((size_t)end & (cbw_span - 1)) == 0);
+
+ return ((end - from) / cbw_span) * sizeof (uint32_t);
+}
+
+// Takes a pointer to a card bundle table and an address, and returns a pointer that represents
+// where a theoretical card bundle table that represents every address (starting from 0) would
+// start if the bundle word representing the address were to be located at the pointer passed in.
+// The returned 'translated' pointer makes it convenient/fast to calculate where the card bundle
+// for a given address is using a simple shift operation on the address.
+uint32_t* translate_card_bundle_table (uint32_t* cb, uint8_t* lowest_address)
+{
+ // The number of bytes of heap memory represented by a card bundle word
+ const size_t heap_bytes_for_bundle_word = card_size * card_word_width * card_bundle_size * card_bundle_word_width;
+
+ // Each card bundle word is 32 bits
+ return (uint32_t*)((uint8_t*)cb - (((size_t)lowest_address / heap_bytes_for_bundle_word) * sizeof (uint32_t)));
+}
+
+void gc_heap::enable_card_bundles ()
+{
+ if (can_use_write_watch_for_card_table() && (!card_bundles_enabled()))
+ {
+ dprintf (1, ("Enabling card bundles"));
+
+ // We initially set all of the card bundles
+ card_bundles_set (cardw_card_bundle (card_word (card_of (lowest_address))),
+ cardw_card_bundle (align_cardw_on_bundle (card_word (card_of (highest_address)))));
+ settings.card_bundles = TRUE;
+ }
+}
+
+BOOL gc_heap::card_bundles_enabled ()
+{
+ return settings.card_bundles;
+}
+
+#endif // CARD_BUNDLE
+
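
Putting the new constants together, a worked example assuming OS_PAGE_SIZE = 4096:

    card_bundle_size = 4096 / (sizeof(uint32_t) * 32) = 32 card words per bundle bit
    one bundle bit  spans 32 card words  = 128 bytes of card table
    one bundle word spans 32 bundle bits = 4096 bytes, exactly one page of card table

That page-per-bundle-word sizing is why heap_bytes_for_bundle_word above multiplies all four constants; with an assumed card_size of 256 bytes it comes to 8 MB of heap summarized per 32-bit bundle word.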
#if defined (_TARGET_AMD64_)
#define brick_size ((size_t)4096)
#else
@@ -6418,14 +6572,18 @@ void gc_heap::clear_card (size_t card)
inline
void gc_heap::set_card (size_t card)
{
- card_table [card_word (card)] =
- (card_table [card_word (card)] | (1 << card_bit (card)));
-}
+ size_t word = card_word (card);
+ card_table[word] = (card_table [word] | (1 << card_bit (card)));
-inline
-void gset_card (size_t card)
-{
- g_gc_card_table [card_word (card)] |= (1 << card_bit (card));
+#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+ // Also set the card bundle that corresponds to the card
+ size_t bundle_to_set = cardw_card_bundle(word);
+
+ card_bundle_set(bundle_to_set);
+
+ dprintf (3,("Set card %Ix [%Ix, %Ix[ and bundle %Ix", card, (size_t)card_address (card), (size_t)card_address (card+1), bundle_to_set));
+ assert(card_bundle_set_p(bundle_to_set) != 0);
+#endif
}
inline
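
Keeping both tables in translated form pays off in the write barrier: dirtying becomes a pair of shifts on the raw address. A minimal sketch of that idea (the shift names and values are assumptions; the real barrier lives on the EE side and is retargeted via GCToEEInterface::StompWriteBarrier):

    void sketch_write_barrier(uint8_t** slot, uint8_t* ref)
    {
        *slot = ref;
        // Translated tables can be indexed directly by the absolute address.
        ((uint8_t*)g_gc_card_table)[(size_t)slot >> assumed_card_byte_shift] = 0xFF;
    #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
        ((uint8_t*)g_gc_card_bundle_table)[(size_t)slot >> assumed_card_bundle_byte_shift] = 0xFF;
    #endif
    }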
@@ -6448,116 +6606,6 @@ size_t size_card_of (uint8_t* from, uint8_t* end)
return count_card_of (from, end) * sizeof(uint32_t);
}
-#ifdef CARD_BUNDLE
-
-//The card bundle keeps track of groups of card words
-#define card_bundle_word_width ((size_t)32)
-//how do we express the fact that 32 bits (card_word_width) is one uint32_t?
-#define card_bundle_size ((size_t)(OS_PAGE_SIZE/(sizeof (uint32_t)*card_bundle_word_width)))
-
-inline
-size_t card_bundle_word (size_t cardb)
-{
- return cardb / card_bundle_word_width;
-}
-
-inline
-uint32_t card_bundle_bit (size_t cardb)
-{
- return (uint32_t)(cardb % card_bundle_word_width);
-}
-
-size_t align_cardw_on_bundle (size_t cardw)
-{
- return ((size_t)(cardw + card_bundle_size - 1) & ~(card_bundle_size - 1 ));
-}
-
-size_t cardw_card_bundle (size_t cardw)
-{
- return cardw/card_bundle_size;
-}
-
-size_t card_bundle_cardw (size_t cardb)
-{
- return cardb*card_bundle_size;
-}
-
-void gc_heap::card_bundle_clear(size_t cardb)
-{
- card_bundle_table [card_bundle_word (cardb)] &= ~(1 << card_bundle_bit (cardb));
- dprintf (3,("Cleared card bundle %Ix [%Ix, %Ix[", cardb, (size_t)card_bundle_cardw (cardb),
- (size_t)card_bundle_cardw (cardb+1)));
-// printf ("Cleared card bundle %Ix\n", cardb);
-}
-
-void gc_heap::card_bundles_set (size_t start_cardb, size_t end_cardb)
-{
- size_t start_word = card_bundle_word (start_cardb);
- size_t end_word = card_bundle_word (end_cardb);
- if (start_word < end_word)
- {
- //set the partial words
- card_bundle_table [start_word] |= highbits (~0u, card_bundle_bit (start_cardb));
-
- if (card_bundle_bit (end_cardb))
- card_bundle_table [end_word] |= lowbits (~0u, card_bundle_bit (end_cardb));
-
- for (size_t i = start_word+1; i < end_word; i++)
- card_bundle_table [i] = ~0u;
-
- }
- else
- {
- card_bundle_table [start_word] |= (highbits (~0u, card_bundle_bit (start_cardb)) &
- lowbits (~0u, card_bundle_bit (end_cardb)));
-
- }
-
-}
-
-BOOL gc_heap::card_bundle_set_p (size_t cardb)
-{
- return ( card_bundle_table [ card_bundle_word (cardb) ] & (1 << card_bundle_bit (cardb)));
-}
-
-size_t size_card_bundle_of (uint8_t* from, uint8_t* end)
-{
- //align from to lower
- from = (uint8_t*)((size_t)from & ~(card_size*card_word_width*card_bundle_size*card_bundle_word_width - 1));
- //align to to upper
- end = (uint8_t*)((size_t)(end + (card_size*card_word_width*card_bundle_size*card_bundle_word_width - 1)) &
- ~(card_size*card_word_width*card_bundle_size*card_bundle_word_width - 1));
-
- assert (((size_t)from & ((card_size*card_word_width*card_bundle_size*card_bundle_word_width)-1)) == 0);
- assert (((size_t)end & ((card_size*card_word_width*card_bundle_size*card_bundle_word_width)-1)) == 0);
-
- return ((end - from) / (card_size*card_word_width*card_bundle_size*card_bundle_word_width)) * sizeof (uint32_t);
-}
-
-uint32_t* translate_card_bundle_table (uint32_t* cb)
-{
- return (uint32_t*)((uint8_t*)cb - ((((size_t)g_gc_lowest_address) / (card_size*card_word_width*card_bundle_size*card_bundle_word_width)) * sizeof (uint32_t)));
-}
-
-void gc_heap::enable_card_bundles ()
-{
- if (can_use_write_watch_for_card_table() && (!card_bundles_enabled()))
- {
- dprintf (3, ("Enabling card bundles"));
- //set all of the card bundles
- card_bundles_set (cardw_card_bundle (card_word (card_of (lowest_address))),
- cardw_card_bundle (align_cardw_on_bundle (card_word (card_of (highest_address)))));
- settings.card_bundles = TRUE;
- }
-}
-
-BOOL gc_heap::card_bundles_enabled ()
-{
- return settings.card_bundles;
-}
-
-#endif //CARD_BUNDLE
-
// We don't store seg_mapping_table in card_table_info because there's only always one view.
class card_table_info
{
@@ -6612,6 +6660,7 @@ short*& card_table_brick_table (uint32_t* c_table)
}
#ifdef CARD_BUNDLE
+// Get the card bundle table for the specified card table.
inline
uint32_t*& card_table_card_bundle_table (uint32_t* c_table)
{
@@ -6879,6 +6928,10 @@ void release_card_table (uint32_t* c_table)
if (&g_gc_card_table[card_word (gcard_of(g_gc_lowest_address))] == c_table)
{
g_gc_card_table = 0;
+
+#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+ g_gc_card_bundle_table = 0;
+#endif
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
SoftwareWriteWatch::StaticClose();
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
@@ -6927,8 +6980,12 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
#ifdef CARD_BUNDLE
if (can_use_write_watch_for_card_table())
{
- virtual_reserve_flags |= VirtualReserveFlags::WriteWatch;
cb = size_card_bundle_of (g_gc_lowest_address, g_gc_highest_address);
+#ifndef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+ // If we're not manually managing the card bundles, we will need to use OS write
+ // watch APIs over this region to track changes.
+ virtual_reserve_flags |= VirtualReserveFlags::WriteWatch;
+#endif
}
#endif //CARD_BUNDLE
@@ -6987,6 +7044,11 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
#ifdef CARD_BUNDLE
card_table_card_bundle_table (ct) = (uint32_t*)((uint8_t*)card_table_brick_table (ct) + bs);
+
+#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+ g_gc_card_bundle_table = translate_card_bundle_table(card_table_card_bundle_table(ct), g_gc_lowest_address);
+#endif
+
#endif //CARD_BUNDLE
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
@@ -7097,6 +7159,11 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
bool write_barrier_updated = false;
uint32_t virtual_reserve_flags = VirtualReserveFlags::None;
uint32_t* saved_g_card_table = g_gc_card_table;
+
+#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+ uint32_t* saved_g_card_bundle_table = g_gc_card_bundle_table;
+#endif
+
uint32_t* ct = 0;
uint32_t* translated_ct = 0;
short* bt = 0;
@@ -7117,8 +7184,13 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
#ifdef CARD_BUNDLE
if (can_use_write_watch_for_card_table())
{
- virtual_reserve_flags = VirtualReserveFlags::WriteWatch;
cb = size_card_bundle_of (saved_g_lowest_address, saved_g_highest_address);
+
+#ifndef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+ // If we're not manually managing the card bundles, we will need to use OS write
+ // watch APIs over this region to track changes.
+ virtual_reserve_flags |= VirtualReserveFlags::WriteWatch;
+#endif
}
#endif //CARD_BUNDLE
@@ -7280,6 +7352,11 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
}
g_gc_card_table = translated_ct;
+
+#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+ g_gc_card_bundle_table = translate_card_bundle_table(card_table_card_bundle_table(ct), saved_g_lowest_address);
+#endif
+
SoftwareWriteWatch::SetResizedUntranslatedTable(
mem + sw_ww_table_offset,
saved_g_lowest_address,
@@ -7304,6 +7381,10 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
{
g_gc_card_table = translated_ct;
+
+#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+ g_gc_card_bundle_table = translate_card_bundle_table(card_table_card_bundle_table(ct), saved_g_lowest_address);
+#endif
}
seg_mapping_table = new_seg_mapping_table;
@@ -7335,6 +7416,10 @@ fail:
{
assert(g_gc_card_table == saved_g_card_table);
+#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+ assert(g_gc_card_bundle_table == saved_g_card_bundle_table);
+#endif
+
//delete (uint32_t*)((uint8_t*)ct - sizeof(card_table_info));
if (!GCToOSInterface::VirtualRelease (mem, alloc_size_aligned))
{
@@ -7438,12 +7523,23 @@ void gc_heap::copy_brick_card_range (uint8_t* la, uint32_t* old_card_table,
(card_table_lowest_address (ct) <= start))
{
// or the card_tables
- uint32_t* dest = &card_table [card_word (card_of (start))];
- uint32_t* src = &((translate_card_table (ct)) [card_word (card_of (start))]);
+
+ size_t start_word = card_word (card_of (start));
+
+ uint32_t* dest = &card_table[start_word];
+ uint32_t* src = &((translate_card_table (ct))[start_word]);
ptrdiff_t count = count_card_of (start, end);
for (int x = 0; x < count; x++)
{
*dest |= *src;
+
+#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+ if (*src != 0)
+ {
+ card_bundle_set(cardw_card_bundle(start_word+x));
+ }
+#endif
+
dest++;
src++;
}
@@ -7519,7 +7615,10 @@ void gc_heap::copy_brick_card_table()
size_t st = 0;
#endif //GROWABLE_SEG_MAPPING_TABLE
#endif //MARK_ARRAY && _DEBUG
- card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct));
+ card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct), g_gc_lowest_address);
+
+ // Ensure that the word that represents g_gc_lowest_address in the translated table is located at the
+ // start of the untranslated table.
assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_gc_lowest_address))))] ==
card_table_card_bundle_table (ct));
@@ -9341,89 +9440,112 @@ static unsigned int tot_cycles = 0;
#ifdef CARD_BUNDLE
+inline void gc_heap::verify_card_bundle_bits_set(size_t first_card_word, size_t last_card_word)
+{
+#ifdef _DEBUG
+ for (size_t x = cardw_card_bundle (first_card_word); x < cardw_card_bundle (last_card_word); x++)
+ {
+ if (!card_bundle_set_p (x))
+ {
+ assert (!"Card bundle not set");
+ dprintf (3, ("Card bundle %Ix not set", x));
+ }
+ }
+#endif
+}
+
+// Verifies that any bundles that are not set represent only cards that are not set.
+inline void gc_heap::verify_card_bundles()
+{
+#ifdef _DEBUG
+ size_t lowest_card = card_word (card_of (lowest_address));
+ size_t highest_card = card_word (card_of (highest_address));
+ size_t cardb = cardw_card_bundle (lowest_card);
+ size_t end_cardb = cardw_card_bundle (align_cardw_on_bundle (highest_card));
+
+ while (cardb < end_cardb)
+ {
+ uint32_t* card_word = &card_table[max(card_bundle_cardw (cardb), lowest_card)];
+ uint32_t* card_word_end = &card_table[min(card_bundle_cardw (cardb+1), highest_card)];
+
+ if (card_bundle_set_p (cardb) == 0)
+ {
+ // Verify that no card is set
+ while (card_word < card_word_end)
+ {
+ if (*card_word != 0)
+ {
+ dprintf (3, ("gc: %d, Card word %Ix for address %Ix set, card_bundle %Ix clear",
+ dd_collection_count (dynamic_data_of (0)),
+ (size_t)(card_word-&card_table[0]),
+ (size_t)(card_address ((size_t)(card_word-&card_table[0]) * card_word_width)), cardb));
+ }
+
+ assert((*card_word)==0);
+ card_word++;
+ }
+ }
+
+ cardb++;
+ }
+#endif
+}
+
+// If card bundles are enabled, use write watch to find pages in the card table that have
+// been dirtied, and set the corresponding card bundle bits.
void gc_heap::update_card_table_bundle()
{
if (card_bundles_enabled())
{
+ // The address of the card word containing the card representing the lowest heap address
uint8_t* base_address = (uint8_t*)(&card_table[card_word (card_of (lowest_address))]);
+
+ // The address of the card word containing the card representing the highest heap address
+ uint8_t* high_address = (uint8_t*)(&card_table[card_word (card_of (highest_address))]);
+
uint8_t* saved_base_address = base_address;
uintptr_t bcount = array_size;
- uint8_t* high_address = (uint8_t*)(&card_table[card_word (card_of (highest_address))]);
size_t saved_region_size = align_on_page (high_address) - saved_base_address;
do
{
size_t region_size = align_on_page (high_address) - base_address;
+
dprintf (3,("Probing card table pages [%Ix, %Ix[", (size_t)base_address, (size_t)base_address+region_size));
- bool success = GCToOSInterface::GetWriteWatch (false /* resetState */ , base_address, region_size,
- (void**)g_addresses,
- &bcount);
+ bool success = GCToOSInterface::GetWriteWatch(false /* resetState */,
+ base_address,
+ region_size,
+ (void**)g_addresses,
+ &bcount);
assert (success && "GetWriteWatch failed!");
+
dprintf (3,("Found %d pages written", bcount));
- for (unsigned i = 0; i < bcount; i++)
+ for (unsigned i = 0; i < bcount; i++)
{
+ // Offset of the dirty page from the start of the card table (clamped to base_address)
size_t bcardw = (uint32_t*)(max(g_addresses[i],base_address)) - &card_table[0];
+
+ // Offset of the end of the page from the start of the card table (clamped to high addr)
size_t ecardw = (uint32_t*)(min(g_addresses[i]+OS_PAGE_SIZE, high_address)) - &card_table[0];
assert (bcardw >= card_word (card_of (g_gc_lowest_address)));
- card_bundles_set (cardw_card_bundle (bcardw),
- cardw_card_bundle (align_cardw_on_bundle (ecardw)));
-
- dprintf (3,("Set Card bundle [%Ix, %Ix[",
- cardw_card_bundle (bcardw), cardw_card_bundle (align_cardw_on_bundle (ecardw))));
-
-#ifdef _DEBUG
- for (size_t x = cardw_card_bundle (bcardw); x < cardw_card_bundle (ecardw); x++)
- {
- if (!card_bundle_set_p (x))
- {
- assert (!"Card bundle not set");
- dprintf (3, ("Card bundle %Ix not set", x));
- }
- }
-#endif //_DEBUG
+ // Set the card bundle bits representing the dirty card table page
+ card_bundles_set (cardw_card_bundle (bcardw), cardw_card_bundle (align_cardw_on_bundle (ecardw)));
+ dprintf (3,("Set Card bundle [%Ix, %Ix[", cardw_card_bundle (bcardw), cardw_card_bundle (align_cardw_on_bundle (ecardw))));
+ verify_card_bundle_bits_set(bcardw, ecardw);
}
- if (bcount >= array_size){
+
+ if (bcount >= array_size)
+ {
base_address = g_addresses [array_size-1] + OS_PAGE_SIZE;
bcount = array_size;
}
+
} while ((bcount >= array_size) && (base_address < high_address));
+ // Now that we've updated the card bundle bits, reset the write-tracking state.
GCToOSInterface::ResetWriteWatch (saved_base_address, saved_region_size);
-
-#ifdef _DEBUG
-
- size_t lowest_card = card_word (card_of (lowest_address));
- size_t highest_card = card_word (card_of (highest_address));
- size_t cardb = cardw_card_bundle (lowest_card);
- size_t end_cardb = cardw_card_bundle (align_cardw_on_bundle (highest_card));
-
- //find a non null bundle
- while (cardb < end_cardb)
- {
- if (card_bundle_set_p (cardb)==0)
- {
- //verify that the cards are indeed empty
- uint32_t* card_word = &card_table[max(card_bundle_cardw (cardb), lowest_card)];
- uint32_t* card_word_end = &card_table[min(card_bundle_cardw (cardb+1), highest_card)];
- while (card_word < card_word_end)
- {
- if ((*card_word) != 0)
- {
- dprintf (3, ("gc: %d, Card word %Ix for address %Ix set, card_bundle %Ix clear",
- dd_collection_count (dynamic_data_of (0)),
- (size_t)(card_word-&card_table[0]),
- (size_t)(card_address ((size_t)(card_word-&card_table[0]) * card_word_width)), cardb));
- }
- assert((*card_word)==0);
- card_word++;
- }
- }
- //end of verification
- cardb++;
- }
-#endif //_DEBUG
}
}
#endif //CARD_BUNDLE
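
The page-to-bundle mapping above works out exactly because of the card bundle sizing: with an assumed OS_PAGE_SIZE of 4096, a dirty card table page N pages from &card_table[0] covers card words [N*1024, (N+1)*1024[ (4096 bytes at four bytes per word), and with card_bundle_size = 32 that maps to bundles [N*32, (N+1)*32[, i.e. exactly one 32-bit bundle word per dirty page of card table.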
@@ -10346,7 +10468,7 @@ gc_heap::init_gc_heap (int h_number)
lowest_address = card_table_lowest_address (ct);
#ifdef CARD_BUNDLE
- card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct));
+ card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct), g_gc_lowest_address);
assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_gc_lowest_address))))] ==
card_table_card_bundle_table (ct));
#endif //CARD_BUNDLE
@@ -11260,6 +11382,13 @@ void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size,
}
acontext->alloc_ptr = start;
}
+ else
+ {
+ // If the next alloc context is right up against the current one it means we are absorbing the min
+ // object, so we need to account for that.
+ acontext->alloc_bytes += (start - acontext->alloc_limit);
+ }
+
acontext->alloc_limit = (start + limit_size - aligned_min_obj_size);
acontext->alloc_bytes += limit_size - ((gen_number < max_generation + 1) ? aligned_min_obj_size : 0);
@@ -13219,11 +13348,11 @@ try_again:
if (CPUGroupInfo::CanEnableGCCPUGroups())
{ //only set ideal processor when max_hp and org_hp are in the same cpu
//group. DO NOT MOVE THREADS ACROSS CPU GROUPS
- uint8_t org_gn = heap_select::find_cpu_group_from_heap_no(org_hp->heap_number);
- uint8_t max_gn = heap_select::find_cpu_group_from_heap_no(max_hp->heap_number);
+ uint16_t org_gn = heap_select::find_cpu_group_from_heap_no(org_hp->heap_number);
+ uint16_t max_gn = heap_select::find_cpu_group_from_heap_no(max_hp->heap_number);
if (org_gn == max_gn) //only set within CPU group, so SetThreadIdealProcessor is enough
{
- uint8_t group_proc_no = heap_select::find_group_proc_from_heap_no(max_hp->heap_number);
+ uint16_t group_proc_no = heap_select::find_group_proc_from_heap_no(max_hp->heap_number);
GCThreadAffinity affinity;
affinity.Processor = group_proc_no;
@@ -13237,7 +13366,7 @@ try_again:
}
else
{
- uint8_t proc_no = heap_select::find_proc_no_from_heap_no(max_hp->heap_number);
+ uint16_t proc_no = heap_select::find_proc_no_from_heap_no(max_hp->heap_number);
GCThreadAffinity affinity;
affinity.Processor = proc_no;
@@ -14068,7 +14197,8 @@ uint8_t* gc_heap::allocate_in_condemned_generations (generation* gen,
to_gen_number = from_gen_number + (settings.promotion ? 1 : 0);
}
- dprintf (3, ("aic gen%d: s: %Id", gen->gen_num, size));
+ dprintf (3, ("aic gen%d: s: %Id, %d->%d, %Ix->%Ix", gen->gen_num, size, from_gen_number,
+ to_gen_number, generation_allocation_pointer(gen), generation_allocation_limit(gen)));
int pad_in_front = (old_loc != 0) ? USE_PADDING_FRONT : 0;
@@ -15006,26 +15136,21 @@ exit:
}
}
-#ifndef FEATURE_REDHAWK
- if (n == max_generation)
+ if (n == max_generation && GCToEEInterface::ForceFullGCToBeBlocking())
{
- if (SystemDomain::System()->RequireAppDomainCleanup())
- {
#ifdef BACKGROUND_GC
- // do not turn stress-induced collections into blocking GCs, unless there
- // have already been more full BGCs than full NGCs
+ // do not turn stress-induced collections into blocking GCs, unless there
+ // have already been more full BGCs than full NGCs
#if 0
- // This exposes DevDiv 94129, so we'll leave this out for now
- if (!settings.stress_induced ||
- full_gc_counts[gc_type_blocking] <= full_gc_counts[gc_type_background])
+ // This exposes DevDiv 94129, so we'll leave this out for now
+ if (!settings.stress_induced ||
+ full_gc_counts[gc_type_blocking] <= full_gc_counts[gc_type_background])
#endif // 0
#endif // BACKGROUND_GC
- {
- *blocking_collection_p = TRUE;
- }
+ {
+ *blocking_collection_p = TRUE;
}
}
-#endif //!FEATURE_REDHAWK
return n;
}
@@ -15219,6 +15344,10 @@ void gc_heap::gc1()
assert (g_gc_card_table == card_table);
+#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+ assert (g_gc_card_bundle_table == card_bundle_table);
+#endif
+
{
if (n == max_generation)
{
@@ -15753,8 +15882,10 @@ start_no_gc_region_status gc_heap::prepare_for_no_gc_region (uint64_t total_size
size_t allocation_no_gc_soh = 0;
size_t size_per_heap = 0;
+ total_size = (size_t)((float)total_size * 1.05);
if (loh_size_known)
{
+ loh_size = (size_t)((float)loh_size * 1.05);
allocation_no_gc_loh = (size_t)loh_size;
allocation_no_gc_soh = (size_t)(total_size - loh_size);
}
@@ -15765,12 +15896,14 @@ start_no_gc_region_status gc_heap::prepare_for_no_gc_region (uint64_t total_size
}
size_t soh_segment_size = get_valid_segment_size();
+ int soh_align_const = get_alignment_constant (TRUE);
+ size_t max_soh_allocated = (soh_segment_size - OS_PAGE_SIZE - eph_gen_starts_size);
int num_heaps = 1;
#ifdef MULTIPLE_HEAPS
num_heaps = n_heaps;
#endif //MULTIPLE_HEAPS
- size_t total_allowed_soh_allocation = (soh_segment_size - OS_PAGE_SIZE) * num_heaps;
+ size_t total_allowed_soh_allocation = max_soh_allocated * num_heaps;
if (allocation_no_gc_soh > total_allowed_soh_allocation)
{
@@ -15783,24 +15916,23 @@ start_no_gc_region_status gc_heap::prepare_for_no_gc_region (uint64_t total_size
if (allocation_no_gc_soh != 0)
{
- current_no_gc_region_info.soh_allocation_size = (size_t)((float)allocation_no_gc_soh * 1.05);
- //current_no_gc_region_info.soh_allocation_size = allocation_no_gc_soh;
+ current_no_gc_region_info.soh_allocation_size = allocation_no_gc_soh;
size_per_heap = current_no_gc_region_info.soh_allocation_size;
#ifdef MULTIPLE_HEAPS
size_per_heap /= n_heaps;
for (int i = 0; i < n_heaps; i++)
{
// due to heap balancing we need to allow some room before we even look to balance to another heap.
- g_heaps[i]->soh_allocation_no_gc = min (Align (size_per_heap + min_balance_threshold, get_alignment_constant (TRUE)), (soh_segment_size - OS_PAGE_SIZE));
+ g_heaps[i]->soh_allocation_no_gc = min (Align ((size_per_heap + min_balance_threshold), soh_align_const), max_soh_allocated);
}
#else //MULTIPLE_HEAPS
- soh_allocation_no_gc = min (Align (size_per_heap, get_alignment_constant (TRUE)), (soh_segment_size - OS_PAGE_SIZE));
+ soh_allocation_no_gc = min (Align (size_per_heap, soh_align_const), max_soh_allocated);
#endif //MULTIPLE_HEAPS
}
if (allocation_no_gc_loh != 0)
{
- current_no_gc_region_info.loh_allocation_size = (size_t)((float)allocation_no_gc_loh * 1.05);
+ current_no_gc_region_info.loh_allocation_size = allocation_no_gc_loh;
size_per_heap = current_no_gc_region_info.loh_allocation_size;
#ifdef MULTIPLE_HEAPS
size_per_heap /= n_heaps;
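
With the 5% padding hoisted to the top of the function, it is applied once to the request rather than later to each component. A worked example with assumed inputs: a no-GC region request of total_size = 100 MB and no LOH component on a 4-heap server GC yields allocation_no_gc_soh = 105 MB, size_per_heap = 26.25 MB, and each heap's quota becomes min(Align(26.25 MB + min_balance_threshold), max_soh_allocated), where the new max_soh_allocated = soh_segment_size - OS_PAGE_SIZE - eph_gen_starts_size also reserves room for the ephemeral generation start objects.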
@@ -16188,19 +16320,44 @@ BOOL gc_heap::expand_soh_with_minimal_gc()
heap_segment* new_seg = soh_get_segment_to_expand();
if (new_seg)
{
+ if (g_gc_card_table != card_table)
+ copy_brick_card_table();
+
settings.promotion = TRUE;
settings.demotion = FALSE;
ephemeral_promotion = TRUE;
- save_ephemeral_generation_starts();
+ int condemned_gen_number = max_generation - 1;
+
+ generation* gen = 0;
+ int align_const = get_alignment_constant (TRUE);
+
+ for (int i = 0; i <= condemned_gen_number; i++)
+ {
+ gen = generation_of (i);
+ saved_ephemeral_plan_start[i] = generation_allocation_start (gen);
+ saved_ephemeral_plan_start_size[i] = Align (size (generation_allocation_start (gen)), align_const);
+ }
+
+ // We do need to clear the bricks here as we are converting a bunch of ephemeral objects to gen2
+ // and need to make sure that there are no left over bricks from the previous GCs for the space
+ // we just used for gen0 allocation. We will need to go through the bricks for these objects for
+ // ephemeral GCs later.
+ for (size_t b = brick_of (generation_allocation_start (generation_of (0)));
+ b < brick_of (align_on_brick (heap_segment_allocated (ephemeral_heap_segment)));
+ b++)
+ {
+ set_brick (b, -1);
+ }
+
size_t ephemeral_size = (heap_segment_allocated (ephemeral_heap_segment) -
generation_allocation_start (generation_of (max_generation - 1)));
heap_segment_next (ephemeral_heap_segment) = new_seg;
ephemeral_heap_segment = new_seg;
uint8_t* start = heap_segment_mem (ephemeral_heap_segment);
- for (int i = (max_generation - 1); i >= 0; i--)
+ for (int i = condemned_gen_number; i >= 0; i--)
{
- generation* gen = generation_of (i);
+ gen = generation_of (i);
size_t gen_start_size = Align (min_obj_size);
make_generation (generation_table[i], ephemeral_heap_segment, start, 0);
generation_plan_allocation_start (gen) = start;
@@ -16210,15 +16367,11 @@ BOOL gc_heap::expand_soh_with_minimal_gc()
heap_segment_used (ephemeral_heap_segment) = start - plug_skew;
heap_segment_plan_allocated (ephemeral_heap_segment) = start;
- fix_generation_bounds ((max_generation - 1), generation_of (0));
+ fix_generation_bounds (condemned_gen_number, generation_of (0));
dd_gc_new_allocation (dynamic_data_of (max_generation)) -= ephemeral_size;
dd_new_allocation (dynamic_data_of (max_generation)) = dd_gc_new_allocation (dynamic_data_of (max_generation));
-#ifndef FEATURE_REDHAWK
- // IsGCThread() always returns false on CoreRT, but this assert is useful in CoreCLR.
- assert(!!IsGCThread());
-#endif // FEATURE_REDHAWK
adjust_ephemeral_limits();
return TRUE;
}
@@ -16226,11 +16379,36 @@ BOOL gc_heap::expand_soh_with_minimal_gc()
return FALSE;
}
+// Must only be called on the thread that calls restart in a join (server GC);
+// it also resets each heap's OOM status.
+void gc_heap::check_and_set_no_gc_oom()
+{
+#ifdef MULTIPLE_HEAPS
+ for (int i = 0; i < n_heaps; i++)
+ {
+ gc_heap* hp = g_heaps[i];
+ if (hp->no_gc_oom_p)
+ {
+ current_no_gc_region_info.start_status = start_no_gc_no_memory;
+ hp->no_gc_oom_p = false;
+ }
+ }
+#else
+ if (no_gc_oom_p)
+ {
+ current_no_gc_region_info.start_status = start_no_gc_no_memory;
+ no_gc_oom_p = false;
+ }
+#endif //MULTIPLE_HEAPS
+}
+
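
This function concentrates the OOM reporting for no-GC regions into one pattern: each heap writes only its own no_gc_oom_p, and a single thread folds all of the flags into the shared start_status at a join point. The shape, as it appears in the hunks that follow:

    gc_t_join.join(this, gc_join_after_commit_soh_no_gc);
    if (gc_t_join.joined())
    {
        // Runs on exactly one thread while the other heaps wait.
        check_and_set_no_gc_oom();   // folds every heap's no_gc_oom_p into start_status
        gc_t_join.restart();         // releases the waiting heaps
    }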
void gc_heap::allocate_for_no_gc_after_gc()
{
if (current_no_gc_region_info.minimal_gc_p)
repair_allocation_contexts (TRUE);
+ no_gc_oom_p = false;
+
if (current_no_gc_region_info.start_status != start_no_gc_no_memory)
{
if (current_no_gc_region_info.soh_allocation_size != 0)
@@ -16238,18 +16416,19 @@ void gc_heap::allocate_for_no_gc_after_gc()
if (((size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment)) < soh_allocation_no_gc) ||
(!grow_heap_segment (ephemeral_heap_segment, (heap_segment_allocated (ephemeral_heap_segment) + soh_allocation_no_gc))))
{
- current_no_gc_region_info.start_status = start_no_gc_no_memory;
+ no_gc_oom_p = true;
}
#ifdef MULTIPLE_HEAPS
- if (!current_no_gc_region_info.minimal_gc_p &&
- (current_no_gc_region_info.loh_allocation_size != 0))
+ gc_t_join.join(this, gc_join_after_commit_soh_no_gc);
+ if (gc_t_join.joined())
{
- gc_t_join.join(this, gc_join_after_commit_soh_no_gc);
- if (gc_t_join.joined())
- {
- gc_t_join.restart();
- }
+#endif //MULTIPLE_HEAPS
+
+ check_and_set_no_gc_oom();
+
+#ifdef MULTIPLE_HEAPS
+ gc_t_join.restart();
}
#endif //MULTIPLE_HEAPS
}
@@ -16272,7 +16451,7 @@ void gc_heap::allocate_for_no_gc_after_gc()
found_seg_p = TRUE;
if (!commit_loh_for_no_gc (seg))
{
- current_no_gc_region_info.start_status = start_no_gc_no_memory;
+ no_gc_oom_p = true;
break;
}
}
@@ -16287,20 +16466,31 @@ void gc_heap::allocate_for_no_gc_after_gc()
gc_t_join.join(this, gc_join_expand_loh_no_gc);
if (gc_t_join.joined())
{
- for (int i = 0; i < n_heaps; i++)
+ check_and_set_no_gc_oom();
+
+ if (current_no_gc_region_info.start_status == start_no_gc_success)
{
- gc_heap* hp = g_heaps[i];
- if (hp->gc_policy == policy_expand)
+ for (int i = 0; i < n_heaps; i++)
{
- hp->saved_loh_segment_no_gc = get_segment_for_loh (get_large_seg_size (loh_allocation_no_gc), hp);
- if (!(hp->saved_loh_segment_no_gc))
- current_no_gc_region_info.start_status = start_no_gc_no_memory;
+ gc_heap* hp = g_heaps[i];
+ if (hp->gc_policy == policy_expand)
+ {
+ hp->saved_loh_segment_no_gc = get_segment_for_loh (get_large_seg_size (loh_allocation_no_gc), hp);
+ if (!(hp->saved_loh_segment_no_gc))
+ {
+ current_no_gc_region_info.start_status = start_no_gc_no_memory;
+ break;
+ }
+ }
}
}
+
gc_t_join.restart();
}
#else //MULTIPLE_HEAPS
- if (gc_policy == policy_expand)
+ check_and_set_no_gc_oom();
+
+ if ((current_no_gc_region_info.start_status == start_no_gc_success) && (gc_policy == policy_expand))
{
saved_loh_segment_no_gc = get_segment_for_loh (get_large_seg_size (loh_allocation_no_gc));
if (!saved_loh_segment_no_gc)
@@ -16312,8 +16502,8 @@ void gc_heap::allocate_for_no_gc_after_gc()
{
if (!commit_loh_for_no_gc (saved_loh_segment_no_gc))
{
- current_no_gc_region_info.start_status = start_no_gc_no_memory;
- }
+ no_gc_oom_p = true;
+ }
}
}
}
@@ -16323,6 +16513,9 @@ void gc_heap::allocate_for_no_gc_after_gc()
if (gc_t_join.joined())
{
#endif //MULTIPLE_HEAPS
+
+ check_and_set_no_gc_oom();
+
if (current_no_gc_region_info.start_status == start_no_gc_success)
{
set_allocations_for_no_gc();
@@ -17298,7 +17491,7 @@ void gc_heap::enque_pinned_plug (uint8_t* plug,
// risks. This happens very rarely and fixing it in the
// way so that we can continue is a bit involved and will
// not be done in Dev10.
- EEPOLICY_HANDLE_FATAL_ERROR(CORINFO_EXCEPTION_GC);
+ GCToEEInterface::HandleFatalError(CORINFO_EXCEPTION_GC);
}
}
@@ -18530,9 +18723,9 @@ gc_heap::scan_background_roots (promote_func* fn, int hn, ScanContext *pSC)
#endif //BACKGROUND_GC
-
void gc_heap::fix_card_table ()
{
+#ifdef NO_WRITE_BARRIER
#ifdef WRITE_WATCH
heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
@@ -18598,14 +18791,22 @@ void gc_heap::fix_card_table ()
dprintf (3,("Found %Id pages written", bcount));
for (unsigned i = 0; i < bcount; i++)
{
- for (unsigned j = 0; j< (card_size*card_word_width)/OS_PAGE_SIZE; j++)
+ // Set the card words corresponding to the entire page.
+ for (unsigned j = 0; j < (card_size*card_word_width)/OS_PAGE_SIZE; j++)
{
card_table [card_word (card_of (g_addresses [i]))+j] = ~0u;
}
dprintf (2,("Set Cards [%Ix:%Ix, %Ix:%Ix[",
card_of (g_addresses [i]), (size_t)g_addresses [i],
card_of (g_addresses [i]+OS_PAGE_SIZE), (size_t)g_addresses [i]+OS_PAGE_SIZE));
+
+#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+ // We don't need to update card bundles here because this function is only used when
+ // we don't have write barriers.
+ #error Cannot have manually managed card bundles without write barriers.
+#endif
}
+
if (bcount >= array_size){
base_address = g_addresses [array_size-1] + OS_PAGE_SIZE;
bcount = array_size;
@@ -18626,6 +18827,7 @@ void gc_heap::fix_card_table ()
}
#endif //BACKGROUND_GC
#endif //WRITE_WATCH
+#endif //NO_WRITE_BARRIER
}
#ifdef BACKGROUND_GC
@@ -19541,7 +19743,15 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
{
#endif //MULTIPLE_HEAPS
- update_card_table_bundle ();
+#ifndef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+ // If we are manually managing card bundles, every write to the card table should already be
+ // accounted for in the card bundle table so there's nothing to update here.
+ update_card_table_bundle();
+#endif
+ if (card_bundles_enabled())
+ {
+ verify_card_bundles();
+ }
#ifdef MULTIPLE_HEAPS
gc_t_join.r_restart();
@@ -20880,7 +21090,7 @@ BOOL gc_heap::plan_loh()
{
while (o < heap_segment_allocated (seg) && !marked (o))
{
- dprintf (1235, ("%Ix(%Id) F (%d)", o, AlignQword (size (o)), ((method_table (o) == g_pFreeObjectMethodTable) ? 1 : 0)));
+ dprintf (1235, ("%Ix(%Id) F (%d)", o, AlignQword (size (o)), ((method_table (o) == g_gc_pFreeObjectMethodTable) ? 1 : 0)));
o = o + AlignQword (size (o));
}
}
@@ -21117,7 +21327,7 @@ void gc_heap::relocate_in_loh_compact()
generation_free_obj_space (gen)));
}
-void gc_heap::walk_relocation_for_loh (size_t profiling_context, record_surv_fn fn)
+void gc_heap::walk_relocation_for_loh (void* profiling_context, record_surv_fn fn)
{
generation* gen = large_object_generation;
heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
@@ -21147,7 +21357,7 @@ void gc_heap::walk_relocation_for_loh (size_t profiling_context, record_surv_fn
STRESS_LOG_PLUG_MOVE(o, (o + size), -reloc);
- fn (o, (o + size), reloc, profiling_context, settings.compaction, FALSE);
+ fn (o, (o + size), reloc, profiling_context, !!settings.compaction, false);
o = o + size;
if (o < heap_segment_allocated (seg))
@@ -21190,7 +21400,7 @@ void gc_heap::convert_to_pinned_plug (BOOL& last_npinned_plug_p,
artificial_pinned_size = ps;
}
-// Because we have the artifical pinning, we can't gaurantee that pinned and npinned
+// Because we have the artificial pinning, we can't guarantee that pinned and npinned
// plugs are always interleaved.
void gc_heap::store_plug_gap_info (uint8_t* plug_start,
uint8_t* plug_end,
@@ -23962,7 +24172,7 @@ void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, w
STRESS_LOG_PLUG_MOVE(plug, (plug + size), -last_plug_relocation);
ptrdiff_t reloc = settings.compaction ? last_plug_relocation : 0;
- (args->fn) (plug, (plug + size), reloc, args->profiling_context, settings.compaction, FALSE);
+ (args->fn) (plug, (plug + size), reloc, args->profiling_context, !!settings.compaction, false);
if (check_last_object_p)
{
@@ -24030,7 +24240,7 @@ void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args)
}
}
-void gc_heap::walk_relocation (size_t profiling_context, record_surv_fn fn)
+void gc_heap::walk_relocation (void* profiling_context, record_surv_fn fn)
{
generation* condemned_gen = generation_of (settings.condemned_generation);
uint8_t* start_address = generation_allocation_start (condemned_gen);
@@ -24086,7 +24296,7 @@ void gc_heap::walk_relocation (size_t profiling_context, record_surv_fn fn)
}
}
-void gc_heap::walk_survivors (record_surv_fn fn, size_t context, walk_surv_type type)
+void gc_heap::walk_survivors (record_surv_fn fn, void* context, walk_surv_type type)
{
if (type == walk_for_gc)
walk_survivors_relocation (context, fn);
@@ -24101,7 +24311,7 @@ void gc_heap::walk_survivors (record_surv_fn fn, size_t context, walk_surv_type
}
#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
-void gc_heap::walk_survivors_for_bgc (size_t profiling_context, record_surv_fn fn)
+void gc_heap::walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn)
{
// This should only be called for BGCs
assert(settings.concurrent);
@@ -24136,7 +24346,7 @@ void gc_heap::walk_survivors_for_bgc (size_t profiling_context, record_surv_fn f
while (o < end)
{
- if (method_table(o) == g_pFreeObjectMethodTable)
+ if (method_table(o) == g_gc_pFreeObjectMethodTable)
{
o += Align (size (o), align_const);
continue;
@@ -24147,7 +24357,7 @@ void gc_heap::walk_survivors_for_bgc (size_t profiling_context, record_surv_fn f
uint8_t* plug_start = o;
- while (method_table(o) != g_pFreeObjectMethodTable)
+ while (method_table(o) != g_gc_pFreeObjectMethodTable)
{
o += Align (size (o), align_const);
if (o >= end)
@@ -24162,8 +24372,8 @@ void gc_heap::walk_survivors_for_bgc (size_t profiling_context, record_surv_fn f
plug_end,
0, // Reloc distance == 0 as this is non-compacting
profiling_context,
- FALSE, // Non-compacting
- TRUE); // BGC
+ false, // Non-compacting
+ true); // BGC
}
seg = heap_segment_next (seg);
@@ -24787,7 +24997,7 @@ void gc_heap::gc_thread_stub (void* arg)
#else
STRESS_LOG0(LF_GC, LL_ALWAYS, "Thread::CommitThreadStack failed.");
_ASSERTE(!"Thread::CommitThreadStack failed.");
- EEPOLICY_HANDLE_FATAL_ERROR(COR_E_STACKOVERFLOW);
+ GCToEEInterface::HandleFatalError(COR_E_STACKOVERFLOW);
#endif //BACKGROUND_GC
}
#endif // FEATURE_REDHAWK
@@ -26941,6 +27151,7 @@ void gc_heap::clear_cards (size_t start_card, size_t end_card)
size_t end_word = card_word (end_card);
if (start_word < end_word)
{
+ // Figure out the bit positions of the cards within their words
unsigned bits = card_bit (start_card);
card_table [start_word] &= lowbits (~0, bits);
for (size_t i = start_word+1; i < end_word; i++)
@@ -26954,6 +27165,8 @@ void gc_heap::clear_cards (size_t start_card, size_t end_card)
}
else
{
+ // If the start and end cards are in the same word, just clear the appropriate card
+ // bits in that word.
card_table [start_word] &= (lowbits (~0, card_bit (start_card)) |
highbits (~0, card_bit (end_card)));
}
@@ -26981,8 +27194,10 @@ void gc_heap::clear_card_for_addresses (uint8_t* start_address, uint8_t* end_add
// copy [srccard, ...[ to [dst_card, end_card[
// This will set the same bit twice. Can be optimized.
inline
-void gc_heap::copy_cards (size_t dst_card, size_t src_card,
- size_t end_card, BOOL nextp)
+void gc_heap::copy_cards (size_t dst_card,
+ size_t src_card,
+ size_t end_card,
+ BOOL nextp)
{
// If the range is empty, this function is a no-op - with the subtlety that
// either of the accesses card_table[srcwrd] or card_table[dstwrd] could be
@@ -26996,31 +27211,51 @@ void gc_heap::copy_cards (size_t dst_card, size_t src_card,
size_t dstwrd = card_word (dst_card);
unsigned int srctmp = card_table[srcwrd];
unsigned int dsttmp = card_table[dstwrd];
+
for (size_t card = dst_card; card < end_card; card++)
{
if (srctmp & (1 << srcbit))
dsttmp |= 1 << dstbit;
else
dsttmp &= ~(1 << dstbit);
+
if (!(++srcbit % 32))
{
srctmp = card_table[++srcwrd];
srcbit = 0;
}
+
if (nextp)
{
if (srctmp & (1 << srcbit))
dsttmp |= 1 << dstbit;
}
+
if (!(++dstbit % 32))
{
card_table[dstwrd] = dsttmp;
+
+#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+ if (dsttmp != 0)
+ {
+ card_bundle_set(cardw_card_bundle(dstwrd));
+ }
+#endif
+
dstwrd++;
dsttmp = card_table[dstwrd];
dstbit = 0;
}
}
+
card_table[dstwrd] = dsttmp;
+
+#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+ if (dsttmp != 0)
+ {
+ card_bundle_set(cardw_card_bundle(dstwrd));
+ }
+#endif
}
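Under FEATURE_MANUALLY_MANAGED_CARD_BUNDLES, the invariant the two new blocks above maintain is: whenever a card-table word is stored with a non-zero value, the bundle bit covering that word must be set, or card scanning will skip those dirty cards. A toy, self-contained version of the store-then-mark pattern (the table sizes and cardw_per_bundle value are assumptions):

    #include <cstdint>
    #include <cstdio>

    const size_t cardw_per_bundle = 32;     // card words per bundle bit (assumed)
    uint32_t card_table[1024];              // toy card table
    uint32_t card_bundle_table[(1024 / cardw_per_bundle + 31) / 32];

    size_t cardw_card_bundle(size_t cardw) { return cardw / cardw_per_bundle; }

    void card_bundle_set(size_t bundle)
    {
        card_bundle_table[bundle / 32] |= 1u << (bundle % 32);
    }

    // The pattern from copy_cards: store the word, then (only if non-zero)
    // mark its bundle so the scan in find_card_dword will not skip it.
    void write_card_word(size_t cardw, uint32_t value)
    {
        card_table[cardw] = value;
        if (value != 0)
            card_bundle_set(cardw_card_bundle(cardw));
    }

    int main()
    {
        write_card_word(40, 0x8);           // card word 40 lands in bundle 1
        std::printf("bundle word 0 = %#x\n", (unsigned)card_bundle_table[0]);
        return 0;
    }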
void gc_heap::copy_cards_for_addresses (uint8_t* dest, uint8_t* src, size_t len)
@@ -27081,6 +27316,10 @@ void gc_heap::copy_cards_for_addresses (uint8_t* dest, uint8_t* src, size_t len)
if (card_set_p (card_of (src + len - 1)))
set_card (end_dest_card);
+
+#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+ card_bundles_set(cardw_card_bundle(card_word(card_of(dest))), cardw_card_bundle(align_cardw_on_bundle(card_word(end_dest_card))));
+#endif
}
#ifdef BACKGROUND_GC
@@ -27229,6 +27468,9 @@ uint8_t* gc_heap::find_first_object (uint8_t* start, uint8_t* first_object)
}
#ifdef CARD_BUNDLE
+
+// Find the first non-zero card word between cardw and cardw_end.
+// The index of the word we find is returned in cardw.
BOOL gc_heap::find_card_dword (size_t& cardw, size_t cardw_end)
{
dprintf (3, ("gc: %d, find_card_dword cardw: %Ix, cardw_end: %Ix",
@@ -27240,26 +27482,26 @@ BOOL gc_heap::find_card_dword (size_t& cardw, size_t cardw_end)
size_t end_cardb = cardw_card_bundle (align_cardw_on_bundle (cardw_end));
while (1)
{
- //find a non null bundle
- while ((cardb < end_cardb) &&
- (card_bundle_set_p (cardb)==0))
+ // Find a non-zero bundle
+ while ((cardb < end_cardb) && (card_bundle_set_p (cardb) == 0))
{
cardb++;
}
+
if (cardb == end_cardb)
return FALSE;
- //find a non empty card word
+ // We found a bundle, so go through its words and find a non-zero card word
uint32_t* card_word = &card_table[max(card_bundle_cardw (cardb),cardw)];
uint32_t* card_word_end = &card_table[min(card_bundle_cardw (cardb+1),cardw_end)];
- while ((card_word < card_word_end) &&
- !(*card_word))
+ while ((card_word < card_word_end) && !(*card_word))
{
card_word++;
}
+
if (card_word != card_word_end)
{
- cardw = (card_word - &card_table [0]);
+ cardw = (card_word - &card_table[0]);
return TRUE;
}
else if ((cardw <= card_bundle_cardw (cardb)) &&
@@ -27272,6 +27514,7 @@ BOOL gc_heap::find_card_dword (size_t& cardw, size_t cardw_end)
card_bundle_cardw (cardb+1)));
card_bundle_clear (cardb);
}
+
cardb++;
}
}
@@ -27282,96 +27525,122 @@ BOOL gc_heap::find_card_dword (size_t& cardw, size_t cardw_end)
while (card_word < card_word_end)
{
- if ((*card_word) != 0)
+ if (*card_word != 0)
{
cardw = (card_word - &card_table [0]);
return TRUE;
}
+
card_word++;
}
- return FALSE;
+ return FALSE;
}
-
}
#endif //CARD_BUNDLE
-BOOL gc_heap::find_card (uint32_t* card_table, size_t& card,
- size_t card_word_end, size_t& end_card)
+// Find cards that are set between two points in a card table.
+// Parameters
+// card_table : The card table.
+// card : [in/out] As input, the card to start searching from.
+// As output, the first card that's set.
+// card_word_end : The card word at which to stop looking.
+// end_card : [out] The card after the last set card in the run (exclusive).
+BOOL gc_heap::find_card(uint32_t* card_table,
+ size_t& card,
+ size_t card_word_end,
+ size_t& end_card)
{
uint32_t* last_card_word;
- uint32_t y;
- uint32_t z;
+ uint32_t card_word_value;
+ uint32_t bit_position;
+
// Find the first card which is set
-
last_card_word = &card_table [card_word (card)];
- z = card_bit (card);
- y = (*last_card_word) >> z;
- if (!y)
+ bit_position = card_bit (card);
+ card_word_value = (*last_card_word) >> bit_position;
+ if (!card_word_value)
{
- z = 0;
+ bit_position = 0;
#ifdef CARD_BUNDLE
- size_t lcw = card_word(card)+1;
+ // Using the card bundle, go through the remaining card words between here and
+ // card_word_end until we find one that is non-zero.
+ size_t lcw = card_word(card) + 1;
if (gc_heap::find_card_dword (lcw, card_word_end) == FALSE)
+ {
return FALSE;
+ }
else
{
last_card_word = &card_table [lcw];
- y = *last_card_word;
+ card_word_value = *last_card_word;
}
#else //CARD_BUNDLE
+ // Go through the remaining card words between here and card_word_end until we find
+ // one that is non-zero.
do
{
++last_card_word;
}
- while ((last_card_word < &card_table [card_word_end]) &&
- !(*last_card_word));
+ while ((last_card_word < &card_table [card_word_end]) && !(*last_card_word));
if (last_card_word < &card_table [card_word_end])
- y = *last_card_word;
+ {
+ card_word_value = *last_card_word;
+ }
else
+ {
+ // We failed to find any non-zero card words before we got to card_word_end
return FALSE;
+ }
#endif //CARD_BUNDLE
}
-
// Look for the lowest bit set
- if (y)
+ if (card_word_value)
{
- while (!(y & 1))
+ while (!(card_word_value & 1))
{
- z++;
- y = y / 2;
+ bit_position++;
+ card_word_value = card_word_value / 2;
}
}
- card = (last_card_word - &card_table [0])* card_word_width + z;
+
+ // card is the card word index * card_word_width + the bit index within the word
+ card = (last_card_word - &card_table[0]) * card_word_width + bit_position;
+
do
{
- z++;
- y = y / 2;
- if ((z == card_word_width) &&
- (last_card_word < &card_table [card_word_end]))
- {
+ // Keep going until we get to an un-set card.
+ bit_position++;
+ card_word_value = card_word_value / 2;
+ // If we reach the end of the card word and haven't hit a 0 yet, start going
+ // card word by card word until we get to one that's not fully set (0xFFFF...)
+ // or we reach card_word_end.
+ if ((bit_position == card_word_width) && (last_card_word < &card_table [card_word_end]))
+ {
do
{
- y = *(++last_card_word);
- }while ((last_card_word < &card_table [card_word_end]) &&
+ card_word_value = *(++last_card_word);
+ } while ((last_card_word < &card_table [card_word_end]) &&
+
#ifdef _MSC_VER
- (y == (1 << card_word_width)-1)
+ (card_word_value == (1 << card_word_width)-1)
#else
// if left shift count >= width of type,
// gcc reports error.
- (y == ~0u)
+ (card_word_value == ~0u)
#endif // _MSC_VER
);
- z = 0;
+ bit_position = 0;
}
- } while (y & 1);
+ } while (card_word_value & 1);
- end_card = (last_card_word - &card_table [0])* card_word_width + z;
+ end_card = (last_card_word - &card_table [0])* card_word_width + bit_position;
+
//dprintf (3, ("find_card: [%Ix, %Ix[ set", card, end_card));
dprintf (3, ("fc: [%Ix, %Ix[", card, end_card));
return TRUE;
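The shift loops in find_card implement a classic bit scan: skip to the lowest set bit (the start of the dirty-card run), then consume set bits until the first clear one (the exclusive end of the run, matching the half-open [card, end_card[ interval in the dprintf). The same scan, reduced to a single 32-bit word, as a self-contained sketch:

    #include <cstdint>
    #include <cstdio>

    // Returns false if no bit is set; otherwise yields the half-open run
    // [first_set, end[ of consecutive set bits, mirroring card/end_card.
    bool find_card_run(uint32_t word, unsigned& first_set, unsigned& end)
    {
        if (word == 0)
            return false;
        unsigned bit = 0;
        while (!(word & 1)) { bit++; word >>= 1; }  // skip clear bits
        first_set = bit;
        while (word & 1) { bit++; word >>= 1; }     // consume the set run
        end = bit;                                  // first clear bit after it
        return true;
    }

    int main()
    {
        unsigned s, e;
        if (find_card_run(0x38u, s, e))             // bits 3..5 set
            std::printf("set cards [%u, %u[\n", s, e);  // prints [3, 6[
        return 0;
    }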
@@ -27533,49 +27802,49 @@ void gc_heap::mark_through_cards_for_segments (card_fn fn, BOOL relocating)
#ifdef BACKGROUND_GC
dprintf (3, ("current_sweep_pos is %Ix, saved_sweep_ephemeral_seg is %Ix(%Ix)",
current_sweep_pos, saved_sweep_ephemeral_seg, saved_sweep_ephemeral_start));
+
heap_segment* soh_seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
- PREFIX_ASSUME(soh_seg != NULL);
- while (soh_seg )
+ PREFIX_ASSUME(soh_seg != NULL);
+
+ while (soh_seg)
{
dprintf (3, ("seg %Ix, bgc_alloc: %Ix, alloc: %Ix",
soh_seg,
heap_segment_background_allocated (soh_seg),
heap_segment_allocated (soh_seg)));
+
soh_seg = heap_segment_next_rw (soh_seg);
}
#endif //BACKGROUND_GC
uint8_t* low = gc_low;
uint8_t* high = gc_high;
- size_t end_card = 0;
+ size_t end_card = 0;
+
generation* oldest_gen = generation_of (max_generation);
int curr_gen_number = max_generation;
- uint8_t* gen_boundary = generation_allocation_start
- (generation_of (curr_gen_number - 1));
- uint8_t* next_boundary = (compute_next_boundary
- (gc_low, curr_gen_number, relocating));
+ uint8_t* gen_boundary = generation_allocation_start(generation_of(curr_gen_number - 1));
+ uint8_t* next_boundary = compute_next_boundary(gc_low, curr_gen_number, relocating);
+
heap_segment* seg = heap_segment_rw (generation_start_segment (oldest_gen));
-
PREFIX_ASSUME(seg != NULL);
- uint8_t* beg = generation_allocation_start (oldest_gen);
- uint8_t* end = compute_next_end (seg, low);
- uint8_t* last_object = beg;
+ uint8_t* beg = generation_allocation_start (oldest_gen);
+ uint8_t* end = compute_next_end (seg, low);
+ uint8_t* last_object = beg;
size_t cg_pointers_found = 0;
- size_t card_word_end = (card_of (align_on_card_word (end)) /
- card_word_width);
+ size_t card_word_end = (card_of (align_on_card_word (end)) / card_word_width);
size_t n_eph = 0;
size_t n_gen = 0;
size_t n_card_set = 0;
- uint8_t* nhigh = (relocating ?
- heap_segment_plan_allocated (ephemeral_heap_segment) : high);
+ uint8_t* nhigh = (relocating ? heap_segment_plan_allocated (ephemeral_heap_segment) : high);
BOOL foundp = FALSE;
- uint8_t* start_address = 0;
- uint8_t* limit = 0;
+ uint8_t* start_address = 0;
+ uint8_t* limit = 0;
size_t card = card_of (beg);
#ifdef BACKGROUND_GC
BOOL consider_bgc_mark_p = FALSE;
@@ -27591,6 +27860,7 @@ void gc_heap::mark_through_cards_for_segments (card_fn fn, BOOL relocating)
{
if (card_of(last_object) > card)
{
+ // cg means cross-generational
dprintf (3, ("Found %Id cg pointers", cg_pointers_found));
if (cg_pointers_found == 0)
{
@@ -27599,23 +27869,29 @@ void gc_heap::mark_through_cards_for_segments (card_fn fn, BOOL relocating)
n_card_set -= (card_of (last_object) - card);
total_cards_cleared += (card_of (last_object) - card);
}
- n_eph +=cg_pointers_found;
+
+ n_eph += cg_pointers_found;
cg_pointers_found = 0;
card = card_of (last_object);
}
+
if (card >= end_card)
{
- foundp = find_card (card_table, card, card_word_end, end_card);
+ // Find the first card that's set (between card and card_word_end)
+ foundp = find_card(card_table, card, card_word_end, end_card);
if (foundp)
{
- n_card_set+= end_card - card;
+ // We found card(s) set.
+ n_card_set += end_card - card;
start_address = max (beg, card_address (card));
}
+
limit = min (end, card_address (end_card));
}
- if ((!foundp) || (last_object >= end) || (card_address (card) >= end))
+
+ if (!foundp || (last_object >= end) || (card_address (card) >= end))
{
- if ((foundp) && (cg_pointers_found == 0))
+ if (foundp && (cg_pointers_found == 0))
{
dprintf(3,(" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card),
(size_t)end));
@@ -27623,8 +27899,10 @@ void gc_heap::mark_through_cards_for_segments (card_fn fn, BOOL relocating)
n_card_set -= (card_of (end) - card);
total_cards_cleared += (card_of (end) - card);
}
- n_eph +=cg_pointers_found;
+
+ n_eph += cg_pointers_found;
cg_pointers_found = 0;
+
if ((seg = heap_segment_next_in_range (seg)) != 0)
{
#ifdef BACKGROUND_GC
@@ -27644,17 +27922,17 @@ void gc_heap::mark_through_cards_for_segments (card_fn fn, BOOL relocating)
}
}
+ // We've found a card and will now go through the objects in it.
assert (card_set_p (card));
{
- uint8_t* o = last_object;
-
+ uint8_t* o = last_object;
o = find_first_object (start_address, last_object);
- //Never visit an object twice.
- assert (o >= last_object);
+ // Never visit an object twice.
+ assert (o >= last_object);
- //dprintf(3,("Considering card %Ix start object: %Ix, %Ix[ boundary: %Ix",
- dprintf(3, ("c: %Ix, o: %Ix, l: %Ix[ boundary: %Ix",
- card, (size_t)o, (size_t)limit, (size_t)gen_boundary));
+ //dprintf(3,("Considering card %Ix start object: %Ix, %Ix[ boundary: %Ix",
+ dprintf(3, ("c: %Ix, o: %Ix, l: %Ix[ boundary: %Ix",
+ card, (size_t)o, (size_t)limit, (size_t)gen_boundary));
while (o < limit)
{
@@ -30489,6 +30767,7 @@ CObjectHeader* gc_heap::allocate_large_object (size_t jsize, int64_t& alloc_byte
uint8_t* result = acontext.alloc_ptr;
assert ((size_t)(acontext.alloc_limit - acontext.alloc_ptr) == size);
+ alloc_bytes += size;
CObjectHeader* obj = (CObjectHeader*)result;
@@ -30525,7 +30804,6 @@ CObjectHeader* gc_heap::allocate_large_object (size_t jsize, int64_t& alloc_byte
assert (obj != 0);
assert ((size_t)obj == Align ((size_t)obj, align_const));
- alloc_bytes += acontext.alloc_bytes;
return obj;
}
@@ -30580,7 +30858,7 @@ BOOL gc_heap::large_object_marked (uint8_t* o, BOOL clearp)
return m;
}
-void gc_heap::walk_survivors_relocation (size_t profiling_context, record_surv_fn fn)
+void gc_heap::walk_survivors_relocation (void* profiling_context, record_surv_fn fn)
{
// Now walk the portion of memory that is actually being relocated.
walk_relocation (profiling_context, fn);
@@ -30593,7 +30871,7 @@ void gc_heap::walk_survivors_relocation (size_t profiling_context, record_surv_f
#endif //FEATURE_LOH_COMPACTION
}
-void gc_heap::walk_survivors_for_loh (size_t profiling_context, record_surv_fn fn)
+void gc_heap::walk_survivors_for_loh (void* profiling_context, record_surv_fn fn)
{
generation* gen = large_object_generation;
heap_segment* seg = heap_segment_rw (generation_start_segment (gen));;
@@ -30631,7 +30909,7 @@ void gc_heap::walk_survivors_for_loh (size_t profiling_context, record_surv_fn f
plug_end = o;
- fn (plug_start, plug_end, 0, profiling_context, FALSE, FALSE);
+ fn (plug_start, plug_end, 0, profiling_context, false, false);
}
else
{
@@ -31265,7 +31543,7 @@ void gc_heap::background_sweep()
seg = start_seg;
prev_seg = 0;
o = generation_allocation_start (gen);
- assert (method_table (o) == g_pFreeObjectMethodTable);
+ assert (method_table (o) == g_gc_pFreeObjectMethodTable);
align_const = get_alignment_constant (FALSE);
o = o + Align(size (o), align_const);
plug_end = o;
@@ -32164,45 +32442,18 @@ int GCHeap::m_CurStressObj = 0;
#endif // FEATURE_REDHAWK
#endif //FEATURE_PREMORTEM_FINALIZATION
-inline
-static void spin_lock ()
-{
- enter_spin_lock_noinstru (&m_GCLock);
-}
-inline
-void EnterAllocLock()
-{
-#if defined(_TARGET_X86_)
- __asm {
- inc dword ptr m_GCLock
- jz gotit
- call spin_lock
- gotit:
- }
-#else //_TARGET_X86_
- spin_lock();
-#endif //_TARGET_X86_
-}
-
-inline
-void LeaveAllocLock()
-{
- // Trick this out
- leave_spin_lock_noinstru (&m_GCLock);
-}
-
-class AllocLockHolder
+class NoGCRegionLockHolder
{
public:
- AllocLockHolder()
+ NoGCRegionLockHolder()
{
- EnterAllocLock();
+ enter_spin_lock_noinstru(&g_no_gc_lock);
}
- ~AllocLockHolder()
+ ~NoGCRegionLockHolder()
{
- LeaveAllocLock();
+ leave_spin_lock_noinstru(&g_no_gc_lock);
}
};
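NoGCRegionLockHolder is a scope guard: the constructor takes g_no_gc_lock and the destructor releases it, so every return path in StartNoGCRegion and EndNoGCRegion below unlocks correctly with no explicit unlock calls. A generic sketch of the same RAII shape, with std::mutex standing in for the runtime's spin lock:

    #include <cstdio>
    #include <mutex>

    std::mutex g_no_gc_lock_sketch;         // stands in for g_no_gc_lock

    int StartNoGCRegionSketch(bool fail_early)
    {
        std::lock_guard<std::mutex> lh(g_no_gc_lock_sketch); // ctor acquires
        if (fail_early)
            return -1;                      // dtor releases on this return...
        std::puts("no-GC region prepared");
        return 0;                           // ...and on this one
    }

    int main()
    {
        StartNoGCRegionSketch(true);
        StartNoGCRegionSketch(false);
        return 0;
    }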
@@ -32634,7 +32885,7 @@ void gc_heap::verify_partial ()
//dprintf (3, ("VOM: verifying member %Ix in obj %Ix", (size_t)*oo, o));
MethodTable *pMT = method_table (*oo);
- if (pMT == g_pFreeObjectMethodTable)
+ if (pMT == g_gc_pFreeObjectMethodTable)
{
free_ref_p = TRUE;
FATAL_GC_ERROR();
@@ -33068,12 +33319,12 @@ gc_heap::verify_heap (BOOL begin_gc_p)
}
}
- if (*((uint8_t**)curr_object) != (uint8_t *) g_pFreeObjectMethodTable)
+ if (*((uint8_t**)curr_object) != (uint8_t *) g_gc_pFreeObjectMethodTable)
{
#ifdef FEATURE_LOH_COMPACTION
if ((curr_gen_num == (max_generation+1)) && (prev_object != 0))
{
- assert (method_table (prev_object) == g_pFreeObjectMethodTable);
+ assert (method_table (prev_object) == g_gc_pFreeObjectMethodTable);
}
#endif //FEATURE_LOH_COMPACTION
@@ -33245,6 +33496,7 @@ void GCHeap::ValidateObjectMember (Object* obj)
{
dprintf (3, ("VOM: m: %Ix obj %Ix", (size_t)child_o, o));
MethodTable *pMT = method_table (child_o);
+ assert(pMT);
if (!pMT->SanityCheck()) {
dprintf (3, ("Bad member of %Ix %Ix",
(size_t)oo, (size_t)child_o));
@@ -33291,7 +33543,11 @@ HRESULT GCHeap::Shutdown ()
if (card_table_refcount (ct) == 0)
{
destroy_card_table (ct);
- g_gc_card_table = 0;
+ g_gc_card_table = nullptr;
+
+#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+ g_gc_card_bundle_table = nullptr;
+#endif
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
SoftwareWriteWatch::StaticClose();
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
@@ -33350,14 +33606,6 @@ HRESULT GCHeap::Init(size_t hn)
{
HRESULT hres = S_OK;
- //Initialize all of the instance members.
-
-#ifdef MULTIPLE_HEAPS
- m_GCLock = -1;
-#endif //MULTIPLE_HEAPS
-
- // Rest of the initialization
-
#ifdef MULTIPLE_HEAPS
if ((pGenGCHeap = gc_heap::make_gc_heap(this, (int)hn)) == 0)
hres = E_OUTOFMEMORY;
@@ -33382,6 +33630,8 @@ HRESULT GCHeap::Initialize ()
return E_FAIL;
}
+ g_gc_pFreeObjectMethodTable = GCToEEInterface::GetFreeObjectMethodTable();
+
//Initialize the static members.
#ifdef TRACE_GC
GcDuration = 0;
@@ -33405,6 +33655,8 @@ HRESULT GCHeap::Initialize ()
uint32_t nhp = ((nhp_from_config == 0) ? nhp_from_process :
(min (nhp_from_config, nhp_from_process)));
+ nhp = min (nhp, MAX_SUPPORTED_CPUS);
+
hr = gc_heap::initialize_gc (seg_size, large_seg_size /*LHEAP_ALLOC*/, nhp);
#else
hr = gc_heap::initialize_gc (seg_size, large_seg_size /*LHEAP_ALLOC*/);
@@ -33496,7 +33748,7 @@ HRESULT GCHeap::Initialize ()
////
// GC callback functions
-BOOL GCHeap::IsPromoted(Object* object)
+bool GCHeap::IsPromoted(Object* object)
{
#ifdef _DEBUG
((CObjectHeader*)object)->Validate();
@@ -33515,7 +33767,7 @@ BOOL GCHeap::IsPromoted(Object* object)
#ifdef BACKGROUND_GC
if (gc_heap::settings.concurrent)
{
- BOOL is_marked = (!((o < hp->background_saved_highest_address) && (o >= hp->background_saved_lowest_address))||
+ bool is_marked = (!((o < hp->background_saved_highest_address) && (o >= hp->background_saved_lowest_address))||
hp->background_marked (o));
return is_marked;
}
@@ -33556,11 +33808,11 @@ unsigned int GCHeap::WhichGeneration (Object* object)
return g;
}
-BOOL GCHeap::IsEphemeral (Object* object)
+bool GCHeap::IsEphemeral (Object* object)
{
uint8_t* o = (uint8_t*)object;
gc_heap* hp = gc_heap::heap_of (o);
- return hp->ephemeral_pointer_p (o);
+ return !!hp->ephemeral_pointer_p (o);
}
// Return NULL if can't find next object. When EE is not suspended,
@@ -33634,7 +33886,7 @@ BOOL GCHeap::IsInFrozenSegment (Object * object)
#endif //VERIFY_HEAP
// returns TRUE if the pointer is in one of the GC heaps.
-BOOL GCHeap::IsHeapPointer (void* vpObject, BOOL small_heap_only)
+bool GCHeap::IsHeapPointer (void* vpObject, bool small_heap_only)
{
STATIC_CONTRACT_SO_TOLERANT;
@@ -33805,7 +34057,7 @@ void GCHeap::Relocate (Object** ppObject, ScanContext* sc,
STRESS_LOG_ROOT_RELOCATE(ppObject, object, pheader, ((!(flags & GC_CALL_INTERIOR)) ? ((Object*)object)->GetGCSafeMethodTable() : 0));
}
-/*static*/ BOOL GCHeap::IsObjectInFixedHeap(Object *pObj)
+/*static*/ bool GCHeap::IsObjectInFixedHeap(Object *pObj)
{
// For now we simply look at the size of the object to determine if it in the
// fixed heap or not. If the bit indicating this gets set at some point
@@ -33851,10 +34103,11 @@ int StressRNG(int iMaxValue)
// free up object so that things will move and then do a GC
//return TRUE if GC actually happens, otherwise FALSE
-BOOL GCHeap::StressHeap(gc_alloc_context * context)
+bool GCHeap::StressHeap(gc_alloc_context * context)
{
#if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
alloc_context* acontext = static_cast<alloc_context*>(context);
+ assert(context != nullptr);
// if GC stress was dynamically disabled during this run we return FALSE
if (!GCStressPolicy::IsEnabled())
@@ -33936,9 +34189,6 @@ BOOL GCHeap::StressHeap(gc_alloc_context * context)
#ifndef MULTIPLE_HEAPS
static int32_t OneAtATime = -1;
- if (acontext == 0)
- acontext = generation_alloc_context (pGenGCHeap->generation_of(0));
-
// Only bother with this if the stress level is big enough and if nobody else is
// doing it right now. Note that some callers are inside the AllocLock and are
// guaranteed synchronized. But others are using AllocationContexts and have no
@@ -33954,11 +34204,11 @@ BOOL GCHeap::StressHeap(gc_alloc_context * context)
StringObject* str;
// If the current string is used up
- if (ObjectFromHandle(m_StressObjs[m_CurStressObj]) == 0)
+ if (HndFetchHandle(m_StressObjs[m_CurStressObj]) == 0)
{
// Populate handles with strings
int i = m_CurStressObj;
- while(ObjectFromHandle(m_StressObjs[i]) == 0)
+ while(HndFetchHandle(m_StressObjs[i]) == 0)
{
_ASSERTE(m_StressObjs[i] != 0);
unsigned strLen = (LARGE_OBJECT_SIZE - 32) / sizeof(WCHAR);
@@ -33990,7 +34240,7 @@ BOOL GCHeap::StressHeap(gc_alloc_context * context)
}
// Get the current string
- str = (StringObject*) OBJECTREFToObject(ObjectFromHandle(m_StressObjs[m_CurStressObj]));
+ str = (StringObject*) OBJECTREFToObject(HndFetchHandle(m_StressObjs[m_CurStressObj]));
if (str)
{
// Chop off the end of the string and form a new object out of it.
@@ -34061,122 +34311,7 @@ BOOL GCHeap::StressHeap(gc_alloc_context * context)
// Small Object Allocator
//
//
-Object *
-GCHeap::Alloc( size_t size, uint32_t flags REQD_ALIGN_DCL)
-{
- CONTRACTL {
- NOTHROW;
- GC_TRIGGERS;
- } CONTRACTL_END;
-
- TRIGGERSGC();
-
- Object* newAlloc = NULL;
-
-#ifdef TRACE_GC
-#ifdef COUNT_CYCLES
- AllocStart = GetCycleCount32();
- unsigned finish;
-#elif defined(ENABLE_INSTRUMENTATION)
- unsigned AllocStart = GetInstLogTime();
- unsigned finish;
-#endif //COUNT_CYCLES
-#endif //TRACE_GC
-
-#ifdef MULTIPLE_HEAPS
- //take the first heap....
- gc_heap* hp = gc_heap::g_heaps[0];
-#else
- gc_heap* hp = pGenGCHeap;
-#ifdef _PREFAST_
- // prefix complains about us dereferencing hp in wks build even though we only access static members
- // this way. not sure how to shut it up except for this ugly workaround:
- PREFIX_ASSUME(hp != NULL);
-#endif //_PREFAST_
-#endif //MULTIPLE_HEAPS
-
- {
- AllocLockHolder lh;
-
-#ifndef FEATURE_REDHAWK
- GCStress<gc_on_alloc>::MaybeTrigger(generation_alloc_context(hp->generation_of(0)));
-#endif // FEATURE_REDHAWK
-
- alloc_context* acontext = 0;
-
- if (size < LARGE_OBJECT_SIZE)
- {
- acontext = generation_alloc_context (hp->generation_of (0));
-
-#ifdef TRACE_GC
- AllocSmallCount++;
-#endif //TRACE_GC
- newAlloc = (Object*) hp->allocate (size + ComputeMaxStructAlignPad(requiredAlignment), acontext);
-#ifdef FEATURE_STRUCTALIGN
- newAlloc = (Object*) hp->pad_for_alignment ((uint8_t*) newAlloc, requiredAlignment, size, acontext);
-#endif // FEATURE_STRUCTALIGN
- // ASSERT (newAlloc);
- }
- else
- {
- acontext = generation_alloc_context (hp->generation_of (max_generation+1));
-
- newAlloc = (Object*) hp->allocate_large_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), acontext->alloc_bytes_loh);
-#ifdef FEATURE_STRUCTALIGN
- newAlloc = (Object*) hp->pad_for_alignment_large ((uint8_t*) newAlloc, requiredAlignment, size);
-#endif // FEATURE_STRUCTALIGN
- }
- }
-
- CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(newAlloc, size, flags & GC_ALLOC_FINALIZE);
-
-#ifdef TRACE_GC
-#ifdef COUNT_CYCLES
- finish = GetCycleCount32();
-#elif defined(ENABLE_INSTRUMENTATION)
- finish = GetInstLogTime();
-#endif //COUNT_CYCLES
- AllocDuration += finish - AllocStart;
- AllocCount++;
-#endif //TRACE_GC
- return newAlloc;
-}
-
-// Allocate small object with an alignment requirement of 8-bytes. Non allocation context version.
-Object *
-GCHeap::AllocAlign8( size_t size, uint32_t flags)
-{
-#ifdef FEATURE_64BIT_ALIGNMENT
- CONTRACTL {
- NOTHROW;
- GC_TRIGGERS;
- } CONTRACTL_END;
-
- Object* newAlloc = NULL;
-
- {
- AllocLockHolder lh;
-
-#ifdef MULTIPLE_HEAPS
- //take the first heap....
- gc_heap* hp = gc_heap::g_heaps[0];
-#else
- gc_heap* hp = pGenGCHeap;
-#endif //MULTIPLE_HEAPS
-
- newAlloc = AllocAlign8Common(hp, generation_alloc_context (hp->generation_of (0)), size, flags);
- }
-
- return newAlloc;
-#else
- UNREFERENCED_PARAMETER(size);
- UNREFERENCED_PARAMETER(flags);
- assert(!"should not call GCHeap::AllocAlign8 without FEATURE_64BIT_ALIGNMENT defined!");
- return nullptr;
-#endif //FEATURE_64BIT_ALIGNMENT
-}
-
-// Allocate small object with an alignment requirement of 8-bytes. Allocation context version.
+// Allocate small object with an alignment requirement of 8-bytes.
Object*
GCHeap::AllocAlign8(gc_alloc_context* ctx, size_t size, uint32_t flags )
{
@@ -34365,10 +34500,6 @@ GCHeap::AllocLHeap( size_t size, uint32_t flags REQD_ALIGN_DCL)
#endif //_PREFAST_
#endif //MULTIPLE_HEAPS
-#ifndef FEATURE_REDHAWK
- GCStress<gc_on_alloc>::MaybeTrigger(generation_alloc_context(hp->generation_of(0)));
-#endif // FEATURE_REDHAWK
-
alloc_context* acontext = generation_alloc_context (hp->generation_of (max_generation+1));
newAlloc = (Object*) hp->allocate_large_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), acontext->alloc_bytes_loh);
@@ -34470,7 +34601,7 @@ GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_
}
void
-GCHeap::FixAllocContext (gc_alloc_context* context, BOOL lockp, void* arg, void *heap)
+GCHeap::FixAllocContext (gc_alloc_context* context, bool lockp, void* arg, void *heap)
{
alloc_context* acontext = static_cast<alloc_context*>(context);
#ifdef MULTIPLE_HEAPS
@@ -34506,14 +34637,18 @@ GCHeap::FixAllocContext (gc_alloc_context* context, BOOL lockp, void* arg, void
}
Object*
-GCHeap::GetContainingObject (void *pInteriorPtr)
+GCHeap::GetContainingObject (void *pInteriorPtr, bool fCollectedGenOnly)
{
uint8_t *o = (uint8_t*)pInteriorPtr;
gc_heap* hp = gc_heap::heap_of (o);
- if (o >= hp->lowest_address && o < hp->highest_address)
+
+ uint8_t* lowest = (fCollectedGenOnly ? hp->gc_low : hp->lowest_address);
+ uint8_t* highest = (fCollectedGenOnly ? hp->gc_high : hp->highest_address);
+
+ if (o >= lowest && o < highest)
{
- o = hp->find_object (o, hp->gc_low);
+ o = hp->find_object (o, lowest);
}
else
{
@@ -34544,7 +34679,7 @@ BOOL should_collect_optimized (dynamic_data* dd, BOOL low_memory_p)
// API to ensure that a complete new garbage collection takes place
//
HRESULT
-GCHeap::GarbageCollect (int generation, BOOL low_memory_p, int mode)
+GCHeap::GarbageCollect (int generation, bool low_memory_p, int mode)
{
#if defined(BIT64)
if (low_memory_p)
@@ -35375,7 +35510,7 @@ void GCHeap::SetLOHCompactionMode (int newLOHCompactionyMode)
#endif //FEATURE_LOH_COMPACTION
}
-BOOL GCHeap::RegisterForFullGCNotification(uint32_t gen2Percentage,
+bool GCHeap::RegisterForFullGCNotification(uint32_t gen2Percentage,
uint32_t lohPercentage)
{
#ifdef MULTIPLE_HEAPS
@@ -35398,7 +35533,7 @@ BOOL GCHeap::RegisterForFullGCNotification(uint32_t gen2Percentage,
return TRUE;
}
-BOOL GCHeap::CancelFullGCNotification()
+bool GCHeap::CancelFullGCNotification()
{
pGenGCHeap->fgn_maxgen_percent = 0;
pGenGCHeap->fgn_loh_percent = 0;
@@ -35425,9 +35560,9 @@ int GCHeap::WaitForFullGCComplete(int millisecondsTimeout)
return result;
}
-int GCHeap::StartNoGCRegion(uint64_t totalSize, BOOL lohSizeKnown, uint64_t lohSize, BOOL disallowFullBlockingGC)
+int GCHeap::StartNoGCRegion(uint64_t totalSize, bool lohSizeKnown, uint64_t lohSize, bool disallowFullBlockingGC)
{
- AllocLockHolder lh;
+ NoGCRegionLockHolder lh;
dprintf (1, ("begin no gc called"));
start_no_gc_region_status status = gc_heap::prepare_for_no_gc_region (totalSize, lohSizeKnown, lohSize, disallowFullBlockingGC);
@@ -35445,7 +35580,7 @@ int GCHeap::StartNoGCRegion(uint64_t totalSize, BOOL lohSizeKnown, uint64_t lohS
int GCHeap::EndNoGCRegion()
{
- AllocLockHolder lh;
+ NoGCRegionLockHolder lh;
return (int)gc_heap::end_no_gc_region();
}
@@ -35503,7 +35638,7 @@ HRESULT GCHeap::GetGcCounters(int gen, gc_counters* counters)
}
// Get the segment size to use, making sure it conforms.
-size_t GCHeap::GetValidSegmentSize(BOOL large_seg)
+size_t GCHeap::GetValidSegmentSize(bool large_seg)
{
return get_valid_segment_size (large_seg);
}
@@ -35627,15 +35762,15 @@ size_t GCHeap::GetFinalizablePromotedCount()
#endif //MULTIPLE_HEAPS
}
-BOOL GCHeap::FinalizeAppDomain(AppDomain *pDomain, BOOL fRunFinalizers)
+bool GCHeap::FinalizeAppDomain(AppDomain *pDomain, bool fRunFinalizers)
{
#ifdef MULTIPLE_HEAPS
- BOOL foundp = FALSE;
+ bool foundp = false;
for (int hn = 0; hn < gc_heap::n_heaps; hn++)
{
gc_heap* hp = gc_heap::g_heaps [hn];
if (hp->finalize_queue->FinalizeAppDomain (pDomain, fRunFinalizers))
- foundp = TRUE;
+ foundp = true;
}
return foundp;
@@ -35644,13 +35779,13 @@ BOOL GCHeap::FinalizeAppDomain(AppDomain *pDomain, BOOL fRunFinalizers)
#endif //MULTIPLE_HEAPS
}
-BOOL GCHeap::ShouldRestartFinalizerWatchDog()
+bool GCHeap::ShouldRestartFinalizerWatchDog()
{
// This condition was historically used as part of the condition to detect finalizer thread timeouts
return gc_heap::gc_lock.lock != -1;
}
-void GCHeap::SetFinalizeQueueForShutdown(BOOL fHasLock)
+void GCHeap::SetFinalizeQueueForShutdown(bool fHasLock)
{
#ifdef MULTIPLE_HEAPS
for (int hn = 0; hn < gc_heap::n_heaps; hn++)
@@ -35690,9 +35825,6 @@ void GCHeap::SetFinalizationRun (Object* obj)
((CObjectHeader*)obj)->GetHeader()->SetBit(BIT_SBLK_FINALIZER_RUN);
}
-#endif // FEATURE_PREMORTEM_FINALIZATION
-
-#ifdef FEATURE_PREMORTEM_FINALIZATION
//--------------------------------------------------------------------
//
@@ -35967,43 +36099,15 @@ CFinalize::FinalizeSegForAppDomain (AppDomain *pDomain,
// if it has the index we are looking for. If the methodtable is null, it can't be from the
// unloading domain, so skip it.
if (method_table(obj) == NULL)
+ {
continue;
+ }
- // eagerly finalize all objects except those that may be agile.
- if (obj->GetAppDomainIndex() != pDomain->GetIndex())
+ // does the EE actually want us to finalize this object?
+ if (!GCToEEInterface::ShouldFinalizeObjectForUnload(pDomain, obj))
+ {
continue;
-
-#ifndef FEATURE_REDHAWK
- if (method_table(obj)->IsAgileAndFinalizable())
- {
- // If an object is both agile & finalizable, we leave it in the
- // finalization queue during unload. This is OK, since it's agile.
- // Right now only threads can be this way, so if that ever changes, change
- // the assert to just continue if not a thread.
- _ASSERTE(method_table(obj) == g_pThreadClass);
-
- if (method_table(obj) == g_pThreadClass)
- {
- // However, an unstarted thread should be finalized. It could be holding a delegate
- // in the domain we want to unload. Once the thread has been started, its
- // delegate is cleared so only unstarted threads are a problem.
- Thread *pThread = ((THREADBASEREF)ObjectToOBJECTREF(obj))->GetInternal();
- if (! pThread || ! pThread->IsUnstarted())
- {
- // This appdomain is going to be gone soon so let us assign
- // it the appdomain that's guaranteed to exist
- // The object is agile and the delegate should be null so we can do it
- obj->GetHeader()->ResetAppDomainIndexNoFailure(SystemDomain::System()->DefaultDomain()->GetIndex());
- continue;
- }
- }
- else
- {
- obj->GetHeader()->ResetAppDomainIndexNoFailure(SystemDomain::System()->DefaultDomain()->GetIndex());
- continue;
- }
}
-#endif //!FEATURE_REDHAWK
if (!fRunFinalizers || (obj->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN)
{
@@ -36039,10 +36143,10 @@ CFinalize::FinalizeSegForAppDomain (AppDomain *pDomain,
return finalizedFound;
}
-BOOL
-CFinalize::FinalizeAppDomain (AppDomain *pDomain, BOOL fRunFinalizers)
+bool
+CFinalize::FinalizeAppDomain (AppDomain *pDomain, bool fRunFinalizers)
{
- BOOL finalizedFound = FALSE;
+ bool finalizedFound = false;
unsigned int startSeg = gen_segment (max_generation);
@@ -36052,7 +36156,7 @@ CFinalize::FinalizeAppDomain (AppDomain *pDomain, BOOL fRunFinalizers)
{
if (FinalizeSegForAppDomain (pDomain, fRunFinalizers, Seg))
{
- finalizedFound = TRUE;
+ finalizedFound = true;
}
}
@@ -36162,16 +36266,11 @@ CFinalize::ScanForFinalization (promote_func* pfn, int gen, BOOL mark_only_p,
assert (method_table(obj)->HasFinalizer());
-#ifndef FEATURE_REDHAWK
- if (method_table(obj) == pWeakReferenceMT || method_table(obj)->GetCanonicalMethodTable() == pWeakReferenceOfTCanonMT)
+ if (GCToEEInterface::EagerFinalized(obj))
{
- //destruct the handle right there.
- FinalizeWeakReference (obj);
MoveItem (i, Seg, FreeList);
}
- else
-#endif //!FEATURE_REDHAWK
- if ((obj->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN)
+ else if ((obj->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN)
{
//remove the object because we don't want to
//run the finalizer
@@ -36460,13 +36559,13 @@ void GCHeap::DiagWalkObject (Object* obj, walk_fn fn, void* context)
}
}
-void GCHeap::DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, size_t diag_context, walk_surv_type type)
+void GCHeap::DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, void* diag_context, walk_surv_type type)
{
gc_heap* hp = (gc_heap*)gc_context;
hp->walk_survivors (fn, diag_context, type);
}
-void GCHeap::DiagWalkHeap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p)
+void GCHeap::DiagWalkHeap (walk_fn fn, void* context, int gen_number, bool walk_large_object_heap_p)
{
gc_heap::walk_heap (fn, context, gen_number, walk_large_object_heap_p);
}
@@ -36610,7 +36709,7 @@ inline void testGCShadow(Object** ptr)
if (*ptr != 0 && (uint8_t*) shadow < g_GCShadowEnd && *ptr != *shadow)
{
- // If you get this assertion, someone updated a GC poitner in the heap without
+ // If you get this assertion, someone updated a GC pointer in the heap without
// using the write barrier. To find out who, check the value of
// dd_collection_count (dynamic_data_of (0)). Also
// note the value of 'ptr'. Rerun the App that the previous GC just occurred.
@@ -36786,7 +36885,7 @@ void GCHeap::TemporaryDisableConcurrentGC()
#endif //BACKGROUND_GC
}
-BOOL GCHeap::IsConcurrentGCEnabled()
+bool GCHeap::IsConcurrentGCEnabled()
{
#ifdef BACKGROUND_GC
return (gc_heap::gc_can_use_concurrent && !(gc_heap::temp_disable_concurrent_p));
@@ -36794,3 +36893,52 @@ BOOL GCHeap::IsConcurrentGCEnabled()
return FALSE;
#endif //BACKGROUND_GC
}
+
+void GCHeap::SetFinalizeRunOnShutdown(bool value)
+{
+ g_fFinalizerRunOnShutDown = value;
+}
+
+void PopulateDacVars(GcDacVars *gcDacVars)
+{
+#ifndef DACCESS_COMPILE
+ assert(gcDacVars != nullptr);
+ *gcDacVars = {};
+ gcDacVars->major_version_number = 1;
+ gcDacVars->minor_version_number = 0;
+ gcDacVars->built_with_svr = &g_built_with_svr_gc;
+ gcDacVars->build_variant = &g_build_variant;
+ gcDacVars->gc_structures_invalid_cnt = const_cast<int32_t*>(&GCScan::m_GcStructuresInvalidCnt);
+ gcDacVars->generation_size = sizeof(generation);
+ gcDacVars->max_gen = &g_max_generation;
+#ifndef MULTIPLE_HEAPS
+ gcDacVars->mark_array = &gc_heap::mark_array;
+ gcDacVars->ephemeral_heap_segment = reinterpret_cast<dac_heap_segment**>(&gc_heap::ephemeral_heap_segment);
+ gcDacVars->current_c_gc_state = const_cast<c_gc_state*>(&gc_heap::current_c_gc_state);
+ gcDacVars->saved_sweep_ephemeral_seg = reinterpret_cast<dac_heap_segment**>(&gc_heap::saved_sweep_ephemeral_seg);
+ gcDacVars->saved_sweep_ephemeral_start = &gc_heap::saved_sweep_ephemeral_start;
+ gcDacVars->background_saved_lowest_address = &gc_heap::background_saved_lowest_address;
+ gcDacVars->background_saved_highest_address = &gc_heap::background_saved_highest_address;
+ gcDacVars->alloc_allocated = &gc_heap::alloc_allocated;
+ gcDacVars->next_sweep_obj = &gc_heap::next_sweep_obj;
+ gcDacVars->oom_info = &gc_heap::oom_info;
+ gcDacVars->finalize_queue = reinterpret_cast<dac_finalize_queue**>(&gc_heap::finalize_queue);
+ gcDacVars->generation_table = reinterpret_cast<dac_generation**>(&gc_heap::generation_table);
+#ifdef GC_CONFIG_DRIVEN
+ gcDacVars->gc_global_mechanisms = reinterpret_cast<size_t**>(&gc_global_mechanisms);
+ gcDacVars->interesting_data_per_heap = reinterpret_cast<size_t**>(&gc_heap::interesting_data_per_heap);
+ gcDacVars->compact_reasons_per_heap = reinterpret_cast<size_t**>(&gc_heap::compact_reasons_per_heap);
+ gcDacVars->expand_mechanisms_per_heap = reinterpret_cast<size_t**>(&gc_heap::expand_mechanisms_per_heap);
+ gcDacVars->interesting_mechanism_bits_per_heap = reinterpret_cast<size_t**>(&gc_heap::interesting_mechanism_bits_per_heap);
+#endif // GC_CONFIG_DRIVEN
+#ifdef HEAP_ANALYZE
+ gcDacVars->internal_root_array = &gc_heap::internal_root_array;
+ gcDacVars->internal_root_array_index = &gc_heap::internal_root_array_index;
+ gcDacVars->heap_analyze_success = &gc_heap::heap_analyze_success;
+#endif // HEAP_ANALYZE
+#else
+ gcDacVars->n_heaps = &gc_heap::n_heaps;
+ gcDacVars->g_heaps = reinterpret_cast<dac_gc_heap***>(&gc_heap::g_heaps);
+#endif // MULTIPLE_HEAPS
+#endif // DACCESS_COMPILE
+}
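PopulateDacVars hands the debugger a table of raw pointers into GC statics, stamped with major/minor version numbers so an out-of-process reader can reject layouts it does not understand. A hedged sketch of a DAC-side consumer; the exact version-check convention is an assumption, and the include is assumed to supply the GcDacVars definition:

    #include <cstdint>
    #include "gcinterface.h"   // assumed to define GcDacVars

    bool TryReadGcState(const GcDacVars& vars, int32_t& invalid_cnt)
    {
        // A major version bump is taken to mean an incompatible layout change.
        if (vars.major_version_number != 1)
            return false;

        // The table's pointers alias live GC statics in the target process.
        invalid_cnt = *vars.gc_structures_invalid_cnt;
        return true;
    }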
diff --git a/src/gc/gc.h b/src/gc/gc.h
index 7332e42885..a661c311ab 100644
--- a/src/gc/gc.h
+++ b/src/gc/gc.h
@@ -31,6 +31,11 @@ Module Name:
#ifdef FEATURE_STANDALONE_GC
#include "gcenv.ee.standalone.inl"
+
+// GCStress does not currently work with Standalone GC
+#ifdef STRESS_HEAP
+ #undef STRESS_HEAP
+#endif // STRESS_HEAP
#endif // FEATURE_STANDALONE_GC
/*
@@ -41,21 +46,6 @@ typedef void enum_func (Object*);
// callback functions for heap walkers
typedef void object_callback_func(void * pvContext, void * pvDataLoc);
-/*!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!*/
-/* If you modify failure_get_memory and */
-/* oom_reason be sure to make the corresponding */
-/* changes in toolbox\sos\strike\strike.cpp. */
-/*!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!*/
-enum failure_get_memory
-{
- fgm_no_failure = 0,
- fgm_reserve_segment = 1,
- fgm_commit_segment_beg = 2,
- fgm_commit_eph_segment = 3,
- fgm_grow_table = 4,
- fgm_commit_table = 5
-};
-
struct fgm_history
{
failure_get_memory fgm;
@@ -71,17 +61,6 @@ struct fgm_history
}
};
-enum oom_reason
-{
- oom_no_failure = 0,
- oom_budget = 1,
- oom_cant_commit = 2,
- oom_cant_reserve = 3,
- oom_loh = 4,
- oom_low_mem = 5,
- oom_unproductive_full_gc = 6
-};
-
// TODO : it would be easier to make this an ORed value
enum gc_reason
{
@@ -100,19 +79,6 @@ enum gc_reason
reason_max
};
-struct oom_history
-{
- oom_reason reason;
- size_t alloc_size;
- uint8_t* reserved;
- uint8_t* allocated;
- size_t gc_index;
- failure_get_memory fgm;
- size_t size;
- size_t available_pagefile_mb;
- BOOL loh_p;
-};
-
/* forward declarations */
class CObjectHeader;
class Object;
@@ -121,10 +87,11 @@ class IGCHeapInternal;
/* misc defines */
#define LARGE_OBJECT_SIZE ((size_t)(85000))
+#define max_generation 2
#ifdef GC_CONFIG_DRIVEN
#define MAX_GLOBAL_GC_MECHANISMS_COUNT 6
-GARY_DECL(size_t, gc_global_mechanisms, MAX_GLOBAL_GC_MECHANISMS_COUNT);
+extern size_t gc_global_mechanisms[MAX_GLOBAL_GC_MECHANISMS_COUNT];
#endif //GC_CONFIG_DRIVEN
#ifdef DACCESS_COMPILE
@@ -137,10 +104,18 @@ class DacHeapWalker;
#define MP_LOCKS
+#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+extern "C" uint32_t* g_gc_card_bundle_table;
+#endif
+
extern "C" uint32_t* g_gc_card_table;
extern "C" uint8_t* g_gc_lowest_address;
extern "C" uint8_t* g_gc_highest_address;
-extern "C" bool g_fFinalizerRunOnShutDown;
+extern "C" GCHeapType g_gc_heap_type;
+extern "C" uint32_t g_max_generation;
+extern "C" MethodTable* g_gc_pFreeObjectMethodTable;
+
+::IGCHandleTable* CreateGCHandleTable();
namespace WKS {
::IGCHeapInternal* CreateGCHeap();
@@ -248,32 +223,25 @@ public:
unsigned GetMaxGeneration()
{
- return IGCHeap::maxGeneration;
+ return max_generation;
}
- BOOL IsValidSegmentSize(size_t cbSize)
+ bool IsValidSegmentSize(size_t cbSize)
{
//Must be aligned on a Mb and greater than 4Mb
return (((cbSize & (1024*1024-1)) ==0) && (cbSize >> 22));
}
- BOOL IsValidGen0MaxSize(size_t cbSize)
+ bool IsValidGen0MaxSize(size_t cbSize)
{
return (cbSize >= 64*1024);
}
BOOL IsLargeObject(MethodTable *mt)
{
- WRAPPER_NO_CONTRACT;
-
return mt->GetBaseSize() >= LARGE_OBJECT_SIZE;
}
- void SetFinalizeRunOnShutdown(bool value)
- {
- g_fFinalizerRunOnShutDown = value;
- }
-
protected:
public:
#if defined(FEATURE_BASICFREEZE) && defined(VERIFY_HEAP)
@@ -289,30 +257,24 @@ void TouchPages(void * pStart, size_t cb);
void updateGCShadow(Object** ptr, Object* val);
#endif
-// the method table for the WeakReference class
-extern MethodTable *pWeakReferenceMT;
-// The canonical method table for WeakReference<T>
-extern MethodTable *pWeakReferenceOfTCanonMT;
-extern void FinalizeWeakReference(Object * obj);
-
// The single GC heap instance, shared with the VM.
extern IGCHeapInternal* g_theGCHeap;
+// The single GC handle table instance, shared with the VM.
+extern IGCHandleTable* g_theGCHandleTable;
+
#ifndef DACCESS_COMPILE
-inline BOOL IsGCInProgress(bool bConsiderGCStart = FALSE)
+inline bool IsGCInProgress(bool bConsiderGCStart = false)
{
- WRAPPER_NO_CONTRACT;
-
return g_theGCHeap != nullptr ? g_theGCHeap->IsGCInProgressHelper(bConsiderGCStart) : false;
}
#endif // DACCESS_COMPILE
-inline BOOL IsServerHeap()
+inline bool IsServerHeap()
{
- LIMITED_METHOD_CONTRACT;
#ifdef FEATURE_SVR_GC
- _ASSERTE(IGCHeap::gcHeapType != IGCHeap::GC_HEAP_INVALID);
- return (IGCHeap::gcHeapType == IGCHeap::GC_HEAP_SVR);
+ assert(g_gc_heap_type != GC_HEAP_INVALID);
+ return g_gc_heap_type == GC_HEAP_SVR;
#else // FEATURE_SVR_GC
return false;
#endif // FEATURE_SVR_GC
diff --git a/src/gc/gccommon.cpp b/src/gc/gccommon.cpp
index 133f05e490..f931597667 100644
--- a/src/gc/gccommon.cpp
+++ b/src/gc/gccommon.cpp
@@ -14,20 +14,15 @@
#include "gcenv.h"
#include "gc.h"
-#ifdef FEATURE_SVR_GC
-SVAL_IMPL_INIT(uint32_t,IGCHeap,gcHeapType,IGCHeap::GC_HEAP_INVALID);
-#endif // FEATURE_SVR_GC
-
-SVAL_IMPL_INIT(uint32_t,IGCHeap,maxGeneration,2);
-
IGCHeapInternal* g_theGCHeap;
+IGCHandleTable* g_theGCHandleTable;
#ifdef FEATURE_STANDALONE_GC
IGCToCLR* g_theGCToCLR;
#endif // FEATURE_STANDALONE_GC
#ifdef GC_CONFIG_DRIVEN
-GARY_IMPL(size_t, gc_global_mechanisms, MAX_GLOBAL_GC_MECHANISMS_COUNT);
+size_t gc_global_mechanisms[MAX_GLOBAL_GC_MECHANISMS_COUNT];
#endif //GC_CONFIG_DRIVEN
#ifndef DACCESS_COMPILE
@@ -39,16 +34,21 @@ uint8_t* g_shadow_lowest_address = NULL;
#endif
uint32_t* g_gc_card_table;
+
+#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+uint32_t* g_gc_card_bundle_table;
+#endif
+
uint8_t* g_gc_lowest_address = 0;
uint8_t* g_gc_highest_address = 0;
-bool g_fFinalizerRunOnShutDown = false;
-
-VOLATILE(int32_t) m_GCLock = -1;
+GCHeapType g_gc_heap_type = GC_HEAP_INVALID;
+uint32_t g_max_generation = max_generation;
+MethodTable* g_gc_pFreeObjectMethodTable = nullptr;
#ifdef GC_CONFIG_DRIVEN
void record_global_mechanism (int mech_index)
{
- (gc_global_mechanisms[mech_index])++;
+ (gc_global_mechanisms[mech_index])++;
}
#endif //GC_CONFIG_DRIVEN
@@ -119,9 +119,9 @@ void InitializeHeapType(bool bServerHeap)
{
LIMITED_METHOD_CONTRACT;
#ifdef FEATURE_SVR_GC
- IGCHeap::gcHeapType = bServerHeap ? IGCHeap::GC_HEAP_SVR : IGCHeap::GC_HEAP_WKS;
+ g_gc_heap_type = bServerHeap ? GC_HEAP_SVR : GC_HEAP_WKS;
#ifdef WRITE_BARRIER_CHECK
- if (IGCHeap::gcHeapType == IGCHeap::GC_HEAP_SVR)
+ if (g_gc_heap_type == GC_HEAP_SVR)
{
g_GCShadow = 0;
g_GCShadowEnd = 0;
@@ -133,18 +133,55 @@ void InitializeHeapType(bool bServerHeap)
#endif // FEATURE_SVR_GC
}
-IGCHeap* InitializeGarbageCollector(IGCToCLR* clrToGC)
+namespace WKS
+{
+ extern void PopulateDacVars(GcDacVars* dacVars);
+}
+
+namespace SVR
+{
+ extern void PopulateDacVars(GcDacVars* dacVars);
+}
+
+bool InitializeGarbageCollector(IGCToCLR* clrToGC, IGCHeap** gcHeap, IGCHandleTable** gcHandleTable, GcDacVars* gcDacVars)
{
LIMITED_METHOD_CONTRACT;
IGCHeapInternal* heap;
+
+ assert(gcDacVars != nullptr);
+ assert(gcHeap != nullptr);
+ assert(gcHandleTable != nullptr);
+
+ IGCHandleTable* handleTable = CreateGCHandleTable();
+ if (handleTable == nullptr)
+ {
+ return false;
+ }
+
#ifdef FEATURE_SVR_GC
- assert(IGCHeap::gcHeapType != IGCHeap::GC_HEAP_INVALID);
- heap = IGCHeap::gcHeapType == IGCHeap::GC_HEAP_SVR ? SVR::CreateGCHeap() : WKS::CreateGCHeap();
+ assert(g_gc_heap_type != GC_HEAP_INVALID);
+
+ if (g_gc_heap_type == GC_HEAP_SVR)
+ {
+ heap = SVR::CreateGCHeap();
+ SVR::PopulateDacVars(gcDacVars);
+ }
+ else
+ {
+ heap = WKS::CreateGCHeap();
+ WKS::PopulateDacVars(gcDacVars);
+ }
#else
heap = WKS::CreateGCHeap();
+ WKS::PopulateDacVars(gcDacVars);
#endif
+ if (heap == nullptr)
+ {
+ return false;
+ }
+
g_theGCHeap = heap;
#ifdef FEATURE_STANDALONE_GC
@@ -155,7 +192,9 @@ IGCHeap* InitializeGarbageCollector(IGCToCLR* clrToGC)
assert(clrToGC == nullptr);
#endif
- return heap;
+ *gcHandleTable = handleTable;
+ *gcHeap = heap;
+ return true;
}
#endif // !DACCESS_COMPILE
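The rewritten entry point returns its three products through out-parameters and reports failure as a bool, so a caller creates both objects and then initializes them. A hedged sketch of the EE-side call sequence; it assumes the Initialize methods are exposed on the interfaces, as the GCHeap and GCHandleTable implementations in this patch suggest:

    #include "gcinterface.h"

    HRESULT StartupGC()
    {
        IGCHeap* gcHeap = nullptr;
        IGCHandleTable* gcHandleTable = nullptr;
        GcDacVars dacVars = {};

        // A null clrToGC selects the non-standalone (in-process EE) path.
        if (!InitializeGarbageCollector(nullptr, &gcHeap, &gcHandleTable, &dacVars))
            return E_OUTOFMEMORY;   // heap or handle table creation failed

        if (!gcHandleTable->Initialize())
            return E_OUTOFMEMORY;

        return gcHeap->Initialize();
    }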
diff --git a/src/gc/gcee.cpp b/src/gc/gcee.cpp
index c93cc91b57..889f940973 100644
--- a/src/gc/gcee.cpp
+++ b/src/gc/gcee.cpp
@@ -381,12 +381,12 @@ size_t GCHeap::GetNow()
return GetHighPrecisionTimeStamp();
}
-BOOL GCHeap::IsGCInProgressHelper (BOOL bConsiderGCStart)
+bool GCHeap::IsGCInProgressHelper (bool bConsiderGCStart)
{
return GcInProgress || (bConsiderGCStart? VolatileLoad(&gc_heap::gc_started) : FALSE);
}
-uint32_t GCHeap::WaitUntilGCComplete(BOOL bConsiderGCStart)
+uint32_t GCHeap::WaitUntilGCComplete(bool bConsiderGCStart)
{
if (bConsiderGCStart)
{
@@ -408,12 +408,8 @@ BlockAgain:
dwWaitResult = WaitForGCEvent->Wait(DETECT_DEADLOCK_TIMEOUT, FALSE );
if (dwWaitResult == WAIT_TIMEOUT) {
- // Even in retail, stop in the debugger if available. Ideally, the
- // following would use DebugBreak, but debspew.h makes this a null
- // macro in retail. Note that in debug, we don't use the debspew.h
- // macros because these take a critical section that may have been
- // taken by a suspended thread.
- FreeBuildDebugBreak();
+ // Even in retail, stop in the debugger if available.
+ GCToOSInterface::DebugBreak();
goto BlockAgain;
}
@@ -427,7 +423,7 @@ BlockAgain:
return dwWaitResult;
}
-void GCHeap::SetGCInProgress(BOOL fInProgress)
+void GCHeap::SetGCInProgress(bool fInProgress)
{
GcInProgress = fInProgress;
}
@@ -445,12 +441,12 @@ void GCHeap::WaitUntilConcurrentGCComplete()
#endif //BACKGROUND_GC
}
-BOOL GCHeap::IsConcurrentGCInProgress()
+bool GCHeap::IsConcurrentGCInProgress()
{
#ifdef BACKGROUND_GC
- return pGenGCHeap->settings.concurrent;
+ return !!pGenGCHeap->settings.concurrent;
#else
- return FALSE;
+ return false;
#endif //BACKGROUND_GC
}
@@ -681,6 +677,11 @@ void GCHeap::UnregisterFrozenSegment(segment_handle seg)
#endif // FEATURE_BASICFREEZE
}
+bool GCHeap::RuntimeStructuresValid()
+{
+ return GCScan::GetGcRuntimeStructuresValid();
+}
+
#endif // !DACCESS_COMPILE
diff --git a/src/gc/gceesvr.cpp b/src/gc/gceesvr.cpp
index aacae486f5..2e6dbe2d08 100644
--- a/src/gc/gceesvr.cpp
+++ b/src/gc/gceesvr.cpp
@@ -12,9 +12,11 @@
#include "gc.h"
#include "gcscan.h"
+#include "gchandletableimpl.h"
#define SERVER_GC 1
+
namespace SVR {
#include "gcimpl.h"
#include "gcee.cpp"
diff --git a/src/gc/gceewks.cpp b/src/gc/gceewks.cpp
index 72a7d3bdb9..f23038f012 100644
--- a/src/gc/gceewks.cpp
+++ b/src/gc/gceewks.cpp
@@ -10,6 +10,7 @@
#include "gc.h"
#include "gcscan.h"
+#include "gchandletableimpl.h"
#ifdef SERVER_GC
#undef SERVER_GC
diff --git a/src/gc/gcenv.ee.standalone.inl b/src/gc/gcenv.ee.standalone.inl
index 31f3d1d8da..f6954fc476 100644
--- a/src/gc/gcenv.ee.standalone.inl
+++ b/src/gc/gcenv.ee.standalone.inl
@@ -207,6 +207,35 @@ ALWAYS_INLINE void GCToEEInterface::EnableFinalization(bool foundFinalizers)
g_theGCToCLR->EnableFinalization(foundFinalizers);
}
+ALWAYS_INLINE void GCToEEInterface::HandleFatalError(unsigned int exitCode)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->HandleFatalError(exitCode);
+}
+
+ALWAYS_INLINE bool GCToEEInterface::ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj)
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->ShouldFinalizeObjectForUnload(pDomain, obj);
+}
+
+ALWAYS_INLINE bool GCToEEInterface::ForceFullGCToBeBlocking()
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->ForceFullGCToBeBlocking();
+}
+
+ALWAYS_INLINE bool GCToEEInterface::EagerFinalized(Object* obj)
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->EagerFinalized(obj);
+}
+
+ALWAYS_INLINE MethodTable* GCToEEInterface::GetFreeObjectMethodTable()
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->GetFreeObjectMethodTable();
+}
#undef ALWAYS_INLINE
#endif // __GCTOENV_EE_STANDALONE_INL__
diff --git a/src/gc/gchandletable.cpp b/src/gc/gchandletable.cpp
new file mode 100644
index 0000000000..82ab269861
--- /dev/null
+++ b/src/gc/gchandletable.cpp
@@ -0,0 +1,111 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+//
+
+#include "common.h"
+#include "gcenv.h"
+#include "gchandletableimpl.h"
+#include "objecthandle.h"
+
+IGCHandleTable* CreateGCHandleTable()
+{
+ return new(nothrow) GCHandleTable();
+}
+
+bool GCHandleTable::Initialize()
+{
+ return Ref_Initialize();
+}
+
+void GCHandleTable::Shutdown()
+{
+ Ref_Shutdown();
+}
+
+void* GCHandleTable::GetGlobalHandleStore()
+{
+ return (void*)g_HandleTableMap.pBuckets[0];
+}
+
+void* GCHandleTable::CreateHandleStore(void* context)
+{
+#ifndef FEATURE_REDHAWK
+ return (void*)::Ref_CreateHandleTableBucket(ADIndex((DWORD)(uintptr_t)context));
+#else
+ assert("CreateHandleStore is not implemented when FEATURE_REDHAWK is defined!");
+ return nullptr;
+#endif
+}
+
+void* GCHandleTable::GetHandleContext(OBJECTHANDLE handle)
+{
+ return (void*)((uintptr_t)::HndGetHandleTableADIndex(::HndGetHandleTable(handle)).m_dwIndex);
+}
+
+void GCHandleTable::DestroyHandleStore(void* store)
+{
+ Ref_DestroyHandleTableBucket((HandleTableBucket*) store);
+}
+
+void GCHandleTable::UprootHandleStore(void* store)
+{
+ Ref_RemoveHandleTableBucket((HandleTableBucket*) store);
+}
+
+bool GCHandleTable::ContainsHandle(void* store, OBJECTHANDLE handle)
+{
+ return ((HandleTableBucket*)store)->Contains(handle);
+}
+
+OBJECTHANDLE GCHandleTable::CreateHandleOfType(void* store, Object* object, int type)
+{
+ HHANDLETABLE handletable = ((HandleTableBucket*)store)->pTable[GetCurrentThreadHomeHeapNumber()];
+ return ::HndCreateHandle(handletable, type, ObjectToOBJECTREF(object));
+}
+
+OBJECTHANDLE GCHandleTable::CreateHandleOfType(void* store, Object* object, int type, int heapToAffinitizeTo)
+{
+ HHANDLETABLE handletable = ((HandleTableBucket*)store)->pTable[heapToAffinitizeTo];
+ return ::HndCreateHandle(handletable, type, ObjectToOBJECTREF(object));
+}
+
+OBJECTHANDLE GCHandleTable::CreateGlobalHandleOfType(Object* object, int type)
+{
+ return ::HndCreateHandle(g_HandleTableMap.pBuckets[0]->pTable[GetCurrentThreadHomeHeapNumber()], type, ObjectToOBJECTREF(object));
+}
+
+OBJECTHANDLE GCHandleTable::CreateHandleWithExtraInfo(void* store, Object* object, int type, void* pExtraInfo)
+{
+ HHANDLETABLE handletable = ((HandleTableBucket*)store)->pTable[GetCurrentThreadHomeHeapNumber()];
+ return ::HndCreateHandle(handletable, type, ObjectToOBJECTREF(object), reinterpret_cast<uintptr_t>(pExtraInfo));
+}
+
+OBJECTHANDLE GCHandleTable::CreateDependentHandle(void* store, Object* primary, Object* secondary)
+{
+ HHANDLETABLE handletable = ((HandleTableBucket*)store)->pTable[GetCurrentThreadHomeHeapNumber()];
+ OBJECTHANDLE handle = ::HndCreateHandle(handletable, HNDTYPE_DEPENDENT, ObjectToOBJECTREF(primary));
+ ::SetDependentHandleSecondary(handle, ObjectToOBJECTREF(secondary));
+
+ return handle;
+}
+
+OBJECTHANDLE GCHandleTable::CreateDuplicateHandle(OBJECTHANDLE handle)
+{
+ return ::HndCreateHandle(HndGetHandleTable(handle), HNDTYPE_DEFAULT, ::HndFetchHandle(handle));
+}
+
+void GCHandleTable::DestroyHandleOfType(OBJECTHANDLE handle, int type)
+{
+ ::HndDestroyHandle(::HndGetHandleTable(handle), type, handle);
+}
+
+void GCHandleTable::DestroyHandleOfUnknownType(OBJECTHANDLE handle)
+{
+ ::HndDestroyHandleOfUnknownType(::HndGetHandleTable(handle), handle);
+}
+
+void* GCHandleTable::GetExtraInfoFromHandle(OBJECTHANDLE handle)
+{
+ return (void*)::HndGetHandleExtraInfo(handle);
+}
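
The implementation above is a thin veneer over the existing HndXxx handle-table primitives. A minimal usage sketch, assuming the GC is already initialized and obj is a live object reference; error handling is elided:

// Sketch: create, use, and destroy a strong handle via the new interface.
IGCHandleTable* table = CreateGCHandleTable();
if (table == nullptr || !table->Initialize())
    return; // allocation or initialization failure

// HNDTYPE_STRONG keeps obj alive until the handle is destroyed.
OBJECTHANDLE strong = table->CreateGlobalHandleOfType(obj, HNDTYPE_STRONG);

// ... use the handle ...

table->DestroyHandleOfType(strong, HNDTYPE_STRONG);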
diff --git a/src/gc/gchandletableimpl.h b/src/gc/gchandletableimpl.h
new file mode 100644
index 0000000000..af20f52e54
--- /dev/null
+++ b/src/gc/gchandletableimpl.h
@@ -0,0 +1,48 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#ifndef GCHANDLETABLE_H_
+#define GCHANDLETABLE_H_
+
+#include "gcinterface.h"
+
+class GCHandleTable : public IGCHandleTable
+{
+public:
+ virtual bool Initialize();
+
+ virtual void Shutdown();
+
+ virtual void* GetGlobalHandleStore();
+
+ virtual void* CreateHandleStore(void* context);
+
+ virtual void* GetHandleContext(OBJECTHANDLE handle);
+
+ virtual void DestroyHandleStore(void* store);
+
+ virtual void UprootHandleStore(void* store);
+
+ virtual bool ContainsHandle(void* store, OBJECTHANDLE handle);
+
+ virtual OBJECTHANDLE CreateHandleOfType(void* store, Object* object, int type);
+
+ virtual OBJECTHANDLE CreateHandleOfType(void* store, Object* object, int type, int heapToAffinitizeTo);
+
+ virtual OBJECTHANDLE CreateHandleWithExtraInfo(void* store, Object* object, int type, void* pExtraInfo);
+
+ virtual OBJECTHANDLE CreateDependentHandle(void* store, Object* primary, Object* secondary);
+
+ virtual OBJECTHANDLE CreateGlobalHandleOfType(Object* object, int type);
+
+ virtual OBJECTHANDLE CreateDuplicateHandle(OBJECTHANDLE handle);
+
+ virtual void DestroyHandleOfType(OBJECTHANDLE handle, int type);
+
+ virtual void DestroyHandleOfUnknownType(OBJECTHANDLE handle);
+
+ virtual void* GetExtraInfoFromHandle(OBJECTHANDLE handle);
+};
+
+#endif // GCHANDLETABLE_H_
diff --git a/src/gc/gcimpl.h b/src/gc/gcimpl.h
index cb91c4dc3e..2a51d477b0 100644
--- a/src/gc/gcimpl.h
+++ b/src/gc/gcimpl.h
@@ -39,6 +39,11 @@ void GCProfileWalkHeap();
class gc_heap;
class CFinalize;
+extern bool g_fFinalizerRunOnShutDown;
+extern bool g_built_with_svr_gc;
+extern uint8_t g_build_variant;
+extern VOLATILE(int32_t) g_no_gc_lock;
+
class GCHeap : public IGCHeapInternal
{
protected:
@@ -80,19 +85,19 @@ public:
void DiagTraceGCSegments ();
void PublishObject(uint8_t* obj);
- BOOL IsGCInProgressHelper (BOOL bConsiderGCStart = FALSE);
+ bool IsGCInProgressHelper (bool bConsiderGCStart = false);
+
+ uint32_t WaitUntilGCComplete (bool bConsiderGCStart = false);
- uint32_t WaitUntilGCComplete (BOOL bConsiderGCStart = FALSE);
+ void SetGCInProgress(bool fInProgress);
- void SetGCInProgress(BOOL fInProgress);
+ bool RuntimeStructuresValid();
CLREvent * GetWaitForGCEvent();
HRESULT Initialize ();
//flags can be GC_ALLOC_CONTAINS_REF GC_ALLOC_FINALIZE
- Object* Alloc (size_t size, uint32_t flags);
- Object* AllocAlign8 (size_t size, uint32_t flags);
Object* AllocAlign8 (gc_alloc_context* acontext, size_t size, uint32_t flags);
private:
Object* AllocAlign8Common (void* hp, alloc_context* acontext, size_t size, uint32_t flags);
@@ -101,9 +106,9 @@ public:
Object* Alloc (gc_alloc_context* acontext, size_t size, uint32_t flags);
void FixAllocContext (gc_alloc_context* acontext,
- BOOL lockp, void* arg, void *heap);
+ bool lockp, void* arg, void *heap);
- Object* GetContainingObject(void *pInteriorPtr);
+ Object* GetContainingObject(void *pInteriorPtr, bool fCollectedGenOnly);
#ifdef MULTIPLE_HEAPS
static void AssignHeap (alloc_context* acontext);
@@ -116,15 +121,15 @@ public:
void HideAllocContext(alloc_context*);
void RevealAllocContext(alloc_context*);
- BOOL IsObjectInFixedHeap(Object *pObj);
+ bool IsObjectInFixedHeap(Object *pObj);
- HRESULT GarbageCollect (int generation = -1, BOOL low_memory_p=FALSE, int mode=collection_blocking);
+ HRESULT GarbageCollect (int generation = -1, bool low_memory_p=false, int mode=collection_blocking);
////
// GC callback functions
// Check if an argument is promoted (ONLY CALL DURING
// THE PROMOTIONSGRANTED CALLBACK.)
- BOOL IsPromoted (Object *object);
+ bool IsPromoted (Object *object);
size_t GetPromotedBytes (int heap_index);
@@ -152,8 +157,8 @@ public:
//returns the generation number of an object (not valid during relocation)
unsigned WhichGeneration (Object* object);
// returns TRUE if the object is ephemeral
- BOOL IsEphemeral (Object* object);
- BOOL IsHeapPointer (void* object, BOOL small_heap_only = FALSE);
+ bool IsEphemeral (Object* object);
+ bool IsHeapPointer (void* object, bool small_heap_only = false);
void ValidateObjectMember (Object *obj);
@@ -168,13 +173,13 @@ public:
int GetLOHCompactionMode();
void SetLOHCompactionMode(int newLOHCompactionyMode);
- BOOL RegisterForFullGCNotification(uint32_t gen2Percentage,
+ bool RegisterForFullGCNotification(uint32_t gen2Percentage,
uint32_t lohPercentage);
- BOOL CancelFullGCNotification();
+ bool CancelFullGCNotification();
int WaitForFullGCApproach(int millisecondsTimeout);
int WaitForFullGCComplete(int millisecondsTimeout);
- int StartNoGCRegion(uint64_t totalSize, BOOL lohSizeKnown, uint64_t lohSize, BOOL disallowFullBlockingGC);
+ int StartNoGCRegion(uint64_t totalSize, bool lohSizeKnown, uint64_t lohSize, bool disallowFullBlockingGC);
int EndNoGCRegion();
unsigned GetGcCount();
@@ -184,7 +189,7 @@ public:
PER_HEAP_ISOLATED HRESULT GetGcCounters(int gen, gc_counters* counters);
- size_t GetValidSegmentSize(BOOL large_seg = FALSE);
+ size_t GetValidSegmentSize(bool large_seg = false);
static size_t GetValidGen0MaxSize(size_t seg_size);
@@ -194,11 +199,12 @@ public:
PER_HEAP_ISOLATED size_t GetNumberFinalizableObjects();
PER_HEAP_ISOLATED size_t GetFinalizablePromotedCount();
- void SetFinalizeQueueForShutdown(BOOL fHasLock);
- BOOL FinalizeAppDomain(AppDomain *pDomain, BOOL fRunFinalizers);
- BOOL ShouldRestartFinalizerWatchDog();
+ void SetFinalizeQueueForShutdown(bool fHasLock);
+ bool FinalizeAppDomain(AppDomain *pDomain, bool fRunFinalizers);
+ bool ShouldRestartFinalizerWatchDog();
void DiagWalkObject (Object* obj, walk_fn fn, void* context);
+ void SetFinalizeRunOnShutdown(bool value);
public: // FIX
@@ -229,12 +235,12 @@ public: // FIX
#ifndef DACCESS_COMPILE
HRESULT WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout); // Use on native threads. TRUE if it succeeded, FALSE if it failed or timed out
#endif
- BOOL IsConcurrentGCInProgress();
+ bool IsConcurrentGCInProgress();
// Enable/disable concurrent GC
void TemporaryEnableConcurrentGC();
void TemporaryDisableConcurrentGC();
- BOOL IsConcurrentGCEnabled();
+ bool IsConcurrentGCEnabled();
PER_HEAP_ISOLATED CLREvent *WaitForGCEvent; // used for syncing w/GC
@@ -253,7 +259,7 @@ private:
}
public:
//return TRUE if GC actually happens, otherwise FALSE
- BOOL StressHeap(gc_alloc_context * acontext = 0);
+ bool StressHeap(gc_alloc_context * acontext);
#ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
#ifdef STRESS_HEAP
@@ -273,7 +279,7 @@ protected:
virtual void DiagDescrGenerations (gen_walk_fn fn, void *context);
- virtual void DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, size_t diag_context, walk_surv_type type);
+ virtual void DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, void* diag_context, walk_surv_type type);
virtual void DiagWalkFinalizeQueue (void* gc_context, fq_walk_fn fn);
@@ -283,7 +289,7 @@ protected:
virtual void DiagScanDependentHandles (handle_scan_fn fn, int gen_number, ScanContext* context);
- virtual void DiagWalkHeap(walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
+ virtual void DiagWalkHeap(walk_fn fn, void* context, int gen_number, bool walk_large_object_heap_p);
public:
Object * NextObj (Object * object);
diff --git a/src/gc/gcinterface.dac.h b/src/gc/gcinterface.dac.h
new file mode 100644
index 0000000000..647101fa1f
--- /dev/null
+++ b/src/gc/gcinterface.dac.h
@@ -0,0 +1,156 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#ifndef _GC_INTERFACE_DAC_H_
+#define _GC_INTERFACE_DAC_H_
+
+// This file defines the interface between the GC and the DAC. The interface consists of two things:
+// 1. A number of variables ("DAC vars") whose addresses are exposed to the DAC (see "struct GcDacVars")
+// 2. A number of types that are analogues to GC-internal types. These types expose a subset of the
+// GC-internal type's fields, while still maintaining the same layout.
+// This interface is strictly versioned; see gcinterface.dacvars.def for more information.
+
+#define NUM_GC_DATA_POINTS 9
+#define MAX_COMPACT_REASONS_COUNT 11
+#define MAX_EXPAND_MECHANISMS_COUNT 6
+#define MAX_GC_MECHANISM_BITS_COUNT 2
+#define MAX_GLOBAL_GC_MECHANISMS_COUNT 6
+#define NUMBERGENERATIONS 4
+
+// Analogue for the GC heap_segment class, containing information regarding a single
+// heap segment.
+class dac_heap_segment {
+public:
+ uint8_t* allocated;
+ uint8_t* committed;
+ uint8_t* reserved;
+ uint8_t* used;
+ uint8_t* mem;
+ size_t flags;
+ DPTR(dac_heap_segment) next;
+ uint8_t* background_allocated;
+ class dac_gc_heap* heap;
+};
+
+// Analogue for the GC generation class, containing information about the start segment
+// of a generation and its allocation context.
+class dac_generation {
+public:
+ gc_alloc_context allocation_context;
+ DPTR(dac_heap_segment) start_segment;
+ uint8_t* allocation_start;
+};
+
+// Analogue for the GC CFinalize class, containing information about the finalize queue.
+class dac_finalize_queue {
+public:
+ static const int ExtraSegCount = 2;
+ uint8_t** m_FillPointers[NUMBERGENERATIONS + ExtraSegCount];
+};
+
+// Possible values of the current_c_gc_state dacvar, indicating the state of
+// a background GC.
+enum c_gc_state
+{
+ c_gc_state_marking,
+ c_gc_state_planning,
+ c_gc_state_free
+};
+
+// Reasons why an OOM might occur, recorded in the oom_history
+// struct below.
+enum oom_reason
+{
+ oom_no_failure = 0,
+ oom_budget = 1,
+ oom_cant_commit = 2,
+ oom_cant_reserve = 3,
+ oom_loh = 4,
+ oom_low_mem = 5,
+ oom_unproductive_full_gc = 6
+};
+
+/*!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!*/
+/* If you modify failure_get_memory and */
+/* oom_reason be sure to make the corresponding */
+/* changes in toolbox\sos\strike\strike.cpp. */
+/*!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!*/
+enum failure_get_memory
+{
+ fgm_no_failure = 0,
+ fgm_reserve_segment = 1,
+ fgm_commit_segment_beg = 2,
+ fgm_commit_eph_segment = 3,
+ fgm_grow_table = 4,
+ fgm_commit_table = 5
+};
+
+// A record of the last OOM that occurred in the GC, with some
+// additional information as to what triggered the OOM.
+struct oom_history
+{
+ oom_reason reason;
+ size_t alloc_size;
+ uint8_t* reserved;
+ uint8_t* allocated;
+ size_t gc_index;
+ failure_get_memory fgm;
+ size_t size;
+ size_t available_pagefile_mb;
+ BOOL loh_p;
+};
+
+// Analogue for the GC gc_heap class, containing information regarding a single
+// GC heap (of which there are multiple, with server GC).
+class dac_gc_heap {
+public:
+ uint8_t* alloc_allocated;
+ DPTR(dac_heap_segment) ephemeral_heap_segment;
+ DPTR(dac_finalize_queue) finalize_queue;
+ oom_history oom_info;
+ size_t interesting_data_per_heap[NUM_GC_DATA_POINTS];
+ size_t compact_reasons_per_heap[MAX_COMPACT_REASONS_COUNT];
+ size_t expand_mechanisms_per_heap[MAX_EXPAND_MECHANISMS_COUNT];
+ size_t interesting_mechanism_bits_per_heap[MAX_GC_MECHANISM_BITS_COUNT];
+ uint8_t* internal_root_array;
+ size_t internal_root_array_index;
+ BOOL heap_analyze_success;
+
+ // The generation table must always be last, because the size of this array
+ // (stored inline in the gc_heap class) can vary.
+ //
+ // The size of the generation class is not part of the GC-DAC interface,
+ // despite being embedded by-value into the gc_heap class. The DAC variable
+ // "generation_size" stores the size of the generation class, so the DAC can
+ // use it and pointer arithmetic to calculate correct offsets into the generation
+ // table. (See "GenerationTableIndex" function in the DAC for details)
+ //
+ // Also note that this array has length 1 because the C++ standard doesn't allow
+ // for 0-length arrays, although every major compiler is willing to tolerate it.
+ dac_generation generation_table[1];
+};
+
+
+// The actual structure containing the DAC variables. When DACCESS_COMPILE is not
+// defined (i.e. the normal runtime build), this structure contains pointers to the
+// GC's global DAC variables. When DACCESS_COMPILE is defined (i.e. the DAC build),
+// this structure contains __DPtrs for every DAC variable that will marshal values
+// from the debuggee process to the debugger process when dereferenced.
+struct GcDacVars {
+ uint8_t major_version_number;
+ uint8_t minor_version_number;
+ size_t generation_size;
+#ifdef DACCESS_COMPILE
+ #define GC_DAC_VAR(type, name) DPTR(type) name;
+ // ArrayDPTR doesn't allow arrays to decay to pointers, which
+ // avoids some accidental errors.
+ #define GC_DAC_PTR_VAR(type, name) DPTR(type*) name;
+ #define GC_DAC_ARRAY_VAR(type, name) DPTR(type) name;
+#else
+ #define GC_DAC_VAR(type, name) type *name;
+#endif
+#include "gcinterface.dacvars.def"
+};
+
+#endif // _GC_INTERFACE_DAC_H_
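
The generation_size DAC variable mentioned in the comment is what makes the variable-sized generation_table traversable: the debugger scales indices by the size the debuggee reports rather than by its own sizeof(dac_generation). A sketch of that pointer arithmetic (the helper name is hypothetical; the real logic lives in the DAC's GenerationTableIndex function):

// Sketch: compute the address of generation i inside a dac_gc_heap,
// scaling by the debuggee-reported generation size rather than the
// debugger's sizeof(dac_generation).
dac_generation* GenerationAt(dac_gc_heap* heap, size_t i, size_t generation_size)
{
    uint8_t* base = reinterpret_cast<uint8_t*>(&heap->generation_table[0]);
    return reinterpret_cast<dac_generation*>(base + i * generation_size);
}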
diff --git a/src/gc/gcinterface.dacvars.def b/src/gc/gcinterface.dacvars.def
new file mode 100644
index 0000000000..b788079dcb
--- /dev/null
+++ b/src/gc/gcinterface.dacvars.def
@@ -0,0 +1,66 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+// This file contains the definitions of all DAC variables that the GC
+// exports and that the DAC uses to interface with the GC.
+//
+// This interface follows strict semantic versioning. The following changes require
+// a bump to the major version number:
+// 1. Changing the type of any of these variables,
+// 2. Changing the type layouts of any of the types in gcinterface.dac.h,
+// (dac_generation, dac_heap_segment, dac_finalize_queue)
+// 3. Changing the semantic meaning of any of these variables, such that the DAC's
+// use of them is no longer correct,
+//
+// The following change requires a bump to the minor version number:
+// 1. Adding additional DAC variables.
+//
+// Minor version mismatches are tolerated by the DAC, at the risk of a possibly
+// degraded debugging experience.
+// Major version mismatches are not tolerated by the DAC and will be rejected upon load.
+
+#ifndef GC_DAC_VAR
+ #define GC_DAC_VAR(type, name)
+#endif // GC_DAC_VAR
+
+#ifndef GC_DAC_ARRAY_VAR
+ #define GC_DAC_ARRAY_VAR(type, name) GC_DAC_VAR(type*, name)
+#endif // GC_DAC_ARRAY_VAR
+
+#ifndef GC_DAC_PTR_VAR
+ #define GC_DAC_PTR_VAR(type, name) GC_DAC_VAR(type*, name)
+#endif // GC_DAC_PTR_VAR
+
+// This sequence of macros defines the specific variables that are exposed by the
+// GC to the DAC.
+GC_DAC_VAR (uint8_t, build_variant)
+GC_DAC_VAR (bool, built_with_svr)
+GC_DAC_ARRAY_VAR (size_t, gc_global_mechanisms)
+GC_DAC_ARRAY_VAR (dac_generation, generation_table)
+GC_DAC_VAR (uint32_t, max_gen)
+GC_DAC_PTR_VAR (uint32_t, mark_array)
+GC_DAC_VAR (c_gc_state, current_c_gc_state)
+GC_DAC_PTR_VAR (dac_heap_segment, ephemeral_heap_segment)
+GC_DAC_PTR_VAR (dac_heap_segment, saved_sweep_ephemeral_seg)
+GC_DAC_PTR_VAR (uint8_t, saved_sweep_ephemeral_start)
+GC_DAC_PTR_VAR (uint8_t, background_saved_lowest_address)
+GC_DAC_PTR_VAR (uint8_t, background_saved_highest_address)
+GC_DAC_PTR_VAR (uint8_t, alloc_allocated)
+GC_DAC_PTR_VAR (uint8_t, next_sweep_obj)
+GC_DAC_VAR (oom_history, oom_info)
+GC_DAC_PTR_VAR (dac_finalize_queue, finalize_queue)
+GC_DAC_PTR_VAR (uint8_t*, internal_root_array)
+GC_DAC_VAR (size_t, internal_root_array_index)
+GC_DAC_VAR (BOOL, heap_analyze_success)
+GC_DAC_VAR (int, n_heaps)
+GC_DAC_PTR_VAR (dac_gc_heap*, g_heaps)
+GC_DAC_VAR (int32_t, gc_structures_invalid_cnt)
+GC_DAC_ARRAY_VAR (size_t, interesting_data_per_heap)
+GC_DAC_ARRAY_VAR (size_t, compact_reasons_per_heap)
+GC_DAC_ARRAY_VAR (size_t, expand_mechanisms_per_heap)
+GC_DAC_ARRAY_VAR (size_t, interesting_mechanism_bits_per_heap)
+
+#undef GC_DAC_VAR
+#undef GC_DAC_ARRAY_VAR
+#undef GC_DAC_PTR_VAR
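
gcinterface.dacvars.def is an X-macro list: each includer defines GC_DAC_VAR (and optionally the array and pointer variants) before including it, and the same variable list expands into different declarations, exactly as GcDacVars does in gcinterface.dac.h. A reduced sketch of the pattern, with a hypothetical vars.def:

// Sketch of the X-macro pattern used by gcinterface.dacvars.def.
// vars.def (hypothetical) contains lines such as: GC_DAC_VAR(int, n_heaps)

// Runtime build: plain pointers to the GC's globals.
struct RuntimeVars {
#define GC_DAC_VAR(type, name) type* name;
#include "vars.def"
#undef GC_DAC_VAR
};

// DAC build: marshaling pointers that read the debuggee's memory.
struct DacVars {
#define GC_DAC_VAR(type, name) DPTR(type) name;
#include "vars.def"
#undef GC_DAC_VAR
};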
diff --git a/src/gc/gcinterface.ee.h b/src/gc/gcinterface.ee.h
index 7c0eea2d95..7b868e780e 100644
--- a/src/gc/gcinterface.ee.h
+++ b/src/gc/gcinterface.ee.h
@@ -133,6 +133,39 @@ public:
// be finalized.
virtual
void EnableFinalization(bool foundFinalizers) = 0;
+
+ // Signals to the EE that the GC encountered a fatal error and can't recover.
+ virtual
+ void HandleFatalError(unsigned int exitCode) = 0;
+
+ // Asks the EE if it wants a particular object to be finalized when unloading
+ // an app domain.
+ virtual
+ bool ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj) = 0;
+
+ // Offers the EE the option to finalize the given object eagerly, i.e.
+ // not on the finalizer thread but on the current thread. The
+ // EE returns true if it finalized the object eagerly and the GC does not
+ // need to do so, and false if it chose not to eagerly finalize the object
+ // and it's up to the GC to finalize it later.
+ virtual
+ bool EagerFinalized(Object* obj) = 0;
+
+ // Asks the EE if it wishes for the current GC to be a blocking GC. The GC will
+ // only invoke this callback when it intends to do a full GC, so at this point
+ // the EE can opt to elevate that collection to be a blocking GC and not a background one.
+ virtual
+ bool ForceFullGCToBeBlocking() = 0;
+
+ // Retrieves the method table for the free object, a special kind of object used by the GC
+ // to keep the heap traversable. Conceptually, the free object is similar to a managed array
+ // of bytes: it consists of an object header (like all objects) and a "numComponents" field,
+ // followed by some number of bytes of space that's free on the heap.
+ //
+ // The free object allows the GC to traverse the heap because it can inspect the numComponents
+ // field to see how many bytes to skip before the next object on a heap segment begins.
+ virtual
+ MethodTable* GetFreeObjectMethodTable() = 0;
};
#endif // _GCINTERFACE_EE_H_
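
The free-object description above is also the key to linear heap walks: given the free object's method table, a walker can treat a free region like an array of bytes and skip past it. A conceptual sketch follows; the accessors and FreeObjectBaseSize are assumed helpers for illustration, not real runtime APIs.

// Conceptual sketch of a segment walk that skips free space via free objects.
MethodTable* GetMethodTable(Object* obj);   // assumed: reads the object header
size_t GetNumComponents(Object* obj);       // assumed: reads numComponents
size_t GetSize(Object* obj);                // assumed: full object size
void Visit(Object* obj);                    // assumed visitor callback
const size_t FreeObjectBaseSize = 3 * sizeof(void*); // assumed; cf. min_obj_size

void WalkSegment(uint8_t* start, uint8_t* end, MethodTable* freeObjMT)
{
    for (uint8_t* p = start; p < end; )
    {
        Object* obj = reinterpret_cast<Object*>(p);
        if (GetMethodTable(obj) == freeObjMT)
        {
            // Free object: skip its header plus numComponents bytes of dead space.
            p += FreeObjectBaseSize + GetNumComponents(obj);
        }
        else
        {
            Visit(obj);          // real object
            p += GetSize(obj);   // advance by the object's actual size
        }
    }
}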
diff --git a/src/gc/gcinterface.h b/src/gc/gcinterface.h
index 99d79df633..cac2ba7114 100644
--- a/src/gc/gcinterface.h
+++ b/src/gc/gcinterface.h
@@ -77,6 +77,10 @@ struct WriteBarrierParameters
// card table. Used for WriteBarrierOp::Initialize and WriteBarrierOp::StompResize.
uint32_t* card_table;
+ // The new card bundle table location. May or may not be the same as the previous
+ // card bundle table. Used for WriteBarrierOp::Initialize and WriteBarrierOp::StompResize.
+ uint32_t* card_bundle_table;
+
// The heap's new low boundary. May or may not be the same as the previous
// value. Used for WriteBarrierOp::Initialize and WriteBarrierOp::StompResize.
uint8_t* lowest_address;
@@ -98,6 +102,11 @@ struct WriteBarrierParameters
uint8_t* write_watch_table;
};
+ /*
+ * Scanning callback.
+ */
+typedef void (CALLBACK *HANDLESCANPROC)(PTR_UNCHECKED_OBJECTREF pref, uintptr_t *pExtraInfo, uintptr_t param1, uintptr_t param2);
+
#include "gcinterface.ee.h"
// The allocation context must be known to the VM for use in the allocation
@@ -129,6 +138,8 @@ public:
}
};
+#include "gcinterface.dac.h"
+
// stub type to abstract a heap segment
struct gc_heap_segment_stub;
typedef gc_heap_segment_stub *segment_handle;
@@ -138,7 +149,7 @@ struct segment_info
void * pvMem; // base of the allocation, not the first object (must add ibFirstObject)
size_t ibFirstObject; // offset to the base of the first object in the segment
size_t ibAllocated; // limit of allocated memory in the segment (>= firstobject)
- size_t ibCommit; // limit of committed memory in the segment (>= alllocated)
+ size_t ibCommit; // limit of committed memory in the segment (>= allocated)
size_t ibReserved; // limit of reserved memory in the segment (>= commit)
};
@@ -152,18 +163,18 @@ struct segment_info
// one for the object header, and one for the first field in the object.
#define min_obj_size ((sizeof(uint8_t*) + sizeof(uintptr_t) + sizeof(size_t)))
-#define max_generation 2
-
// The bit shift used to convert a memory address into an index into the
// Software Write Watch table.
#define SOFTWARE_WRITE_WATCH_AddressToTableByteIndexShift 0xc
class Object;
class IGCHeap;
+class IGCHandleTable;
// Initializes the garbage collector. Should only be called
-// once, during EE startup.
-IGCHeap* InitializeGarbageCollector(IGCToCLR* clrToGC);
+// once, during EE startup. Returns true if the initialization
+// was successful, false otherwise.
+bool InitializeGarbageCollector(IGCToCLR* clrToGC, IGCHeap** gcHeap, IGCHandleTable** gcHandleTable, GcDacVars* gcDacVars);
// The runtime needs to know whether we're using workstation or server GC
// long before the GCHeap is created. This function sets the type of
@@ -182,8 +193,6 @@ extern uint8_t* g_shadow_lowest_address;
// For low memory notification from host
extern int32_t g_bLowMemoryFromHost;
-extern VOLATILE(int32_t) m_GCLock;
-
// !!!!!!!!!!!!!!!!!!!!!!!
// make sure you change the def in bcl\system\gc.cs
// if you change this!
@@ -229,12 +238,207 @@ enum end_no_gc_region_status
end_no_gc_alloc_exceeded = 3
};
-typedef BOOL (* walk_fn)(Object*, void*);
+typedef enum
+{
+ /*
+ * WEAK HANDLES
+ *
+ * Weak handles are handles that track an object as long as it is alive,
+ * but do not keep the object alive if there are no strong references to it.
+ *
+ */
+
+ /*
+ * SHORT-LIVED WEAK HANDLES
+ *
+ * Short-lived weak handles are weak handles that track an object until the
+ * first time it is detected to be unreachable. At this point, the handle is
+ * severed, even if the object will be visible from a pending finalization
+ * graph. This further implies that short weak handles do not track
+ * across object resurrections.
+ *
+ */
+ HNDTYPE_WEAK_SHORT = 0,
+
+ /*
+ * LONG-LIVED WEAK HANDLES
+ *
+ * Long-lived weak handles are weak handles that track an object until the
+ * object is actually reclaimed. Unlike short weak handles, long weak handles
+ * continue to track their referents through finalization and across any
+ * resurrections that may occur.
+ *
+ */
+ HNDTYPE_WEAK_LONG = 1,
+ HNDTYPE_WEAK_DEFAULT = 1,
+
+ /*
+ * STRONG HANDLES
+ *
+ * Strong handles are handles which function like a normal object reference.
+ * The existence of a strong handle for an object will cause the object to
+ * be promoted (remain alive) through a garbage collection cycle.
+ *
+ */
+ HNDTYPE_STRONG = 2,
+ HNDTYPE_DEFAULT = 2,
+
+ /*
+ * PINNED HANDLES
+ *
+ * Pinned handles are strong handles which have the added property that they
+ * prevent an object from moving during a garbage collection cycle. This is
+ * useful when passing a pointer to object innards out of the runtime while GC
+ * may be enabled.
+ *
+ * NOTE: PINNING AN OBJECT IS EXPENSIVE AS IT PREVENTS THE GC FROM ACHIEVING
+ * OPTIMAL PACKING OF OBJECTS DURING EPHEMERAL COLLECTIONS. THIS TYPE
+ * OF HANDLE SHOULD BE USED SPARINGLY!
+ */
+ HNDTYPE_PINNED = 3,
+
+ /*
+ * VARIABLE HANDLES
+ *
+ * Variable handles are handles whose type can be changed dynamically. They
+ * are larger than other types of handles, and are scanned a little more often,
+ * but are useful when the handle owner needs an efficient way to change the
+ * strength of a handle on the fly.
+ *
+ */
+ HNDTYPE_VARIABLE = 4,
+
+ /*
+ * REFCOUNTED HANDLES
+ *
+ * Refcounted handles are handles that behave as strong handles while the
+ * refcount on them is greater than 0 and behave as weak handles otherwise.
+ *
+ * N.B. These are currently NOT general purpose.
+ * The implementation is tied to COM Interop.
+ *
+ */
+ HNDTYPE_REFCOUNTED = 5,
+
+ /*
+ * DEPENDENT HANDLES
+ *
+ * Dependent handles tie two objects together so that they share a lifetime. The handle refers to a secondary object
+ * that needs to have the same lifetime as the primary object. The secondary object should not cause the primary
+ * object to be referenced, but as long as the primary object is alive, the secondary must remain alive too.
+ *
+ * They are currently used for EnC for adding new field members to existing instantiations under EnC modes where
+ * the primary object is the original instantiation and the secondary represents the added field.
+ *
+ * They are also used to implement the ConditionalWeakTable class in mscorlib.dll. If you want to use
+ * these from managed code, they are exposed to the BCL through the managed DependentHandle class.
+ *
+ *
+ */
+ HNDTYPE_DEPENDENT = 6,
+
+ /*
+ * PINNED HANDLES for asynchronous operation
+ *
+ * Pinned handles are strong handles which have the added property that they
+ * prevent an object from moving during a garbage collection cycle. This is
+ * useful when passing a pointer to object innards out of the runtime while GC
+ * may be enabled.
+ *
+ * NOTE: PINNING AN OBJECT IS EXPENSIVE AS IT PREVENTS THE GC FROM ACHIEVING
+ * OPTIMAL PACKING OF OBJECTS DURING EPHEMERAL COLLECTIONS. THIS TYPE
+ * OF HANDLE SHOULD BE USED SPARINGLY!
+ */
+ HNDTYPE_ASYNCPINNED = 7,
+
+ /*
+ * SIZEDREF HANDLES
+ *
+ * SizedRef handles are strong handles. Each handle has a piece of user data associated
+ * with it that stores the size of the object this handle refers to. These handles
+ * are scanned as strong roots during each GC, but the size is only calculated
+ * during full GCs.
+ *
+ */
+ HNDTYPE_SIZEDREF = 8,
+
+ /*
+ * WINRT WEAK HANDLES
+ *
+ * WinRT weak reference handles hold two different types of weak handles to any
+ * RCW with an underlying COM object that implements IWeakReferenceSource. The
+ * object reference itself is a short weak handle to the RCW. In addition an
+ * IWeakReference* to the underlying COM object is stored, allowing the handle
+ * to create a new RCW if the existing RCW is collected. This ensures that any
+ * code holding onto a WinRT weak reference can always access an RCW to the
+ * underlying COM object as long as it has not been released by all of its strong
+ * references.
+ */
+ HNDTYPE_WEAK_WINRT = 9
+} HandleType;
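
The dependent-handle semantics documented above (the secondary lives exactly as long as the primary, without the secondary rooting the primary) map directly onto CreateDependentHandle in the interface declared below; this is the primitive beneath ConditionalWeakTable. A usage sketch, assuming table and store are a live IGCHandleTable and handle store:

// Sketch: associate extra state with primaryObj without extending its lifetime.
OBJECTHANDLE dep = table->CreateDependentHandle(store, primaryObj, secondaryObj);

// While primaryObj is reachable, the GC also reports secondaryObj as live.
// Once primaryObj dies, secondaryObj becomes collectible; the handle never
// roots primaryObj itself.

table->DestroyHandleOfType(dep, HNDTYPE_DEPENDENT);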
+
+typedef enum
+{
+ GC_HEAP_INVALID = 0,
+ GC_HEAP_WKS = 1,
+ GC_HEAP_SVR = 2
+} GCHeapType;
+
+typedef bool (* walk_fn)(Object*, void*);
typedef void (* gen_walk_fn)(void* context, int generation, uint8_t* range_start, uint8_t* range_end, uint8_t* range_reserved);
-typedef void (* record_surv_fn)(uint8_t* begin, uint8_t* end, ptrdiff_t reloc, size_t context, BOOL compacting_p, BOOL bgc_p);
-typedef void (* fq_walk_fn)(BOOL, void*);
+typedef void (* record_surv_fn)(uint8_t* begin, uint8_t* end, ptrdiff_t reloc, void* context, bool compacting_p, bool bgc_p);
+typedef void (* fq_walk_fn)(bool, void*);
typedef void (* fq_scan_fn)(Object** ppObject, ScanContext *pSC, uint32_t dwFlags);
-typedef void (* handle_scan_fn)(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, BOOL isDependent);
+typedef void (* handle_scan_fn)(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, bool isDependent);
+
+// Opaque type for tracking object pointers
+#ifndef DACCESS_COMPILE
+struct OBJECTHANDLE__
+{
+ void* unused;
+};
+typedef struct OBJECTHANDLE__* OBJECTHANDLE;
+#else
+typedef uintptr_t OBJECTHANDLE;
+#endif
+
+class IGCHandleTable {
+public:
+
+ virtual bool Initialize() = 0;
+
+ virtual void Shutdown() = 0;
+
+ virtual void* GetHandleContext(OBJECTHANDLE handle) = 0;
+
+ virtual void* GetGlobalHandleStore() = 0;
+
+ virtual void* CreateHandleStore(void* context) = 0;
+
+ virtual void DestroyHandleStore(void* store) = 0;
+
+ virtual void UprootHandleStore(void* store) = 0;
+
+ virtual bool ContainsHandle(void* store, OBJECTHANDLE handle) = 0;
+
+ virtual OBJECTHANDLE CreateHandleOfType(void* store, Object* object, int type) = 0;
+
+ virtual OBJECTHANDLE CreateHandleOfType(void* store, Object* object, int type, int heapToAffinitizeTo) = 0;
+
+ virtual OBJECTHANDLE CreateHandleWithExtraInfo(void* store, Object* object, int type, void* pExtraInfo) = 0;
+
+ virtual OBJECTHANDLE CreateDependentHandle(void* store, Object* primary, Object* secondary) = 0;
+
+ virtual OBJECTHANDLE CreateGlobalHandleOfType(Object* object, int type) = 0;
+
+ virtual OBJECTHANDLE CreateDuplicateHandle(OBJECTHANDLE handle) = 0;
+
+ virtual void DestroyHandleOfType(OBJECTHANDLE handle, int type) = 0;
+
+ virtual void DestroyHandleOfUnknownType(OBJECTHANDLE handle) = 0;
+
+ virtual void* GetExtraInfoFromHandle(OBJECTHANDLE handle) = 0;
+};
// IGCHeap is the interface that the VM will use when interacting with the GC.
class IGCHeap {
@@ -249,13 +453,13 @@ public:
*/
// Returns whether or not the given size is a valid segment size.
- virtual BOOL IsValidSegmentSize(size_t size) = 0;
+ virtual bool IsValidSegmentSize(size_t size) = 0;
// Returns whether or not the given size is a valid gen 0 max size.
- virtual BOOL IsValidGen0MaxSize(size_t size) = 0;
+ virtual bool IsValidGen0MaxSize(size_t size) = 0;
// Gets a valid segment size.
- virtual size_t GetValidSegmentSize(BOOL large_seg = FALSE) = 0;
+ virtual size_t GetValidSegmentSize(bool large_seg = false) = 0;
// Sets the limit for reserved virtual memory.
virtual void SetReservedVMLimit(size_t vmlimit) = 0;
@@ -275,7 +479,7 @@ public:
virtual void WaitUntilConcurrentGCComplete() = 0;
// Returns true if a concurrent GC is in progress, false otherwise.
- virtual BOOL IsConcurrentGCInProgress() = 0;
+ virtual bool IsConcurrentGCInProgress() = 0;
// Temporarily enables concurrent GC, used during profiling.
virtual void TemporaryEnableConcurrentGC() = 0;
@@ -284,7 +488,7 @@ public:
virtual void TemporaryDisableConcurrentGC() = 0;
// Returns whether or not Concurrent GC is enabled.
- virtual BOOL IsConcurrentGCEnabled() = 0;
+ virtual bool IsConcurrentGCEnabled() = 0;
// Wait for a concurrent GC to complete if one is in progress, with the given timeout.
virtual HRESULT WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout) = 0; // Use on native threads. TRUE if it succeeded, FALSE if it failed or timed out
@@ -298,17 +502,17 @@ public:
*/
// Finalizes an app domain by finalizing objects within that app domain.
- virtual BOOL FinalizeAppDomain(AppDomain* pDomain, BOOL fRunFinalizers) = 0;
+ virtual bool FinalizeAppDomain(AppDomain* pDomain, bool fRunFinalizers) = 0;
// Finalizes all registered objects for shutdown, even if they are still reachable.
- virtual void SetFinalizeQueueForShutdown(BOOL fHasLock) = 0;
+ virtual void SetFinalizeQueueForShutdown(bool fHasLock) = 0;
// Gets the number of finalizable objects.
virtual size_t GetNumberOfFinalizable() = 0;
// Traditionally used by the finalizer thread on shutdown to determine
// whether or not to time out. Returns true if the GC lock has not been taken.
- virtual BOOL ShouldRestartFinalizerWatchDog() = 0;
+ virtual bool ShouldRestartFinalizerWatchDog() = 0;
// Gets the next finalizable object.
virtual Object* GetNextFinalizable() = 0;
@@ -341,10 +545,10 @@ public:
// Registers for a full GC notification, raising a notification if the gen 2 or
// LOH object heap thresholds are exceeded.
- virtual BOOL RegisterForFullGCNotification(uint32_t gen2Percentage, uint32_t lohPercentage) = 0;
+ virtual bool RegisterForFullGCNotification(uint32_t gen2Percentage, uint32_t lohPercentage) = 0;
// Cancels a full GC notification that was requested by `RegisterForFullGCNotification`.
- virtual BOOL CancelFullGCNotification() = 0;
+ virtual bool CancelFullGCNotification() = 0;
// Returns the status of a registered notification for determining whether a blocking
// Gen 2 collection is about to be initiated, with the given timeout.
@@ -365,7 +569,7 @@ public:
// Begins a no-GC region, returning a code indicating whether entering the no-GC
// region was successful.
- virtual int StartNoGCRegion(uint64_t totalSize, BOOL lohSizeKnown, uint64_t lohSize, BOOL disallowFullBlockingGC) = 0;
+ virtual int StartNoGCRegion(uint64_t totalSize, bool lohSizeKnown, uint64_t lohSize, bool disallowFullBlockingGC) = 0;
// Exits a no-GC region.
virtual int EndNoGCRegion() = 0;
@@ -375,7 +579,7 @@ public:
// Forces a garbage collection of the given generation. Also used extensively
// throughout the VM.
- virtual HRESULT GarbageCollect(int generation = -1, BOOL low_memory_p = FALSE, int mode = collection_blocking) = 0;
+ virtual HRESULT GarbageCollect(int generation = -1, bool low_memory_p = false, int mode = collection_blocking) = 0;
// Gets the largest GC generation. Also used extensively throughout the VM.
virtual unsigned GetMaxGeneration() = 0;
@@ -397,16 +601,16 @@ public:
virtual HRESULT Initialize() = 0;
// Returns whether or not this object was promoted by the last GC.
- virtual BOOL IsPromoted(Object* object) = 0;
+ virtual bool IsPromoted(Object* object) = 0;
// Returns true if this pointer points into a GC heap, false otherwise.
- virtual BOOL IsHeapPointer(void* object, BOOL small_heap_only = FALSE) = 0;
+ virtual bool IsHeapPointer(void* object, bool small_heap_only = false) = 0;
// Return the generation that has been condemned by the current GC.
virtual unsigned GetCondemnedGeneration() = 0;
// Returns whether or not a GC is in progress.
- virtual BOOL IsGCInProgressHelper(BOOL bConsiderGCStart = FALSE) = 0;
+ virtual bool IsGCInProgressHelper(bool bConsiderGCStart = false) = 0;
// Returns the number of GCs that have occurred. Mainly used for
// sanity checks asserting that a GC has not occurred.
@@ -417,20 +621,23 @@ public:
virtual bool IsThreadUsingAllocationContextHeap(gc_alloc_context* acontext, int thread_number) = 0;
// Returns whether or not this object resides in an ephemeral generation.
- virtual BOOL IsEphemeral(Object* object) = 0;
+ virtual bool IsEphemeral(Object* object) = 0;
// Blocks until a GC is complete, returning a code indicating the wait was successful.
- virtual uint32_t WaitUntilGCComplete(BOOL bConsiderGCStart = FALSE) = 0;
+ virtual uint32_t WaitUntilGCComplete(bool bConsiderGCStart = false) = 0;
// "Fixes" an allocation context by binding its allocation pointer to a
// location on the heap.
- virtual void FixAllocContext(gc_alloc_context* acontext, BOOL lockp, void* arg, void* heap) = 0;
+ virtual void FixAllocContext(gc_alloc_context* acontext, bool lockp, void* arg, void* heap) = 0;
// Gets the total survived size plus the total allocated bytes on the heap.
virtual size_t GetCurrentObjSize() = 0;
// Sets whether or not a GC is in progress.
- virtual void SetGCInProgress(BOOL fInProgress) = 0;
+ virtual void SetGCInProgress(bool fInProgress) = 0;
+
+ // Gets whether or not the GC runtime structures are in a valid state for heap traversal.
+ virtual bool RuntimeStructuresValid() = 0;
/*
============================================================================
@@ -459,21 +666,22 @@ public:
*/
// Allocates an object on the given allocation context with the given size and flags.
+ // It is the responsibility of the caller to ensure that the passed-in alloc context is
+ // owned by the thread that is calling this function. If using per-thread alloc contexts,
+ // no lock is needed; callers not using per-thread alloc contexts will need to acquire
+ // a lock to ensure that the calling thread has unique ownership over this alloc context.
virtual Object* Alloc(gc_alloc_context* acontext, size_t size, uint32_t flags) = 0;
- // Allocates an object on the default allocation context with the given size and flags.
- virtual Object* Alloc(size_t size, uint32_t flags) = 0;
-
// Allocates an object on the large object heap with the given size and flags.
virtual Object* AllocLHeap(size_t size, uint32_t flags) = 0;
- // Allocates an object on the default allocation context, aligned to 64 bits,
- // with the given size and flags.
- virtual Object* AllocAlign8 (size_t size, uint32_t flags) = 0;
-
// Allocates an object on the given allocation context, aligned to 64 bits,
// with the given size and flags.
- virtual Object* AllocAlign8 (gc_alloc_context* acontext, size_t size, uint32_t flags) = 0;
+ // It is the responsibility of the caller to ensure that the passed-in alloc context is
+ // owned by the thread that is calling this function. If using per-thread alloc contexts,
+ // no lock is needed; callers not using per-thread alloc contexts will need to acquire
+ // a lock to ensure that the calling thread has unique ownership over this alloc context.
+ virtual Object* AllocAlign8(gc_alloc_context* acontext, size_t size, uint32_t flags) = 0;
// This is for the allocator to indicate it's done allocating a large object during a
// background GC as the BGC threads also need to walk LOH.
@@ -489,7 +697,7 @@ public:
===========================================================================
*/
// Returns whether or not this object is in the fixed heap.
- virtual BOOL IsObjectInFixedHeap(Object* pObj) = 0;
+ virtual bool IsObjectInFixedHeap(Object* pObj) = 0;
// Walks an object and validates its members.
virtual void ValidateObjectMember(Object* obj) = 0;
@@ -501,7 +709,9 @@ public:
// Given an interior pointer, return a pointer to the object
// containing that pointer. This is safe to call only when the EE is suspended.
- virtual Object* GetContainingObject(void* pInteriorPtr) = 0;
+ // When fCollectedGenOnly is true, it only returns the object if it's found in
+ // the generation(s) that are being collected.
+ virtual Object* GetContainingObject(void* pInteriorPtr, bool fCollectedGenOnly) = 0;
/*
===========================================================================
@@ -514,10 +724,10 @@ public:
virtual void DiagWalkObject(Object* obj, walk_fn fn, void* context) = 0;
// Walk the heap object by object.
- virtual void DiagWalkHeap(walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p) = 0;
+ virtual void DiagWalkHeap(walk_fn fn, void* context, int gen_number, bool walk_large_object_heap_p) = 0;
// Walks the survivors and get the relocation information if objects have moved.
- virtual void DiagWalkSurvivorsWithType(void* gc_context, record_surv_fn fn, size_t diag_context, walk_surv_type type) = 0;
+ virtual void DiagWalkSurvivorsWithType(void* gc_context, record_surv_fn fn, void* diag_context, walk_surv_type type) = 0;
// Walks the finalization queue.
virtual void DiagWalkFinalizeQueue(void* gc_context, fq_walk_fn fn) = 0;
@@ -543,8 +753,9 @@ public:
===========================================================================
*/
- // Returns TRUE if GC actually happens, otherwise FALSE
- virtual BOOL StressHeap(gc_alloc_context* acontext = 0) = 0;
+ // Returns TRUE if GC actually happens, otherwise FALSE. The passed alloc context
+ // must not be null.
+ virtual bool StressHeap(gc_alloc_context* acontext) = 0;
/*
===========================================================================
@@ -561,19 +772,6 @@ public:
IGCHeap() {}
virtual ~IGCHeap() {}
-
- typedef enum
- {
- GC_HEAP_INVALID = 0,
- GC_HEAP_WKS = 1,
- GC_HEAP_SVR = 2
- } GC_HEAP_TYPE;
-
-#ifdef FEATURE_SVR_GC
- SVAL_DECL(uint32_t, gcHeapType);
-#endif
-
- SVAL_DECL(uint32_t, maxGeneration);
};
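
The ownership contract added to Alloc and AllocAlign8 explains why the context-free overloads could be deleted: every caller now presents an allocation context it exclusively owns. A sketch of the two compliant call patterns; g_theGCHeap stands in for the process-wide IGCHeap*, and the shared-context globals and lock are hypothetical:

// Pattern 1: a per-thread alloc context -- no locking, the thread owns it.
thread_local gc_alloc_context t_allocContext;
Object* obj = g_theGCHeap->Alloc(&t_allocContext, size, 0 /* flags */);

// Pattern 2: a shared alloc context -- callers must serialize ownership.
static gc_alloc_context g_sharedContext;   // hypothetical shared context
static std::mutex g_sharedContextLock;     // hypothetical lock
{
    // Hold the lock across the call so this thread has unique ownership
    // of the context for the duration of the allocation.
    std::lock_guard<std::mutex> hold(g_sharedContextLock);
    obj = g_theGCHeap->Alloc(&g_sharedContext, size, 0);
}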
#ifdef WRITE_BARRIER_CHECK
@@ -597,8 +795,8 @@ struct ScanContext
Thread* thread_under_crawl;
int thread_number;
uintptr_t stack_limit; // Lowest point on the thread stack that the scanning logic is permitted to read
- BOOL promotion; //TRUE: Promotion, FALSE: Relocation.
- BOOL concurrent; //TRUE: concurrent scanning
+ bool promotion; //true: Promotion, false: Relocation.
+ bool concurrent; //true: concurrent scanning
#if CHECK_APP_DOMAIN_LEAKS || defined (FEATURE_APPDOMAIN_RESOURCE_MONITORING) || defined (DACCESS_COMPILE)
AppDomain *pCurrentDomain;
#endif //CHECK_APP_DOMAIN_LEAKS || FEATURE_APPDOMAIN_RESOURCE_MONITORING || DACCESS_COMPILE
@@ -619,8 +817,8 @@ struct ScanContext
thread_under_crawl = 0;
thread_number = -1;
stack_limit = 0;
- promotion = FALSE;
- concurrent = FALSE;
+ promotion = false;
+ concurrent = false;
#ifdef GC_PROFILING
pMD = NULL;
#endif //GC_PROFILING
diff --git a/src/gc/gcpriv.h b/src/gc/gcpriv.h
index 1f97d7f2d5..108045cd37 100644
--- a/src/gc/gcpriv.h
+++ b/src/gc/gcpriv.h
@@ -28,7 +28,7 @@ inline void FATAL_GC_ERROR()
GCToOSInterface::DebugBreak();
#endif // DACCESS_COMPILE
_ASSERTE(!"Fatal Error in GC.");
- EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
+ GCToEEInterface::HandleFatalError(COR_E_EXECUTIONENGINE);
}
#ifdef _MSC_VER
@@ -167,8 +167,6 @@ void GCLogConfig (const char *fmt, ... );
#define TRACE_GC
#endif
-#define NUMBERGENERATIONS 4 //Max number of generations
-
// For the bestfit algorithm when we relocate ephemeral generations into an
// existing gen2 segment.
// We recorded sizes from 2^6, 2^7, 2^8...up to 2^30 (1GB). So that's 25 sizes total.
@@ -759,10 +757,10 @@ public:
// Don't move these first two fields without adjusting the references
// from the __asm in jitinterface.cpp.
alloc_context allocation_context;
- heap_segment* allocation_segment;
PTR_heap_segment start_segment;
- uint8_t* allocation_context_start_region;
uint8_t* allocation_start;
+ heap_segment* allocation_segment;
+ uint8_t* allocation_context_start_region;
allocator free_list_allocator;
size_t free_list_allocated;
size_t end_seg_allocated;
@@ -792,6 +790,11 @@ public:
#endif //FREE_USAGE_STATS
};
+static_assert(offsetof(dac_generation, allocation_context) == offsetof(generation, allocation_context), "DAC generation offset mismatch");
+static_assert(offsetof(dac_generation, start_segment) == offsetof(generation, start_segment), "DAC generation offset mismatch");
+static_assert(offsetof(dac_generation, allocation_start) == offsetof(generation, allocation_start), "DAC generation offset mismatch");
+
+
// The dynamic data fields are grouped into 3 categories:
//
// calculated logical data (like desired_allocation)
@@ -1104,6 +1107,8 @@ class gc_heap
friend void initGCShadow();
#endif //defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
+ friend void PopulateDacVars(GcDacVars *gcDacVars);
+
#ifdef MULTIPLE_HEAPS
typedef void (gc_heap::* card_fn) (uint8_t**, int);
#define call_fn(fn) (this->*fn)
@@ -1293,19 +1298,19 @@ protected:
uint8_t* last_plug;
BOOL is_shortened;
mark* pinned_plug_entry;
- size_t profiling_context;
+ void* profiling_context;
record_surv_fn fn;
};
PER_HEAP
- void walk_survivors (record_surv_fn fn, size_t context, walk_surv_type type);
+ void walk_survivors (record_surv_fn fn, void* context, walk_surv_type type);
PER_HEAP
void walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p,
walk_relocate_args* args);
PER_HEAP
- void walk_relocation (size_t profiling_context, record_surv_fn fn);
+ void walk_relocation (void* profiling_context, record_surv_fn fn);
PER_HEAP
void walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args);
@@ -1315,14 +1320,14 @@ protected:
#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
PER_HEAP
- void walk_survivors_for_bgc (size_t profiling_context, record_surv_fn fn);
+ void walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn);
#endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
// used in blocking GCs after plan phase so this walks the plugs.
PER_HEAP
- void walk_survivors_relocation (size_t profiling_context, record_surv_fn fn);
+ void walk_survivors_relocation (void* profiling_context, record_surv_fn fn);
PER_HEAP
- void walk_survivors_for_loh (size_t profiling_context, record_surv_fn fn);
+ void walk_survivors_for_loh (void* profiling_context, record_surv_fn fn);
PER_HEAP
int generation_to_condemn (int n,
@@ -1390,6 +1395,9 @@ protected:
void thread_no_gc_loh_segments();
PER_HEAP
+ void check_and_set_no_gc_oom();
+
+ PER_HEAP
void allocate_for_no_gc_after_gc();
PER_HEAP
@@ -1550,13 +1558,6 @@ protected:
alloc_context* acontext,
int align_const);
- enum c_gc_state
- {
- c_gc_state_marking,
- c_gc_state_planning,
- c_gc_state_free
- };
-
#ifdef RECORD_LOH_STATE
#define max_saved_loh_states 12
PER_HEAP
@@ -1718,8 +1719,14 @@ protected:
PER_HEAP
void card_bundle_clear(size_t cardb);
PER_HEAP
+ void card_bundle_set (size_t cardb);
+ PER_HEAP
void card_bundles_set (size_t start_cardb, size_t end_cardb);
PER_HEAP
+ void verify_card_bundle_bits_set(size_t first_card_word, size_t last_card_word);
+ PER_HEAP
+ void verify_card_bundles();
+ PER_HEAP
BOOL card_bundle_set_p (size_t cardb);
PER_HEAP
BOOL find_card_dword (size_t& cardw, size_t cardw_end);
@@ -2161,7 +2168,7 @@ protected:
void relocate_in_loh_compact();
PER_HEAP
- void walk_relocation_for_loh (size_t profiling_context, record_surv_fn fn);
+ void walk_relocation_for_loh (void* profiling_context, record_surv_fn fn);
PER_HEAP
BOOL loh_enque_pinned_plug (uint8_t* plug, size_t len);
@@ -2753,9 +2760,6 @@ public:
PER_HEAP_ISOLATED
uint32_t cm_in_progress;
- PER_HEAP
- BOOL expanded_in_fgc;
-
// normally this is FALSE; we set it to TRUE at the end of the gen1 GC
// we do right before the bgc starts.
PER_HEAP_ISOLATED
@@ -2765,6 +2769,56 @@ public:
CLREvent bgc_start_event;
#endif //BACKGROUND_GC
+ // The variables in this block are known to the DAC and must come first
+ // in the gc_heap class.
+
+ // Keeps track of the highest address allocated by Alloc
+ PER_HEAP
+ uint8_t* alloc_allocated;
+
+ // The ephemeral heap segment
+ PER_HEAP
+ heap_segment* ephemeral_heap_segment;
+
+ // The finalize queue.
+ PER_HEAP
+ CFinalize* finalize_queue;
+
+ // OOM info.
+ PER_HEAP
+ oom_history oom_info;
+
+ // Interesting data, recorded per-heap.
+ PER_HEAP
+ size_t interesting_data_per_heap[max_idp_count];
+
+ PER_HEAP
+ size_t compact_reasons_per_heap[max_compact_reasons_count];
+
+ PER_HEAP
+ size_t expand_mechanisms_per_heap[max_expand_mechanisms_count];
+
+ PER_HEAP
+ size_t interesting_mechanism_bits_per_heap[max_gc_mechanism_bits_count];
+
+ PER_HEAP
+ uint8_t** internal_root_array;
+
+ PER_HEAP
+ size_t internal_root_array_index;
+
+ PER_HEAP
+ BOOL heap_analyze_success;
+
+ // The generation table. Must always be last.
+ PER_HEAP
+ generation generation_table [NUMBERGENERATIONS + 1];
+
+ // End DAC zone
+
+ PER_HEAP
+ BOOL expanded_in_fgc;
+
PER_HEAP_ISOLATED
uint32_t wait_for_gc_done(int32_t timeOut = INFINITE);
@@ -2815,12 +2869,8 @@ public:
short* brick_table;
#ifdef MARK_ARRAY
-#ifdef MULTIPLE_HEAPS
PER_HEAP
uint32_t* mark_array;
-#else
- SPTR_DECL(uint32_t, mark_array);
-#endif //MULTIPLE_HEAPS
#endif //MARK_ARRAY
#ifdef CARD_BUNDLE
@@ -2984,13 +3034,6 @@ protected:
#define heap_number (0)
#endif //MULTIPLE_HEAPS
-#ifndef MULTIPLE_HEAPS
- SPTR_DECL(heap_segment,ephemeral_heap_segment);
-#else
- PER_HEAP
- heap_segment* ephemeral_heap_segment;
-#endif // !MULTIPLE_HEAPS
-
PER_HEAP
size_t time_bgc_last;
@@ -3065,14 +3108,9 @@ protected:
uint8_t* background_written_addresses [array_size+2];
#endif //WRITE_WATCH
-#if defined (DACCESS_COMPILE) && !defined (MULTIPLE_HEAPS)
- // doesn't need to be volatile for DAC.
- SVAL_DECL(c_gc_state, current_c_gc_state);
-#else
PER_HEAP_ISOLATED
VOLATILE(c_gc_state) current_c_gc_state; //tells the large object allocator to
//mark the object as new since the start of gc.
-#endif //DACCESS_COMPILE && !MULTIPLE_HEAPS
PER_HEAP_ISOLATED
gc_mechanisms saved_bgc_settings;
@@ -3229,16 +3267,6 @@ protected:
PER_HEAP
heap_segment* saved_overflow_ephemeral_seg;
-#ifndef MULTIPLE_HEAPS
- SPTR_DECL(heap_segment, saved_sweep_ephemeral_seg);
-
- SPTR_DECL(uint8_t, saved_sweep_ephemeral_start);
-
- SPTR_DECL(uint8_t, background_saved_lowest_address);
-
- SPTR_DECL(uint8_t, background_saved_highest_address);
-#else
-
PER_HEAP
heap_segment* saved_sweep_ephemeral_seg;
@@ -3250,7 +3278,6 @@ protected:
PER_HEAP
uint8_t* background_saved_highest_address;
-#endif //!MULTIPLE_HEAPS
// This is used for synchronization between the bgc thread
// for this heap and the user threads allocating on this
@@ -3326,6 +3353,9 @@ protected:
size_t loh_allocation_no_gc;
PER_HEAP
+ bool no_gc_oom_p;
+
+ PER_HEAP
heap_segment* saved_loh_segment_no_gc;
PER_HEAP_ISOLATED
@@ -3334,14 +3364,6 @@ protected:
#define youngest_generation (generation_of (0))
#define large_object_generation (generation_of (max_generation+1))
-#ifndef MULTIPLE_HEAPS
- SPTR_DECL(uint8_t,alloc_allocated);
-#else
- PER_HEAP
- uint8_t* alloc_allocated; //keeps track of the highest
- //address allocated by alloc
-#endif // !MULTIPLE_HEAPS
-
// The more_space_lock and gc_lock is used for 3 purposes:
//
// 1) to coordinate threads that exceed their quantum (UP & MP) (more_space_lock)
@@ -3411,12 +3433,6 @@ protected:
#endif //SYNCHRONIZATION_STATS
-#ifdef MULTIPLE_HEAPS
- PER_HEAP
- generation generation_table [NUMBERGENERATIONS+1];
-#endif
-
-
#define NUM_LOH_ALIST (7)
#define BASE_LOH_ALIST (64*1024)
PER_HEAP
@@ -3493,34 +3509,14 @@ protected:
PER_HEAP_ISOLATED
BOOL alloc_wait_event_p;
-#ifndef MULTIPLE_HEAPS
- SPTR_DECL(uint8_t, next_sweep_obj);
-#else
PER_HEAP
uint8_t* next_sweep_obj;
-#endif //MULTIPLE_HEAPS
PER_HEAP
uint8_t* current_sweep_pos;
#endif //BACKGROUND_GC
-#ifndef MULTIPLE_HEAPS
- SVAL_DECL(oom_history, oom_info);
-#ifdef FEATURE_PREMORTEM_FINALIZATION
- SPTR_DECL(CFinalize,finalize_queue);
-#endif //FEATURE_PREMORTEM_FINALIZATION
-#else
-
- PER_HEAP
- oom_history oom_info;
-
-#ifdef FEATURE_PREMORTEM_FINALIZATION
- PER_HEAP
- PTR_CFinalize finalize_queue;
-#endif //FEATURE_PREMORTEM_FINALIZATION
-#endif // !MULTIPLE_HEAPS
-
PER_HEAP
fgm_history fgm_result;
@@ -3542,19 +3538,6 @@ protected:
PER_HEAP
size_t interesting_data_per_gc[max_idp_count];
-#ifdef MULTIPLE_HEAPS
- PER_HEAP
- size_t interesting_data_per_heap[max_idp_count];
-
- PER_HEAP
- size_t compact_reasons_per_heap[max_compact_reasons_count];
-
- PER_HEAP
- size_t expand_mechanisms_per_heap[max_expand_mechanisms_count];
-
- PER_HEAP
- size_t interesting_mechanism_bits_per_heap[max_gc_mechanism_bits_count];
-#endif //MULTIPLE_HEAPS
#endif //GC_CONFIG_DRIVEN
PER_HEAP
@@ -3635,21 +3618,6 @@ public:
PER_HEAP
size_t internal_root_array_length;
-#ifndef MULTIPLE_HEAPS
- SPTR_DECL(PTR_uint8_t, internal_root_array);
- SVAL_DECL(size_t, internal_root_array_index);
- SVAL_DECL(BOOL, heap_analyze_success);
-#else
- PER_HEAP
- uint8_t** internal_root_array;
-
- PER_HEAP
- size_t internal_root_array_index;
-
- PER_HEAP
- BOOL heap_analyze_success;
-#endif // !MULTIPLE_HEAPS
-
// next two fields are used to optimize the search for the object
// enclosing the current reference handled by ha_mark_object_simple.
PER_HEAP
@@ -3670,8 +3638,11 @@ public:
BOOL blocking_collection;
#ifdef MULTIPLE_HEAPS
- SVAL_DECL(int, n_heaps);
- SPTR_DECL(PTR_gc_heap, g_heaps);
+ static
+ int n_heaps;
+
+ static
+ gc_heap** g_heaps;
static
size_t* g_promoted;
@@ -3705,6 +3676,23 @@ protected:
}; // class gc_heap
+#define ASSERT_OFFSETS_MATCH(field) \
+ static_assert_no_msg(offsetof(dac_gc_heap, field) == offsetof(gc_heap, field))
+
+#ifdef MULTIPLE_HEAPS
+ASSERT_OFFSETS_MATCH(alloc_allocated);
+ASSERT_OFFSETS_MATCH(ephemeral_heap_segment);
+ASSERT_OFFSETS_MATCH(finalize_queue);
+ASSERT_OFFSETS_MATCH(oom_info);
+ASSERT_OFFSETS_MATCH(interesting_data_per_heap);
+ASSERT_OFFSETS_MATCH(compact_reasons_per_heap);
+ASSERT_OFFSETS_MATCH(expand_mechanisms_per_heap);
+ASSERT_OFFSETS_MATCH(interesting_mechanism_bits_per_heap);
+ASSERT_OFFSETS_MATCH(internal_root_array);
+ASSERT_OFFSETS_MATCH(internal_root_array_index);
+ASSERT_OFFSETS_MATCH(heap_analyze_success);
+ASSERT_OFFSETS_MATCH(generation_table);
+#endif // MULTIPLE_HEAPS
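
The ASSERT_OFFSETS_MATCH block enforces the "same layout" rule from gcinterface.dac.h at compile time: if anyone reorders gc_heap's DAC-visible fields without updating dac_gc_heap, the build fails instead of the debugger silently reading garbage. The same technique in miniature, with two hypothetical structs:

#include <cstddef>  // offsetof

// Sketch of compile-time layout mirroring, as used for gc_heap/dac_gc_heap.
struct real_type   { void* a; size_t b; int c; };
struct mirror_type { void* a; size_t b; };    // mirrors a prefix of real_type

#define ASSERT_MIRROR(field) \
    static_assert(offsetof(mirror_type, field) == offsetof(real_type, field), \
                  "mirror layout drifted from real layout")

ASSERT_MIRROR(a);
ASSERT_MIRROR(b);   // fails to compile if the field order ever diverges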
#ifdef FEATURE_PREMORTEM_FINALIZATION
class CFinalize
@@ -3712,6 +3700,9 @@ class CFinalize
#ifdef DACCESS_COMPILE
friend class ::ClrDataAccess;
#endif // DACCESS_COMPILE
+
+ friend class CFinalizeStaticAsserts;
+
private:
//adjust the count and add a constant to add a segment
@@ -3721,8 +3712,8 @@ private:
//Does not correspond to a segment
static const int FreeList = NUMBERGENERATIONS+ExtraSegCount;
- PTR_PTR_Object m_Array;
PTR_PTR_Object m_FillPointers[NUMBERGENERATIONS+ExtraSegCount];
+ PTR_PTR_Object m_Array;
PTR_PTR_Object m_EndArray;
size_t m_PromotedCount;
@@ -3776,10 +3767,18 @@ public:
void DiscardNonCriticalObjects();
//Methods used by the app domain unloading call to finalize objects in an app domain
- BOOL FinalizeAppDomain (AppDomain *pDomain, BOOL fRunFinalizers);
+ bool FinalizeAppDomain (AppDomain *pDomain, bool fRunFinalizers);
void CheckFinalizerObjects();
+
};
+
+class CFinalizeStaticAsserts {
+ static_assert(dac_finalize_queue::ExtraSegCount == CFinalize::ExtraSegCount, "ExtraSegCount mismatch");
+ static_assert(offsetof(dac_finalize_queue, m_FillPointers) == offsetof(CFinalize, m_FillPointers), "CFinalize layout mismatch");
+};
+
+
#endif // FEATURE_PREMORTEM_FINALIZATION
inline
@@ -4171,15 +4170,12 @@ public:
uint8_t* mem;
size_t flags;
PTR_heap_segment next;
- uint8_t* plan_allocated;
-#ifdef BACKGROUND_GC
uint8_t* background_allocated;
- uint8_t* saved_bg_allocated;
-#endif //BACKGROUND_GC
-
#ifdef MULTIPLE_HEAPS
gc_heap* heap;
#endif //MULTIPLE_HEAPS
+ uint8_t* plan_allocated;
+ uint8_t* saved_bg_allocated;
#ifdef _MSC_VER
// Disable this warning - we intentionally want __declspec(align()) to insert padding for us
@@ -4191,6 +4187,18 @@ public:
#endif
};
+static_assert(offsetof(dac_heap_segment, allocated) == offsetof(heap_segment, allocated), "DAC heap segment layout mismatch");
+static_assert(offsetof(dac_heap_segment, committed) == offsetof(heap_segment, committed), "DAC heap segment layout mismatch");
+static_assert(offsetof(dac_heap_segment, reserved) == offsetof(heap_segment, reserved), "DAC heap segment layout mismatch");
+static_assert(offsetof(dac_heap_segment, used) == offsetof(heap_segment, used), "DAC heap segment layout mismatch");
+static_assert(offsetof(dac_heap_segment, mem) == offsetof(heap_segment, mem), "DAC heap segment layout mismatch");
+static_assert(offsetof(dac_heap_segment, flags) == offsetof(heap_segment, flags), "DAC heap segment layout mismatch");
+static_assert(offsetof(dac_heap_segment, next) == offsetof(heap_segment, next), "DAC heap segment layout mismatch");
+static_assert(offsetof(dac_heap_segment, background_allocated) == offsetof(heap_segment, background_allocated), "DAC heap segment layout mismatch");
+#ifdef MULTIPLE_HEAPS
+static_assert(offsetof(dac_heap_segment, heap) == offsetof(heap_segment, heap), "DAC heap segment layout mismatch");
+#endif // MULTIPLE_HEAPS
+
inline
uint8_t*& heap_segment_reserved (heap_segment* inst)
{
@@ -4283,27 +4291,6 @@ gc_heap*& heap_segment_heap (heap_segment* inst)
}
#endif //MULTIPLE_HEAPS
-#ifndef MULTIPLE_HEAPS
-
-#ifndef DACCESS_COMPILE
-extern "C" {
-#endif //!DACCESS_COMPILE
-
-GARY_DECL(generation,generation_table,NUMBERGENERATIONS+1);
-
-#ifdef GC_CONFIG_DRIVEN
-GARY_DECL(size_t, interesting_data_per_heap, max_idp_count);
-GARY_DECL(size_t, compact_reasons_per_heap, max_compact_reasons_count);
-GARY_DECL(size_t, expand_mechanisms_per_heap, max_expand_mechanisms_count);
-GARY_DECL(size_t, interesting_mechanism_bits_per_heap, max_gc_mechanism_bits_count);
-#endif //GC_CONFIG_DRIVEN
-
-#ifndef DACCESS_COMPILE
-}
-#endif //!DACCESS_COMPILE
-
-#endif //MULTIPLE_HEAPS
-
inline
generation* gc_heap::generation_of (int n)
{
@@ -4329,12 +4316,14 @@ dynamic_data* gc_heap::dynamic_data_of (int gen_number)
#define card_size ((size_t)(OS_PAGE_SIZE/card_word_width))
#endif // BIT64
+// Returns the index of the card word a card is in
inline
size_t card_word (size_t card)
{
return card / card_word_width;
}
+// Returns the index of a card within its card word
inline
unsigned card_bit (size_t card)
{
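
To make the two indexing helpers above concrete, here is a standalone sketch; card_word_width is taken to be 32 purely for illustration, and card_bit's body (cut off by the hunk) is assumed to be the natural modulo:

    // Standalone sketch of the card indexing helpers (assumed card_word_width == 32).
    #include <cassert>
    #include <cstddef>

    static const size_t card_word_width = 32;  // assumed value for illustration

    static size_t card_word(size_t card) { return card / card_word_width; }
    static unsigned card_bit(size_t card) { return (unsigned)(card % card_word_width); }

    int main()
    {
        // Card 75 lives in card word 2 (which covers cards 64..95), at bit 11.
        assert(card_word(75) == 2);
        assert(card_bit(75) == 11);
        return 0;
    }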
diff --git a/src/gc/gcscan.cpp b/src/gc/gcscan.cpp
index b4e6352dd6..edcb533cd4 100644
--- a/src/gc/gcscan.cpp
+++ b/src/gc/gcscan.cpp
@@ -19,11 +19,7 @@
#include "gc.h"
#include "objecthandle.h"
-#ifdef DACCESS_COMPILE
-SVAL_IMPL_INIT(int32_t, GCScan, m_GcStructuresInvalidCnt, 1);
-#else //DACCESS_COMPILE
VOLATILE(int32_t) GCScan::m_GcStructuresInvalidCnt = 1;
-#endif //DACCESS_COMPILE
bool GCScan::GetGcRuntimeStructuresValid ()
{
@@ -33,18 +29,7 @@ bool GCScan::GetGcRuntimeStructuresValid ()
return (int32_t)m_GcStructuresInvalidCnt == 0;
}
-#ifdef DACCESS_COMPILE
-
-#ifndef FEATURE_REDHAWK
-void
-GCScan::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
-{
- UNREFERENCED_PARAMETER(flags);
- m_GcStructuresInvalidCnt.EnumMem();
-}
-#endif
-
-#else
+#ifndef DACCESS_COMPILE
//
// Dependent handle promotion scan support
diff --git a/src/gc/gcscan.h b/src/gc/gcscan.h
index 362370fa4a..c7060f3f51 100644
--- a/src/gc/gcscan.h
+++ b/src/gc/gcscan.h
@@ -89,19 +89,7 @@ class GCScan
static void VerifyHandleTable(int condemned, int max_gen, ScanContext* sc);
-private:
-#ifdef DACCESS_COMPILE
- SVAL_DECL(int32_t, m_GcStructuresInvalidCnt);
-#else
static VOLATILE(int32_t) m_GcStructuresInvalidCnt;
-#endif //DACCESS_COMPILE
};
-// These two functions are utilized to scan the heap if requested by ETW
-// or a profiler. The implementations of these two functions are in profheapwalkhelper.cpp.
-#if defined(FEATURE_EVENT_TRACE) | defined(GC_PROFILING)
-void ScanRootsHelper(Object* pObj, Object** ppRoot, ScanContext * pSC, DWORD dwFlags);
-BOOL HeapWalkHelper(Object * pBO, void * pvContext);
-#endif
-
#endif // _GCSCAN_H_
diff --git a/src/gc/handletable.cpp b/src/gc/handletable.cpp
index 29ee435b51..eee181959f 100644
--- a/src/gc/handletable.cpp
+++ b/src/gc/handletable.cpp
@@ -313,6 +313,10 @@ OBJECTHANDLE HndCreateHandle(HHANDLETABLE hTable, uint32_t uType, OBJECTREF obje
}
#endif // _DEBUG && !FEATURE_REDHAWK
+ // If we are creating a variable-strength handle, verify that the
+ // requested variable handle type is valid.
+ _ASSERTE(uType != HNDTYPE_VARIABLE || IS_VALID_VHT_VALUE(lExtraInfo));
+
VALIDATEOBJECTREF(object);
// fetch the handle table pointer
@@ -1334,24 +1338,6 @@ void Ref_RelocateAsyncPinHandles(HandleTableBucket *pSource, HandleTableBucket
}
#endif // !FEATURE_REDHAWK
-BOOL Ref_ContainHandle(HandleTableBucket *pBucket, OBJECTHANDLE handle)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- }
- CONTRACTL_END;
-
- int limit = getNumberOfSlots();
- for (int n = 0; n < limit; n ++ )
- {
- if (TableContainHandle(Table(pBucket->pTable[n]), handle))
- return TRUE;
- }
-
- return FALSE;
-}
/*--------------------------------------------------------------------------*/
diff --git a/src/gc/handletable.h b/src/gc/handletable.h
index bbb8b1db22..ebf8c62c33 100644
--- a/src/gc/handletable.h
+++ b/src/gc/handletable.h
@@ -14,6 +14,7 @@
#ifndef _HANDLETABLE_H
#define _HANDLETABLE_H
+#include "gcinterface.h"
/****************************************************************************
*
@@ -103,11 +104,6 @@ void HndWriteBarrier(OBJECTHANDLE handle, OBJECTREF value);
*/
void HndLogSetEvent(OBJECTHANDLE handle, _UNCHECKED_OBJECTREF value);
- /*
- * Scanning callback.
- */
-typedef void (CALLBACK *HANDLESCANPROC)(PTR_UNCHECKED_OBJECTREF pref, uintptr_t *pExtraInfo, uintptr_t param1, uintptr_t param2);
-
/*
* NON-GC handle enumeration
*/
@@ -181,8 +177,11 @@ BOOL HndFirstAssignHandle(OBJECTHANDLE handle, OBJECTREF objref);
/*
* inline handle dereferencing
+ *
+ * NOTE: Changes to this implementation should be kept in sync with ObjectFromHandle
+ * on the VM side.
+ *
*/
-
FORCEINLINE OBJECTREF HndFetchHandle(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
@@ -217,18 +216,6 @@ FORCEINLINE BOOL HndIsNull(OBJECTHANDLE handle)
}
-
-/*
- * inline handle checking
- */
-FORCEINLINE BOOL HndCheckForNullUnchecked(OBJECTHANDLE handle)
-{
- LIMITED_METHOD_CONTRACT;
-
- return (handle == NULL || (*(_UNCHECKED_OBJECTREF *)handle) == NULL);
-}
-
-
/*
*
* Checks handle value for null or special value used for free handles in cache.
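
To make the sync note above concrete, a minimal model of handle dereferencing (an illustrative assumption, not the runtime's actual type definitions) is a single indirection through the handle's slot:

    // Minimal model: an OBJECTHANDLE points at a slot that holds the object reference.
    struct Object;
    typedef Object** HandleModel;   // hypothetical stand-in for OBJECTHANDLE

    inline Object* FetchHandleModel(HandleModel handle)
    {
        // HndFetchHandle here and ObjectFromHandle on the VM side must both
        // reduce to this same single indirection, or the two views diverge.
        return *handle;
    }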
diff --git a/src/gc/objecthandle.cpp b/src/gc/objecthandle.cpp
index e8eed93006..5df53baad5 100644
--- a/src/gc/objecthandle.cpp
+++ b/src/gc/objecthandle.cpp
@@ -441,13 +441,13 @@ void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, uintpt
ScanContext *pSC = (ScanContext *)lp1;
uint32_t rootFlags = 0;
- BOOL isDependent = FALSE;
+ bool isDependent = false;
OBJECTHANDLE handle = (OBJECTHANDLE)(pRef);
switch (HandleFetchType(handle))
{
case HNDTYPE_DEPENDENT:
- isDependent = TRUE;
+ isDependent = true;
break;
case HNDTYPE_WEAK_SHORT:
case HNDTYPE_WEAK_LONG:
@@ -871,24 +871,6 @@ void Ref_EndSynchronousGC(uint32_t condemned, uint32_t maxgen)
*/
}
-
-OBJECTHANDLE CreateDependentHandle(HHANDLETABLE table, OBJECTREF primary, OBJECTREF secondary)
-{
- CONTRACTL
- {
- THROWS;
- GC_NOTRIGGER;
- MODE_COOPERATIVE;
- }
- CONTRACTL_END;
-
- OBJECTHANDLE handle = HndCreateHandle(table, HNDTYPE_DEPENDENT, primary);
-
- SetDependentHandleSecondary(handle, secondary);
-
- return handle;
-}
-
void SetDependentHandleSecondary(OBJECTHANDLE handle, OBJECTREF objref)
{
CONTRACTL
@@ -925,30 +907,6 @@ void SetDependentHandleSecondary(OBJECTHANDLE handle, OBJECTREF objref)
//----------------------------------------------------------------------------
/*
- * CreateVariableHandle.
- *
- * Creates a variable-strength handle.
- *
- * N.B. This routine is not a macro since we do validation in RETAIL.
- * We always validate the type here because it can come from external callers.
- */
-OBJECTHANDLE CreateVariableHandle(HHANDLETABLE hTable, OBJECTREF object, uint32_t type)
-{
- WRAPPER_NO_CONTRACT;
-
- // verify that we are being asked to create a valid type
- if (!IS_VALID_VHT_VALUE(type))
- {
- // bogus value passed in
- _ASSERTE(FALSE);
- return NULL;
- }
-
- // create the handle
- return HndCreateHandle(hTable, HNDTYPE_VARIABLE, object, (uintptr_t)type);
-}
-
-/*
* GetVariableHandleType.
*
* Retrieves the dynamic type of a variable-strength handle.
@@ -1898,51 +1856,6 @@ bool HandleTableBucket::Contains(OBJECTHANDLE handle)
return FALSE;
}
-void DestroySizedRefHandle(OBJECTHANDLE handle)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- SO_TOLERANT;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- HHANDLETABLE hTable = HndGetHandleTable(handle);
- HndDestroyHandle(hTable , HNDTYPE_SIZEDREF, handle);
- AppDomain* pDomain = SystemDomain::GetAppDomainAtIndex(HndGetHandleTableADIndex(hTable));
- pDomain->DecNumSizedRefHandles();
-}
-
-#ifdef FEATURE_COMINTEROP
-
-void DestroyWinRTWeakHandle(OBJECTHANDLE handle)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- CAN_TAKE_LOCK;
- SO_TOLERANT;
- }
- CONTRACTL_END;
-
- // Release the WinRT weak reference if we have one. We're assuming that this will not reenter the
- // runtime, since if we are pointing at a managed object, we should not be using a HNDTYPE_WEAK_WINRT
- // but rather a HNDTYPE_WEAK_SHORT or HNDTYPE_WEAK_LONG.
- IWeakReference* pWinRTWeakReference = reinterpret_cast<IWeakReference*>(HndGetHandleExtraInfo(handle));
- if (pWinRTWeakReference != NULL)
- {
- pWinRTWeakReference->Release();
- }
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_WEAK_WINRT, handle);
-}
-
-#endif // FEATURE_COMINTEROP
-
#endif // !DACCESS_COMPILE
OBJECTREF GetDependentHandleSecondary(OBJECTHANDLE handle)
diff --git a/src/gc/objecthandle.h b/src/gc/objecthandle.h
index 34c2a0e321..d3e45f8659 100644
--- a/src/gc/objecthandle.h
+++ b/src/gc/objecthandle.h
@@ -27,172 +27,9 @@
* non-NULL. In other words, if this handle is being initialized for the first
* time.
*/
-#define ObjectFromHandle(handle) HndFetchHandle(handle)
#define StoreObjectInHandle(handle, object) HndAssignHandle(handle, object)
#define InterlockedCompareExchangeObjectInHandle(handle, object, oldObj) HndInterlockedCompareExchangeHandle(handle, object, oldObj)
#define StoreFirstObjectInHandle(handle, object) HndFirstAssignHandle(handle, object)
-#define ObjectHandleIsNull(handle) HndIsNull(handle)
-#define IsHandleNullUnchecked(handle) HndCheckForNullUnchecked(handle)
-
-
-/*
- * HANDLES
- *
- * The default type of handle is a strong handle.
- *
- */
-#define HNDTYPE_DEFAULT HNDTYPE_STRONG
-
-
-/*
- * WEAK HANDLES
- *
- * Weak handles are handles that track an object as long as it is alive,
- * but do not keep the object alive if there are no strong references to it.
- *
- * The default type of weak handle is 'long-lived' weak handle.
- *
- */
-#define HNDTYPE_WEAK_DEFAULT HNDTYPE_WEAK_LONG
-
-
-/*
- * SHORT-LIVED WEAK HANDLES
- *
- * Short-lived weak handles are weak handles that track an object until the
- * first time it is detected to be unreachable. At this point, the handle is
- * severed, even if the object will be visible from a pending finalization
- * graph. This further implies that short weak handles do not track
- * across object resurrections.
- *
- */
-#define HNDTYPE_WEAK_SHORT (0)
-
-
-/*
- * LONG-LIVED WEAK HANDLES
- *
- * Long-lived weak handles are weak handles that track an object until the
- * object is actually reclaimed. Unlike short weak handles, long weak handles
- * continue to track their referents through finalization and across any
- * resurrections that may occur.
- *
- */
-#define HNDTYPE_WEAK_LONG (1)
-
-
-/*
- * STRONG HANDLES
- *
- * Strong handles are handles which function like a normal object reference.
- * The existence of a strong handle for an object will cause the object to
- * be promoted (remain alive) through a garbage collection cycle.
- *
- */
-#define HNDTYPE_STRONG (2)
-
-
-/*
- * PINNED HANDLES
- *
- * Pinned handles are strong handles which have the added property that they
- * prevent an object from moving during a garbage collection cycle. This is
- * useful when passing a pointer to object innards out of the runtime while GC
- * may be enabled.
- *
- * NOTE: PINNING AN OBJECT IS EXPENSIVE AS IT PREVENTS THE GC FROM ACHIEVING
- * OPTIMAL PACKING OF OBJECTS DURING EPHEMERAL COLLECTIONS. THIS TYPE
- * OF HANDLE SHOULD BE USED SPARINGLY!
- */
-#define HNDTYPE_PINNED (3)
-
-
-/*
- * VARIABLE HANDLES
- *
- * Variable handles are handles whose type can be changed dynamically. They
- * are larger than other types of handles, and are scanned a little more often,
- * but are useful when the handle owner needs an efficient way to change the
- * strength of a handle on the fly.
- *
- */
-#define HNDTYPE_VARIABLE (4)
-
-#if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK)
-/*
- * REFCOUNTED HANDLES
- *
- * Refcounted handles are handles that behave as strong handles while the
- * refcount on them is greater than 0 and behave as weak handles otherwise.
- *
- * N.B. These are currently NOT general purpose.
- * The implementation is tied to COM Interop.
- *
- */
-#define HNDTYPE_REFCOUNTED (5)
-#endif // FEATURE_COMINTEROP || FEATURE_REDHAWK
-
-
-/*
- * DEPENDENT HANDLES
- *
- * Dependent handles are two handles that need to have the same lifetime. One handle refers to a secondary object
- * that needs to have the same lifetime as the primary object. The secondary object should not cause the primary
- * object to be referenced, but as long as the primary object is alive, so must be the secondary
- *
- * They are currently used for EnC for adding new field members to existing instantiations under EnC modes where
- * the primary object is the original instantiation and the secondary represents the added field.
- *
- * They are also used to implement the ConditionalWeakTable class in mscorlib.dll. If you want to use
- * these from managed code, they are exposed to BCL through the managed DependentHandle class.
- *
- *
- */
-#define HNDTYPE_DEPENDENT (6)
-
-/*
- * PINNED HANDLES for asynchronous operation
- *
- * Pinned handles are strong handles which have the added property that they
- * prevent an object from moving during a garbage collection cycle. This is
- * useful when passing a pointer to object innards out of the runtime while GC
- * may be enabled.
- *
- * NOTE: PINNING AN OBJECT IS EXPENSIVE AS IT PREVENTS THE GC FROM ACHIEVING
- * OPTIMAL PACKING OF OBJECTS DURING EPHEMERAL COLLECTIONS. THIS TYPE
- * OF HANDLE SHOULD BE USED SPARINGLY!
- */
-#define HNDTYPE_ASYNCPINNED (7)
-
-
-/*
- * SIZEDREF HANDLES
- *
- * SizedRef handles are strong handles. Each handle has a piece of user data associated
- * with it that stores the size of the object this handle refers to. These handles
- * are scanned as strong roots during each GC but only during full GCs would the size
- * be calculated.
- *
- */
-#define HNDTYPE_SIZEDREF (8)
-
-#ifdef FEATURE_COMINTEROP
-
-/*
- * WINRT WEAK HANDLES
- *
- * WinRT weak reference handles hold two different types of weak handles to any
- * RCW with an underlying COM object that implements IWeakReferenceSource. The
- * object reference itself is a short weak handle to the RCW. In addition an
- * IWeakReference* to the underlying COM object is stored, allowing the handle
- * to create a new RCW if the existing RCW is collected. This ensures that any
- * code holding onto a WinRT weak reference can always access an RCW to the
- * underlying COM object as long as it has not been released by all of its strong
- * references.
- */
-#define HNDTYPE_WEAK_WINRT (9)
-
-#endif // FEATURE_COMINTEROP
typedef DPTR(struct HandleTableMap) PTR_HandleTableMap;
typedef DPTR(struct HandleTableBucket) PTR_HandleTableBucket;
@@ -234,396 +71,25 @@ struct HandleTableBucket
(flag == VHT_STRONG) || \
(flag == VHT_PINNED))
-#ifndef DACCESS_COMPILE
-/*
- * Convenience macros and prototypes for the various handle types we define
- */
-
-inline OBJECTHANDLE CreateTypedHandle(HHANDLETABLE table, OBJECTREF object, int type)
-{
- WRAPPER_NO_CONTRACT;
-
- return HndCreateHandle(table, type, object);
-}
-
-inline void DestroyTypedHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandleOfUnknownType(HndGetHandleTable(handle), handle);
-}
-
-inline OBJECTHANDLE CreateHandle(HHANDLETABLE table, OBJECTREF object)
-{
- WRAPPER_NO_CONTRACT;
-
- return HndCreateHandle(table, HNDTYPE_DEFAULT, object);
-}
-
-inline void DestroyHandle(OBJECTHANDLE handle)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- CAN_TAKE_LOCK;
- SO_TOLERANT;
- }
- CONTRACTL_END;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_DEFAULT, handle);
-}
-
-inline OBJECTHANDLE CreateDuplicateHandle(OBJECTHANDLE handle) {
- WRAPPER_NO_CONTRACT;
-
- // Create a new STRONG handle in the same table as an existing handle.
- return HndCreateHandle(HndGetHandleTable(handle), HNDTYPE_DEFAULT, ObjectFromHandle(handle));
-}
-
-
-inline OBJECTHANDLE CreateWeakHandle(HHANDLETABLE table, OBJECTREF object)
-{
- WRAPPER_NO_CONTRACT;
-
- return HndCreateHandle(table, HNDTYPE_WEAK_DEFAULT, object);
-}
-
-inline void DestroyWeakHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_WEAK_DEFAULT, handle);
-}
-
-inline OBJECTHANDLE CreateShortWeakHandle(HHANDLETABLE table, OBJECTREF object)
-{
- WRAPPER_NO_CONTRACT;
-
- return HndCreateHandle(table, HNDTYPE_WEAK_SHORT, object);
-}
-
-inline void DestroyShortWeakHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_WEAK_SHORT, handle);
-}
-
-
-inline OBJECTHANDLE CreateLongWeakHandle(HHANDLETABLE table, OBJECTREF object)
-{
- WRAPPER_NO_CONTRACT;
-
- return HndCreateHandle(table, HNDTYPE_WEAK_LONG, object);
-}
-
-inline void DestroyLongWeakHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_WEAK_LONG, handle);
-}
-
-#ifndef FEATURE_REDHAWK
-typedef Holder<OBJECTHANDLE,DoNothing<OBJECTHANDLE>,DestroyLongWeakHandle> LongWeakHandleHolder;
-#endif
-
-inline OBJECTHANDLE CreateStrongHandle(HHANDLETABLE table, OBJECTREF object)
-{
- WRAPPER_NO_CONTRACT;
-
- return HndCreateHandle(table, HNDTYPE_STRONG, object);
-}
-
-inline void DestroyStrongHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_STRONG, handle);
-}
-
-inline OBJECTHANDLE CreatePinningHandle(HHANDLETABLE table, OBJECTREF object)
-{
- WRAPPER_NO_CONTRACT;
-
- return HndCreateHandle(table, HNDTYPE_PINNED, object);
-}
-
-inline void DestroyPinningHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_PINNED, handle);
-}
-
-#ifndef FEATURE_REDHAWK
-typedef Wrapper<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, DestroyPinningHandle, NULL> PinningHandleHolder;
-#endif
-
-inline OBJECTHANDLE CreateAsyncPinningHandle(HHANDLETABLE table, OBJECTREF object)
-{
- WRAPPER_NO_CONTRACT;
-
- return HndCreateHandle(table, HNDTYPE_ASYNCPINNED, object);
-}
-
-inline void DestroyAsyncPinningHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_ASYNCPINNED, handle);
-}
-
-#ifndef FEATURE_REDHAWK
-typedef Wrapper<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, DestroyAsyncPinningHandle, NULL> AsyncPinningHandleHolder;
-#endif
-
-inline OBJECTHANDLE CreateSizedRefHandle(HHANDLETABLE table, OBJECTREF object)
-{
- WRAPPER_NO_CONTRACT;
-
- return HndCreateHandle(table, HNDTYPE_SIZEDREF, object, (uintptr_t)0);
-}
-
-void DestroySizedRefHandle(OBJECTHANDLE handle);
-
-#ifndef FEATURE_REDHAWK
-typedef Wrapper<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, DestroySizedRefHandle, NULL> SizeRefHandleHolder;
-#endif
-
-#ifdef FEATURE_COMINTEROP
-inline OBJECTHANDLE CreateRefcountedHandle(HHANDLETABLE table, OBJECTREF object)
-{
- WRAPPER_NO_CONTRACT;
-
- return HndCreateHandle(table, HNDTYPE_REFCOUNTED, object);
-}
-
-inline void DestroyRefcountedHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_REFCOUNTED, handle);
-}
-
-inline OBJECTHANDLE CreateWinRTWeakHandle(HHANDLETABLE table, OBJECTREF object, IWeakReference* pWinRTWeakReference)
-{
- WRAPPER_NO_CONTRACT;
- _ASSERTE(pWinRTWeakReference != NULL);
- return HndCreateHandle(table, HNDTYPE_WEAK_WINRT, object, reinterpret_cast<uintptr_t>(pWinRTWeakReference));
-}
-
-void DestroyWinRTWeakHandle(OBJECTHANDLE handle);
-
-#endif // FEATURE_COMINTEROP
-
-#endif // !DACCESS_COMPILE
-
OBJECTREF GetDependentHandleSecondary(OBJECTHANDLE handle);
#ifndef DACCESS_COMPILE
-OBJECTHANDLE CreateDependentHandle(HHANDLETABLE table, OBJECTREF primary, OBJECTREF secondary);
void SetDependentHandleSecondary(OBJECTHANDLE handle, OBJECTREF secondary);
-
-inline void DestroyDependentHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_DEPENDENT, handle);
-}
#endif // !DACCESS_COMPILE
#ifndef DACCESS_COMPILE
-
-OBJECTHANDLE CreateVariableHandle(HHANDLETABLE hTable, OBJECTREF object, uint32_t type);
uint32_t GetVariableHandleType(OBJECTHANDLE handle);
void UpdateVariableHandleType(OBJECTHANDLE handle, uint32_t type);
uint32_t CompareExchangeVariableHandleType(OBJECTHANDLE handle, uint32_t oldType, uint32_t newType);
-inline void DestroyVariableHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_VARIABLE, handle);
-}
-
void GCHandleValidatePinnedObject(OBJECTREF obj);
/*
- * Holder for OBJECTHANDLE
- */
-
-#ifndef FEATURE_REDHAWK
-typedef Wrapper<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, DestroyHandle > OHWrapper;
-
-class OBJECTHANDLEHolder : public OHWrapper
-{
-public:
- FORCEINLINE OBJECTHANDLEHolder(OBJECTHANDLE p = NULL) : OHWrapper(p)
- {
- LIMITED_METHOD_CONTRACT;
- }
- FORCEINLINE void operator=(OBJECTHANDLE p)
- {
- WRAPPER_NO_CONTRACT;
-
- OHWrapper::operator=(p);
- }
-};
-#endif
-
-#ifdef FEATURE_COMINTEROP
-
-typedef Wrapper<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, DestroyRefcountedHandle> RefCountedOHWrapper;
-
-class RCOBJECTHANDLEHolder : public RefCountedOHWrapper
-{
-public:
- FORCEINLINE RCOBJECTHANDLEHolder(OBJECTHANDLE p = NULL) : RefCountedOHWrapper(p)
- {
- LIMITED_METHOD_CONTRACT;
- }
- FORCEINLINE void operator=(OBJECTHANDLE p)
- {
- WRAPPER_NO_CONTRACT;
-
- RefCountedOHWrapper::operator=(p);
- }
-};
-
-#endif // FEATURE_COMINTEROP
-/*
* Convenience prototypes for using the global handles
*/
int GetCurrentThreadHomeHeapNumber();
-inline OBJECTHANDLE CreateGlobalTypedHandle(OBJECTREF object, int type)
-{
- WRAPPER_NO_CONTRACT;
- return HndCreateHandle(g_HandleTableMap.pBuckets[0]->pTable[GetCurrentThreadHomeHeapNumber()], type, object);
-}
-
-inline void DestroyGlobalTypedHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandleOfUnknownType(HndGetHandleTable(handle), handle);
-}
-
-inline OBJECTHANDLE CreateGlobalHandle(OBJECTREF object)
-{
- WRAPPER_NO_CONTRACT;
- CONDITIONAL_CONTRACT_VIOLATION(ModeViolation, object == NULL);
-
- return HndCreateHandle(g_HandleTableMap.pBuckets[0]->pTable[GetCurrentThreadHomeHeapNumber()], HNDTYPE_DEFAULT, object);
-}
-
-inline void DestroyGlobalHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_DEFAULT, handle);
-}
-
-inline OBJECTHANDLE CreateGlobalWeakHandle(OBJECTREF object)
-{
- WRAPPER_NO_CONTRACT;
-
- return HndCreateHandle(g_HandleTableMap.pBuckets[0]->pTable[GetCurrentThreadHomeHeapNumber()], HNDTYPE_WEAK_DEFAULT, object);
-}
-
-inline void DestroyGlobalWeakHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_WEAK_DEFAULT, handle);
-}
-
-inline OBJECTHANDLE CreateGlobalShortWeakHandle(OBJECTREF object)
-{
- WRAPPER_NO_CONTRACT;
- CONDITIONAL_CONTRACT_VIOLATION(ModeViolation, object == NULL);
-
- return HndCreateHandle(g_HandleTableMap.pBuckets[0]->pTable[GetCurrentThreadHomeHeapNumber()], HNDTYPE_WEAK_SHORT, object);
-}
-
-inline void DestroyGlobalShortWeakHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_WEAK_SHORT, handle);
-}
-
-#ifndef FEATURE_REDHAWK
-typedef Holder<OBJECTHANDLE,DoNothing<OBJECTHANDLE>,DestroyGlobalShortWeakHandle> GlobalShortWeakHandleHolder;
-#endif
-
-inline OBJECTHANDLE CreateGlobalLongWeakHandle(OBJECTREF object)
-{
- WRAPPER_NO_CONTRACT;
-
- return HndCreateHandle(g_HandleTableMap.pBuckets[0]->pTable[GetCurrentThreadHomeHeapNumber()], HNDTYPE_WEAK_LONG, object);
-}
-
-inline void DestroyGlobalLongWeakHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_WEAK_LONG, handle);
-}
-
-inline OBJECTHANDLE CreateGlobalStrongHandle(OBJECTREF object)
-{
- WRAPPER_NO_CONTRACT;
- CONDITIONAL_CONTRACT_VIOLATION(ModeViolation, object == NULL);
-
- return HndCreateHandle(g_HandleTableMap.pBuckets[0]->pTable[GetCurrentThreadHomeHeapNumber()], HNDTYPE_STRONG, object);
-}
-
-inline void DestroyGlobalStrongHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_STRONG, handle);
-}
-
-#ifndef FEATURE_REDHAWK
-typedef Holder<OBJECTHANDLE,DoNothing<OBJECTHANDLE>,DestroyGlobalStrongHandle> GlobalStrongHandleHolder;
-#endif
-
-inline OBJECTHANDLE CreateGlobalPinningHandle(OBJECTREF object)
-{
- WRAPPER_NO_CONTRACT;
-
- return HndCreateHandle(g_HandleTableMap.pBuckets[0]->pTable[GetCurrentThreadHomeHeapNumber()], HNDTYPE_PINNED, object);
-}
-
-inline void DestroyGlobalPinningHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_PINNED, handle);
-}
-
-#ifdef FEATURE_COMINTEROP
-inline OBJECTHANDLE CreateGlobalRefcountedHandle(OBJECTREF object)
-{
- WRAPPER_NO_CONTRACT;
-
- return HndCreateHandle(g_HandleTableMap.pBuckets[0]->pTable[GetCurrentThreadHomeHeapNumber()], HNDTYPE_REFCOUNTED, object);
-}
-
-inline void DestroyGlobalRefcountedHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_REFCOUNTED, handle);
-}
-#endif // FEATURE_COMINTEROP
-
inline void ResetOBJECTHANDLE(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
@@ -645,7 +111,6 @@ BOOL Ref_HandleAsyncPinHandles();
void Ref_RelocateAsyncPinHandles(HandleTableBucket *pSource, HandleTableBucket *pTarget);
void Ref_RemoveHandleTableBucket(HandleTableBucket *pBucket);
void Ref_DestroyHandleTableBucket(HandleTableBucket *pBucket);
-BOOL Ref_ContainHandle(HandleTableBucket *pBucket, OBJECTHANDLE handle);
/*
* GC-time scanning entrypoints
@@ -671,8 +136,6 @@ void Ref_ScanSizedRefHandles(uint32_t condemned, uint32_t maxgen, ScanContext* s
void Ref_ScanPointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn);
#endif
-typedef void (* handle_scan_fn)(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, BOOL isDependent);
-
void Ref_CheckReachable (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1);
void Ref_CheckAlive (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1);
void Ref_ScanHandlesForProfilerAndETW(uint32_t uMaxGeneration, uintptr_t lp1, handle_scan_fn fn);
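
With the inline convenience wrappers removed from this header, a caller holding a raw handle table composes handles out of the primitives that remain. For example, a dependent handle can be built roughly the way the deleted CreateDependentHandle did (a sketch; HNDTYPE_DEPENDENT is assumed to come from the new GC interface header, and production callers are expected to go through the GC handle table interface instead):

    // Sketch of what the removed CreateDependentHandle wrapper did.
    OBJECTHANDLE CreateDependentHandleSketch(HHANDLETABLE table, OBJECTREF primary, OBJECTREF secondary)
    {
        OBJECTHANDLE handle = HndCreateHandle(table, HNDTYPE_DEPENDENT, primary);
        SetDependentHandleSecondary(handle, secondary);
        return handle;
    }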
diff --git a/src/gc/sample/CMakeLists.txt b/src/gc/sample/CMakeLists.txt
index 29fd32f2ff..5fe7887963 100644
--- a/src/gc/sample/CMakeLists.txt
+++ b/src/gc/sample/CMakeLists.txt
@@ -10,6 +10,7 @@ set(SOURCES
gcenv.ee.cpp
../gccommon.cpp
../gceewks.cpp
+ ../gchandletable.cpp
../gcscan.cpp
../gcwks.cpp
../handletable.cpp
diff --git a/src/gc/sample/GCSample.cpp b/src/gc/sample/GCSample.cpp
index 112d291420..2914ee1665 100644
--- a/src/gc/sample/GCSample.cpp
+++ b/src/gc/sample/GCSample.cpp
@@ -126,22 +126,26 @@ int __cdecl main(int argc, char* argv[])
g_pFreeObjectMethodTable = &freeObjectMT;
//
- // Initialize handle table
- //
- if (!Ref_Initialize())
- return -1;
-
- //
// Initialize GC heap
//
- IGCHeap *pGCHeap = InitializeGarbageCollector(nullptr);
- if (!pGCHeap)
+ GcDacVars dacVars;
+ IGCHeap *pGCHeap;
+ IGCHandleTable *pGCHandleTable;
+ if (!InitializeGarbageCollector(nullptr, &pGCHeap, &pGCHandleTable, &dacVars))
+ {
return -1;
+ }
if (FAILED(pGCHeap->Initialize()))
return -1;
//
+ // Initialize handle table
+ //
+ if (!pGCHandleTable->Initialize())
+ return -1;
+
+ //
// Initialize current thread
//
ThreadStore::AttachCurrentThread();
@@ -197,41 +201,41 @@ int __cdecl main(int argc, char* argv[])
return -1;
// Create strong handle and store the object into it
- OBJECTHANDLE oh = CreateGlobalHandle(pObj);
+ OBJECTHANDLE oh = HndCreateHandle(g_HandleTableMap.pBuckets[0]->pTable[GetCurrentThreadHomeHeapNumber()], HNDTYPE_DEFAULT, pObj);
if (oh == NULL)
return -1;
for (int i = 0; i < 1000000; i++)
{
- Object * pBefore = ((My *)ObjectFromHandle(oh))->m_pOther1;
+ Object * pBefore = ((My *)HndFetchHandle(oh))->m_pOther1;
// Allocate more instances of the same object
Object * p = AllocateObject(pMyMethodTable);
if (p == NULL)
return -1;
- Object * pAfter = ((My *)ObjectFromHandle(oh))->m_pOther1;
+ Object * pAfter = ((My *)HndFetchHandle(oh))->m_pOther1;
        // Uncomment this assert to see how a GC triggered inside AllocateObject moves objects around
// assert(pBefore == pAfter);
// Store the newly allocated object into a field using WriteBarrier
- WriteBarrier(&(((My *)ObjectFromHandle(oh))->m_pOther1), p);
+ WriteBarrier(&(((My *)HndFetchHandle(oh))->m_pOther1), p);
}
// Create weak handle that points to our object
- OBJECTHANDLE ohWeak = CreateGlobalWeakHandle(ObjectFromHandle(oh));
+ OBJECTHANDLE ohWeak = HndCreateHandle(g_HandleTableMap.pBuckets[0]->pTable[GetCurrentThreadHomeHeapNumber()], HNDTYPE_WEAK_DEFAULT, HndFetchHandle(oh));
if (ohWeak == NULL)
return -1;
    // Destroy the strong handle so that nothing will be keeping our object alive
- DestroyGlobalHandle(oh);
+ HndDestroyHandle(HndGetHandleTable(oh), HNDTYPE_DEFAULT, oh);
// Explicitly trigger full GC
pGCHeap->GarbageCollect();
// Verify that the weak handle got cleared by the GC
- assert(ObjectFromHandle(ohWeak) == NULL);
+ assert(HndFetchHandle(ohWeak) == NULL);
printf("Done\n");
diff --git a/src/gc/sample/gcenv.ee.cpp b/src/gc/sample/gcenv.ee.cpp
index e95a78dc48..fa6efbf2d6 100644
--- a/src/gc/sample/gcenv.ee.cpp
+++ b/src/gc/sample/gcenv.ee.cpp
@@ -15,6 +15,8 @@ int32_t g_TrapReturningThreads;
EEConfig * g_pConfig;
+gc_alloc_context g_global_alloc_context;
+
bool CLREventStatic::CreateManualEventNoThrow(bool bInitialState)
{
m_hEvent = CreateEventW(NULL, TRUE, bInitialState, NULL);
@@ -135,7 +137,7 @@ void ThreadStore::AttachCurrentThread()
void GCToEEInterface::SuspendEE(SUSPEND_REASON reason)
{
- g_theGCHeap->SetGCInProgress(TRUE);
+ g_theGCHeap->SetGCInProgress(true);
// TODO: Implement
}
@@ -144,7 +146,7 @@ void GCToEEInterface::RestartEE(bool bFinishedGC)
{
// TODO: Implement
- g_theGCHeap->SetGCInProgress(FALSE);
+ g_theGCHeap->SetGCInProgress(false);
}
void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
@@ -263,6 +265,32 @@ void GCToEEInterface::EnableFinalization(bool foundFinalizers)
// TODO: Implement for finalization
}
+void GCToEEInterface::HandleFatalError(unsigned int exitCode)
+{
+ abort();
+}
+
+bool GCToEEInterface::ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj)
+{
+ return true;
+}
+
+bool GCToEEInterface::ForceFullGCToBeBlocking()
+{
+ return false;
+}
+
+bool GCToEEInterface::EagerFinalized(Object* obj)
+{
+ // The sample does not finalize anything eagerly.
+ return false;
+}
+
+MethodTable* GCToEEInterface::GetFreeObjectMethodTable()
+{
+ return g_pFreeObjectMethodTable;
+}
+
bool IsGCSpecialThread()
{
// TODO: Implement for background GC
diff --git a/src/gc/unix/CMakeLists.txt b/src/gc/unix/CMakeLists.txt
index ef66abf32a..3e1aa5ad19 100644
--- a/src/gc/unix/CMakeLists.txt
+++ b/src/gc/unix/CMakeLists.txt
@@ -5,6 +5,7 @@ include_directories("../env")
include(configure.cmake)
set(GC_PAL_SOURCES
- gcenv.unix.cpp)
+ gcenv.unix.cpp
+ cgroup.cpp)
add_library(gc_unix STATIC ${GC_PAL_SOURCES} ${VERSION_FILE_PATH})
diff --git a/src/gc/unix/cgroup.cpp b/src/gc/unix/cgroup.cpp
new file mode 100644
index 0000000000..1775ef7ff0
--- /dev/null
+++ b/src/gc/unix/cgroup.cpp
@@ -0,0 +1,342 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+/*++
+
+Module Name:
+
+ cgroup.cpp
+
+Abstract:
+ Read memory limits for the current process
+--*/
+#include <cstdint>
+#include <cstddef>
+#include <cassert>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/resource.h>
+#include <errno.h>
+
+#define SIZE_T_MAX (~(size_t)0)
+#define PROC_MOUNTINFO_FILENAME "/proc/self/mountinfo"
+#define PROC_CGROUP_FILENAME "/proc/self/cgroup"
+#define PROC_STATM_FILENAME "/proc/self/statm"
+#define MEM_LIMIT_FILENAME "/memory.limit_in_bytes"
+
+class CGroup
+{
+ char* m_memory_cgroup_path;
+public:
+ CGroup()
+ {
+ m_memory_cgroup_path = nullptr;
+ char* memoryHierarchyMount = nullptr;
+ char *cgroup_path_relative_to_mount = nullptr;
+ size_t len;
+ memoryHierarchyMount = FindMemoryHierarchyMount();
+ if (memoryHierarchyMount == nullptr)
+ goto done;
+
+ cgroup_path_relative_to_mount = FindCGroupPathForMemorySubsystem();
+ if (cgroup_path_relative_to_mount == nullptr)
+ goto done;
+
+ len = strlen(memoryHierarchyMount);
+ len += strlen(cgroup_path_relative_to_mount);
+ m_memory_cgroup_path = (char*)malloc(len+1);
+ if (m_memory_cgroup_path == nullptr)
+ goto done;
+
+ strcpy(m_memory_cgroup_path, memoryHierarchyMount);
+ strcat(m_memory_cgroup_path, cgroup_path_relative_to_mount);
+
+ done:
+ free(memoryHierarchyMount);
+ free(cgroup_path_relative_to_mount);
+ }
+
+ ~CGroup()
+ {
+ free(m_memory_cgroup_path);
+ }
+
+ bool GetPhysicalMemoryLimit(size_t *val)
+ {
+ char *mem_limit_filename = nullptr;
+ bool result = false;
+
+ if (m_memory_cgroup_path == nullptr)
+ return result;
+
+ size_t len = strlen(m_memory_cgroup_path);
+ len += strlen(MEM_LIMIT_FILENAME);
+ mem_limit_filename = (char*)malloc(len+1);
+ if (mem_limit_filename == nullptr)
+ return result;
+
+ strcpy(mem_limit_filename, m_memory_cgroup_path);
+ strcat(mem_limit_filename, MEM_LIMIT_FILENAME);
+ result = ReadMemoryValueFromFile(mem_limit_filename, val);
+ free(mem_limit_filename);
+ return result;
+ }
+
+private:
+ char* FindMemoryHierarchyMount()
+ {
+ char *line = nullptr;
+ size_t lineLen = 0, maxLineLen = 0;
+ char *filesystemType = nullptr;
+ char *options = nullptr;
+ char* mountpath = nullptr;
+
+ FILE *mountinfofile = fopen(PROC_MOUNTINFO_FILENAME, "r");
+ if (mountinfofile == nullptr)
+ goto done;
+
+ while (getline(&line, &lineLen, mountinfofile) != -1)
+ {
+ if (filesystemType == nullptr || lineLen > maxLineLen)
+ {
+ free(filesystemType);
+ free(options);
+ filesystemType = (char*)malloc(lineLen+1);
+ if (filesystemType == nullptr)
+ goto done;
+ options = (char*)malloc(lineLen+1);
+ if (options == nullptr)
+ goto done;
+ maxLineLen = lineLen;
+ }
+
+            char* separatorChar = strchr(line, '-');
+            // Skip malformed lines that lack the " - " separator instead of
+            // passing a null pointer to sscanf below.
+            if (separatorChar == nullptr)
+                continue;
+
+            // See man page of proc to get format for /proc/self/mountinfo file
+            int sscanfRet = sscanf(separatorChar,
+ "- %s %*s %s",
+ filesystemType,
+ options);
+ if (sscanfRet != 2)
+ {
+ assert(!"Failed to parse mount info file contents with sscanf.");
+ goto done;
+ }
+
+ if (strncmp(filesystemType, "cgroup", 6) == 0)
+ {
+ char* context = nullptr;
+ char* strTok = strtok_r(options, ",", &context);
+ while (strTok != nullptr)
+ {
+ if (strncmp("memory", strTok, 6) == 0)
+ {
+ mountpath = (char*)malloc(lineLen+1);
+ if (mountpath == nullptr)
+ goto done;
+
+ sscanfRet = sscanf(line,
+ "%*s %*s %*s %*s %s ",
+ mountpath);
+ if (sscanfRet != 1)
+ {
+ free(mountpath);
+ mountpath = nullptr;
+ assert(!"Failed to parse mount info file contents with sscanf.");
+ }
+ goto done;
+ }
+ strTok = strtok_r(nullptr, ",", &context);
+ }
+ }
+ }
+ done:
+ free(filesystemType);
+ free(options);
+ free(line);
+ if (mountinfofile)
+ fclose(mountinfofile);
+ return mountpath;
+ }
+
+ char* FindCGroupPathForMemorySubsystem()
+ {
+ char *line = nullptr;
+ size_t lineLen = 0;
+ size_t maxLineLen = 0;
+ char *subsystem_list = nullptr;
+ char *cgroup_path = nullptr;
+ bool result = false;
+
+ FILE *cgroupfile = fopen(PROC_CGROUP_FILENAME, "r");
+ if (cgroupfile == nullptr)
+ goto done;
+
+ while (!result && getline(&line, &lineLen, cgroupfile) != -1)
+ {
+ if (subsystem_list == nullptr || lineLen > maxLineLen)
+ {
+ free(subsystem_list);
+ free(cgroup_path);
+ subsystem_list = (char*)malloc(lineLen+1);
+ if (subsystem_list == nullptr)
+ goto done;
+ cgroup_path = (char*)malloc(lineLen+1);
+ if (cgroup_path == nullptr)
+ goto done;
+ maxLineLen = lineLen;
+ }
+
+ // See man page of proc to get format for /proc/self/cgroup file
+ int sscanfRet = sscanf(line,
+ "%*[^:]:%[^:]:%s",
+ subsystem_list,
+ cgroup_path);
+ if (sscanfRet != 2)
+ {
+ assert(!"Failed to parse cgroup info file contents with sscanf.");
+ goto done;
+ }
+
+ char* context = nullptr;
+ char* strTok = strtok_r(subsystem_list, ",", &context);
+ while (strTok != nullptr)
+ {
+ if (strncmp("memory", strTok, 6) == 0)
+ {
+ result = true;
+ break;
+ }
+ strTok = strtok_r(nullptr, ",", &context);
+ }
+ }
+ done:
+ free(subsystem_list);
+ if (!result)
+ {
+ free(cgroup_path);
+ cgroup_path = nullptr;
+ }
+ free(line);
+ if (cgroupfile)
+ fclose(cgroupfile);
+ return cgroup_path;
+ }
+
+ bool ReadMemoryValueFromFile(const char* filename, size_t* val)
+ {
+ bool result = false;
+ char *line = nullptr;
+ size_t lineLen = 0;
+ char* endptr = nullptr;
+        size_t num = 0, multiplier;
+ FILE* file = nullptr;
+
+ if (val == nullptr)
+ goto done;
+
+ file = fopen(filename, "r");
+ if (file == nullptr)
+ goto done;
+
+ if (getline(&line, &lineLen, file) == -1)
+ goto done;
+
+ errno = 0;
+ num = strtoull(line, &endptr, 0);
+ if (errno != 0)
+ goto done;
+
+ multiplier = 1;
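+        // The cases below fall through intentionally: 'G'/'g' scales by 1024^3,
+        // 'M'/'m' by 1024^2, and 'K'/'k' by 1024.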
+ switch(*endptr)
+ {
+ case 'g':
+ case 'G': multiplier = 1024;
+ case 'm':
+ case 'M': multiplier = multiplier*1024;
+ case 'k':
+ case 'K': multiplier = multiplier*1024;
+ }
+
+ *val = num * multiplier;
+ result = true;
+ if (*val/multiplier != num)
+ result = false;
+ done:
+ if (file)
+ fclose(file);
+ free(line);
+ return result;
+ }
+};
+
+size_t GetRestrictedPhysicalMemoryLimit()
+{
+ CGroup cgroup;
+ size_t physical_memory_limit;
+
+ if (!cgroup.GetPhysicalMemoryLimit(&physical_memory_limit))
+ physical_memory_limit = SIZE_T_MAX;
+
+ struct rlimit curr_rlimit;
+ size_t rlimit_soft_limit = RLIM_INFINITY;
+ if (getrlimit(RLIMIT_AS, &curr_rlimit) == 0)
+ {
+ rlimit_soft_limit = curr_rlimit.rlim_cur;
+ }
+ physical_memory_limit = (physical_memory_limit < rlimit_soft_limit) ?
+ physical_memory_limit : rlimit_soft_limit;
+
+ // Ensure that limit is not greater than real memory size
+ long pages = sysconf(_SC_PHYS_PAGES);
+ if (pages != -1)
+ {
+ long pageSize = sysconf(_SC_PAGE_SIZE);
+ if (pageSize != -1)
+ {
+ physical_memory_limit = (physical_memory_limit < (size_t)pages * pageSize)?
+ physical_memory_limit : (size_t)pages * pageSize;
+ }
+ }
+
+ return physical_memory_limit;
+}
+
+bool GetWorkingSetSize(size_t* val)
+{
+ bool result = false;
+    size_t linelen = 0;
+ char* line = nullptr;
+
+ if (val == nullptr)
+ return false;
+
+ FILE* file = fopen(PROC_STATM_FILENAME, "r");
+ if (file != nullptr && getline(&line, &linelen, file) != -1)
+ {
+
+ char* context = nullptr;
+ char* strTok = strtok_r(line, " ", &context);
+        // The second field of /proc/self/statm is the resident set size, in pages.
+        strTok = strtok_r(nullptr, " ", &context);
+
+        if (strTok != nullptr)
+        {
+            errno = 0;
+            *val = strtoull(strTok, nullptr, 0);
+            if (errno == 0)
+            {
+                long pageSize = sysconf(_SC_PAGE_SIZE);
+                if (pageSize != -1)
+                {
+                    *val = *val * pageSize;
+                    result = true;
+                }
+            }
+        }
+ }
+
+ if (file)
+ fclose(file);
+ free(line);
+ return result;
+}
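
A minimal consumer of the two entry points this file exports might look like the sketch below (the declarations match those the gcenv.unix.cpp hunk that follows adds; the printf output format is illustrative only):

    #include <cstdio>
    #include <cstddef>

    // Provided by cgroup.cpp above.
    size_t GetRestrictedPhysicalMemoryLimit();
    bool GetWorkingSetSize(size_t* val);

    int main()
    {
        size_t limit = GetRestrictedPhysicalMemoryLimit();
        printf("effective physical memory limit: %zu bytes\n", limit);

        size_t workingSet;
        if (GetWorkingSetSize(&workingSet))
            printf("resident working set: %zu bytes\n", workingSet);
        return 0;
    }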
diff --git a/src/gc/unix/gcenv.unix.cpp b/src/gc/unix/gcenv.unix.cpp
index 34a45b3cc1..45489c69a7 100644
--- a/src/gc/unix/gcenv.unix.cpp
+++ b/src/gc/unix/gcenv.unix.cpp
@@ -78,6 +78,11 @@ static uint8_t g_helperPage[OS_PAGE_SIZE] __attribute__((aligned(OS_PAGE_SIZE)))
// Mutex to make FlushProcessWriteBuffers thread safe
static pthread_mutex_t g_flushProcessWriteBuffersMutex;
+size_t GetRestrictedPhysicalMemoryLimit();
+bool GetWorkingSetSize(size_t* val);
+
+static size_t g_RestrictedPhysicalMemoryLimit = 0;
+
// Initialize the interface implementation
// Return:
// true if it has succeeded, false if it has failed
@@ -442,6 +447,18 @@ size_t GCToOSInterface::GetVirtualMemoryLimit()
// specified, it returns amount of actual physical memory.
uint64_t GCToOSInterface::GetPhysicalMemoryLimit()
{
+ size_t restricted_limit;
+ // The limit was not cached
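+    // (benign race: concurrent initializers compute the same value, so the
+    // volatile store is idempotent)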
+ if (g_RestrictedPhysicalMemoryLimit == 0)
+ {
+ restricted_limit = GetRestrictedPhysicalMemoryLimit();
+ VolatileStore(&g_RestrictedPhysicalMemoryLimit, restricted_limit);
+ }
+ restricted_limit = g_RestrictedPhysicalMemoryLimit;
+
+ if (restricted_limit != 0 && restricted_limit != SIZE_T_MAX)
+ return restricted_limit;
+
long pages = sysconf(_SC_PHYS_PAGES);
if (pages == -1)
{
@@ -471,14 +488,14 @@ void GCToOSInterface::GetMemoryStatus(uint32_t* memory_load, uint64_t* available
uint64_t available = 0;
uint32_t load = 0;
+ size_t used;
// Get the physical memory in use - from it, we can get the physical memory available.
// We do this only when we have the total physical memory available.
- if (total > 0)
+ if (total > 0 && GetWorkingSetSize(&used))
{
- available = sysconf(_SC_PHYS_PAGES) * sysconf(_SC_PAGE_SIZE);
- uint64_t used = total - available;
- load = (uint32_t)((used * 100) / total);
+ available = total > used ? total-used : 0;
+ load = (uint32_t)(((float)used * 100) / (float)total);
}
if (memory_load != nullptr)
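
As a quick check of the rewritten computation (illustrative numbers): with total = 8 GiB and a measured working set of 2 GiB, available becomes 6 GiB and load becomes (2 * 100) / 8 = 25. The old code derived "used" from _SC_PHYS_PAGES, which reports total pages rather than free ones, so the reported load was always 0.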
diff --git a/src/gc/windows/gcenv.windows.cpp b/src/gc/windows/gcenv.windows.cpp
index a636478245..30232bfb09 100644
--- a/src/gc/windows/gcenv.windows.cpp
+++ b/src/gc/windows/gcenv.windows.cpp
@@ -597,6 +597,9 @@ bool GCToOSInterface::CreateThread(GCThreadFunction function, void* param, GCThr
::SetThreadAffinityMask(gc_thread, (DWORD_PTR)1 << affinity->Processor);
}
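+    // Let the new thread run (it starts suspended so affinity can be applied
+    // first), then drop our handle to it; closing the handle does not stop the thread.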
+ ResumeThread(gc_thread);
+ CloseHandle(gc_thread);
+
return true;
}