Diffstat (limited to 'src/gc')
-rw-r--r--  src/gc/CMakeLists.txt                    |  12
-rw-r--r--  src/gc/env/gcenv.base.h                  |   2
-rw-r--r--  src/gc/env/gcenv.ee.h                    |  38
-rw-r--r--  src/gc/env/gcenv.object.h                |   2
-rw-r--r--  src/gc/env/gcenv.os.h                    |   3
-rw-r--r--  src/gc/gc.cpp                            | 972
-rw-r--r--  src/gc/gc.h                              | 571
-rw-r--r--  src/gc/gccommon.cpp                      |  67
-rw-r--r--  src/gc/gcee.cpp                          | 226
-rw-r--r--  src/gc/gcenv.ee.standalone.inl           | 176
-rw-r--r--  src/gc/gcenv.unix.cpp (renamed from src/gc/sample/gcenv.windows.cpp) | 237
-rw-r--r--  src/gc/gcenv.windows.cpp                 | 625
-rw-r--r--  src/gc/gcimpl.h                          |  72
-rw-r--r--  src/gc/gcinterface.ee.h                  | 133
-rw-r--r--  src/gc/gcinterface.h                     | 622
-rw-r--r--  src/gc/gcpriv.h                          |  57
-rw-r--r--  src/gc/gcrecord.h                        |   2
-rw-r--r--  src/gc/gcscan.cpp                        |  23
-rw-r--r--  src/gc/gcscan.h                          |   6
-rw-r--r--  src/gc/handletable.cpp                   |   8
-rw-r--r--  src/gc/handletablecache.cpp              |   6
-rw-r--r--  src/gc/handletablecore.cpp               |   7
-rw-r--r--  src/gc/handletablescan.cpp               |  12
-rw-r--r--  src/gc/objecthandle.cpp                  | 137
-rw-r--r--  src/gc/objecthandle.h                    |   7
-rw-r--r--  src/gc/sample/CMakeLists.txt             |   4
-rw-r--r--  src/gc/sample/GCSample.cpp               |  10
-rw-r--r--  src/gc/sample/GCSample.vcxproj           |   7
-rw-r--r--  src/gc/sample/GCSample.vcxproj.filters   |   2
-rw-r--r--  src/gc/sample/gcenv.ee.cpp               |  54
-rw-r--r--  src/gc/sample/gcenv.h                    |  13
-rw-r--r--  src/gc/sample/gcenv.unix.cpp             |  14
-rw-r--r--  src/gc/softwarewritewatch.cpp            |   1
33 files changed, 2454 insertions, 1674 deletions
diff --git a/src/gc/CMakeLists.txt b/src/gc/CMakeLists.txt
index 61e1ced727..d32d1c2dfb 100644
--- a/src/gc/CMakeLists.txt
+++ b/src/gc/CMakeLists.txt
@@ -38,6 +38,18 @@ set( GC_SOURCES_WKS
set( GC_SOURCES_DAC
${GC_SOURCES_DAC_AND_WKS_COMMON})
+if(FEATURE_STANDALONE_GC)
+ if(CLR_CMAKE_PLATFORM_UNIX)
+ set ( GC_SOURCES_WKS
+ ${GC_SOURCES_WKS}
+ gcenv.unix.cpp)
+ else()
+ set ( GC_SOURCES_WKS
+ ${GC_SOURCES_WKS}
+ gcenv.windows.cpp)
+ endif(CLR_CMAKE_PLATFORM_UNIX)
+endif(FEATURE_STANDALONE_GC)
+
convert_to_absolute_path(GC_SOURCES_WKS ${GC_SOURCES_WKS})
convert_to_absolute_path(GC_SOURCES_DAC ${GC_SOURCES_DAC})
diff --git a/src/gc/env/gcenv.base.h b/src/gc/env/gcenv.base.h
index a94f1a6394..94f73762f8 100644
--- a/src/gc/env/gcenv.base.h
+++ b/src/gc/env/gcenv.base.h
@@ -447,7 +447,7 @@ extern bool g_fFinalizerRunOnShutDown;
// Locks
//
-struct alloc_context;
+struct gc_alloc_context;
class Thread;
Thread * GetThread();
diff --git a/src/gc/env/gcenv.ee.h b/src/gc/env/gcenv.ee.h
index 0c1fd4988a..beb0c1a98f 100644
--- a/src/gc/env/gcenv.ee.h
+++ b/src/gc/env/gcenv.ee.h
@@ -7,35 +7,11 @@
#ifndef __GCENV_EE_H__
#define __GCENV_EE_H__
-struct ScanContext;
-class CrawlFrame;
-
-typedef void promote_func(PTR_PTR_Object, ScanContext*, uint32_t);
-
-typedef void enum_alloc_context_func(alloc_context*, void*);
-
-typedef struct
-{
- promote_func* f;
- ScanContext* sc;
- CrawlFrame * cf;
-} GCCONTEXT;
-
-// GC background thread function prototype
-typedef uint32_t (__stdcall *GCBackgroundThreadFunction)(void* param);
+#include "gcinterface.h"
class GCToEEInterface
{
public:
- //
- // Suspend/Resume callbacks
- //
- typedef enum
- {
- SUSPEND_FOR_GC = 1,
- SUSPEND_FOR_GC_PREP = 6
- } SUSPEND_REASON;
-
static void SuspendEE(SUSPEND_REASON reason);
static void RestartEE(bool bFinishedGC); //resume threads.
@@ -74,12 +50,22 @@ public:
static void EnablePreemptiveGC(Thread * pThread);
static void DisablePreemptiveGC(Thread * pThread);
- static alloc_context * GetAllocContext(Thread * pThread);
+ static gc_alloc_context * GetAllocContext(Thread * pThread);
static bool CatchAtSafePoint(Thread * pThread);
static void GcEnumAllocContexts(enum_alloc_context_func* fn, void* param);
static Thread* CreateBackgroundThread(GCBackgroundThreadFunction threadStart, void* arg);
+
+ // Diagnostics methods.
+ static void DiagGCStart(int gen, bool isInduced);
+ static void DiagUpdateGenerationBounds();
+ static void DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent);
+ static void DiagWalkFReachableObjects(void* gcContext);
+ static void DiagWalkSurvivors(void* gcContext);
+ static void DiagWalkLOHSurvivors(void* gcContext);
+ static void DiagWalkBGCSurvivors(void* gcContext);
+ static void StompWriteBarrier(WriteBarrierParameters* args);
};
#endif // __GCENV_EE_H__
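Note: the Diag* hooks above are only declared here; each embedder supplies its own definitions (the sample EE in src/gc/sample/gcenv.ee.cpp, the VM elsewhere in this change). The following is a minimal, purely illustrative no-op sketch of what an embedder with no profiler or ETW consumer might provide -- it is not the implementation from this commit.

// Illustrative stubs only: an embedder that attaches no diagnostics consumer
// can satisfy the new surface with empty bodies.
#include "gcenv.h"

void GCToEEInterface::DiagGCStart(int gen, bool isInduced)
{
    // Nothing to report when no profiler/ETW listener is present.
    (void)gen; (void)isInduced;
}

void GCToEEInterface::DiagUpdateGenerationBounds()
{
}

void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent)
{
    (void)index; (void)gen; (void)reason; (void)fConcurrent;
}

void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext) { (void)gcContext; }
void GCToEEInterface::DiagWalkSurvivors(void* gcContext)         { (void)gcContext; }
void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext)      { (void)gcContext; }
void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext)      { (void)gcContext; }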
diff --git a/src/gc/env/gcenv.object.h b/src/gc/env/gcenv.object.h
index c999e4538e..db8995a118 100644
--- a/src/gc/env/gcenv.object.h
+++ b/src/gc/env/gcenv.object.h
@@ -31,6 +31,8 @@ public:
void ClrGCBit() { m_uSyncBlockValue &= ~BIT_SBLK_GC_RESERVE; }
};
+static_assert(sizeof(ObjHeader) == sizeof(uintptr_t), "this assumption is made by the VM!");
+
#define MTFlag_ContainsPointers 1
#define MTFlag_HasFinalizer 2
#define MTFlag_IsArray 4
diff --git a/src/gc/env/gcenv.os.h b/src/gc/env/gcenv.os.h
index bb0153f117..6a126f29ed 100644
--- a/src/gc/env/gcenv.os.h
+++ b/src/gc/env/gcenv.os.h
@@ -73,13 +73,12 @@ public:
// Reserve virtual memory range.
// Parameters:
- // address - starting virtual address, it can be NULL to let the function choose the starting address
// size - size of the virtual memory range
// alignment - requested memory alignment
// flags - flags to control special settings like write watching
// Return:
// Starting virtual address of the reserved range
- static void* VirtualReserve(void *address, size_t size, size_t alignment, uint32_t flags);
+ static void* VirtualReserve(size_t size, size_t alignment, uint32_t flags);
// Release virtual memory range previously reserved using VirtualReserve
// Parameters:
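With the address hint removed, callers pass only the size, alignment, and flags; the OS layer picks the base address. A hedged usage sketch of the new shape (the local names are illustrative, mirroring the updated call sites in gc.cpp):

// Sketch of a call through the new three-argument signature.
uint32_t flags = VirtualReserveFlags::None;
uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve(alloc_size_aligned,
                                                         0,      // no specific alignment
                                                         flags);
if (mem == nullptr)
    return 0;   // reservation failed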
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index a62b02d33a..6187938ff8 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -21,22 +21,6 @@
#define USE_INTROSORT
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-inline BOOL ShouldTrackMovementForProfilerOrEtw()
-{
-#ifdef GC_PROFILING
- if (CORProfilerTrackGC())
- return true;
-#endif
-
-#ifdef FEATURE_EVENT_TRACE
- if (ETW::GCLog::ShouldTrackMovementForEtw())
- return true;
-#endif
-
- return false;
-}
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
BOOL bgc_heap_walk_for_etw_p = FALSE;
@@ -349,8 +333,8 @@ void gc_heap::add_to_history_per_heap()
#endif //BACKGROUND_GC
current_hist->fgc_lowest = lowest_address;
current_hist->fgc_highest = highest_address;
- current_hist->g_lowest = g_lowest_address;
- current_hist->g_highest = g_highest_address;
+ current_hist->g_lowest = g_gc_lowest_address;
+ current_hist->g_highest = g_gc_highest_address;
gchist_index_per_heap++;
if (gchist_index_per_heap == max_history_count)
@@ -1418,8 +1402,8 @@ int mark_time, plan_time, sweep_time, reloc_time, compact_time;
#ifndef MULTIPLE_HEAPS
-#define ephemeral_low g_ephemeral_low
-#define ephemeral_high g_ephemeral_high
+#define ephemeral_low g_gc_ephemeral_low
+#define ephemeral_high g_gc_ephemeral_high
#endif // MULTIPLE_HEAPS
@@ -1544,7 +1528,7 @@ void WaitLongerNoInstru (int i)
}
else if (g_TrapReturningThreads)
{
- GCHeap::GetGCHeap()->WaitUntilGCComplete();
+ g_theGCHeap->WaitUntilGCComplete();
}
}
@@ -1573,7 +1557,7 @@ retry:
unsigned int i = 0;
while (VolatileLoad(lock) >= 0)
{
- if ((++i & 7) && !GCHeap::IsGCInProgress())
+ if ((++i & 7) && !IsGCInProgress())
{
if (g_SystemInfo.dwNumberOfProcessors > 1)
{
@@ -1584,11 +1568,11 @@ retry:
#endif //!MULTIPLE_HEAPS
for (int j = 0; j < spin_count; j++)
{
- if (VolatileLoad(lock) < 0 || GCHeap::IsGCInProgress())
+ if (VolatileLoad(lock) < 0 || IsGCInProgress())
break;
YieldProcessor(); // indicate to the processor that we are spining
}
- if (VolatileLoad(lock) >= 0 && !GCHeap::IsGCInProgress())
+ if (VolatileLoad(lock) >= 0 && !IsGCInProgress())
{
safe_switch_to_thread();
}
@@ -2192,6 +2176,55 @@ int log2(unsigned int n)
return pos;
}
+#ifndef DACCESS_COMPILE
+
+void stomp_write_barrier_resize(bool is_runtime_suspended, bool requires_upper_bounds_check)
+{
+ WriteBarrierParameters args = {};
+ args.operation = WriteBarrierOp::StompResize;
+ args.is_runtime_suspended = is_runtime_suspended;
+ args.requires_upper_bounds_check = requires_upper_bounds_check;
+ args.card_table = g_gc_card_table;
+ args.lowest_address = g_gc_lowest_address;
+ args.highest_address = g_gc_highest_address;
+ GCToEEInterface::StompWriteBarrier(&args);
+}
+
+void stomp_write_barrier_ephemeral(bool is_runtime_suspended, uint8_t* ephemeral_lo, uint8_t* ephemeral_hi)
+{
+ WriteBarrierParameters args = {};
+ args.operation = WriteBarrierOp::StompEphemeral;
+ args.is_runtime_suspended = is_runtime_suspended;
+ args.ephemeral_lo = g_gc_ephemeral_low;
+ args.ephemeral_hi = g_gc_ephemeral_high;
+#ifdef MULTIPLE_HEAPS
+ // It is not correct to update the EE's g_ephemeral_low and g_ephemeral_high
+ // to anything other than their default values when using Server GC, since
+ // there is no single ephemeral generation across all of the heaps.
+ // Server GC write barriers do not reference these two globals, but ErectWriteBarrier does.
+ //
+ // When MULTIPLE_HEAPS is defined, g_gc_ephemeral_low and g_gc_ephemeral_high should
+ // always have their default values.
+ assert(args.ephemeral_lo == (uint8_t*)1);
+ assert(args.ephemeral_hi == (uint8_t*)~0);
+#endif // MULTIPLE_HEAPS
+ GCToEEInterface::StompWriteBarrier(&args);
+}
+
+void stomp_write_barrier_initialize()
+{
+ WriteBarrierParameters args = {};
+ args.operation = WriteBarrierOp::Initialize;
+ args.is_runtime_suspended = true;
+ args.requires_upper_bounds_check = false;
+ args.card_table = g_gc_card_table;
+ args.lowest_address = g_gc_lowest_address;
+ args.highest_address = g_gc_highest_address;
+ GCToEEInterface::StompWriteBarrier(&args);
+}
+
+#endif // DACCESS_COMPILE
+
//extract the low bits [0,low[ of a uint32_t
#define lowbits(wrd, bits) ((wrd) & ((1 << (bits))-1))
//extract the high bits [high, 32] of a uint32_t
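The three helpers above package every write-barrier update into a WriteBarrierParameters block and hand it to the EE through the single GCToEEInterface::StompWriteBarrier entry point. A minimal sketch of how an EE side might dispatch on the operation field follows; the handler body is illustrative (a real VM patches its JITted barrier code), and the g_* globals named here are the EE-side counterparts referenced elsewhere in this diff:

// Illustrative EE-side handler: mirrors the parameters into EE-visible globals.
void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args)
{
    switch (args->operation)
    {
    case WriteBarrierOp::Initialize:
    case WriteBarrierOp::StompResize:
        // The card table and/or heap range changed; republish them.
        g_card_table      = args->card_table;
        g_lowest_address  = args->lowest_address;
        g_highest_address = args->highest_address;
        break;
    case WriteBarrierOp::StompEphemeral:
        // Only the ephemeral generation bounds changed.
        g_ephemeral_low  = args->ephemeral_lo;
        g_ephemeral_high = args->ephemeral_hi;
        break;
    default:
        assert(!"unknown write barrier operation");
    }
}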
@@ -3422,7 +3455,7 @@ inline
size_t ro_seg_begin_index (heap_segment* seg)
{
size_t begin_index = (size_t)seg / gc_heap::min_segment_size;
- begin_index = max (begin_index, (size_t)g_lowest_address / gc_heap::min_segment_size);
+ begin_index = max (begin_index, (size_t)g_gc_lowest_address / gc_heap::min_segment_size);
return begin_index;
}
@@ -3430,14 +3463,14 @@ inline
size_t ro_seg_end_index (heap_segment* seg)
{
size_t end_index = (size_t)(heap_segment_reserved (seg) - 1) / gc_heap::min_segment_size;
- end_index = min (end_index, (size_t)g_highest_address / gc_heap::min_segment_size);
+ end_index = min (end_index, (size_t)g_gc_highest_address / gc_heap::min_segment_size);
return end_index;
}
void seg_mapping_table_add_ro_segment (heap_segment* seg)
{
#ifdef GROWABLE_SEG_MAPPING_TABLE
- if ((heap_segment_reserved (seg) <= g_lowest_address) || (heap_segment_mem (seg) >= g_highest_address))
+ if ((heap_segment_reserved (seg) <= g_gc_lowest_address) || (heap_segment_mem (seg) >= g_gc_highest_address))
return;
#endif //GROWABLE_SEG_MAPPING_TABLE
@@ -3621,7 +3654,7 @@ gc_heap* seg_mapping_table_heap_of_worker (uint8_t* o)
gc_heap* seg_mapping_table_heap_of (uint8_t* o)
{
#ifdef GROWABLE_SEG_MAPPING_TABLE
- if ((o < g_lowest_address) || (o >= g_highest_address))
+ if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
return 0;
#endif //GROWABLE_SEG_MAPPING_TABLE
@@ -3631,7 +3664,7 @@ gc_heap* seg_mapping_table_heap_of (uint8_t* o)
gc_heap* seg_mapping_table_heap_of_gc (uint8_t* o)
{
#if defined(FEATURE_BASICFREEZE) && defined(GROWABLE_SEG_MAPPING_TABLE)
- if ((o < g_lowest_address) || (o >= g_highest_address))
+ if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
return 0;
#endif //FEATURE_BASICFREEZE || GROWABLE_SEG_MAPPING_TABLE
@@ -3643,7 +3676,7 @@ gc_heap* seg_mapping_table_heap_of_gc (uint8_t* o)
heap_segment* seg_mapping_table_segment_of (uint8_t* o)
{
#if defined(FEATURE_BASICFREEZE) && defined(GROWABLE_SEG_MAPPING_TABLE)
- if ((o < g_lowest_address) || (o >= g_highest_address))
+ if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
#ifdef FEATURE_BASICFREEZE
return ro_segment_lookup (o);
#else
@@ -3686,7 +3719,7 @@ heap_segment* seg_mapping_table_segment_of (uint8_t* o)
#ifdef FEATURE_BASICFREEZE
// TODO: This was originally written assuming that the seg_mapping_table would always contain entries for ro
- // segments whenever the ro segment falls into the [g_lowest_address,g_highest_address) range. I.e., it had an
+ // segments whenever the ro segment falls into the [g_gc_lowest_address,g_gc_highest_address) range. I.e., it had an
// extra "&& (size_t)(entry->seg1) & ro_in_entry" expression. However, at the moment, grow_brick_card_table does
// not correctly go through the ro segments and add them back to the seg_mapping_table when the [lowest,highest)
// range changes. We should probably go ahead and modify grow_brick_card_table and put back the
@@ -3743,9 +3776,9 @@ public:
BOOL fSmallObjectHeapPtr = FALSE, fLargeObjectHeapPtr = FALSE;
if (!noRangeChecks)
{
- fSmallObjectHeapPtr = GCHeap::GetGCHeap()->IsHeapPointer(this, TRUE);
+ fSmallObjectHeapPtr = g_theGCHeap->IsHeapPointer(this, TRUE);
if (!fSmallObjectHeapPtr)
- fLargeObjectHeapPtr = GCHeap::GetGCHeap()->IsHeapPointer(this);
+ fLargeObjectHeapPtr = g_theGCHeap->IsHeapPointer(this);
_ASSERTE(fSmallObjectHeapPtr || fLargeObjectHeapPtr);
}
@@ -3763,14 +3796,14 @@ public:
#ifdef VERIFY_HEAP
if (bDeep && (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC))
- GCHeap::GetGCHeap()->ValidateObjectMember(this);
+ g_theGCHeap->ValidateObjectMember(this);
#endif
if (fSmallObjectHeapPtr)
{
#ifdef FEATURE_BASICFREEZE
- _ASSERTE(!GCHeap::GetGCHeap()->IsLargeObject(pMT) || GCHeap::GetGCHeap()->IsInFrozenSegment(this));
+ _ASSERTE(!g_theGCHeap->IsLargeObject(pMT) || g_theGCHeap->IsInFrozenSegment(this));
#else
- _ASSERTE(!GCHeap::GetGCHeap()->IsLargeObject(pMT));
+ _ASSERTE(!g_theGCHeap->IsLargeObject(pMT));
#endif
}
}
@@ -4086,8 +4119,8 @@ BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_h
memory_details.current_block_normal = 0;
memory_details.current_block_large = 0;
- g_lowest_address = MAX_PTR;
- g_highest_address = 0;
+ g_gc_lowest_address = MAX_PTR;
+ g_gc_highest_address = 0;
if (((size_t)MAX_PTR - large_size) < normal_size)
{
@@ -4107,8 +4140,8 @@ BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_h
uint8_t* allatonce_block = (uint8_t*)virtual_alloc (requestedMemory);
if (allatonce_block)
{
- g_lowest_address = allatonce_block;
- g_highest_address = allatonce_block + (memory_details.block_count * (large_size + normal_size));
+ g_gc_lowest_address = allatonce_block;
+ g_gc_highest_address = allatonce_block + (memory_details.block_count * (large_size + normal_size));
memory_details.allocation_pattern = initial_memory_details::ALLATONCE;
for(size_t i = 0; i < memory_details.block_count; i++)
@@ -4131,8 +4164,8 @@ BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_h
if (b2)
{
memory_details.allocation_pattern = initial_memory_details::TWO_STAGE;
- g_lowest_address = min(b1,b2);
- g_highest_address = max(b1 + memory_details.block_count*normal_size,
+ g_gc_lowest_address = min(b1,b2);
+ g_gc_highest_address = max(b1 + memory_details.block_count*normal_size,
b2 + memory_details.block_count*large_size);
for(size_t i = 0; i < memory_details.block_count; i++)
{
@@ -4178,10 +4211,10 @@ BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_h
}
else
{
- if (current_block->memory_base < g_lowest_address)
- g_lowest_address = current_block->memory_base;
- if (((uint8_t *) current_block->memory_base + block_size) > g_highest_address)
- g_highest_address = (current_block->memory_base + block_size);
+ if (current_block->memory_base < g_gc_lowest_address)
+ g_gc_lowest_address = current_block->memory_base;
+ if (((uint8_t *) current_block->memory_base + block_size) > g_gc_highest_address)
+ g_gc_highest_address = (current_block->memory_base + block_size);
}
reserve_success = TRUE;
}
@@ -4288,7 +4321,7 @@ void* virtual_alloc (size_t size)
flags = VirtualReserveFlags::WriteWatch;
}
#endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
- void* prgmem = GCToOSInterface::VirtualReserve (0, requested_size, card_size * card_word_width, flags);
+ void* prgmem = GCToOSInterface::VirtualReserve (requested_size, card_size * card_word_width, flags);
void *aligned_mem = prgmem;
// We don't want (prgmem + size) to be right at the end of the address space
@@ -4361,7 +4394,7 @@ static size_t get_valid_segment_size (BOOL large_seg=FALSE)
// if seg_size is small but not 0 (0 is default if config not set)
// then set the segment to the minimum size
- if (!GCHeap::IsValidSegmentSize(seg_size))
+ if (!g_theGCHeap->IsValidSegmentSize(seg_size))
{
// if requested size is between 1 byte and 4MB, use min
if ((seg_size >> 1) && !(seg_size >> 22))
@@ -4623,22 +4656,22 @@ gc_heap::get_segment (size_t size, BOOL loh_p)
{
uint8_t* start;
uint8_t* end;
- if (mem < g_lowest_address)
+ if (mem < g_gc_lowest_address)
{
start = (uint8_t*)mem;
}
else
{
- start = (uint8_t*)g_lowest_address;
+ start = (uint8_t*)g_gc_lowest_address;
}
- if (((uint8_t*)mem + size) > g_highest_address)
+ if (((uint8_t*)mem + size) > g_gc_highest_address)
{
end = (uint8_t*)mem + size;
}
else
{
- end = (uint8_t*)g_highest_address;
+ end = (uint8_t*)g_gc_highest_address;
}
if (gc_heap::grow_brick_card_tables (start, end, size, result, __this, loh_p) != 0)
@@ -4703,10 +4736,7 @@ heap_segment* gc_heap::get_segment_for_loh (size_t size
FireEtwGCCreateSegment_V1((size_t)heap_segment_mem(res), (size_t)(heap_segment_reserved (res) - heap_segment_mem(res)), ETW::GCLog::ETW_GC_INFO::LARGE_OBJECT_HEAP, GetClrInstanceId());
-#ifdef GC_PROFILING
- if (CORProfilerTrackGC())
- UpdateGenerationBounds();
-#endif // GC_PROFILING
+ GCToEEInterface::DiagUpdateGenerationBounds();
#ifdef MULTIPLE_HEAPS
hp->thread_loh_segment (res);
@@ -5231,7 +5261,7 @@ void gc_heap::gc_thread_function ()
gc_heap::ee_suspend_event.Wait(INFINITE, FALSE);
BEGIN_TIMING(suspend_ee_during_log);
- GCToEEInterface::SuspendEE(GCToEEInterface::SUSPEND_FOR_GC);
+ GCToEEInterface::SuspendEE(SUSPEND_FOR_GC);
END_TIMING(suspend_ee_during_log);
proceed_with_gc_p = TRUE;
@@ -5340,7 +5370,7 @@ heap_segment* gc_heap::segment_of (uint8_t* add, ptrdiff_t& delta, BOOL verify_p
uint8_t* sadd = add;
heap_segment* hs = 0;
heap_segment* hs1 = 0;
- if (!((add >= g_lowest_address) && (add < g_highest_address)))
+ if (!((add >= g_gc_lowest_address) && (add < g_gc_highest_address)))
{
delta = 0;
return 0;
@@ -5523,7 +5553,6 @@ public:
saved_post_plug_reloc = temp;
}
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
void swap_pre_plug_and_saved_for_profiler()
{
gap_reloc_pair temp;
@@ -5539,7 +5568,6 @@ public:
memcpy (saved_post_plug_info_start, &saved_post_plug, sizeof (saved_post_plug));
saved_post_plug = temp;
}
-#endif //GC_PROFILING || //FEATURE_EVENT_TRACE
// We should think about whether it's really necessary to have to copy back the pre plug
// info since it was already copied during compacting plugs. But if a plug doesn't move
@@ -5775,7 +5803,7 @@ void gc_heap::fix_allocation_context (alloc_context* acontext, BOOL for_gc_p,
//used by the heap verification for concurrent gc.
//it nulls out the words set by fix_allocation_context for heap_verification
-void repair_allocation (alloc_context* acontext, void*)
+void repair_allocation (gc_alloc_context* acontext, void*)
{
uint8_t* point = acontext->alloc_ptr;
@@ -5788,7 +5816,7 @@ void repair_allocation (alloc_context* acontext, void*)
}
}
-void void_allocation (alloc_context* acontext, void*)
+void void_allocation (gc_alloc_context* acontext, void*)
{
uint8_t* point = acontext->alloc_ptr;
@@ -5818,10 +5846,10 @@ struct fix_alloc_context_args
void* heap;
};
-void fix_alloc_context(alloc_context* acontext, void* param)
+void fix_alloc_context(gc_alloc_context* acontext, void* param)
{
fix_alloc_context_args* args = (fix_alloc_context_args*)param;
- GCHeap::GetGCHeap()->FixAllocContext(acontext, FALSE, (void*)(size_t)(args->for_gc_p), args->heap);
+ g_theGCHeap->FixAllocContext(acontext, FALSE, (void*)(size_t)(args->for_gc_p), args->heap);
}
void gc_heap::fix_allocation_contexts(BOOL for_gc_p)
@@ -6399,7 +6427,7 @@ void gc_heap::set_card (size_t card)
inline
void gset_card (size_t card)
{
- g_card_table [card_word (card)] |= (1 << card_bit (card));
+ g_gc_card_table [card_word (card)] |= (1 << card_bit (card));
}
inline
@@ -6510,7 +6538,7 @@ size_t size_card_bundle_of (uint8_t* from, uint8_t* end)
uint32_t* translate_card_bundle_table (uint32_t* cb)
{
- return (uint32_t*)((uint8_t*)cb - ((((size_t)g_lowest_address) / (card_size*card_word_width*card_bundle_size*card_bundle_word_width)) * sizeof (uint32_t)));
+ return (uint32_t*)((uint8_t*)cb - ((((size_t)g_gc_lowest_address) / (card_size*card_word_width*card_bundle_size*card_bundle_word_width)) * sizeof (uint32_t)));
}
void gc_heap::enable_card_bundles ()
@@ -6722,7 +6750,7 @@ size_t size_mark_array_of (uint8_t* from, uint8_t* end)
// according to the lowest_address.
uint32_t* translate_mark_array (uint32_t* ma)
{
- return (uint32_t*)((uint8_t*)ma - size_mark_array_of (0, g_lowest_address));
+ return (uint32_t*)((uint8_t*)ma - size_mark_array_of (0, g_gc_lowest_address));
}
// from and end must be page aligned addresses.
@@ -6850,16 +6878,16 @@ void release_card_table (uint32_t* c_table)
{
destroy_card_table (c_table);
// sever the link from the parent
- if (&g_card_table[card_word (gcard_of(g_lowest_address))] == c_table)
+ if (&g_gc_card_table[card_word (gcard_of(g_gc_lowest_address))] == c_table)
{
- g_card_table = 0;
+ g_gc_card_table = 0;
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
SoftwareWriteWatch::StaticClose();
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
}
else
{
- uint32_t* p_table = &g_card_table[card_word (gcard_of(g_lowest_address))];
+ uint32_t* p_table = &g_gc_card_table[card_word (gcard_of(g_gc_lowest_address))];
if (p_table)
{
while (p_table && (card_table_next (p_table) != c_table))
@@ -6881,8 +6909,8 @@ void destroy_card_table (uint32_t* c_table)
uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
{
- assert (g_lowest_address == start);
- assert (g_highest_address == end);
+ assert (g_gc_lowest_address == start);
+ assert (g_gc_highest_address == end);
uint32_t virtual_reserve_flags = VirtualReserveFlags::None;
@@ -6902,7 +6930,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
if (can_use_write_watch_for_card_table())
{
virtual_reserve_flags |= VirtualReserveFlags::WriteWatch;
- cb = size_card_bundle_of (g_lowest_address, g_highest_address);
+ cb = size_card_bundle_of (g_gc_lowest_address, g_gc_highest_address);
}
#endif //CARD_BUNDLE
@@ -6918,7 +6946,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
#ifdef GROWABLE_SEG_MAPPING_TABLE
- size_t st = size_seg_mapping_table_of (g_lowest_address, g_highest_address);
+ size_t st = size_seg_mapping_table_of (g_gc_lowest_address, g_gc_highest_address);
size_t st_table_offset = sizeof(card_table_info) + cs + bs + cb + wws;
size_t st_table_offset_aligned = align_for_seg_mapping_table (st_table_offset);
@@ -6932,7 +6960,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
size_t alloc_size = sizeof (uint8_t)*(sizeof(card_table_info) + cs + bs + cb + wws + st + ms);
size_t alloc_size_aligned = Align (alloc_size, g_SystemInfo.dwAllocationGranularity-1);
- uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (0, alloc_size_aligned, 0, virtual_reserve_flags);
+ uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size_aligned, 0, virtual_reserve_flags);
if (!mem)
return 0;
@@ -6973,7 +7001,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
#ifdef GROWABLE_SEG_MAPPING_TABLE
seg_mapping_table = (seg_mapping*)(mem + st_table_offset_aligned);
seg_mapping_table = (seg_mapping*)((uint8_t*)seg_mapping_table -
- size_seg_mapping_table_of (0, (align_lower_segment (g_lowest_address))));
+ size_seg_mapping_table_of (0, (align_lower_segment (g_gc_lowest_address))));
#endif //GROWABLE_SEG_MAPPING_TABLE
#ifdef MARK_ARRAY
@@ -7012,10 +7040,10 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
gc_heap* hp,
BOOL loh_p)
{
- uint8_t* la = g_lowest_address;
- uint8_t* ha = g_highest_address;
- uint8_t* saved_g_lowest_address = min (start, g_lowest_address);
- uint8_t* saved_g_highest_address = max (end, g_highest_address);
+ uint8_t* la = g_gc_lowest_address;
+ uint8_t* ha = g_gc_highest_address;
+ uint8_t* saved_g_lowest_address = min (start, g_gc_lowest_address);
+ uint8_t* saved_g_highest_address = max (end, g_gc_highest_address);
#ifdef BACKGROUND_GC
// This value is only for logging purpose - it's not necessarily exactly what we
// would commit for mark array but close enough for diagnostics purpose.
@@ -7045,18 +7073,18 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
#endif // BIT64
ps *= 2;
- if (saved_g_lowest_address < g_lowest_address)
+ if (saved_g_lowest_address < g_gc_lowest_address)
{
- if (ps > (size_t)g_lowest_address)
+ if (ps > (size_t)g_gc_lowest_address)
saved_g_lowest_address = (uint8_t*)OS_PAGE_SIZE;
else
{
- assert (((size_t)g_lowest_address - ps) >= OS_PAGE_SIZE);
- saved_g_lowest_address = min (saved_g_lowest_address, (g_lowest_address - ps));
+ assert (((size_t)g_gc_lowest_address - ps) >= OS_PAGE_SIZE);
+ saved_g_lowest_address = min (saved_g_lowest_address, (g_gc_lowest_address - ps));
}
}
- if (saved_g_highest_address > g_highest_address)
+ if (saved_g_highest_address > g_gc_highest_address)
{
saved_g_highest_address = max ((saved_g_lowest_address + ps), saved_g_highest_address);
if (saved_g_highest_address > top)
@@ -7069,7 +7097,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
bool write_barrier_updated = false;
uint32_t virtual_reserve_flags = VirtualReserveFlags::None;
- uint32_t* saved_g_card_table = g_card_table;
+ uint32_t* saved_g_card_table = g_gc_card_table;
uint32_t* ct = 0;
uint32_t* translated_ct = 0;
short* bt = 0;
@@ -7125,7 +7153,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
dprintf (GC_TABLE_LOG, ("card table: %Id; brick table: %Id; card bundle: %Id; sw ww table: %Id; seg table: %Id; mark array: %Id",
cs, bs, cb, wws, st, ms));
- uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (0, alloc_size_aligned, 0, virtual_reserve_flags);
+ uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size_aligned, 0, virtual_reserve_flags);
if (!mem)
{
@@ -7152,7 +7180,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
card_table_refcount (ct) = 0;
card_table_lowest_address (ct) = saved_g_lowest_address;
card_table_highest_address (ct) = saved_g_highest_address;
- card_table_next (ct) = &g_card_table[card_word (gcard_of (la))];
+ card_table_next (ct) = &g_gc_card_table[card_word (gcard_of (la))];
//clear the card table
/*
@@ -7179,9 +7207,9 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
seg_mapping* new_seg_mapping_table = (seg_mapping*)(mem + st_table_offset_aligned);
new_seg_mapping_table = (seg_mapping*)((uint8_t*)new_seg_mapping_table -
size_seg_mapping_table_of (0, (align_lower_segment (saved_g_lowest_address))));
- memcpy(&new_seg_mapping_table[seg_mapping_word_of(g_lowest_address)],
- &seg_mapping_table[seg_mapping_word_of(g_lowest_address)],
- size_seg_mapping_table_of(g_lowest_address, g_highest_address));
+ memcpy(&new_seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)],
+ &seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)],
+ size_seg_mapping_table_of(g_gc_lowest_address, g_gc_highest_address));
seg_mapping_table = new_seg_mapping_table;
}
@@ -7243,12 +7271,14 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
// Note on points where the runtime is suspended anywhere in this function. Upon an attempt to suspend the
// runtime, a different thread may suspend first, causing this thread to block at the point of the suspend call.
// So, at any suspend point, externally visible state needs to be consistent, as code that depends on that state
- // may run while this thread is blocked. This includes updates to g_card_table, g_lowest_address, and
- // g_highest_address.
+ // may run while this thread is blocked. This includes updates to g_gc_card_table, g_gc_lowest_address, and
+ // g_gc_highest_address.
suspend_EE();
}
- g_card_table = translated_ct;
+ g_gc_card_table = translated_ct;
+ g_gc_lowest_address = saved_g_lowest_address;
+ g_gc_highest_address = saved_g_highest_address;
SoftwareWriteWatch::SetResizedUntranslatedTable(
mem + sw_ww_table_offset,
@@ -7260,7 +7290,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
// grow version of the write barrier. This test tells us if the new
// segment was allocated at a lower address than the old, requiring
// that we start doing an upper bounds check in the write barrier.
- StompWriteBarrierResize(true, la != saved_g_lowest_address);
+ stomp_write_barrier_resize(true, la != saved_g_lowest_address);
write_barrier_updated = true;
if (!is_runtime_suspended)
@@ -7271,9 +7301,12 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
else
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
{
- g_card_table = translated_ct;
+ g_gc_card_table = translated_ct;
}
+ g_gc_lowest_address = saved_g_lowest_address;
+ g_gc_highest_address = saved_g_highest_address;
+
if (!write_barrier_updated)
{
// This passes a bool telling whether we need to switch to the post
@@ -7284,19 +7317,9 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
// to be changed, so we are doing this after all global state has
// been updated. See the comment above suspend_EE() above for more
// info.
- StompWriteBarrierResize(!!IsGCThread(), la != saved_g_lowest_address);
+ stomp_write_barrier_resize(!!IsGCThread(), la != saved_g_lowest_address);
}
- // We need to make sure that other threads executing checked write barriers
- // will see the g_card_table update before g_lowest/highest_address updates.
- // Otherwise, the checked write barrier may AV accessing the old card table
- // with address that it does not cover. Write barriers access card table
- // without memory barriers for performance reasons, so we need to flush
- // the store buffers here.
- GCToOSInterface::FlushProcessWriteBuffers();
-
- g_lowest_address = saved_g_lowest_address;
- VolatileStore(&g_highest_address, saved_g_highest_address);
return 0;
@@ -7305,7 +7328,7 @@ fail:
if (mem)
{
- assert(g_card_table == saved_g_card_table);
+ assert(g_gc_card_table == saved_g_card_table);
//delete (uint32_t*)((uint8_t*)ct - sizeof(card_table_info));
if (!GCToOSInterface::VirtualRelease (mem, alloc_size_aligned))
@@ -7463,7 +7486,7 @@ void gc_heap::copy_brick_card_table()
assert (ha == card_table_highest_address (&old_card_table[card_word (card_of (la))]));
/* todo: Need a global lock for this */
- uint32_t* ct = &g_card_table[card_word (gcard_of (g_lowest_address))];
+ uint32_t* ct = &g_gc_card_table[card_word (gcard_of (g_gc_lowest_address))];
own_card_table (ct);
card_table = translate_card_table (ct);
/* End of global lock */
@@ -7476,8 +7499,8 @@ void gc_heap::copy_brick_card_table()
if (gc_can_use_concurrent)
{
mark_array = translate_mark_array (card_table_mark_array (ct));
- assert (mark_word_of (g_highest_address) ==
- mark_word_of (align_on_mark_word (g_highest_address)));
+ assert (mark_word_of (g_gc_highest_address) ==
+ mark_word_of (align_on_mark_word (g_gc_highest_address)));
}
else
mark_array = NULL;
@@ -7486,13 +7509,13 @@ void gc_heap::copy_brick_card_table()
#ifdef CARD_BUNDLE
#if defined(MARK_ARRAY) && defined(_DEBUG)
#ifdef GROWABLE_SEG_MAPPING_TABLE
- size_t st = size_seg_mapping_table_of (g_lowest_address, g_highest_address);
+ size_t st = size_seg_mapping_table_of (g_gc_lowest_address, g_gc_highest_address);
#else //GROWABLE_SEG_MAPPING_TABLE
size_t st = 0;
#endif //GROWABLE_SEG_MAPPING_TABLE
#endif //MARK_ARRAY && _DEBUG
card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct));
- assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_lowest_address))))] ==
+ assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_gc_lowest_address))))] ==
card_table_card_bundle_table (ct));
//set the card table if we are in a heap growth scenario
@@ -9330,13 +9353,13 @@ void gc_heap::update_card_table_bundle()
bool success = GCToOSInterface::GetWriteWatch (false /* resetState */ , base_address, region_size,
(void**)g_addresses,
&bcount);
- assert (success);
+ assert (success && "GetWriteWatch failed!");
dprintf (3,("Found %d pages written", bcount));
for (unsigned i = 0; i < bcount; i++)
{
size_t bcardw = (uint32_t*)(max(g_addresses[i],base_address)) - &card_table[0];
size_t ecardw = (uint32_t*)(min(g_addresses[i]+OS_PAGE_SIZE, high_address)) - &card_table[0];
- assert (bcardw >= card_word (card_of (g_lowest_address)));
+ assert (bcardw >= card_word (card_of (g_gc_lowest_address)));
card_bundles_set (cardw_card_bundle (bcardw),
cardw_card_bundle (align_cardw_on_bundle (ecardw)));
@@ -9648,7 +9671,7 @@ void gc_heap::adjust_ephemeral_limits (bool is_runtime_suspended)
(size_t)ephemeral_low, (size_t)ephemeral_high))
// This updates the write barrier helpers with the new info.
- StompWriteBarrierEphemeral(is_runtime_suspended);
+ stomp_write_barrier_ephemeral(is_runtime_suspended, ephemeral_low, ephemeral_high);
}
#if defined(TRACE_GC) || defined(GC_CONFIG_DRIVEN)
@@ -9821,9 +9844,9 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
settings.first_init();
- g_card_table = make_card_table (g_lowest_address, g_highest_address);
+ g_gc_card_table = make_card_table (g_gc_lowest_address, g_gc_highest_address);
- if (!g_card_table)
+ if (!g_gc_card_table)
return E_OUTOFMEMORY;
gc_started = FALSE;
@@ -9972,8 +9995,8 @@ gc_heap::init_semi_shared()
{
goto cleanup;
}
-#endif //BACKGROUND_GC
}
+#endif //BACKGROUND_GC
memset (&current_no_gc_region_info, 0, sizeof (current_no_gc_region_info));
@@ -10306,7 +10329,7 @@ gc_heap::init_gc_heap (int h_number)
#endif //MULTIPLE_HEAPS
/* todo: Need a global lock for this */
- uint32_t* ct = &g_card_table [card_word (card_of (g_lowest_address))];
+ uint32_t* ct = &g_gc_card_table [card_word (card_of (g_gc_lowest_address))];
own_card_table (ct);
card_table = translate_card_table (ct);
/* End of global lock */
@@ -10317,13 +10340,13 @@ gc_heap::init_gc_heap (int h_number)
#ifdef CARD_BUNDLE
card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct));
- assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_lowest_address))))] ==
+ assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_gc_lowest_address))))] ==
card_table_card_bundle_table (ct));
#endif //CARD_BUNDLE
#ifdef MARK_ARRAY
if (gc_can_use_concurrent)
- mark_array = translate_mark_array (card_table_mark_array (&g_card_table[card_word (card_of (g_lowest_address))]));
+ mark_array = translate_mark_array (card_table_mark_array (&g_gc_card_table[card_word (card_of (g_gc_lowest_address))]));
else
mark_array = NULL;
#endif //MARK_ARRAY
@@ -10360,6 +10383,7 @@ gc_heap::init_gc_heap (int h_number)
(size_t)(heap_segment_reserved (lseg) - heap_segment_mem(lseg)),
ETW::GCLog::ETW_GC_INFO::LARGE_OBJECT_HEAP,
GetClrInstanceId());
+
#ifdef SEG_MAPPING_TABLE
seg_mapping_table_add_segment (lseg, __this);
#else //SEG_MAPPING_TABLE
@@ -10384,10 +10408,10 @@ gc_heap::init_gc_heap (int h_number)
heap_segment_heap (lseg) = this;
//initialize the alloc context heap
- generation_alloc_context (generation_of (0))->alloc_heap = vm_heap;
+ generation_alloc_context (generation_of (0))->set_alloc_heap(vm_heap);
//initialize the alloc context heap
- generation_alloc_context (generation_of (max_generation+1))->alloc_heap = vm_heap;
+ generation_alloc_context (generation_of (max_generation+1))->set_alloc_heap(vm_heap);
#endif //MULTIPLE_HEAPS
@@ -13043,12 +13067,12 @@ int gc_heap::try_allocate_more_space (alloc_context* acontext, size_t size,
if (can_allocate)
{
- //ETW trace for allocation tick
size_t alloc_context_bytes = acontext->alloc_limit + Align (min_obj_size, align_const) - acontext->alloc_ptr;
int etw_allocation_index = ((gen_number == 0) ? 0 : 1);
etw_allocation_running_amount[etw_allocation_index] += alloc_context_bytes;
+
if (etw_allocation_running_amount[etw_allocation_index] > etw_allocation_tick)
{
#ifdef FEATURE_REDHAWK
@@ -13080,10 +13104,10 @@ void gc_heap::balance_heaps (alloc_context* acontext)
{
if (acontext->alloc_count == 0)
{
- acontext->home_heap = GCHeap::GetHeap( heap_select::select_heap(acontext, 0) );
- gc_heap* hp = acontext->home_heap->pGenGCHeap;
+ acontext->set_home_heap(GCHeap::GetHeap( heap_select::select_heap(acontext, 0) ));
+ gc_heap* hp = acontext->get_home_heap()->pGenGCHeap;
dprintf (3, ("First allocation for context %Ix on heap %d\n", (size_t)acontext, (size_t)hp->heap_number));
- acontext->alloc_heap = acontext->home_heap;
+ acontext->set_alloc_heap(acontext->get_home_heap());
hp->alloc_context_count++;
}
}
@@ -13094,9 +13118,9 @@ void gc_heap::balance_heaps (alloc_context* acontext)
if (heap_select::can_find_heap_fast())
{
- if (acontext->home_heap != NULL)
- hint = acontext->home_heap->pGenGCHeap->heap_number;
- if (acontext->home_heap != GCHeap::GetHeap(hint = heap_select::select_heap(acontext, hint)) || ((acontext->alloc_count & 15) == 0))
+ if (acontext->get_home_heap() != NULL)
+ hint = acontext->get_home_heap()->pGenGCHeap->heap_number;
+ if (acontext->get_home_heap() != GCHeap::GetHeap(hint = heap_select::select_heap(acontext, hint)) || ((acontext->alloc_count & 15) == 0))
{
set_home_heap = TRUE;
}
@@ -13122,7 +13146,7 @@ void gc_heap::balance_heaps (alloc_context* acontext)
else
*/
{
- gc_heap* org_hp = acontext->alloc_heap->pGenGCHeap;
+ gc_heap* org_hp = acontext->get_alloc_heap()->pGenGCHeap;
dynamic_data* dd = org_hp->dynamic_data_of (0);
ptrdiff_t org_size = dd_new_allocation (dd);
@@ -13141,9 +13165,9 @@ try_again:
{
max_hp = org_hp;
max_size = org_size + delta;
- acontext->home_heap = GCHeap::GetHeap( heap_select::select_heap(acontext, hint) );
+ acontext->set_home_heap(GCHeap::GetHeap( heap_select::select_heap(acontext, hint) ));
- if (org_hp == acontext->home_heap->pGenGCHeap)
+ if (org_hp == acontext->get_home_heap()->pGenGCHeap)
max_size = max_size + delta;
org_alloc_context_count = org_hp->alloc_context_count;
@@ -13156,7 +13180,7 @@ try_again:
gc_heap* hp = GCHeap::GetHeap(i%n_heaps)->pGenGCHeap;
dd = hp->dynamic_data_of (0);
ptrdiff_t size = dd_new_allocation (dd);
- if (hp == acontext->home_heap->pGenGCHeap)
+ if (hp == acontext->get_home_heap()->pGenGCHeap)
size = size + delta;
int hp_alloc_context_count = hp->alloc_context_count;
if (hp_alloc_context_count > 0)
@@ -13183,7 +13207,7 @@ try_again:
{
org_hp->alloc_context_count--;
max_hp->alloc_context_count++;
- acontext->alloc_heap = GCHeap::GetHeap(max_hp->heap_number);
+ acontext->set_alloc_heap(GCHeap::GetHeap(max_hp->heap_number));
#if !defined(FEATURE_PAL)
if (CPUGroupInfo::CanEnableGCCPUGroups())
{ //only set ideal processor when max_hp and org_hp are in the same cpu
@@ -13221,7 +13245,7 @@ try_again:
#endif // !FEATURE_PAL
dprintf (3, ("Switching context %p (home heap %d) ",
acontext,
- acontext->home_heap->pGenGCHeap->heap_number));
+ acontext->get_home_heap()->pGenGCHeap->heap_number));
dprintf (3, (" from heap %d (%Id free bytes, %d contexts) ",
org_hp->heap_number,
org_size,
@@ -13239,7 +13263,7 @@ try_again:
gc_heap* gc_heap::balance_heaps_loh (alloc_context* acontext, size_t /*size*/)
{
- gc_heap* org_hp = acontext->alloc_heap->pGenGCHeap;
+ gc_heap* org_hp = acontext->get_alloc_heap()->pGenGCHeap;
//dprintf (1, ("LA: %Id", size));
//if (size > 128*1024)
@@ -13316,7 +13340,7 @@ BOOL gc_heap::allocate_more_space(alloc_context* acontext, size_t size,
if (alloc_generation_number == 0)
{
balance_heaps (acontext);
- status = acontext->alloc_heap->pGenGCHeap->try_allocate_more_space (acontext, size, alloc_generation_number);
+ status = acontext->get_alloc_heap()->pGenGCHeap->try_allocate_more_space (acontext, size, alloc_generation_number);
}
else
{
@@ -14785,6 +14809,9 @@ int gc_heap::generation_to_condemn (int n_initial,
dprintf (GTC_LOG, ("h%d: alloc full - BLOCK", heap_number));
n = max_generation;
*blocking_collection_p = TRUE;
+ if ((local_settings->reason == reason_oos_loh) ||
+ (local_settings->reason == reason_alloc_loh))
+ evaluate_elevation = FALSE;
local_condemn_reasons->set_condition (gen_before_oom);
}
@@ -15183,7 +15210,7 @@ void gc_heap::gc1()
vm_heap->GcCondemnedGeneration = settings.condemned_generation;
- assert (g_card_table == card_table);
+ assert (g_gc_card_table == card_table);
{
if (n == max_generation)
@@ -15472,7 +15499,15 @@ void gc_heap::gc1()
#ifdef FEATURE_EVENT_TRACE
if (bgc_heap_walk_for_etw_p && settings.concurrent)
{
- make_free_lists_for_profiler_for_bgc();
+ GCToEEInterface::DiagWalkBGCSurvivors(__this);
+
+#ifdef MULTIPLE_HEAPS
+ bgc_t_join.join(this, gc_join_after_profiler_heap_walk);
+ if (bgc_t_join.joined())
+ {
+ bgc_t_join.restart();
+ }
+#endif // MULTIPLE_HEAPS
}
#endif // FEATURE_EVENT_TRACE
#endif //BACKGROUND_GC
@@ -16382,7 +16417,7 @@ int gc_heap::garbage_collect (int n)
for (int i = 0; i < n_heaps; i++)
{
//copy the card and brick tables
- if (g_card_table != g_heaps[i]->card_table)
+ if (g_gc_card_table != g_heaps[i]->card_table)
{
g_heaps[i]->copy_brick_card_table();
}
@@ -16406,100 +16441,67 @@ int gc_heap::garbage_collect (int n)
}
#endif //BACKGROUND_GC
// check for card table growth
- if (g_card_table != card_table)
+ if (g_gc_card_table != card_table)
copy_brick_card_table();
#endif //MULTIPLE_HEAPS
- BOOL should_evaluate_elevation = FALSE;
- BOOL should_do_blocking_collection = FALSE;
+ BOOL should_evaluate_elevation = FALSE;
+ BOOL should_do_blocking_collection = FALSE;
#ifdef MULTIPLE_HEAPS
- int gen_max = condemned_generation_num;
- for (int i = 0; i < n_heaps; i++)
- {
- if (gen_max < g_heaps[i]->condemned_generation_num)
- gen_max = g_heaps[i]->condemned_generation_num;
- if ((!should_evaluate_elevation) && (g_heaps[i]->elevation_requested))
- should_evaluate_elevation = TRUE;
- if ((!should_do_blocking_collection) && (g_heaps[i]->blocking_collection))
- should_do_blocking_collection = TRUE;
- }
+ int gen_max = condemned_generation_num;
+ for (int i = 0; i < n_heaps; i++)
+ {
+ if (gen_max < g_heaps[i]->condemned_generation_num)
+ gen_max = g_heaps[i]->condemned_generation_num;
+ if ((!should_evaluate_elevation) && (g_heaps[i]->elevation_requested))
+ should_evaluate_elevation = TRUE;
+ if ((!should_do_blocking_collection) && (g_heaps[i]->blocking_collection))
+ should_do_blocking_collection = TRUE;
+ }
- settings.condemned_generation = gen_max;
-//logically continues after GC_PROFILING.
+ settings.condemned_generation = gen_max;
#else //MULTIPLE_HEAPS
- settings.condemned_generation = generation_to_condemn (n,
- &blocking_collection,
- &elevation_requested,
- FALSE);
- should_evaluate_elevation = elevation_requested;
- should_do_blocking_collection = blocking_collection;
-#endif //MULTIPLE_HEAPS
-
- settings.condemned_generation = joined_generation_to_condemn (
- should_evaluate_elevation,
- settings.condemned_generation,
- &should_do_blocking_collection
- STRESS_HEAP_ARG(n)
- );
+ settings.condemned_generation = generation_to_condemn (n,
+ &blocking_collection,
+ &elevation_requested,
+ FALSE);
+ should_evaluate_elevation = elevation_requested;
+ should_do_blocking_collection = blocking_collection;
+#endif //MULTIPLE_HEAPS
+
+ settings.condemned_generation = joined_generation_to_condemn (
+ should_evaluate_elevation,
+ settings.condemned_generation,
+ &should_do_blocking_collection
+ STRESS_HEAP_ARG(n)
+ );
- STRESS_LOG1(LF_GCROOTS|LF_GC|LF_GCALLOC, LL_INFO10,
- "condemned generation num: %d\n", settings.condemned_generation);
+ STRESS_LOG1(LF_GCROOTS|LF_GC|LF_GCALLOC, LL_INFO10,
+ "condemned generation num: %d\n", settings.condemned_generation);
- record_gcs_during_no_gc();
+ record_gcs_during_no_gc();
- if (settings.condemned_generation > 1)
- settings.promotion = TRUE;
+ if (settings.condemned_generation > 1)
+ settings.promotion = TRUE;
#ifdef HEAP_ANALYZE
- // At this point we've decided what generation is condemned
- // See if we've been requested to analyze survivors after the mark phase
- if (AnalyzeSurvivorsRequested(settings.condemned_generation))
- {
- heap_analyze_enabled = TRUE;
- }
-#endif // HEAP_ANALYZE
-
-#ifdef GC_PROFILING
-
- // If we're tracking GCs, then we need to walk the first generation
- // before collection to track how many items of each class has been
- // allocated.
- UpdateGenerationBounds();
- GarbageCollectionStartedCallback(settings.condemned_generation, settings.reason == reason_induced);
+ // At this point we've decided what generation is condemned
+ // See if we've been requested to analyze survivors after the mark phase
+ if (AnalyzeSurvivorsRequested(settings.condemned_generation))
{
- BEGIN_PIN_PROFILER(CORProfilerTrackGC());
- size_t profiling_context = 0;
-
-#ifdef MULTIPLE_HEAPS
- int hn = 0;
- for (hn = 0; hn < gc_heap::n_heaps; hn++)
- {
- gc_heap* hp = gc_heap::g_heaps [hn];
-
- // When we're walking objects allocated by class, then we don't want to walk the large
- // object heap because then it would count things that may have been around for a while.
- hp->walk_heap (&AllocByClassHelper, (void *)&profiling_context, 0, FALSE);
- }
-#else
- // When we're walking objects allocated by class, then we don't want to walk the large
- // object heap because then it would count things that may have been around for a while.
- gc_heap::walk_heap (&AllocByClassHelper, (void *)&profiling_context, 0, FALSE);
-#endif //MULTIPLE_HEAPS
-
- // Notify that we've reached the end of the Gen 0 scan
- g_profControlBlock.pProfInterface->EndAllocByClass(&profiling_context);
- END_PIN_PROFILER();
+ heap_analyze_enabled = TRUE;
}
+#endif // HEAP_ANALYZE
-#endif // GC_PROFILING
+ GCToEEInterface::DiagGCStart(settings.condemned_generation, settings.reason == reason_induced);
#ifdef BACKGROUND_GC
if ((settings.condemned_generation == max_generation) &&
(recursive_gc_sync::background_running_p()))
{
- //TODO BACKGROUND_GC If we just wait for the end of gc, it won't woork
+ //TODO BACKGROUND_GC If we just wait for the end of gc, it won't work
// because we have to collect 0 and 1 properly
// in particular, the allocation contexts are gone.
// For now, it is simpler to collect max_generation-1
@@ -19625,12 +19627,7 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
dprintf (3, ("Finalize marking"));
finalize_queue->ScanForFinalization (GCHeap::Promote, condemned_gen_number, mark_only_p, __this);
-#ifdef GC_PROFILING
- if (CORProfilerTrackGC())
- {
- finalize_queue->WalkFReachableObjects (__this);
- }
-#endif //GC_PROFILING
+ GCToEEInterface::DiagWalkFReachableObjects(__this);
#endif // FEATURE_PREMORTEM_FINALIZATION
// Scan dependent handles again to promote any secondaries associated with primaries that were promoted
@@ -21105,8 +21102,7 @@ void gc_heap::relocate_in_loh_compact()
generation_free_obj_space (gen)));
}
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-void gc_heap::walk_relocation_loh (size_t profiling_context)
+void gc_heap::walk_relocation_for_loh (size_t profiling_context, record_surv_fn fn)
{
generation* gen = large_object_generation;
heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
@@ -21136,14 +21132,7 @@ void gc_heap::walk_relocation_loh (size_t profiling_context)
STRESS_LOG_PLUG_MOVE(o, (o + size), -reloc);
- {
- ETW::GCLog::MovedReference(
- o,
- (o + size),
- reloc,
- profiling_context,
- settings.compaction);
- }
+ fn (o, (o + size), reloc, profiling_context, settings.compaction, FALSE);
o = o + size;
if (o < heap_segment_allocated (seg))
@@ -21160,7 +21149,6 @@ void gc_heap::walk_relocation_loh (size_t profiling_context)
}
}
}
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
BOOL gc_heap::loh_object_p (uint8_t* o)
{
@@ -22250,7 +22238,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
#endif //TIME_GC
// We may update write barrier code. We assume here EE has been suspended if we are on a GC thread.
- assert(GCHeap::IsGCInProgress());
+ assert(IsGCInProgress());
BOOL should_expand = FALSE;
BOOL should_compact= FALSE;
@@ -22318,10 +22306,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
if (!loh_compacted_p)
#endif //FEATURE_LOH_COMPACTION
{
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- if (ShouldTrackMovementForProfilerOrEtw())
- notify_profiler_of_surviving_large_objects();
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ GCToEEInterface::DiagWalkLOHSurvivors(__this);
sweep_large_objects();
}
}
@@ -22432,7 +22417,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
for (i = 0; i < n_heaps; i++)
{
//copy the card and brick tables
- if (g_card_table!= g_heaps[i]->card_table)
+ if (g_gc_card_table!= g_heaps[i]->card_table)
{
g_heaps[i]->copy_brick_card_table();
}
@@ -22523,12 +22508,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
assert (generation_allocation_segment (consing_gen) ==
ephemeral_heap_segment);
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- if (ShouldTrackMovementForProfilerOrEtw())
- {
- record_survived_for_profiler(condemned_gen_number, first_condemned_address);
- }
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ GCToEEInterface::DiagWalkSurvivors(__this);
relocate_phase (condemned_gen_number, first_condemned_address);
compact_phase (condemned_gen_number, first_condemned_address,
@@ -22738,12 +22718,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
fix_older_allocation_area (older_gen);
}
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- if (ShouldTrackMovementForProfilerOrEtw())
- {
- record_survived_for_profiler(condemned_gen_number, first_condemned_address);
- }
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ GCToEEInterface::DiagWalkSurvivors(__this);
gen0_big_free_spaces = 0;
make_free_lists (condemned_gen_number);
@@ -23949,8 +23924,7 @@ void gc_heap::relocate_survivors (int condemned_gen_number,
}
}
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, walk_relocate_args* args, size_t profiling_context)
+void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, walk_relocate_args* args)
{
if (check_last_object_p)
{
@@ -23970,15 +23944,10 @@ void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, w
}
ptrdiff_t last_plug_relocation = node_relocation_distance (plug);
- ptrdiff_t reloc = settings.compaction ? last_plug_relocation : 0;
-
STRESS_LOG_PLUG_MOVE(plug, (plug + size), -last_plug_relocation);
+ ptrdiff_t reloc = settings.compaction ? last_plug_relocation : 0;
- ETW::GCLog::MovedReference(plug,
- (plug + size),
- reloc,
- profiling_context,
- settings.compaction);
+ (args->fn) (plug, (plug + size), reloc, args->profiling_context, settings.compaction, FALSE);
if (check_last_object_p)
{
@@ -23995,12 +23964,12 @@ void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, w
}
}
-void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args, size_t profiling_context)
+void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args)
{
assert ((tree != NULL));
if (node_left_child (tree))
{
- walk_relocation_in_brick (tree + node_left_child (tree), args, profiling_context);
+ walk_relocation_in_brick (tree + node_left_child (tree), args);
}
uint8_t* plug = tree;
@@ -24029,7 +23998,7 @@ void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args,
assert (last_plug_size >= Align (min_obj_size));
}
- walk_plug (args->last_plug, last_plug_size, check_last_object_p, args, profiling_context);
+ walk_plug (args->last_plug, last_plug_size, check_last_object_p, args);
}
else
{
@@ -24042,18 +24011,14 @@ void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args,
if (node_right_child (tree))
{
- walk_relocation_in_brick (tree + node_right_child (tree), args, profiling_context);
-
+ walk_relocation_in_brick (tree + node_right_child (tree), args);
}
}
-void gc_heap::walk_relocation (int condemned_gen_number,
- uint8_t* first_condemned_address,
- size_t profiling_context)
-
+void gc_heap::walk_relocation (size_t profiling_context, record_surv_fn fn)
{
- generation* condemned_gen = generation_of (condemned_gen_number);
- uint8_t* start_address = first_condemned_address;
+ generation* condemned_gen = generation_of (settings.condemned_generation);
+ uint8_t* start_address = generation_allocation_start (condemned_gen);
size_t current_brick = brick_of (start_address);
heap_segment* current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen));
@@ -24066,6 +24031,8 @@ void gc_heap::walk_relocation (int condemned_gen_number,
args.is_shortened = FALSE;
args.pinned_plug_entry = 0;
args.last_plug = 0;
+ args.profiling_context = profiling_context;
+ args.fn = fn;
while (1)
{
@@ -24075,8 +24042,8 @@ void gc_heap::walk_relocation (int condemned_gen_number,
{
walk_plug (args.last_plug,
(heap_segment_allocated (current_heap_segment) - args.last_plug),
- args.is_shortened,
- &args, profiling_context);
+ args.is_shortened,
+ &args);
args.last_plug = 0;
}
if (heap_segment_next_rw (current_heap_segment))
@@ -24097,16 +24064,29 @@ void gc_heap::walk_relocation (int condemned_gen_number,
{
walk_relocation_in_brick (brick_address (current_brick) +
brick_entry - 1,
- &args,
- profiling_context);
+ &args);
}
}
current_brick++;
}
}
+void gc_heap::walk_survivors (record_surv_fn fn, size_t context, walk_surv_type type)
+{
+ if (type == walk_for_gc)
+ walk_survivors_relocation (context, fn);
+#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
+ else if (type == walk_for_bgc)
+ walk_survivors_for_bgc (context, fn);
+#endif //BACKGROUND_GC && FEATURE_EVENT_TRACE
+ else if (type == walk_for_loh)
+ walk_survivors_for_loh (context, fn);
+ else
+ assert (!"unknown type!");
+}
+
#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
-void gc_heap::walk_relocation_for_bgc(size_t profiling_context)
+void gc_heap::walk_survivors_for_bgc (size_t profiling_context, record_surv_fn fn)
{
// This should only be called for BGCs
assert(settings.concurrent);
@@ -24140,8 +24120,7 @@ void gc_heap::walk_relocation_for_bgc(size_t profiling_context)
uint8_t* end = heap_segment_allocated (seg);
while (o < end)
- {
-
+ {
if (method_table(o) == g_pFreeObjectMethodTable)
{
o += Align (size (o), align_const);
@@ -24164,51 +24143,18 @@ void gc_heap::walk_relocation_for_bgc(size_t profiling_context)
uint8_t* plug_end = o;
- // Note on last parameter: since this is for bgc, only ETW
- // should be sending these events so that existing profapi profilers
- // don't get confused.
- ETW::GCLog::MovedReference(
- plug_start,
+ fn (plug_start,
plug_end,
0, // Reloc distance == 0 as this is non-compacting
profiling_context,
FALSE, // Non-compacting
- FALSE); // fAllowProfApiNotification
+ TRUE); // BGC
}
seg = heap_segment_next (seg);
}
}
-
-void gc_heap::make_free_lists_for_profiler_for_bgc ()
-{
- assert(settings.concurrent);
-
- size_t profiling_context = 0;
- ETW::GCLog::BeginMovedReferences(&profiling_context);
-
- // This provides the profiler with information on what blocks of
- // memory are moved during a gc.
-
- walk_relocation_for_bgc(profiling_context);
-
- // Notify the EE-side profiling code that all the references have been traced for
- // this heap, and that it needs to flush all cached data it hasn't sent to the
- // profiler and release resources it no longer needs. Since this is for bgc, only
- // ETW should be sending these events so that existing profapi profilers don't get confused.
- ETW::GCLog::EndMovedReferences(profiling_context, FALSE /* fAllowProfApiNotification */);
-
-#ifdef MULTIPLE_HEAPS
- bgc_t_join.join(this, gc_join_after_profiler_heap_walk);
- if (bgc_t_join.joined())
- {
- bgc_t_join.restart();
- }
-#endif // MULTIPLE_HEAPS
-}
-
#endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
void gc_heap::relocate_phase (int condemned_gen_number,
uint8_t* first_condemned_address)
@@ -24809,7 +24755,7 @@ void gc_heap::compact_phase (int condemned_gen_number,
#pragma warning(push)
#pragma warning(disable:4702) // C4702: unreachable code: gc_thread_function may not return
#endif //_MSC_VER
-void __stdcall gc_heap::gc_thread_stub (void* arg)
+void gc_heap::gc_thread_stub (void* arg)
{
ClrFlsSetThreadType (ThreadType_GC);
STRESS_LOG_RESERVE_MEM (GC_STRESSLOG_MULTIPLY);
@@ -25177,14 +25123,14 @@ BOOL gc_heap::commit_mark_array_new_seg (gc_heap* hp,
if (new_card_table == 0)
{
- new_card_table = g_card_table;
+ new_card_table = g_gc_card_table;
}
if (hp->card_table != new_card_table)
{
if (new_lowest_address == 0)
{
- new_lowest_address = g_lowest_address;
+ new_lowest_address = g_gc_lowest_address;
}
uint32_t* ct = &new_card_table[card_word (gcard_of (new_lowest_address))];
@@ -26046,9 +25992,9 @@ gc_heap::suspend_EE ()
dprintf (2, ("suspend_EE"));
#ifdef MULTIPLE_HEAPS
gc_heap* hp = gc_heap::g_heaps[0];
- GCToEEInterface::SuspendEE(GCToEEInterface::SUSPEND_FOR_GC_PREP);
+ GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP);
#else
- GCToEEInterface::SuspendEE(GCToEEInterface::SUSPEND_FOR_GC_PREP);
+ GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP);
#endif //MULTIPLE_HEAPS
}
@@ -26062,7 +26008,7 @@ gc_heap::bgc_suspend_EE ()
}
gc_started = TRUE;
dprintf (2, ("bgc_suspend_EE"));
- GCToEEInterface::SuspendEE(GCToEEInterface::SUSPEND_FOR_GC_PREP);
+ GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP);
gc_started = FALSE;
for (int i = 0; i < n_heaps; i++)
@@ -26077,7 +26023,7 @@ gc_heap::bgc_suspend_EE ()
reset_gc_done();
gc_started = TRUE;
dprintf (2, ("bgc_suspend_EE"));
- GCToEEInterface::SuspendEE(GCToEEInterface::SUSPEND_FOR_GC_PREP);
+ GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP);
gc_started = FALSE;
set_gc_done();
}
@@ -29174,7 +29120,7 @@ generation* gc_heap::expand_heap (int condemned_generation,
return consing_gen;
//copy the card and brick tables
- if (g_card_table!= card_table)
+ if (g_gc_card_table!= card_table)
copy_brick_card_table();
BOOL new_segment_p = (heap_segment_next (new_seg) == 0);
@@ -30469,7 +30415,7 @@ CObjectHeader* gc_heap::allocate_large_object (size_t jsize, int64_t& alloc_byte
acontext.alloc_limit = 0;
acontext.alloc_bytes = 0;
#ifdef MULTIPLE_HEAPS
- acontext.alloc_heap = vm_heap;
+ acontext.set_alloc_heap(vm_heap);
#endif //MULTIPLE_HEAPS
#ifdef MARK_ARRAY
@@ -30484,14 +30430,11 @@ CObjectHeader* gc_heap::allocate_large_object (size_t jsize, int64_t& alloc_byte
#endif //BACKGROUND_GC
#endif // MARK_ARRAY
+ #if BIT64
+ size_t maxObjectSize = (INT64_MAX - 7 - Align(min_obj_size));
+ #else
size_t maxObjectSize = (INT32_MAX - 7 - Align(min_obj_size));
-
-#ifdef BIT64
- if (g_pConfig->GetGCAllowVeryLargeObjects())
- {
- maxObjectSize = (INT64_MAX - 7 - Align(min_obj_size));
- }
-#endif
+ #endif
if (jsize >= maxObjectSize)
{
@@ -30499,12 +30442,7 @@ CObjectHeader* gc_heap::allocate_large_object (size_t jsize, int64_t& alloc_byte
{
GCToOSInterface::DebugBreak();
}
-
-#ifndef FEATURE_REDHAWK
- ThrowOutOfMemoryDimensionsExceeded();
-#else
- return 0;
-#endif
+ return NULL;
}
size_t size = AlignQword (jsize);
@@ -30627,35 +30565,21 @@ BOOL gc_heap::large_object_marked (uint8_t* o, BOOL clearp)
return m;
}
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-void gc_heap::record_survived_for_profiler(int condemned_gen_number, uint8_t * start_address)
+void gc_heap::walk_survivors_relocation (size_t profiling_context, record_surv_fn fn)
{
- size_t profiling_context = 0;
-
- ETW::GCLog::BeginMovedReferences(&profiling_context);
-
// Now walk the portion of memory that is actually being relocated.
- walk_relocation(condemned_gen_number, start_address, profiling_context);
+ walk_relocation (profiling_context, fn);
#ifdef FEATURE_LOH_COMPACTION
if (loh_compacted_p)
{
- walk_relocation_loh (profiling_context);
+ walk_relocation_for_loh (profiling_context, fn);
}
#endif //FEATURE_LOH_COMPACTION
-
- // Notify the EE-side profiling code that all the references have been traced for
- // this heap, and that it needs to flush all cached data it hasn't sent to the
- // profiler and release resources it no longer needs.
- ETW::GCLog::EndMovedReferences(profiling_context);
}
-void gc_heap::notify_profiler_of_surviving_large_objects ()
+void gc_heap::walk_survivors_for_loh (size_t profiling_context, record_surv_fn fn)
{
- size_t profiling_context = 0;
-
- ETW::GCLog::BeginMovedReferences(&profiling_context);
-
generation* gen = large_object_generation;
heap_segment* seg = heap_segment_rw (generation_start_segment (gen));;
@@ -30665,13 +30589,6 @@ void gc_heap::notify_profiler_of_surviving_large_objects ()
uint8_t* plug_end = o;
uint8_t* plug_start = o;
- // Generally, we can only get here if this is TRUE:
- // (CORProfilerTrackGC() || ETW::GCLog::ShouldTrackMovementForEtw())
- // But we can't always assert that, as races could theoretically cause GC profiling
- // or ETW to turn off just before we get here. This is harmless (we do checks later
- // on, under appropriate locks, before actually calling into profilers), though it's
- // a slowdown to determine these plugs for nothing.
-
while (1)
{
if (o >= heap_segment_allocated (seg))
@@ -30699,12 +30616,7 @@ void gc_heap::notify_profiler_of_surviving_large_objects ()
plug_end = o;
- ETW::GCLog::MovedReference(
- plug_start,
- plug_end,
- 0,
- profiling_context,
- FALSE);
+ fn (plug_start, plug_end, 0, profiling_context, FALSE, FALSE);
}
else
{
@@ -30714,9 +30626,7 @@ void gc_heap::notify_profiler_of_surviving_large_objects ()
}
}
}
- ETW::GCLog::EndMovedReferences(profiling_context);
}
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
#ifdef BACKGROUND_GC
@@ -31946,11 +31856,10 @@ void gc_heap::descr_card_table ()
#endif //TRACE_GC
}
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context)
{
#ifdef MULTIPLE_HEAPS
- int n_heaps = GCHeap::GetGCHeap()->GetNumberOfHeaps ();
+ int n_heaps = g_theGCHeap->GetNumberOfHeaps ();
for (int i = 0; i < n_heaps; i++)
{
gc_heap* hp = GCHeap::GetHeap(i)->pGenGCHeap;
@@ -32027,7 +31936,6 @@ void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context)
}
}
}
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
#ifdef TRACE_GC
// Note that when logging is on it can take a long time to go through the free items.
@@ -32522,7 +32430,7 @@ void gc_heap::clear_all_mark_array()
void gc_heap::verify_mark_array_cleared (heap_segment* seg)
{
#if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
- assert (card_table == g_card_table);
+ assert (card_table == g_gc_card_table);
size_t markw = mark_word_of (heap_segment_mem (seg));
size_t markw_end = mark_word_of (heap_segment_reserved (seg));
@@ -32870,8 +32778,8 @@ gc_heap::verify_heap (BOOL begin_gc_p)
#endif //BACKGROUND_GC
#ifndef MULTIPLE_HEAPS
- if ((g_ephemeral_low != generation_allocation_start (generation_of (max_generation - 1))) ||
- (g_ephemeral_high != heap_segment_reserved (ephemeral_heap_segment)))
+ if ((g_gc_ephemeral_low != generation_allocation_start (generation_of (max_generation - 1))) ||
+ (g_gc_ephemeral_high != heap_segment_reserved (ephemeral_heap_segment)))
{
FATAL_GC_ERROR();
}
@@ -32930,7 +32838,7 @@ gc_heap::verify_heap (BOOL begin_gc_p)
for (int i = 0; i < n_heaps; i++)
{
//copy the card and brick tables
- if (g_card_table != g_heaps[i]->card_table)
+ if (g_gc_card_table != g_heaps[i]->card_table)
{
g_heaps[i]->copy_brick_card_table();
}
@@ -32939,7 +32847,7 @@ gc_heap::verify_heap (BOOL begin_gc_p)
current_join->restart();
}
#else
- if (g_card_table != card_table)
+ if (g_gc_card_table != card_table)
copy_brick_card_table();
#endif //MULTIPLE_HEAPS
@@ -33306,8 +33214,12 @@ gc_heap::verify_heap (BOOL begin_gc_p)
#endif //BACKGROUND_GC
}
+#endif //VERIFY_HEAP
+
+
void GCHeap::ValidateObjectMember (Object* obj)
{
+#ifdef VERIFY_HEAP
size_t s = size (obj);
uint8_t* o = (uint8_t*)obj;
@@ -33325,9 +33237,8 @@ void GCHeap::ValidateObjectMember (Object* obj)
}
}
} );
-
+#endif // VERIFY_HEAP
}
-#endif //VERIFY_HEAP
void DestructObject (CObjectHeader* hdr)
{
@@ -33361,11 +33272,11 @@ HRESULT GCHeap::Shutdown ()
//CloseHandle (WaitForGCEvent);
//find out if the global card table hasn't been used yet
- uint32_t* ct = &g_card_table[card_word (gcard_of (g_lowest_address))];
+ uint32_t* ct = &g_gc_card_table[card_word (gcard_of (g_gc_lowest_address))];
if (card_table_refcount (ct) == 0)
{
destroy_card_table (ct);
- g_card_table = 0;
+ g_gc_card_table = 0;
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
SoftwareWriteWatch::StaticClose();
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
@@ -33525,7 +33436,7 @@ HRESULT GCHeap::Initialize ()
return E_FAIL;
}
- StompWriteBarrierResize(true, false);
+ stomp_write_barrier_initialize();
#ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
#if defined (STRESS_HEAP) && !defined (MULTIPLE_HEAPS)
@@ -33562,10 +33473,7 @@ HRESULT GCHeap::Initialize ()
{
GCScan::GcRuntimeStructuresValid (TRUE);
-#ifdef GC_PROFILING
- if (CORProfilerTrackGC())
- UpdateGenerationBounds();
-#endif // GC_PROFILING
+ GCToEEInterface::DiagUpdateGenerationBounds();
}
return hr;
@@ -33640,16 +33548,16 @@ BOOL GCHeap::IsEphemeral (Object* object)
return hp->ephemeral_pointer_p (o);
}
-#ifdef VERIFY_HEAP
// Return NULL if can't find next object. When EE is not suspended,
// the result is not accurate: if the input arg is in gen0, the function could
// return zeroed out memory as next object
Object * GCHeap::NextObj (Object * object)
{
+#ifdef VERIFY_HEAP
uint8_t* o = (uint8_t*)object;
#ifndef FEATURE_BASICFREEZE
- if (!((o < g_highest_address) && (o >= g_lowest_address)))
+ if (!((o < g_gc_highest_address) && (o >= g_gc_lowest_address)))
{
return NULL;
}
@@ -33687,8 +33595,13 @@ Object * GCHeap::NextObj (Object * object)
}
return (Object *)nextobj;
+#else
+ return nullptr;
+#endif // VERIFY_HEAP
}
+#ifdef VERIFY_HEAP
+
#ifdef FEATURE_BASICFREEZE
BOOL GCHeap::IsInFrozenSegment (Object * object)
{
@@ -33715,7 +33628,7 @@ BOOL GCHeap::IsHeapPointer (void* vpObject, BOOL small_heap_only)
uint8_t* object = (uint8_t*) vpObject;
#ifndef FEATURE_BASICFREEZE
- if (!((object < g_highest_address) && (object >= g_lowest_address)))
+ if (!((object < g_gc_highest_address) && (object >= g_gc_lowest_address)))
return FALSE;
#endif //!FEATURE_BASICFREEZE
@@ -33918,11 +33831,16 @@ int StressRNG(int iMaxValue)
int randValue = (((lHoldrand = lHoldrand * 214013L + 2531011L) >> 16) & 0x7fff);
return randValue % iMaxValue;
}
+#endif // STRESS_HEAP
+#endif // !FEATURE_REDHAWK
// free up object so that things will move and then do a GC
//return TRUE if GC actually happens, otherwise FALSE
-BOOL GCHeap::StressHeap(alloc_context * acontext)
+BOOL GCHeap::StressHeap(gc_alloc_context * context)
{
+#if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
+ alloc_context* acontext = static_cast<alloc_context*>(context);
+
// if GC stress was dynamically disabled during this run we return FALSE
if (!GCStressPolicy::IsEnabled())
return FALSE;
@@ -34102,11 +34020,12 @@ BOOL GCHeap::StressHeap(alloc_context * acontext)
}
return TRUE;
+#else
+ UNREFERENCED_PARAMETER(context);
+ return FALSE;
+#endif // defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
}
-#endif // STRESS_HEAP
-#endif // FEATURE_REDHAWK
-
#ifdef FEATURE_PREMORTEM_FINALIZATION
#define REGISTER_FOR_FINALIZATION(_object, _size) \
@@ -34115,7 +34034,6 @@ BOOL GCHeap::StressHeap(alloc_context * acontext)
#define REGISTER_FOR_FINALIZATION(_object, _size) true
#endif // FEATURE_PREMORTEM_FINALIZATION
-#ifdef FEATURE_REDHAWK
#define CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(_object, _size, _register) do { \
if ((_object) == NULL || ((_register) && !REGISTER_FOR_FINALIZATION(_object, _size))) \
{ \
@@ -34123,19 +34041,6 @@ BOOL GCHeap::StressHeap(alloc_context * acontext)
return NULL; \
} \
} while (false)
-#else // FEATURE_REDHAWK
-#define CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(_object, _size, _register) do { \
- if ((_object) == NULL) \
- { \
- STRESS_LOG_OOM_STACK(_size); \
- ThrowOutOfMemory(); \
- } \
- if (_register) \
- { \
- REGISTER_FOR_FINALIZATION(_object, _size); \
- } \
-} while (false)
-#endif // FEATURE_REDHAWK
//
// Small Object Allocator
@@ -34145,27 +34050,12 @@ Object *
GCHeap::Alloc( size_t size, uint32_t flags REQD_ALIGN_DCL)
{
CONTRACTL {
-#ifdef FEATURE_REDHAWK
- // Under Redhawk NULL is returned on failure.
NOTHROW;
-#else
- THROWS;
-#endif
GC_TRIGGERS;
} CONTRACTL_END;
-#if defined(_DEBUG) && !defined(FEATURE_REDHAWK)
- if (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP))
- {
- char *a = new char;
- delete a;
- }
-#endif //_DEBUG && !FEATURE_REDHAWK
-
TRIGGERSGC();
- assert (!GCHeap::UseAllocationContexts());
-
Object* newAlloc = NULL;
#ifdef TRACE_GC
@@ -34237,23 +34127,16 @@ GCHeap::Alloc( size_t size, uint32_t flags REQD_ALIGN_DCL)
return newAlloc;
}
-#ifdef FEATURE_64BIT_ALIGNMENT
// Allocate small object with an alignment requirement of 8-bytes. Non allocation context version.
Object *
GCHeap::AllocAlign8( size_t size, uint32_t flags)
{
+#ifdef FEATURE_64BIT_ALIGNMENT
CONTRACTL {
-#ifdef FEATURE_REDHAWK
- // Under Redhawk NULL is returned on failure.
NOTHROW;
-#else
- THROWS;
-#endif
GC_TRIGGERS;
} CONTRACTL_END;
- assert (!GCHeap::UseAllocationContexts());
-
Object* newAlloc = NULL;
{
@@ -34270,61 +34153,60 @@ GCHeap::AllocAlign8( size_t size, uint32_t flags)
}
return newAlloc;
+#else
+ UNREFERENCED_PARAMETER(size);
+ UNREFERENCED_PARAMETER(flags);
+ assert(!"should not call GCHeap::AllocAlign8 without FEATURE_64BIT_ALIGNMENT defined!");
+ return nullptr;
+#endif //FEATURE_64BIT_ALIGNMENT
}
// Allocate small object with an alignment requirement of 8-bytes. Allocation context version.
Object*
-GCHeap::AllocAlign8(alloc_context* acontext, size_t size, uint32_t flags )
+GCHeap::AllocAlign8(gc_alloc_context* ctx, size_t size, uint32_t flags )
{
+#ifdef FEATURE_64BIT_ALIGNMENT
CONTRACTL {
-#ifdef FEATURE_REDHAWK
- // Under Redhawk NULL is returned on failure.
NOTHROW;
-#else
- THROWS;
-#endif
GC_TRIGGERS;
} CONTRACTL_END;
+ alloc_context* acontext = static_cast<alloc_context*>(ctx);
+
#ifdef MULTIPLE_HEAPS
- if (acontext->alloc_heap == 0)
+ if (acontext->get_alloc_heap() == 0)
{
AssignHeap (acontext);
- assert (acontext->alloc_heap);
+ assert (acontext->get_alloc_heap());
}
- gc_heap* hp = acontext->alloc_heap->pGenGCHeap;
+ gc_heap* hp = acontext->get_alloc_heap()->pGenGCHeap;
#else
gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS
return AllocAlign8Common(hp, acontext, size, flags);
+#else
+ UNREFERENCED_PARAMETER(ctx);
+ UNREFERENCED_PARAMETER(size);
+ UNREFERENCED_PARAMETER(flags);
+ assert(!"should not call GCHeap::AllocAlign8 without FEATURE_64BIT_ALIGNMENT defined!");
+ return nullptr;
+#endif //FEATURE_64BIT_ALIGNMENT
}
// Common code used by both variants of AllocAlign8 above.
Object*
GCHeap::AllocAlign8Common(void* _hp, alloc_context* acontext, size_t size, uint32_t flags)
{
+#ifdef FEATURE_64BIT_ALIGNMENT
CONTRACTL {
-#ifdef FEATURE_REDHAWK
- // Under Redhawk NULL is returned on failure.
NOTHROW;
-#else
- THROWS;
-#endif
GC_TRIGGERS;
} CONTRACTL_END;
gc_heap* hp = (gc_heap*)_hp;
-#if defined(_DEBUG) && !defined(FEATURE_REDHAWK)
- if (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP))
- {
- char *a = new char;
- delete a;
- }
-#endif //_DEBUG && !FEATURE_REDHAWK
-
TRIGGERSGC();
Object* newAlloc = NULL;
@@ -34424,30 +34306,24 @@ GCHeap::AllocAlign8Common(void* _hp, alloc_context* acontext, size_t size, uint3
AllocCount++;
#endif //TRACE_GC
return newAlloc;
-}
+#else
+ UNREFERENCED_PARAMETER(_hp);
+ UNREFERENCED_PARAMETER(acontext);
+ UNREFERENCED_PARAMETER(size);
+ UNREFERENCED_PARAMETER(flags);
+ assert(!"Should not call GCHeap::AllocAlign8Common without FEATURE_64BIT_ALIGNMENT defined!");
+ return nullptr;
#endif // FEATURE_64BIT_ALIGNMENT
+}
Object *
GCHeap::AllocLHeap( size_t size, uint32_t flags REQD_ALIGN_DCL)
{
CONTRACTL {
-#ifdef FEATURE_REDHAWK
- // Under Redhawk NULL is returned on failure.
NOTHROW;
-#else
- THROWS;
-#endif
GC_TRIGGERS;
} CONTRACTL_END;
-#if defined(_DEBUG) && !defined(FEATURE_REDHAWK)
- if (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP))
- {
- char *a = new char;
- delete a;
- }
-#endif //_DEBUG && !FEATURE_REDHAWK
-
TRIGGERSGC();
Object* newAlloc = NULL;
@@ -34499,29 +34375,17 @@ GCHeap::AllocLHeap( size_t size, uint32_t flags REQD_ALIGN_DCL)
}
Object*
-GCHeap::Alloc(alloc_context* acontext, size_t size, uint32_t flags REQD_ALIGN_DCL)
+GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_DCL)
{
CONTRACTL {
-#ifdef FEATURE_REDHAWK
- // Under Redhawk NULL is returned on failure.
NOTHROW;
-#else
- THROWS;
-#endif
GC_TRIGGERS;
} CONTRACTL_END;
-#if defined(_DEBUG) && !defined(FEATURE_REDHAWK)
- if (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP))
- {
- char *a = new char;
- delete a;
- }
-#endif //_DEBUG && !FEATURE_REDHAWK
-
TRIGGERSGC();
Object* newAlloc = NULL;
+ alloc_context* acontext = static_cast<alloc_context*>(context);
#ifdef TRACE_GC
#ifdef COUNT_CYCLES
@@ -34534,10 +34398,10 @@ GCHeap::Alloc(alloc_context* acontext, size_t size, uint32_t flags REQD_ALIGN_DC
#endif //TRACE_GC
#ifdef MULTIPLE_HEAPS
- if (acontext->alloc_heap == 0)
+ if (acontext->get_alloc_heap() == 0)
{
AssignHeap (acontext);
- assert (acontext->alloc_heap);
+ assert (acontext->get_alloc_heap());
}
#endif //MULTIPLE_HEAPS
@@ -34546,7 +34410,7 @@ GCHeap::Alloc(alloc_context* acontext, size_t size, uint32_t flags REQD_ALIGN_DC
#endif // FEATURE_REDHAWK
#ifdef MULTIPLE_HEAPS
- gc_heap* hp = acontext->alloc_heap->pGenGCHeap;
+ gc_heap* hp = acontext->get_alloc_heap()->pGenGCHeap;
#else
gc_heap* hp = pGenGCHeap;
#ifdef _PREFAST_
@@ -34591,8 +34455,9 @@ GCHeap::Alloc(alloc_context* acontext, size_t size, uint32_t flags REQD_ALIGN_DC
}
void
-GCHeap::FixAllocContext (alloc_context* acontext, BOOL lockp, void* arg, void *heap)
+GCHeap::FixAllocContext (gc_alloc_context* context, BOOL lockp, void* arg, void *heap)
{
+ alloc_context* acontext = static_cast<alloc_context*>(context);
#ifdef MULTIPLE_HEAPS
if (arg != 0)
@@ -35017,7 +34882,6 @@ void gc_heap::do_post_gc()
{
if (!settings.concurrent)
{
- GCProfileWalkHeap();
initGCShadow();
}
@@ -35037,13 +34901,10 @@ void gc_heap::do_post_gc()
GCToEEInterface::GcDone(settings.condemned_generation);
-#ifdef GC_PROFILING
- if (!settings.concurrent)
- {
- UpdateGenerationBounds();
- GarbageCollectionFinishedCallback();
- }
-#endif // GC_PROFILING
+ GCToEEInterface::DiagGCEnd(VolatileLoad(&settings.gc_index),
+ (uint32_t)settings.condemned_generation,
+ (uint32_t)settings.reason,
+ !!settings.concurrent);
//dprintf (1, (" ****end of Garbage Collection**** %d(gen0:%d)(%d)",
dprintf (1, ("*EGC* %d(gen0:%d)(%d)(%s)",
@@ -35168,7 +35029,7 @@ GCHeap::GarbageCollectGeneration (unsigned int gen, gc_reason reason)
dprintf (2, ("Suspending EE"));
BEGIN_TIMING(suspend_ee_during_log);
- GCToEEInterface::SuspendEE(GCToEEInterface::SUSPEND_FOR_GC);
+ GCToEEInterface::SuspendEE(SUSPEND_FOR_GC);
END_TIMING(suspend_ee_during_log);
gc_heap::proceed_with_gc_p = gc_heap::should_proceed_with_gc();
gc_heap::disable_preemptive (current_thread, cooperative_mode);
@@ -35383,8 +35244,8 @@ size_t GCHeap::ApproxTotalBytesInUse(BOOL small_heap_only)
void GCHeap::AssignHeap (alloc_context* acontext)
{
// Assign heap based on processor
- acontext->alloc_heap = GetHeap(heap_select::select_heap(acontext, 0));
- acontext->home_heap = acontext->alloc_heap;
+ acontext->set_alloc_heap(GetHeap(heap_select::select_heap(acontext, 0)));
+ acontext->set_home_heap(acontext->get_alloc_heap());
}
GCHeap* GCHeap::GetHeap (int n)
{
@@ -35393,11 +35254,12 @@ GCHeap* GCHeap::GetHeap (int n)
}
#endif //MULTIPLE_HEAPS
-bool GCHeap::IsThreadUsingAllocationContextHeap(alloc_context* acontext, int thread_number)
+bool GCHeap::IsThreadUsingAllocationContextHeap(gc_alloc_context* context, int thread_number)
{
+ alloc_context* acontext = static_cast<alloc_context*>(context);
#ifdef MULTIPLE_HEAPS
- return ((acontext->home_heap == GetHeap(thread_number)) ||
- ((acontext->home_heap == 0) && (thread_number == 0)));
+ return ((acontext->get_home_heap() == GetHeap(thread_number)) ||
+ ((acontext->get_home_heap() == 0) && (thread_number == 0)));
#else
UNREFERENCED_PARAMETER(acontext);
UNREFERENCED_PARAMETER(thread_number);
@@ -35427,7 +35289,8 @@ int GCHeap::GetHomeHeapNumber ()
{
if (pThread)
{
- GCHeap *hp = GCToEEInterface::GetAllocContext(pThread)->home_heap;
+ gc_alloc_context* ctx = GCToEEInterface::GetAllocContext(pThread);
+ GCHeap *hp = static_cast<alloc_context*>(ctx)->get_home_heap();
if (hp == gc_heap::g_heaps[i]->vm_heap) return i;
}
}
@@ -35639,7 +35502,7 @@ size_t GCHeap::GetValidGen0MaxSize(size_t seg_size)
{
size_t gen0size = g_pConfig->GetGCgen0size();
- if ((gen0size == 0) || !GCHeap::IsValidGen0MaxSize(gen0size))
+ if ((gen0size == 0) || !g_theGCHeap->IsValidGen0MaxSize(gen0size))
{
#ifdef SERVER_GC
// performance data seems to indicate halving the size results
@@ -35869,19 +35732,19 @@ GCHeap::SetCardsAfterBulkCopy( Object **StartPoint, size_t len )
#ifdef BACKGROUND_GC
(!gc_heap::settings.concurrent) &&
#endif //BACKGROUND_GC
- (GCHeap::GetGCHeap()->WhichGeneration( (Object*) StartPoint ) == 0))
+ (g_theGCHeap->WhichGeneration( (Object*) StartPoint ) == 0))
return;
rover = StartPoint;
end = StartPoint + (len/sizeof(Object*));
while (rover < end)
{
- if ( (((uint8_t*)*rover) >= g_ephemeral_low) && (((uint8_t*)*rover) < g_ephemeral_high) )
+ if ( (((uint8_t*)*rover) >= g_gc_ephemeral_low) && (((uint8_t*)*rover) < g_gc_ephemeral_high) )
{
// Set Bit For Card and advance to next card
size_t card = gcard_of ((uint8_t*)rover);
- Interlocked::Or (&g_card_table[card/card_word_width], (1U << (card % card_word_width)));
+ Interlocked::Or (&g_gc_card_table[card/card_word_width], (1U << (card % card_word_width)));
// Skip to next card for the object
rover = (Object**)align_on_card ((uint8_t*)(rover+1));
}
@@ -36000,12 +35863,7 @@ bool
CFinalize::RegisterForFinalization (int gen, Object* obj, size_t size)
{
CONTRACTL {
-#ifdef FEATURE_REDHAWK
- // Under Redhawk false is returned on failure.
NOTHROW;
-#else
- THROWS;
-#endif
GC_NOTRIGGER;
} CONTRACTL_END;
@@ -36043,11 +35901,7 @@ CFinalize::RegisterForFinalization (int gen, Object* obj, size_t size)
{
GCToOSInterface::DebugBreak();
}
-#ifdef FEATURE_REDHAWK
return false;
-#else
- ThrowOutOfMemory();
-#endif
}
}
Object*** end_si = &SegQueueLimit (dest);
@@ -36333,21 +36187,17 @@ CFinalize::GcScanRoots (promote_func* fn, int hn, ScanContext *pSC)
}
}
-#ifdef GC_PROFILING
-void CFinalize::WalkFReachableObjects (gc_heap* hp)
+void CFinalize::WalkFReachableObjects (fq_walk_fn fn)
{
- BEGIN_PIN_PROFILER(CORProfilerPresent());
Object** startIndex = SegQueue (CriticalFinalizerListSeg);
Object** stopCriticalIndex = SegQueueLimit (CriticalFinalizerListSeg);
Object** stopIndex = SegQueueLimit (FinalizerListSeg);
for (Object** po = startIndex; po < stopIndex; po++)
{
//report *po
- g_profControlBlock.pProfInterface->FinalizeableObjectQueued(po < stopCriticalIndex, (ObjectID)*po);
+ fn(po < stopCriticalIndex, *po);
}
- END_PIN_PROFILER();
}
-#endif //GC_PROFILING
BOOL
CFinalize::ScanForFinalization (promote_func* pfn, int gen, BOOL mark_only_p,
@@ -36374,7 +36224,7 @@ CFinalize::ScanForFinalization (promote_func* pfn, int gen, BOOL mark_only_p,
{
CObjectHeader* obj = (CObjectHeader*)*i;
dprintf (3, ("scanning: %Ix", (size_t)obj));
- if (!GCHeap::GetGCHeap()->IsPromoted (obj))
+ if (!g_theGCHeap->IsPromoted (obj))
{
dprintf (3, ("freacheable: %Ix", (size_t)obj));
@@ -36507,7 +36357,7 @@ CFinalize::UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p)
for (Object** po = startIndex;
po < SegQueueLimit (gen_segment(i)); po++)
{
- int new_gen = GCHeap::GetGCHeap()->WhichGeneration (*po);
+ int new_gen = g_theGCHeap->WhichGeneration (*po);
if (new_gen != i)
{
if (new_gen > i)
@@ -36567,7 +36417,7 @@ void CFinalize::CheckFinalizerObjects()
for (Object **po = startIndex; po < stopIndex; po++)
{
- if ((int)GCHeap::GetGCHeap()->WhichGeneration (*po) < i)
+ if ((int)g_theGCHeap->WhichGeneration (*po) < i)
FATAL_GC_ERROR ();
((CObjectHeader*)*po)->Validate();
}
@@ -36583,8 +36433,7 @@ void CFinalize::CheckFinalizerObjects()
// End of VM specific support
//
//------------------------------------------------------------------------------
-
-void gc_heap::walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p)
+void gc_heap::walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p)
{
generation* gen = gc_heap::generation_of (gen_number);
heap_segment* seg = generation_start_segment (gen);
@@ -36640,8 +36489,28 @@ void gc_heap::walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_la
}
}
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-void GCHeap::WalkObject (Object* obj, walk_fn fn, void* context)
+void gc_heap::walk_finalize_queue (fq_walk_fn fn)
+{
+#ifdef FEATURE_PREMORTEM_FINALIZATION
+ finalize_queue->WalkFReachableObjects (fn);
+#endif //FEATURE_PREMORTEM_FINALIZATION
+}
+
+void gc_heap::walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p)
+{
+#ifdef MULTIPLE_HEAPS
+ for (int hn = 0; hn < gc_heap::n_heaps; hn++)
+ {
+ gc_heap* hp = gc_heap::g_heaps [hn];
+
+ hp->walk_heap_per_heap (fn, context, gen_number, walk_large_object_heap_p);
+ }
+#else
+ walk_heap_per_heap(fn, context, gen_number, walk_large_object_heap_p);
+#endif //MULTIPLE_HEAPS
+}
+
+void GCHeap::DiagWalkObject (Object* obj, walk_fn fn, void* context)
{
uint8_t* o = (uint8_t*)obj;
if (o)
@@ -36658,7 +36527,46 @@ void GCHeap::WalkObject (Object* obj, walk_fn fn, void* context)
);
}
}
-#endif //defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+
+void GCHeap::DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, size_t diag_context, walk_surv_type type)
+{
+ gc_heap* hp = (gc_heap*)gc_context;
+ hp->walk_survivors (fn, diag_context, type);
+}
+
+void GCHeap::DiagWalkHeap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p)
+{
+ gc_heap::walk_heap (fn, context, gen_number, walk_large_object_heap_p);
+}
+
+void GCHeap::DiagWalkFinalizeQueue (void* gc_context, fq_walk_fn fn)
+{
+ gc_heap* hp = (gc_heap*)gc_context;
+ hp->walk_finalize_queue (fn);
+}
+
+void GCHeap::DiagScanFinalizeQueue (fq_scan_fn fn, ScanContext* sc)
+{
+#ifdef MULTIPLE_HEAPS
+ for (int hn = 0; hn < gc_heap::n_heaps; hn++)
+ {
+ gc_heap* hp = gc_heap::g_heaps [hn];
+ hp->finalize_queue->GcScanRoots(fn, hn, sc);
+ }
+#else
+ pGenGCHeap->finalize_queue->GcScanRoots(fn, 0, sc);
+#endif //MULTIPLE_HEAPS
+}
+
+void GCHeap::DiagScanHandles (handle_scan_fn fn, int gen_number, ScanContext* context)
+{
+ GCScan::GcScanHandlesForProfilerAndETW (max_generation, context, fn);
+}
+
+void GCHeap::DiagScanDependentHandles (handle_scan_fn fn, int gen_number, ScanContext* context)
+{
+ GCScan::GcScanDependentHandlesForProfilerAndETW (max_generation, context, fn);
+}
// Go through and touch (read) each page straddled by a memory block.
void TouchPages(void * pStart, size_t cb)
@@ -36704,11 +36612,11 @@ void initGCShadow()
if (!(g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_BARRIERCHECK))
return;
- size_t len = g_highest_address - g_lowest_address;
+ size_t len = g_gc_highest_address - g_gc_lowest_address;
if (len > (size_t)(g_GCShadowEnd - g_GCShadow))
{
deleteGCShadow();
- g_GCShadowEnd = g_GCShadow = (uint8_t *)GCToOSInterface::VirtualReserve(0, len, 0, VirtualReserveFlags::None);
+ g_GCShadowEnd = g_GCShadow = (uint8_t *)GCToOSInterface::VirtualReserve(len, 0, VirtualReserveFlags::None);
if (g_GCShadow == NULL || !GCToOSInterface::VirtualCommit(g_GCShadow, len))
{
_ASSERTE(!"Not enough memory to run HeapVerify level 2");
@@ -36723,10 +36631,10 @@ void initGCShadow()
g_GCShadowEnd += len;
}
- // save the value of g_lowest_address at this time. If this value changes before
+ // save the value of g_gc_lowest_address at this time. If this value changes before
// the next call to checkGCWriteBarrier() it means we extended the heap (with a
// large object segment most probably), and the whole shadow segment is inconsistent.
- g_shadow_lowest_address = g_lowest_address;
+ g_shadow_lowest_address = g_gc_lowest_address;
//****** Copy the whole GC heap ******
//
@@ -36736,7 +36644,7 @@ void initGCShadow()
generation* gen = gc_heap::generation_of (max_generation);
heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
- ptrdiff_t delta = g_GCShadow - g_lowest_address;
+ ptrdiff_t delta = g_GCShadow - g_gc_lowest_address;
BOOL small_object_segments = TRUE;
while(1)
{
@@ -36764,7 +36672,7 @@ void initGCShadow()
// test to see if 'ptr' was only updated via the write barrier.
inline void testGCShadow(Object** ptr)
{
- Object** shadow = (Object**) &g_GCShadow[((uint8_t*) ptr - g_lowest_address)];
+ Object** shadow = (Object**) &g_GCShadow[((uint8_t*) ptr - g_gc_lowest_address)];
if (*ptr != 0 && (uint8_t*) shadow < g_GCShadowEnd && *ptr != *shadow)
{
@@ -36823,9 +36731,9 @@ void testGCShadowHelper (uint8_t* x)
// Walk the whole heap, looking for pointers that were not updated with the write barrier.
void checkGCWriteBarrier()
{
- // g_shadow_lowest_address != g_lowest_address means the GC heap was extended by a segment
+ // g_shadow_lowest_address != g_gc_lowest_address means the GC heap was extended by a segment
// and the GC shadow segment did not track that change!
- if (g_GCShadowEnd <= g_GCShadow || g_shadow_lowest_address != g_lowest_address)
+ if (g_GCShadowEnd <= g_GCShadow || g_shadow_lowest_address != g_gc_lowest_address)
{
// No shadow stack, nothing to check.
return;
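
The gc.cpp changes above replace the direct ETW::GCLog profiler calls with callback-driven walks: walk_survivors dispatches on walk_surv_type (walk_for_gc, walk_for_bgc, walk_for_loh) and hands each surviving plug to a caller-supplied record_surv_fn, while the new GCHeap::Diag* entry points expose those walks to the diagnostics layer. Below is a minimal consumer sketch; the record_surv_fn parameter list is inferred from the fn(...) call sites above and is an assumption, not part of this diff.

    // Sketch only: a hypothetical diagnostics callback fed by the survivor walk.
    // Parameter types are inferred from the call sites in the diff above.
    static void OnSurvivor(uint8_t* plug_start, uint8_t* plug_end, ptrdiff_t reloc,
                           size_t diag_context, BOOL compacting_p, BOOL bgc_p)
    {
        // e.g. buffer the surviving/moved range for ETW or a profiler;
        // reloc is 0 for the non-compacting BGC and LOH walks shown above.
    }

    // Hypothetical caller: walk the survivors of the GC that just ran on one heap.
    void WalkSurvivorsOfCurrentGC(GCHeap* heap, void* per_heap_gc_context)
    {
        size_t diag_context = 0; // opaque cookie threaded through to OnSurvivor
        heap->DiagWalkSurvivorsWithType(per_heap_gc_context, &OnSurvivor, diag_context, walk_for_gc);
    }
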
diff --git a/src/gc/gc.h b/src/gc/gc.h
index 14c6baee83..b7f1e956b6 100644
--- a/src/gc/gc.h
+++ b/src/gc/gc.h
@@ -14,9 +14,24 @@ Module Name:
#ifndef __GC_H
#define __GC_H
-#ifdef PROFILING_SUPPORTED
-#define GC_PROFILING //Turn on profiling
-#endif // PROFILING_SUPPORTED
+#ifdef Sleep
+// This is a funny workaround for the fact that "common.h" defines Sleep to be
+// Dont_Use_Sleep, with the hope of causing linker errors whenever someone tries to call Sleep.
+//
+// However, GCToOSInterface defines a function called Sleep, which (due to this define) becomes
+// "Dont_Use_Sleep", which the GC in turn happily uses. The symbol that GCToOSInterface actually
+// exported was called "GCToOSInterface::Dont_Use_Sleep". While we progress in making the GC standalone,
+// we'll need to break the dependency on common.h (the VM header) and this problem will become moot.
+#undef Sleep
+#endif // Sleep
+
+#include "gcinterface.h"
+#include "env/gcenv.os.h"
+#include "env/gcenv.ee.h"
+
+#ifdef FEATURE_STANDALONE_GC
+#include "gcenv.ee.standalone.inl"
+#endif // FEATURE_STANDALONE_GC
/*
* Promotion Function Prototypes
@@ -26,19 +41,6 @@ typedef void enum_func (Object*);
// callback functions for heap walkers
typedef void object_callback_func(void * pvContext, void * pvDataLoc);
-// stub type to abstract a heap segment
-struct gc_heap_segment_stub;
-typedef gc_heap_segment_stub *segment_handle;
-
-struct segment_info
-{
- void * pvMem; // base of the allocation, not the first object (must add ibFirstObject)
- size_t ibFirstObject; // offset to the base of the first object in the segment
- size_t ibAllocated; // limit of allocated memory in the segment (>= firstobject)
- size_t ibCommit; // limit of committed memory in the segment (>= alllocated)
- size_t ibReserved; // limit of reserved memory in the segment (>= commit)
-};
-
/*!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!*/
/* If you modify failure_get_memory and */
/* oom_reason be sure to make the corresponding */
@@ -80,6 +82,24 @@ enum oom_reason
oom_unproductive_full_gc = 6
};
+// TODO : it would be easier to make this an ORed value
+enum gc_reason
+{
+ reason_alloc_soh = 0,
+ reason_induced = 1,
+ reason_lowmemory = 2,
+ reason_empty = 3,
+ reason_alloc_loh = 4,
+ reason_oos_soh = 5,
+ reason_oos_loh = 6,
+ reason_induced_noforce = 7, // it's an induced GC and doesn't have to be blocking.
+ reason_gcstress = 8, // this turns into reason_induced & gc_mechanisms.stress_induced = true
+ reason_lowmemory_blocking = 9,
+ reason_induced_compacting = 10,
+ reason_lowmemory_host = 11,
+ reason_max
+};
+
struct oom_history
{
oom_reason reason;
@@ -97,28 +117,16 @@ struct oom_history
class CObjectHeader;
class Object;
-class GCHeap;
+class IGCHeapInternal;
/* misc defines */
#define LARGE_OBJECT_SIZE ((size_t)(85000))
-GPTR_DECL(GCHeap, g_pGCHeap);
-
#ifdef GC_CONFIG_DRIVEN
#define MAX_GLOBAL_GC_MECHANISMS_COUNT 6
GARY_DECL(size_t, gc_global_mechanisms, MAX_GLOBAL_GC_MECHANISMS_COUNT);
#endif //GC_CONFIG_DRIVEN
-#ifndef DACCESS_COMPILE
-extern "C" {
-#endif
-GPTR_DECL(uint8_t,g_lowest_address);
-GPTR_DECL(uint8_t,g_highest_address);
-GPTR_DECL(uint32_t,g_card_table);
-#ifndef DACCESS_COMPILE
-}
-#endif
-
#ifdef DACCESS_COMPILE
class DacHeapWalker;
#endif
@@ -127,137 +135,28 @@ class DacHeapWalker;
#define _LOGALLOC
#endif
-#ifdef WRITE_BARRIER_CHECK
-//always defined, but should be 0 in Server GC
-extern uint8_t* g_GCShadow;
-extern uint8_t* g_GCShadowEnd;
-// saves the g_lowest_address in between GCs to verify the consistency of the shadow segment
-extern uint8_t* g_shadow_lowest_address;
-#endif
-
#define MP_LOCKS
-extern "C" uint8_t* g_ephemeral_low;
-extern "C" uint8_t* g_ephemeral_high;
+extern "C" uint32_t* g_gc_card_table;
+extern "C" uint8_t* g_gc_lowest_address;
+extern "C" uint8_t* g_gc_highest_address;
+extern "C" uint8_t* g_gc_ephemeral_low;
+extern "C" uint8_t* g_gc_ephemeral_high;
namespace WKS {
- ::GCHeap* CreateGCHeap();
+ ::IGCHeapInternal* CreateGCHeap();
class GCHeap;
class gc_heap;
}
#if defined(FEATURE_SVR_GC)
namespace SVR {
- ::GCHeap* CreateGCHeap();
+ ::IGCHeapInternal* CreateGCHeap();
class GCHeap;
class gc_heap;
}
#endif // defined(FEATURE_SVR_GC)
-/*
- * Ephemeral Garbage Collected Heap Interface
- */
-
-
-struct alloc_context
-{
- friend class WKS::gc_heap;
-#if defined(FEATURE_SVR_GC)
- friend class SVR::gc_heap;
- friend class SVR::GCHeap;
-#endif // defined(FEATURE_SVR_GC)
- friend struct ClassDumpInfo;
-
- uint8_t* alloc_ptr;
- uint8_t* alloc_limit;
- int64_t alloc_bytes; //Number of bytes allocated on SOH by this context
- int64_t alloc_bytes_loh; //Number of bytes allocated on LOH by this context
-#if defined(FEATURE_SVR_GC)
- SVR::GCHeap* alloc_heap;
- SVR::GCHeap* home_heap;
-#endif // defined(FEATURE_SVR_GC)
- int alloc_count;
-public:
-
- void init()
- {
- LIMITED_METHOD_CONTRACT;
-
- alloc_ptr = 0;
- alloc_limit = 0;
- alloc_bytes = 0;
- alloc_bytes_loh = 0;
-#if defined(FEATURE_SVR_GC)
- alloc_heap = 0;
- home_heap = 0;
-#endif // defined(FEATURE_SVR_GC)
- alloc_count = 0;
- }
-};
-
-struct ScanContext
-{
- Thread* thread_under_crawl;
- int thread_number;
- uintptr_t stack_limit; // Lowest point on the thread stack that the scanning logic is permitted to read
- BOOL promotion; //TRUE: Promotion, FALSE: Relocation.
- BOOL concurrent; //TRUE: concurrent scanning
-#if CHECK_APP_DOMAIN_LEAKS || defined (FEATURE_APPDOMAIN_RESOURCE_MONITORING) || defined (DACCESS_COMPILE)
- AppDomain *pCurrentDomain;
-#endif //CHECK_APP_DOMAIN_LEAKS || FEATURE_APPDOMAIN_RESOURCE_MONITORING || DACCESS_COMPILE
-
-#ifndef FEATURE_REDHAWK
-#if defined(GC_PROFILING) || defined (DACCESS_COMPILE)
- MethodDesc *pMD;
-#endif //GC_PROFILING || DACCESS_COMPILE
-#endif // FEATURE_REDHAWK
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- EtwGCRootKind dwEtwRootKind;
-#endif // GC_PROFILING || FEATURE_EVENT_TRACE
-
- ScanContext()
- {
- LIMITED_METHOD_CONTRACT;
-
- thread_under_crawl = 0;
- thread_number = -1;
- stack_limit = 0;
- promotion = FALSE;
- concurrent = FALSE;
-#ifdef GC_PROFILING
- pMD = NULL;
-#endif //GC_PROFILING
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- dwEtwRootKind = kEtwGCRootKindOther;
-#endif // GC_PROFILING || FEATURE_EVENT_TRACE
- }
-};
-
-typedef BOOL (* walk_fn)(Object*, void*);
-typedef void (* gen_walk_fn)(void *context, int generation, uint8_t *range_start, uint8_t * range_end, uint8_t *range_reserved);
-
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-struct ProfilingScanContext : ScanContext
-{
- BOOL fProfilerPinned;
- void * pvEtwContext;
- void *pHeapId;
-
- ProfilingScanContext(BOOL fProfilerPinnedParam) : ScanContext()
- {
- LIMITED_METHOD_CONTRACT;
-
- pHeapId = NULL;
- fProfilerPinned = fProfilerPinnedParam;
- pvEtwContext = NULL;
-#ifdef FEATURE_CONSERVATIVE_GC
- // To not confuse GCScan::GcScanRoots
- promotion = g_pConfig->GetGCConservative();
-#endif
- }
-};
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-
#ifdef STRESS_HEAP
#define IN_STRESS_HEAP(x) x
#define STRESS_HEAP_ARG(x) ,x
@@ -266,7 +165,6 @@ struct ProfilingScanContext : ScanContext
#define STRESS_HEAP_ARG(x)
#endif // STRESS_HEAP
-
//dynamic data interface
struct gc_counters
{
@@ -275,51 +173,6 @@ struct gc_counters
size_t collection_count;
};
-// !!!!!!!!!!!!!!!!!!!!!!!
-// make sure you change the def in bcl\system\gc.cs
-// if you change this!
-enum collection_mode
-{
- collection_non_blocking = 0x00000001,
- collection_blocking = 0x00000002,
- collection_optimized = 0x00000004,
- collection_compacting = 0x00000008
-#ifdef STRESS_HEAP
- , collection_gcstress = 0x80000000
-#endif // STRESS_HEAP
-};
-
-// !!!!!!!!!!!!!!!!!!!!!!!
-// make sure you change the def in bcl\system\gc.cs
-// if you change this!
-enum wait_full_gc_status
-{
- wait_full_gc_success = 0,
- wait_full_gc_failed = 1,
- wait_full_gc_cancelled = 2,
- wait_full_gc_timeout = 3,
- wait_full_gc_na = 4
-};
-
-// !!!!!!!!!!!!!!!!!!!!!!!
-// make sure you change the def in bcl\system\gc.cs
-// if you change this!
-enum start_no_gc_region_status
-{
- start_no_gc_success = 0,
- start_no_gc_no_memory = 1,
- start_no_gc_too_large = 2,
- start_no_gc_in_progress = 3
-};
-
-enum end_no_gc_region_status
-{
- end_no_gc_success = 0,
- end_no_gc_not_in_progress = 1,
- end_no_gc_induced = 2,
- end_no_gc_alloc_exceeded = 3
-};
-
enum bgc_state
{
bgc_not_in_process = 0,
@@ -352,321 +205,82 @@ void record_changed_seg (uint8_t* start, uint8_t* end,
void record_global_mechanism (int mech_index);
#endif //GC_CONFIG_DRIVEN
-//constants for the flags parameter to the gc call back
-
-#define GC_CALL_INTERIOR 0x1
-#define GC_CALL_PINNED 0x2
-#define GC_CALL_CHECK_APP_DOMAIN 0x4
-
-//flags for GCHeap::Alloc(...)
-#define GC_ALLOC_FINALIZE 0x1
-#define GC_ALLOC_CONTAINS_REF 0x2
-#define GC_ALLOC_ALIGN8_BIAS 0x4
-#define GC_ALLOC_ALIGN8 0x8
-
-class GCHeap {
- friend struct ::_DacGlobals;
-#ifdef DACCESS_COMPILE
- friend class ClrDataAccess;
-#endif
-
-public:
-
- virtual ~GCHeap() {}
-
- static GCHeap *GetGCHeap()
+struct alloc_context : gc_alloc_context
+{
+#ifdef FEATURE_SVR_GC
+ inline SVR::GCHeap* get_alloc_heap()
{
- LIMITED_METHOD_CONTRACT;
-
- _ASSERTE(g_pGCHeap != NULL);
- return g_pGCHeap;
+ return static_cast<SVR::GCHeap*>(gc_reserved_1);
}
-#ifndef DACCESS_COMPILE
- static BOOL IsGCInProgress(BOOL bConsiderGCStart = FALSE)
+ inline void set_alloc_heap(SVR::GCHeap* heap)
{
- WRAPPER_NO_CONTRACT;
-
- return (IsGCHeapInitialized() ? GetGCHeap()->IsGCInProgressHelper(bConsiderGCStart) : false);
- }
-#endif
-
- static BOOL IsGCHeapInitialized()
- {
- LIMITED_METHOD_CONTRACT;
-
- return (g_pGCHeap != NULL);
+ gc_reserved_1 = heap;
}
- static void WaitForGCCompletion(BOOL bConsiderGCStart = FALSE)
+ inline SVR::GCHeap* get_home_heap()
{
- WRAPPER_NO_CONTRACT;
-
- if (IsGCHeapInitialized())
- GetGCHeap()->WaitUntilGCComplete(bConsiderGCStart);
- }
-
- // The runtime needs to know whether we're using workstation or server GC
- // long before the GCHeap is created. So IsServerHeap cannot be a virtual
- // method on GCHeap. Instead we make it a static method and initialize
- // gcHeapType before any of the calls to IsServerHeap. Note that this also
- // has the advantage of getting the answer without an indirection
- // (virtual call), which is important for perf critical codepaths.
-
- #ifndef DACCESS_COMPILE
- static void InitializeHeapType(bool bServerHeap)
- {
- LIMITED_METHOD_CONTRACT;
-#ifdef FEATURE_SVR_GC
- gcHeapType = bServerHeap ? GC_HEAP_SVR : GC_HEAP_WKS;
-#ifdef WRITE_BARRIER_CHECK
- if (gcHeapType == GC_HEAP_SVR)
- {
- g_GCShadow = 0;
- g_GCShadowEnd = 0;
- }
-#endif
-#else // FEATURE_SVR_GC
- UNREFERENCED_PARAMETER(bServerHeap);
- CONSISTENCY_CHECK(bServerHeap == false);
-#endif // FEATURE_SVR_GC
- }
- #endif
-
- static BOOL IsValidSegmentSize(size_t cbSize)
- {
- //Must be aligned on a Mb and greater than 4Mb
- return (((cbSize & (1024*1024-1)) ==0) && (cbSize >> 22));
+ return static_cast<SVR::GCHeap*>(gc_reserved_2);
}
- static BOOL IsValidGen0MaxSize(size_t cbSize)
+ inline void set_home_heap(SVR::GCHeap* heap)
{
- return (cbSize >= 64*1024);
+ gc_reserved_2 = heap;
}
-
- inline static bool IsServerHeap()
- {
- LIMITED_METHOD_CONTRACT;
-#ifdef FEATURE_SVR_GC
- _ASSERTE(gcHeapType != GC_HEAP_INVALID);
- return (gcHeapType == GC_HEAP_SVR);
-#else // FEATURE_SVR_GC
- return false;
#endif // FEATURE_SVR_GC
- }
+};
- inline static bool UseAllocationContexts()
- {
- WRAPPER_NO_CONTRACT;
-#ifdef FEATURE_REDHAWK
- // SIMPLIFY: only use allocation contexts
- return true;
-#else
-#if defined(_TARGET_ARM_) || defined(FEATURE_PAL)
- return true;
-#else
- return ((IsServerHeap() ? true : (g_SystemInfo.dwNumberOfProcessors >= 2)));
+class IGCHeapInternal : public IGCHeap {
+ friend struct ::_DacGlobals;
+#ifdef DACCESS_COMPILE
+ friend class ClrDataAccess;
#endif
-#endif
- }
-
- inline static bool MarkShouldCompeteForStatics()
- {
- WRAPPER_NO_CONTRACT;
-
- return IsServerHeap() && g_SystemInfo.dwNumberOfProcessors >= 2;
- }
-#ifndef DACCESS_COMPILE
- static GCHeap * CreateGCHeap()
- {
- WRAPPER_NO_CONTRACT;
-
- GCHeap * pGCHeap;
-
-#if defined(FEATURE_SVR_GC)
- pGCHeap = (IsServerHeap() ? SVR::CreateGCHeap() : WKS::CreateGCHeap());
-#else
- pGCHeap = WKS::CreateGCHeap();
-#endif // defined(FEATURE_SVR_GC)
-
- g_pGCHeap = pGCHeap;
- return pGCHeap;
- }
-#endif // DACCESS_COMPILE
+public:
-private:
- typedef enum
- {
- GC_HEAP_INVALID = 0,
- GC_HEAP_WKS = 1,
- GC_HEAP_SVR = 2
- } GC_HEAP_TYPE;
-
-#ifdef FEATURE_SVR_GC
- SVAL_DECL(uint32_t,gcHeapType);
-#endif // FEATURE_SVR_GC
+ virtual ~IGCHeapInternal() {}
-public:
- // TODO Synchronization, should be moved out
- virtual BOOL IsGCInProgressHelper (BOOL bConsiderGCStart = FALSE) = 0;
- virtual uint32_t WaitUntilGCComplete (BOOL bConsiderGCStart = FALSE) = 0;
- virtual void SetGCInProgress(BOOL fInProgress) = 0;
- virtual CLREventStatic * GetWaitForGCEvent() = 0;
-
- virtual void SetFinalizationRun (Object* obj) = 0;
- virtual Object* GetNextFinalizable() = 0;
- virtual size_t GetNumberOfFinalizable() = 0;
-
- virtual void SetFinalizeQueueForShutdown(BOOL fHasLock) = 0;
- virtual BOOL FinalizeAppDomain(AppDomain *pDomain, BOOL fRunFinalizers) = 0;
- virtual BOOL ShouldRestartFinalizerWatchDog() = 0;
-
- //wait for concurrent GC to finish
- virtual void WaitUntilConcurrentGCComplete () = 0; // Use in managed threads
-#ifndef DACCESS_COMPILE
- virtual HRESULT WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout) = 0; // Use in native threads. TRUE if succeed. FALSE if failed or timeout
-#endif
- virtual BOOL IsConcurrentGCInProgress() = 0;
-
- // Enable/disable concurrent GC
- virtual void TemporaryEnableConcurrentGC() = 0;
- virtual void TemporaryDisableConcurrentGC() = 0;
- virtual BOOL IsConcurrentGCEnabled() = 0;
-
- virtual void FixAllocContext (alloc_context* acontext, BOOL lockp, void* arg, void *heap) = 0;
- virtual Object* Alloc (alloc_context* acontext, size_t size, uint32_t flags) = 0;
-
- // This is safe to call only when EE is suspended.
- virtual Object* GetContainingObject(void *pInteriorPtr) = 0;
-
- // TODO Should be folded into constructor
- virtual HRESULT Initialize () = 0;
-
- virtual HRESULT GarbageCollect (int generation = -1, BOOL low_memory_p=FALSE, int mode = collection_blocking) = 0;
- virtual Object* Alloc (size_t size, uint32_t flags) = 0;
-#ifdef FEATURE_64BIT_ALIGNMENT
- virtual Object* AllocAlign8 (size_t size, uint32_t flags) = 0;
- virtual Object* AllocAlign8 (alloc_context* acontext, size_t size, uint32_t flags) = 0;
private:
- virtual Object* AllocAlign8Common (void* hp, alloc_context* acontext, size_t size, uint32_t flags) = 0;
+ virtual Object* AllocAlign8Common (void* hp, alloc_context* acontext, size_t size, uint32_t flags) = 0;
public:
-#endif // FEATURE_64BIT_ALIGNMENT
- virtual Object* AllocLHeap (size_t size, uint32_t flags) = 0;
- virtual void SetReservedVMLimit (size_t vmlimit) = 0;
- virtual void SetCardsAfterBulkCopy( Object**, size_t ) = 0;
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- virtual void WalkObject (Object* obj, walk_fn fn, void* context) = 0;
-#endif //defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-
- virtual bool IsThreadUsingAllocationContextHeap(alloc_context* acontext, int thread_number) = 0;
virtual int GetNumberOfHeaps () = 0;
virtual int GetHomeHeapNumber () = 0;
-
- virtual int CollectionCount (int generation, int get_bgc_fgc_count = 0) = 0;
-
- // Finalizer queue stuff (should stay)
- virtual bool RegisterForFinalization (int gen, Object* obj) = 0;
-
- // General queries to the GC
- virtual BOOL IsPromoted (Object *object) = 0;
- virtual unsigned WhichGeneration (Object* object) = 0;
- virtual BOOL IsEphemeral (Object* object) = 0;
- virtual BOOL IsHeapPointer (void* object, BOOL small_heap_only = FALSE) = 0;
-
- virtual unsigned GetCondemnedGeneration() = 0;
- virtual int GetGcLatencyMode() = 0;
- virtual int SetGcLatencyMode(int newLatencyMode) = 0;
-
- virtual int GetLOHCompactionMode() = 0;
- virtual void SetLOHCompactionMode(int newLOHCompactionyMode) = 0;
-
- virtual BOOL RegisterForFullGCNotification(uint32_t gen2Percentage,
- uint32_t lohPercentage) = 0;
- virtual BOOL CancelFullGCNotification() = 0;
- virtual int WaitForFullGCApproach(int millisecondsTimeout) = 0;
- virtual int WaitForFullGCComplete(int millisecondsTimeout) = 0;
-
- virtual int StartNoGCRegion(uint64_t totalSize, BOOL lohSizeKnown, uint64_t lohSize, BOOL disallowFullBlockingGC) = 0;
- virtual int EndNoGCRegion() = 0;
+ virtual size_t GetPromotedBytes(int heap_index) = 0;
- virtual BOOL IsObjectInFixedHeap(Object *pObj) = 0;
- virtual size_t GetTotalBytesInUse () = 0;
- virtual size_t GetCurrentObjSize() = 0;
- virtual size_t GetLastGCStartTime(int generation) = 0;
- virtual size_t GetLastGCDuration(int generation) = 0;
- virtual size_t GetNow() = 0;
- virtual unsigned GetGcCount() = 0;
- virtual void TraceGCSegments() = 0;
+ unsigned GetMaxGeneration()
+ {
+ return IGCHeap::maxGeneration;
+ }
- virtual void PublishObject(uint8_t* obj) = 0;
+ BOOL IsValidSegmentSize(size_t cbSize)
+ {
+ //Must be aligned on a Mb and greater than 4Mb
+ return (((cbSize & (1024*1024-1)) ==0) && (cbSize >> 22));
+ }
- // static if since restricting for all heaps is fine
- virtual size_t GetValidSegmentSize(BOOL large_seg = FALSE) = 0;
+ BOOL IsValidGen0MaxSize(size_t cbSize)
+ {
+ return (cbSize >= 64*1024);
+ }
- static BOOL IsLargeObject(MethodTable *mt) {
+ BOOL IsLargeObject(MethodTable *mt)
+ {
WRAPPER_NO_CONTRACT;
return mt->GetBaseSize() >= LARGE_OBJECT_SIZE;
}
- static unsigned GetMaxGeneration() {
- LIMITED_METHOD_DAC_CONTRACT;
- return max_generation;
- }
-
- virtual size_t GetPromotedBytes(int heap_index) = 0;
-
-private:
- enum {
- max_generation = 2,
- };
-
-public:
-
-#ifdef FEATURE_BASICFREEZE
- // frozen segment management functions
- virtual segment_handle RegisterFrozenSegment(segment_info *pseginfo) = 0;
- virtual void UnregisterFrozenSegment(segment_handle seg) = 0;
-#endif //FEATURE_BASICFREEZE
-
- // debug support
-#ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
-#ifdef STRESS_HEAP
- //return TRUE if GC actually happens, otherwise FALSE
- virtual BOOL StressHeap(alloc_context * acontext = 0) = 0;
-#endif
-#endif // FEATURE_REDHAWK
-#ifdef VERIFY_HEAP
- virtual void ValidateObjectMember (Object *obj) = 0;
-#endif
-
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- virtual void DescrGenerationsToProfiler (gen_walk_fn fn, void *context) = 0;
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-
protected:
-#ifdef VERIFY_HEAP
public:
- // Return NULL if can't find next object. When EE is not suspended,
- // the result is not accurate: if the input arg is in gen0, the function could
- // return zeroed out memory as next object
- virtual Object * NextObj (Object * object) = 0;
-#ifdef FEATURE_BASICFREEZE
+#if defined(FEATURE_BASICFREEZE) && defined(VERIFY_HEAP)
// Return TRUE if object lives in frozen segment
virtual BOOL IsInFrozenSegment (Object * object) = 0;
-#endif //FEATURE_BASICFREEZE
-#endif //VERIFY_HEAP
+#endif // defined(FEATURE_BASICFREEZE) && defined(VERIFY_HEAP)
};
-extern VOLATILE(int32_t) m_GCLock;
-
// Go through and touch (read) each page straddled by a memory block.
void TouchPages(void * pStart, size_t cb);
-// For low memory notification from host
-extern int32_t g_bLowMemoryFromHost;
-
#ifdef WRITE_BARRIER_CHECK
void updateGCShadow(Object** ptr, Object* val);
#endif
@@ -677,4 +291,27 @@ extern MethodTable *pWeakReferenceMT;
extern MethodTable *pWeakReferenceOfTCanonMT;
extern void FinalizeWeakReference(Object * obj);
+// The single GC heap instance, shared with the VM.
+extern IGCHeapInternal* g_theGCHeap;
+
+#ifndef DACCESS_COMPILE
+inline BOOL IsGCInProgress(bool bConsiderGCStart = FALSE)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return g_theGCHeap != nullptr ? g_theGCHeap->IsGCInProgressHelper(bConsiderGCStart) : false;
+}
+#endif // DACCESS_COMPILE
+
+inline BOOL IsServerHeap()
+{
+ LIMITED_METHOD_CONTRACT;
+#ifdef FEATURE_SVR_GC
+ _ASSERTE(IGCHeap::gcHeapType != IGCHeap::GC_HEAP_INVALID);
+ return (IGCHeap::gcHeapType == IGCHeap::GC_HEAP_SVR);
+#else // FEATURE_SVR_GC
+ return false;
+#endif // FEATURE_SVR_GC
+}
+
#endif // __GC_H
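
The gc.h rewrite above swaps the old monolithic GCHeap class for IGCHeapInternal (layered on the IGCHeap interface from gcinterface.h) and redefines alloc_context as a thin GC-internal wrapper over gc_alloc_context, parking the per-thread alloc/home heaps in the reserved slots. A minimal sketch of how GC code now reaches those heaps follows; the helper is illustrative only and assumes the headers included at the top of gc.h.

    // Sketch only (hypothetical helper): reading the per-thread heaps through the
    // new accessors instead of the old alloc_heap/home_heap fields.
    bool ThreadHasHomeHeap(Thread* pThread)
    {
        gc_alloc_context* ctx = GCToEEInterface::GetAllocContext(pThread); // opaque EE-side context
        alloc_context* acontext = static_cast<alloc_context*>(ctx);        // GC-internal view adds accessors
    #ifdef FEATURE_SVR_GC
        return acontext->get_home_heap() != nullptr;   // reads gc_reserved_2
    #else
        UNREFERENCED_PARAMETER(acontext);
        return true;                                    // workstation GC: single heap, nothing to assign
    #endif // FEATURE_SVR_GC
    }
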
diff --git a/src/gc/gccommon.cpp b/src/gc/gccommon.cpp
index 779aac7296..d1ccddd205 100644
--- a/src/gc/gccommon.cpp
+++ b/src/gc/gccommon.cpp
@@ -15,17 +15,16 @@
#include "gc.h"
#ifdef FEATURE_SVR_GC
-SVAL_IMPL_INIT(uint32_t,GCHeap,gcHeapType,GCHeap::GC_HEAP_INVALID);
+SVAL_IMPL_INIT(uint32_t,IGCHeap,gcHeapType,IGCHeap::GC_HEAP_INVALID);
#endif // FEATURE_SVR_GC
-GPTR_IMPL(GCHeap,g_pGCHeap);
+SVAL_IMPL_INIT(uint32_t,IGCHeap,maxGeneration,2);
-/* global versions of the card table and brick table */
-GPTR_IMPL(uint32_t,g_card_table);
+IGCHeapInternal* g_theGCHeap;
-/* absolute bounds of the GC memory */
-GPTR_IMPL_INIT(uint8_t,g_lowest_address,0);
-GPTR_IMPL_INIT(uint8_t,g_highest_address,0);
+#ifdef FEATURE_STANDALONE_GC
+IGCToCLR* g_theGCToCLR;
+#endif // FEATURE_STANDALONE_GC
#ifdef GC_CONFIG_DRIVEN
GARY_IMPL(size_t, gc_global_mechanisms, MAX_GLOBAL_GC_MECHANISMS_COUNT);
@@ -33,15 +32,18 @@ GARY_IMPL(size_t, gc_global_mechanisms, MAX_GLOBAL_GC_MECHANISMS_COUNT);
#ifndef DACCESS_COMPILE
-uint8_t* g_ephemeral_low = (uint8_t*)1;
-uint8_t* g_ephemeral_high = (uint8_t*)~0;
-
#ifdef WRITE_BARRIER_CHECK
uint8_t* g_GCShadow;
uint8_t* g_GCShadowEnd;
uint8_t* g_shadow_lowest_address = NULL;
#endif
+uint32_t* g_gc_card_table;
+uint8_t* g_gc_lowest_address = 0;
+uint8_t* g_gc_highest_address = 0;
+uint8_t* g_gc_ephemeral_low = (uint8_t*)1;
+uint8_t* g_gc_ephemeral_high = (uint8_t*)~0;
+
VOLATILE(int32_t) m_GCLock = -1;
#ifdef GC_CONFIG_DRIVEN
@@ -112,4 +114,49 @@ void record_changed_seg (uint8_t* start, uint8_t* end,
}
}
+// The runtime needs to know whether we're using workstation or server GC
+// long before the GCHeap is created.
+void InitializeHeapType(bool bServerHeap)
+{
+ LIMITED_METHOD_CONTRACT;
+#ifdef FEATURE_SVR_GC
+ IGCHeap::gcHeapType = bServerHeap ? IGCHeap::GC_HEAP_SVR : IGCHeap::GC_HEAP_WKS;
+#ifdef WRITE_BARRIER_CHECK
+ if (IGCHeap::gcHeapType == IGCHeap::GC_HEAP_SVR)
+ {
+ g_GCShadow = 0;
+ g_GCShadowEnd = 0;
+ }
+#endif // WRITE_BARRIER_CHECK
+#else // FEATURE_SVR_GC
+ UNREFERENCED_PARAMETER(bServerHeap);
+ CONSISTENCY_CHECK(bServerHeap == false);
+#endif // FEATURE_SVR_GC
+}
+
+IGCHeap* InitializeGarbageCollector(IGCToCLR* clrToGC)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ IGCHeapInternal* heap;
+#ifdef FEATURE_SVR_GC
+ assert(IGCHeap::gcHeapType != IGCHeap::GC_HEAP_INVALID);
+ heap = IGCHeap::gcHeapType == IGCHeap::GC_HEAP_SVR ? SVR::CreateGCHeap() : WKS::CreateGCHeap();
+#else
+ heap = WKS::CreateGCHeap();
+#endif
+
+ g_theGCHeap = heap;
+
+#ifdef FEATURE_STANDALONE_GC
+ assert(clrToGC != nullptr);
+ g_theGCToCLR = clrToGC;
+#else
+ UNREFERENCED_PARAMETER(clrToGC);
+ assert(clrToGC == nullptr);
+#endif
+
+ return heap;
+}
+
#endif // !DACCESS_COMPILE
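
gccommon.cpp now hosts heap-type selection and the single construction entry point: InitializeHeapType records workstation vs. server before any heap exists, and InitializeGarbageCollector builds the heap, publishes g_theGCHeap, and (under FEATURE_STANDALONE_GC) captures the EE's IGCToCLR. A minimal startup sketch as a hypothetical caller might use it; calling Initialize() on the returned heap is an assumption based on GCHeap::Initialize in the gc.cpp diff, not something this file shows.

    // Sketch only: hypothetical EE-side startup built from the entry points above.
    void StartGC(bool useServerGC, IGCToCLR* clrToGC /* non-null only for standalone GC builds */)
    {
        InitializeHeapType(useServerGC);                   // must run before the heap is created
        IGCHeap* heap = InitializeGarbageCollector(clrToGC);
        HRESULT hr = heap->Initialize();                   // assumed to remain part of the heap interface
        assert(SUCCEEDED(hr));
    }
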
diff --git a/src/gc/gcee.cpp b/src/gc/gcee.cpp
index d37eaf4de9..c93cc91b57 100644
--- a/src/gc/gcee.cpp
+++ b/src/gc/gcee.cpp
@@ -148,7 +148,7 @@ void GCHeap::UpdatePostGCCounters()
// if a max gen garbage collection was performed, resync the GC Handle counter;
// if threads are currently suspended, we do not need to obtain a lock on each handle table
if (condemned_gen == max_generation)
- total_num_gc_handles = HndCountAllHandles(!GCHeap::IsGCInProgress());
+ total_num_gc_handles = HndCountAllHandles(!IsGCInProgress());
#endif //FEATURE_REDHAWK
// per generation calculation.
@@ -381,209 +381,6 @@ size_t GCHeap::GetNow()
return GetHighPrecisionTimeStamp();
}
-void ProfScanRootsHelper(Object** ppObject, ScanContext *pSC, uint32_t dwFlags)
-{
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- Object *pObj = *ppObject;
-#ifdef INTERIOR_POINTERS
- if (dwFlags & GC_CALL_INTERIOR)
- {
- uint8_t *o = (uint8_t*)pObj;
- gc_heap* hp = gc_heap::heap_of (o);
-
- if ((o < hp->gc_low) || (o >= hp->gc_high))
- {
- return;
- }
- pObj = (Object*) hp->find_object(o, hp->gc_low);
- }
-#endif //INTERIOR_POINTERS
- ScanRootsHelper(pObj, ppObject, pSC, dwFlags);
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-}
-
-// TODO - at some point we would like to completely decouple profiling
-// from ETW tracing using a pattern similar to this, where the
-// ProfilingScanContext has flags about whether or not certain things
-// should be tracked, and each one of these ProfilerShouldXYZ functions
-// will check these flags and determine what to do based upon that.
-// GCProfileWalkHeapWorker can, in turn, call those methods without fear
-// of things being ifdef'd out.
-
-// Returns TRUE if GC profiling is enabled and the profiler
-// should scan dependent handles, FALSE otherwise.
-BOOL ProfilerShouldTrackConditionalWeakTableElements()
-{
-#if defined(GC_PROFILING)
- return CORProfilerTrackConditionalWeakTableElements();
-#else
- return FALSE;
-#endif // defined (GC_PROFILING)
-}
-
-// If GC profiling is enabled, informs the profiler that we are done
-// tracing dependent handles.
-void ProfilerEndConditionalWeakTableElementReferences(void* heapId)
-{
-#if defined (GC_PROFILING)
- g_profControlBlock.pProfInterface->EndConditionalWeakTableElementReferences(heapId);
-#else
- UNREFERENCED_PARAMETER(heapId);
-#endif // defined (GC_PROFILING)
-}
-
-// If GC profiling is enabled, informs the profiler that we are done
-// tracing root references.
-void ProfilerEndRootReferences2(void* heapId)
-{
-#if defined (GC_PROFILING)
- g_profControlBlock.pProfInterface->EndRootReferences2(heapId);
-#else
- UNREFERENCED_PARAMETER(heapId);
-#endif // defined (GC_PROFILING)
-}
-
-// This is called only if we've determined that either:
-// a) The Profiling API wants to do a walk of the heap, and it has pinned the
-// profiler in place (so it cannot be detached), and it's thus safe to call into the
-// profiler, OR
-// b) ETW infrastructure wants to do a walk of the heap either to log roots,
-// objects, or both.
-// This can also be called to do a single walk for BOTH a) and b) simultaneously. Since
-// ETW can ask for roots, but not objects
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-
-void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw)
-{
- {
- ProfilingScanContext SC(fProfilerPinned);
-
- // **** Scan roots: Only scan roots if profiling API wants them or ETW wants them.
- if (fProfilerPinned || fShouldWalkHeapRootsForEtw)
- {
-#ifdef MULTIPLE_HEAPS
- int hn;
-
- // Must emulate each GC thread number so we can hit each
- // heap for enumerating the roots.
- for (hn = 0; hn < gc_heap::n_heaps; hn++)
- {
- // Ask the vm to go over all of the roots for this specific
- // heap.
- gc_heap* hp = gc_heap::g_heaps [hn];
- SC.thread_number = hn;
- GCScan::GcScanRoots(&ProfScanRootsHelper, max_generation, max_generation, &SC);
-
- // The finalizer queue is also a source of roots
- SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
- hp->finalize_queue->GcScanRoots(&ProfScanRootsHelper, hn, &SC);
- }
-#else
- // Ask the vm to go over all of the roots
- GCScan::GcScanRoots(&ProfScanRootsHelper, max_generation, max_generation, &SC);
-
- // The finalizer queue is also a source of roots
- SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
- pGenGCHeap->finalize_queue->GcScanRoots(&ProfScanRootsHelper, 0, &SC);
-
-#endif // MULTIPLE_HEAPS
- // Handles are kept independent of wks/svr/concurrent builds
- SC.dwEtwRootKind = kEtwGCRootKindHandle;
- GCScan::GcScanHandlesForProfilerAndETW(max_generation, &SC);
-
- // indicate that regular handle scanning is over, so we can flush the buffered roots
- // to the profiler. (This is for profapi only. ETW will flush after the
- // entire heap was is complete, via ETW::GCLog::EndHeapDump.)
- if (fProfilerPinned)
- {
- ProfilerEndRootReferences2(&SC.pHeapId);
- }
- }
-
- // **** Scan dependent handles: only if the profiler supports it or ETW wants roots
- if ((fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements()) ||
- fShouldWalkHeapRootsForEtw)
- {
- // GcScanDependentHandlesForProfiler double-checks
- // CORProfilerTrackConditionalWeakTableElements() before calling into the profiler
-
- GCScan::GcScanDependentHandlesForProfilerAndETW(max_generation, &SC);
-
- // indicate that dependent handle scanning is over, so we can flush the buffered roots
- // to the profiler. (This is for profapi only. ETW will flush after the
- // entire heap was is complete, via ETW::GCLog::EndHeapDump.)
- if (fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements())
- {
- ProfilerEndConditionalWeakTableElementReferences(&SC.pHeapId);
- }
- }
-
- ProfilerWalkHeapContext profilerWalkHeapContext(fProfilerPinned, SC.pvEtwContext);
-
- // **** Walk objects on heap: only if profiling API wants them or ETW wants them.
- if (fProfilerPinned || fShouldWalkHeapObjectsForEtw)
- {
-#ifdef MULTIPLE_HEAPS
- int hn;
-
- // Walk the heap and provide the objref to the profiler
- for (hn = 0; hn < gc_heap::n_heaps; hn++)
- {
- gc_heap* hp = gc_heap::g_heaps [hn];
- hp->walk_heap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, TRUE /* walk the large object heap */);
- }
-#else
- gc_heap::walk_heap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, TRUE);
-#endif //MULTIPLE_HEAPS
- }
-
-#ifdef FEATURE_EVENT_TRACE
- // **** Done! Indicate to ETW helpers that the heap walk is done, so any buffers
- // should be flushed into the ETW stream
- if (fShouldWalkHeapObjectsForEtw || fShouldWalkHeapRootsForEtw)
- {
- ETW::GCLog::EndHeapDump(&profilerWalkHeapContext);
- }
-#endif // FEATURE_EVENT_TRACE
- }
-}
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-
-void GCProfileWalkHeap()
-{
- BOOL fWalkedHeapForProfiler = FALSE;
-
-#ifdef FEATURE_EVENT_TRACE
- if (ETW::GCLog::ShouldWalkStaticsAndCOMForEtw())
- ETW::GCLog::WalkStaticsAndCOMForETW();
-
- BOOL fShouldWalkHeapRootsForEtw = ETW::GCLog::ShouldWalkHeapRootsForEtw();
- BOOL fShouldWalkHeapObjectsForEtw = ETW::GCLog::ShouldWalkHeapObjectsForEtw();
-#else // !FEATURE_EVENT_TRACE
- BOOL fShouldWalkHeapRootsForEtw = FALSE;
- BOOL fShouldWalkHeapObjectsForEtw = FALSE;
-#endif // FEATURE_EVENT_TRACE
-
-#if defined (GC_PROFILING)
- {
- BEGIN_PIN_PROFILER(CORProfilerTrackGC());
- GCProfileWalkHeapWorker(TRUE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
- fWalkedHeapForProfiler = TRUE;
- END_PIN_PROFILER();
- }
-#endif // defined (GC_PROFILING)
-
-#if defined (GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- // we need to walk the heap if one of GC_PROFILING or FEATURE_EVENT_TRACE
- // is defined, since both of them make use of the walk heap worker.
- if (!fWalkedHeapForProfiler &&
- (fShouldWalkHeapRootsForEtw || fShouldWalkHeapObjectsForEtw))
- {
- GCProfileWalkHeapWorker(FALSE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
- }
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-}
-
BOOL GCHeap::IsGCInProgressHelper (BOOL bConsiderGCStart)
{
return GcInProgress || (bConsiderGCStart? VolatileLoad(&gc_heap::gc_started) : FALSE);
@@ -782,11 +579,11 @@ void gc_heap::background_gc_wait_lh (alloc_wait_reason awr)
/******************************************************************************/
-::GCHeap* CreateGCHeap() {
+IGCHeapInternal* CreateGCHeap() {
return new(nothrow) GCHeap(); // we return wks or svr
}
-void GCHeap::TraceGCSegments()
+void GCHeap::DiagTraceGCSegments()
{
#ifdef FEATURE_EVENT_TRACE
heap_segment* seg = 0;
@@ -823,16 +620,16 @@ void GCHeap::TraceGCSegments()
#endif // FEATURE_EVENT_TRACE
}
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-void GCHeap::DescrGenerationsToProfiler (gen_walk_fn fn, void *context)
+void GCHeap::DiagDescrGenerations (gen_walk_fn fn, void *context)
{
+#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
pGenGCHeap->descr_generations_to_profiler(fn, context);
-}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+}
-#ifdef FEATURE_BASICFREEZE
segment_handle GCHeap::RegisterFrozenSegment(segment_info *pseginfo)
{
+#ifdef FEATURE_BASICFREEZE
heap_segment * seg = new (nothrow) heap_segment;
if (!seg)
{
@@ -863,10 +660,15 @@ segment_handle GCHeap::RegisterFrozenSegment(segment_info *pseginfo)
}
return reinterpret_cast< segment_handle >(seg);
+#else
+ assert(!"Should not call GCHeap::RegisterFrozenSegment without FEATURE_BASICFREEZE defined!");
+ return NULL;
+#endif // FEATURE_BASICFREEZE
}
void GCHeap::UnregisterFrozenSegment(segment_handle seg)
{
+#ifdef FEATURE_BASICFREEZE
#if defined (MULTIPLE_HEAPS) && !defined (ISOLATED_HEAPS)
gc_heap* heap = gc_heap::g_heaps[0];
#else
@@ -874,8 +676,10 @@ void GCHeap::UnregisterFrozenSegment(segment_handle seg)
#endif //MULTIPLE_HEAPS && !ISOLATED_HEAPS
heap->remove_ro_segment(reinterpret_cast<heap_segment*>(seg));
-}
+#else
+ assert(!"Should not call GCHeap::UnregisterFrozenSegment without FEATURE_BASICFREEZE defined!");
#endif // FEATURE_BASICFREEZE
+}
#endif // !DACCESS_COMPILE
diff --git a/src/gc/gcenv.ee.standalone.inl b/src/gc/gcenv.ee.standalone.inl
new file mode 100644
index 0000000000..3b64586d70
--- /dev/null
+++ b/src/gc/gcenv.ee.standalone.inl
@@ -0,0 +1,176 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#ifndef __GCTOENV_EE_STANDALONE_INL__
+#define __GCTOENV_EE_STANDALONE_INL__
+
+#include "env/gcenv.ee.h"
+
+// The singular interface instance. All calls in GCToEEInterface
+// will be forwarded to this interface instance.
+extern IGCToCLR* g_theGCToCLR;
+
+// When we are building the GC in a standalone environment, we
+// will be dispatching virtually against g_theGCToCLR to call
+// into the EE. This class provides an identical API to the existing
+// GCToEEInterface, but only forwards the call onto the global
+// g_theGCToCLR instance.
+inline void GCToEEInterface::SuspendEE(SUSPEND_REASON reason)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->SuspendEE(reason);
+}
+
+inline void GCToEEInterface::RestartEE(bool bFinishedGC)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->RestartEE(bFinishedGC);
+}
+
+inline void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->GcScanRoots(fn, condemned, max_gen, sc);
+}
+
+inline void GCToEEInterface::GcStartWork(int condemned, int max_gen)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->GcStartWork(condemned, max_gen);
+}
+
+inline void GCToEEInterface::AfterGcScanRoots(int condemned, int max_gen, ScanContext* sc)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->AfterGcScanRoots(condemned, max_gen, sc);
+}
+
+inline void GCToEEInterface::GcBeforeBGCSweepWork()
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->GcBeforeBGCSweepWork();
+}
+
+inline void GCToEEInterface::GcDone(int condemned)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->GcDone(condemned);
+}
+
+inline bool GCToEEInterface::RefCountedHandleCallbacks(Object * pObject)
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->RefCountedHandleCallbacks(pObject);
+}
+
+inline void GCToEEInterface::SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->SyncBlockCacheWeakPtrScan(scanProc, lp1, lp2);
+}
+
+inline void GCToEEInterface::SyncBlockCacheDemote(int max_gen)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->SyncBlockCacheDemote(max_gen);
+}
+
+inline void GCToEEInterface::SyncBlockCachePromotionsGranted(int max_gen)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->SyncBlockCachePromotionsGranted(max_gen);
+}
+
+inline bool GCToEEInterface::IsPreemptiveGCDisabled(Thread * pThread)
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->IsPreemptiveGCDisabled(pThread);
+}
+
+
+inline void GCToEEInterface::EnablePreemptiveGC(Thread * pThread)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->EnablePreemptiveGC(pThread);
+}
+
+inline void GCToEEInterface::DisablePreemptiveGC(Thread * pThread)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->DisablePreemptiveGC(pThread);
+}
+
+inline gc_alloc_context * GCToEEInterface::GetAllocContext(Thread * pThread)
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->GetAllocContext(pThread);
+}
+
+inline bool GCToEEInterface::CatchAtSafePoint(Thread * pThread)
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->CatchAtSafePoint(pThread);
+}
+
+inline void GCToEEInterface::GcEnumAllocContexts(enum_alloc_context_func* fn, void* param)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->GcEnumAllocContexts(fn, param);
+}
+
+inline Thread* GCToEEInterface::CreateBackgroundThread(GCBackgroundThreadFunction threadStart, void* arg)
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->CreateBackgroundThread(threadStart, arg);
+}
+
+inline void GCToEEInterface::DiagGCStart(int gen, bool isInduced)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->DiagGCStart(gen, isInduced);
+}
+
+inline void GCToEEInterface::DiagUpdateGenerationBounds()
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->DiagUpdateGenerationBounds();
+}
+
+inline void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->DiagGCEnd(index, gen, reason, fConcurrent);
+}
+
+inline void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->DiagWalkFReachableObjects(gcContext);
+}
+
+inline void GCToEEInterface::DiagWalkSurvivors(void* gcContext)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->DiagWalkSurvivors(gcContext);
+}
+
+inline void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->DiagWalkLOHSurvivors(gcContext);
+}
+
+inline void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext)
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->DiagWalkBGCSurvivors(gcContext);
+}
+
+inline void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->StompWriteBarrier(args);
+}
+
+#endif // __GCTOENV_EE_STANDALONE_INL__
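The forwarders above have no effect until something publishes an implementation into g_theGCToCLR. A minimal sketch of the wiring a standalone host might perform is shown below; the routine name is hypothetical and the definition of g_theGCToCLR is written out here only to keep the example self-contained (the patch itself only declares it extern).

    // Illustration only: a standalone host supplies an object implementing
    // IGCToCLR and publishes it before the first collection can run.
    #include "gcenv.ee.standalone.inl"

    IGCToCLR* g_theGCToCLR = nullptr;            // defined here only for this sketch

    void WireUpStandaloneGC(IGCToCLR* clrToGC)   // hypothetical host-side hook
    {
        assert(clrToGC != nullptr);
        g_theGCToCLR = clrToGC;
    }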
diff --git a/src/gc/sample/gcenv.windows.cpp b/src/gc/gcenv.unix.cpp
index 76187f2185..0235952e28 100644
--- a/src/gc/sample/gcenv.windows.cpp
+++ b/src/gc/gcenv.unix.cpp
@@ -2,67 +2,37 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-//
-// Implementation of the GC environment
-//
-
-#include "common.h"
-
-#include "windows.h"
-
-#include "gcenv.h"
-#include "gc.h"
-
-MethodTable * g_pFreeObjectMethodTable;
-
-int32_t g_TrapReturningThreads;
-
-bool g_fFinalizerRunOnShutDown;
-
-GCSystemInfo g_SystemInfo;
-
-static LARGE_INTEGER g_performanceFrequency;
+#include "env/gcenv.structs.h"
+#include "env/gcenv.base.h"
+#include "env/gcenv.os.h"
// Initialize the interface implementation
// Return:
// true if it has succeeded, false if it has failed
bool GCToOSInterface::Initialize()
{
- if (!::QueryPerformanceFrequency(&g_performanceFrequency))
- {
- return false;
- }
-
- SYSTEM_INFO systemInfo;
- GetSystemInfo(&systemInfo);
-
- g_SystemInfo.dwNumberOfProcessors = systemInfo.dwNumberOfProcessors;
- g_SystemInfo.dwPageSize = systemInfo.dwPageSize;
- g_SystemInfo.dwAllocationGranularity = systemInfo.dwAllocationGranularity;
-
- return true;
+ throw nullptr;
}
// Shutdown the interface implementation
void GCToOSInterface::Shutdown()
{
+ throw nullptr;
}
-// Get numeric id of the current thread if possible on the
+// Get numeric id of the current thread if possible on the
// current platform. It is indended for logging purposes only.
// Return:
// Numeric id of the current thread or 0 if the
uint64_t GCToOSInterface::GetCurrentThreadIdForLogging()
{
- return ::GetCurrentThreadId();
+ throw nullptr;
}
// Get id of the process
-// Return:
-// Id of the current process
uint32_t GCToOSInterface::GetCurrentProcessId()
{
- return ::GetCurrentProcessId();
+ throw nullptr;
}
// Set ideal affinity for the current thread
@@ -72,63 +42,37 @@ uint32_t GCToOSInterface::GetCurrentProcessId()
// true if it has succeeded, false if it has failed
bool GCToOSInterface::SetCurrentThreadIdealAffinity(GCThreadAffinity* affinity)
{
- bool success = true;
-
-#if !defined(FEATURE_CORESYSTEM)
- SetThreadIdealProcessor(GetCurrentThread(), (DWORD)affinity->Processor);
-#else
- PROCESSOR_NUMBER proc;
-
- if (affinity->Group != -1)
- {
- proc.Group = (WORD)affinity->Group;
- proc.Number = (BYTE)affinity->Processor;
- proc.Reserved = 0;
-
- success = !!SetThreadIdealProcessorEx(GetCurrentThread(), &proc, NULL);
- }
- else
- {
- if (GetThreadIdealProcessorEx(GetCurrentThread(), &proc))
- {
- proc.Number = affinity->Processor;
- success = !!SetThreadIdealProcessorEx(GetCurrentThread(), &proc, NULL);
- }
- }
-#endif
-
- return success;
+ throw nullptr;
}
// Get the number of the current processor
uint32_t GCToOSInterface::GetCurrentProcessorNumber()
{
- _ASSERTE(GCToOSInterface::CanGetCurrentProcessorNumber());
- return ::GetCurrentProcessorNumber();
+ throw nullptr;
}
// Check if the OS supports getting current processor number
bool GCToOSInterface::CanGetCurrentProcessorNumber()
{
- return true;
+ throw nullptr;
}
// Flush write buffers of processors that are executing threads of the current process
void GCToOSInterface::FlushProcessWriteBuffers()
{
- ::FlushProcessWriteBuffers();
+ throw nullptr;
}
// Break into a debugger
void GCToOSInterface::DebugBreak()
{
- ::DebugBreak();
+ throw nullptr;
}
// Get number of logical processors
uint32_t GCToOSInterface::GetLogicalCpuCount()
{
- return g_SystemInfo.dwNumberOfProcessors;
+ throw nullptr;
}
// Causes the calling thread to sleep for the specified number of milliseconds
@@ -136,7 +80,7 @@ uint32_t GCToOSInterface::GetLogicalCpuCount()
// sleepMSec - time to sleep before switching to another thread
void GCToOSInterface::Sleep(uint32_t sleepMSec)
{
- ::Sleep(sleepMSec);
+ throw nullptr;
}
// Causes the calling thread to yield execution to another thread that is ready to run on the current processor.
@@ -144,21 +88,19 @@ void GCToOSInterface::Sleep(uint32_t sleepMSec)
// switchCount - number of times the YieldThread was called in a loop
void GCToOSInterface::YieldThread(uint32_t switchCount)
{
- SwitchToThread();
+ throw nullptr;
}
// Reserve virtual memory range.
// Parameters:
-// address - starting virtual address, it can be NULL to let the function choose the starting address
// size - size of the virtual memory range
-// alignment - requested memory alignment
+// alignment - requested memory alignment, 0 means no specific alignment requested
// flags - flags to control special settings like write watching
// Return:
// Starting virtual address of the reserved range
-void* GCToOSInterface::VirtualReserve(void* address, size_t size, size_t alignment, uint32_t flags)
+void* GCToOSInterface::VirtualReserve(size_t size, size_t alignment, uint32_t flags)
{
- DWORD memFlags = (flags & VirtualReserveFlags::WriteWatch) ? (MEM_RESERVE | MEM_WRITE_WATCH) : MEM_RESERVE;
- return ::VirtualAlloc(0, size, memFlags, PAGE_READWRITE);
+ throw nullptr;
}
// Release virtual memory range previously reserved using VirtualReserve
@@ -169,8 +111,7 @@ void* GCToOSInterface::VirtualReserve(void* address, size_t size, size_t alignme
// true if it has succeeded, false if it has failed
bool GCToOSInterface::VirtualRelease(void* address, size_t size)
{
- UNREFERENCED_PARAMETER(size);
- return !!::VirtualFree(address, 0, MEM_RELEASE);
+ throw nullptr;
}
// Commit virtual memory range. It must be part of a range reserved using VirtualReserve.
@@ -181,7 +122,7 @@ bool GCToOSInterface::VirtualRelease(void* address, size_t size)
// true if it has succeeded, false if it has failed
bool GCToOSInterface::VirtualCommit(void* address, size_t size)
{
- return ::VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE) != NULL;
+ throw nullptr;
}
// Decomit virtual memory range.
@@ -192,10 +133,10 @@ bool GCToOSInterface::VirtualCommit(void* address, size_t size)
// true if it has succeeded, false if it has failed
bool GCToOSInterface::VirtualDecommit(void* address, size_t size)
{
- return !!::VirtualFree(address, size, MEM_DECOMMIT);
+ throw nullptr;
}
-// Reset virtual memory range. Indicates that data in the memory range specified by address and size is no
+// Reset virtual memory range. Indicates that data in the memory range specified by address and size is no
// longer of interest, but it should not be decommitted.
// Parameters:
// address - starting virtual address
@@ -205,20 +146,13 @@ bool GCToOSInterface::VirtualDecommit(void* address, size_t size)
// true if it has succeeded, false if it has failed
bool GCToOSInterface::VirtualReset(void * address, size_t size, bool unlock)
{
- bool success = ::VirtualAlloc(address, size, MEM_RESET, PAGE_READWRITE) != NULL;
- if (success && unlock)
- {
- // Remove the page range from the working set
- ::VirtualUnlock(address, size);
- }
-
- return success;
+ throw nullptr;
}
// Check if the OS supports write watching
bool GCToOSInterface::SupportsWriteWatch()
{
- return false;
+ throw nullptr;
}
// Reset the write tracking state for the specified virtual memory range.
@@ -227,6 +161,7 @@ bool GCToOSInterface::SupportsWriteWatch()
// size - size of the virtual memory range
void GCToOSInterface::ResetWriteWatch(void* address, size_t size)
{
+ throw nullptr;
}
// Retrieve addresses of the pages that are written to in a region of virtual memory
@@ -241,7 +176,7 @@ void GCToOSInterface::ResetWriteWatch(void* address, size_t size)
// true if it has succeeded, false if it has failed
bool GCToOSInterface::GetWriteWatch(bool resetState, void* address, size_t size, void** pageAddresses, uintptr_t* pageAddressesCount)
{
- return false;
+ throw nullptr;
}
// Get size of the largest cache on the processor die
@@ -252,8 +187,7 @@ bool GCToOSInterface::GetWriteWatch(bool resetState, void* address, size_t size,
// Size of the cache
size_t GCToOSInterface::GetLargestOnDieCacheSize(bool trueSize)
{
- // TODO: implement
- return 0;
+ throw nullptr;
}
// Get affinity mask of the current process
@@ -271,7 +205,7 @@ size_t GCToOSInterface::GetLargestOnDieCacheSize(bool trueSize)
// specify a 1 bit for a processor when the system affinity mask specifies a 0 bit for that processor.
bool GCToOSInterface::GetCurrentProcessAffinityMask(uintptr_t* processMask, uintptr_t* systemMask)
{
- return false;
+ throw nullptr;
}
// Get number of processors assigned to the current process
@@ -279,7 +213,7 @@ bool GCToOSInterface::GetCurrentProcessAffinityMask(uintptr_t* processMask, uint
// The number of processors
uint32_t GCToOSInterface::GetCurrentProcessCpuCount()
{
- return g_SystemInfo.dwNumberOfProcessors;
+ throw nullptr;
}
// Return the size of the user-mode portion of the virtual address space of this process.
@@ -287,27 +221,18 @@ uint32_t GCToOSInterface::GetCurrentProcessCpuCount()
// non zero if it has succeeded, 0 if it has failed
size_t GCToOSInterface::GetVirtualMemoryLimit()
{
- MEMORYSTATUSEX memStatus;
-
- memStatus.dwLength = sizeof(MEMORYSTATUSEX);
- BOOL fRet = GlobalMemoryStatusEx(&memStatus);
- _ASSERTE(fRet);
-
- return (size_t)memStatus.ullTotalVirtual;
+ throw nullptr;
}
// Get the physical memory that this process can use.
// Return:
// non zero if it has succeeded, 0 if it has failed
+// Remarks:
+// If a process runs with a restricted memory limit, it returns the limit. If there's no limit
+// specified, it returns amount of actual physical memory.
uint64_t GCToOSInterface::GetPhysicalMemoryLimit()
{
- MEMORYSTATUSEX memStatus;
-
- memStatus.dwLength = sizeof(MEMORYSTATUSEX);
- BOOL fRet = GlobalMemoryStatusEx(&memStatus);
- _ASSERTE(fRet);
-
- return memStatus.ullTotalPhys;
+ throw nullptr;
}
// Get memory status
@@ -318,25 +243,7 @@ uint64_t GCToOSInterface::GetPhysicalMemoryLimit()
// available_page_file - The maximum amount of memory the current process can commit, in bytes.
void GCToOSInterface::GetMemoryStatus(uint32_t* memory_load, uint64_t* available_physical, uint64_t* available_page_file)
{
- MEMORYSTATUSEX memStatus;
-
- memStatus.dwLength = sizeof(MEMORYSTATUSEX);
- BOOL fRet = GlobalMemoryStatusEx(&memStatus);
- _ASSERTE (fRet);
-
- // If the machine has more RAM than virtual address limit, let us cap it.
- // The GC can never use more than virtual address limit.
- if (memStatus.ullAvailPhys > memStatus.ullTotalVirtual)
- {
- memStatus.ullAvailPhys = memStatus.ullAvailVirtual;
- }
-
- if (memory_load != NULL)
- *memory_load = memStatus.dwMemoryLoad;
- if (available_physical != NULL)
- *available_physical = memStatus.ullAvailPhys;
- if (available_page_file != NULL)
- *available_page_file = memStatus.ullAvailPageFile;
+ throw nullptr;
}
// Get a high precision performance counter
@@ -344,14 +251,7 @@ void GCToOSInterface::GetMemoryStatus(uint32_t* memory_load, uint64_t* available
// The counter value
int64_t GCToOSInterface::QueryPerformanceCounter()
{
- LARGE_INTEGER ts;
- if (!::QueryPerformanceCounter(&ts))
- {
- _ASSERTE(!"Fatal Error - cannot query performance counter.");
- abort();
- }
-
- return ts.QuadPart;
+ throw nullptr;
}
// Get a frequency of the high precision performance counter
@@ -359,7 +259,7 @@ int64_t GCToOSInterface::QueryPerformanceCounter()
// The counter frequency
int64_t GCToOSInterface::QueryPerformanceFrequency()
{
- return g_performanceFrequency.QuadPart;
+ throw nullptr;
}
// Get a time stamp with a low precision
@@ -367,31 +267,11 @@ int64_t GCToOSInterface::QueryPerformanceFrequency()
// Time stamp in milliseconds
uint32_t GCToOSInterface::GetLowPrecisionTimeStamp()
{
- return ::GetTickCount();
+ throw nullptr;
}
-// Parameters of the GC thread stub
-struct GCThreadStubParam
-{
- GCThreadFunction GCThreadFunction;
- void* GCThreadParam;
-};
-// GC thread stub to convert GC thread function to an OS specific thread function
-static DWORD __stdcall GCThreadStub(void* param)
-{
- GCThreadStubParam *stubParam = (GCThreadStubParam*)param;
- GCThreadFunction function = stubParam->GCThreadFunction;
- void* threadParam = stubParam->GCThreadParam;
-
- delete stubParam;
-
- function(threadParam);
-
- return 0;
-}
-
-// Create a new thread
+// Create a new thread for GC use
// Parameters:
// function - the function to be executed by the thread
// param - parameters of the thread
@@ -400,54 +280,29 @@ static DWORD __stdcall GCThreadStub(void* param)
// true if it has succeeded, false if it has failed
bool GCToOSInterface::CreateThread(GCThreadFunction function, void* param, GCThreadAffinity* affinity)
{
- DWORD thread_id;
-
- GCThreadStubParam* stubParam = new (nothrow) GCThreadStubParam();
- if (stubParam == NULL)
- {
- return false;
- }
-
- stubParam->GCThreadFunction = function;
- stubParam->GCThreadParam = param;
-
- HANDLE gc_thread = ::CreateThread(NULL, 0, GCThreadStub, stubParam, CREATE_SUSPENDED, &thread_id);
-
- if (!gc_thread)
- {
- delete stubParam;
- return false;
- }
-
- SetThreadPriority(gc_thread, /* THREAD_PRIORITY_ABOVE_NORMAL );*/ THREAD_PRIORITY_HIGHEST );
-
- ResumeThread(gc_thread);
-
- CloseHandle(gc_thread);
-
- return true;
+ throw nullptr;
}
// Initialize the critical section
void CLRCriticalSection::Initialize()
{
- ::InitializeCriticalSection(&m_cs);
+ throw nullptr;
}
// Destroy the critical section
void CLRCriticalSection::Destroy()
{
- ::DeleteCriticalSection(&m_cs);
+ throw nullptr;
}
// Enter the critical section. Blocks until the section can be entered.
void CLRCriticalSection::Enter()
{
- ::EnterCriticalSection(&m_cs);
+ throw nullptr;
}
// Leave the critical section
void CLRCriticalSection::Leave()
{
- ::LeaveCriticalSection(&m_cs);
-}
+ throw nullptr;
+} \ No newline at end of file
diff --git a/src/gc/gcenv.windows.cpp b/src/gc/gcenv.windows.cpp
new file mode 100644
index 0000000000..a636478245
--- /dev/null
+++ b/src/gc/gcenv.windows.cpp
@@ -0,0 +1,625 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include <cstdint>
+#include <cassert>
+#include <cstddef>
+#include <memory>
+#include "windows.h"
+#include "psapi.h"
+#include "env/gcenv.structs.h"
+#include "env/gcenv.base.h"
+#include "env/gcenv.os.h"
+
+GCSystemInfo g_SystemInfo;
+
+typedef BOOL (WINAPI *PGET_PROCESS_MEMORY_INFO)(HANDLE handle, PROCESS_MEMORY_COUNTERS* memCounters, uint32_t cb);
+static PGET_PROCESS_MEMORY_INFO GCGetProcessMemoryInfo = 0;
+
+static size_t g_RestrictedPhysicalMemoryLimit = (size_t)UINTPTR_MAX;
+
+typedef BOOL (WINAPI *PIS_PROCESS_IN_JOB)(HANDLE processHandle, HANDLE jobHandle, BOOL* result);
+typedef BOOL (WINAPI *PQUERY_INFORMATION_JOB_OBJECT)(HANDLE jobHandle, JOBOBJECTINFOCLASS jobObjectInfoClass, void* lpJobObjectInfo, DWORD cbJobObjectInfoLength, LPDWORD lpReturnLength);
+
+namespace {
+
+void GetProcessMemoryLoad(LPMEMORYSTATUSEX pMSEX)
+{
+ pMSEX->dwLength = sizeof(MEMORYSTATUSEX);
+ BOOL fRet = ::GlobalMemoryStatusEx(pMSEX);
+ assert(fRet);
+
+ // If the machine has more RAM than the virtual address limit, cap it:
+ // our GC can never use more than the virtual address limit.
+ if (pMSEX->ullAvailPhys > pMSEX->ullTotalVirtual)
+ {
+ pMSEX->ullAvailPhys = pMSEX->ullAvailVirtual;
+ }
+}
+
+static size_t GetRestrictedPhysicalMemoryLimit()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // The limit was cached already
+ if (g_RestrictedPhysicalMemoryLimit != (size_t)UINTPTR_MAX)
+ return g_RestrictedPhysicalMemoryLimit;
+
+ size_t job_physical_memory_limit = (size_t)UINTPTR_MAX;
+ BOOL in_job_p = FALSE;
+ HINSTANCE hinstKernel32 = 0;
+
+ PIS_PROCESS_IN_JOB GCIsProcessInJob = 0;
+ PQUERY_INFORMATION_JOB_OBJECT GCQueryInformationJobObject = 0;
+
+ hinstKernel32 = LoadLibraryEx(L"kernel32.dll", nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32);
+ if (!hinstKernel32)
+ goto exit;
+
+ GCIsProcessInJob = (PIS_PROCESS_IN_JOB)GetProcAddress(hinstKernel32, "IsProcessInJob");
+ if (!GCIsProcessInJob)
+ goto exit;
+
+ if (!GCIsProcessInJob(GetCurrentProcess(), NULL, &in_job_p))
+ goto exit;
+
+ if (in_job_p)
+ {
+ GCGetProcessMemoryInfo = (PGET_PROCESS_MEMORY_INFO)GetProcAddress(hinstKernel32, "K32GetProcessMemoryInfo");
+
+ if (!GCGetProcessMemoryInfo)
+ goto exit;
+
+ GCQueryInformationJobObject = (PQUERY_INFORMATION_JOB_OBJECT)GetProcAddress(hinstKernel32, "QueryInformationJobObject");
+
+ if (!GCQueryInformationJobObject)
+ goto exit;
+
+ JOBOBJECT_EXTENDED_LIMIT_INFORMATION limit_info;
+ if (GCQueryInformationJobObject (NULL, JobObjectExtendedLimitInformation, &limit_info,
+ sizeof(limit_info), NULL))
+ {
+ size_t job_memory_limit = (size_t)UINTPTR_MAX;
+ size_t job_process_memory_limit = (size_t)UINTPTR_MAX;
+ size_t job_workingset_limit = (size_t)UINTPTR_MAX;
+
+ // Notes on the NT job object:
+ //
+ // You can specify a bigger process commit or working set limit than the
+ // job limit, which is pointless, so we use the smallest of all three to
+ // calculate our "physical memory load" or "available physical memory"
+ // when running inside a job object, i.e., we treat this as the amount of
+ // physical memory our process is allowed to use.
+ //
+ // The commit limit is already reflected by default when you run in a
+ // job but the physical memory load is not.
+ //
+ if ((limit_info.BasicLimitInformation.LimitFlags & JOB_OBJECT_LIMIT_JOB_MEMORY) != 0)
+ job_memory_limit = limit_info.JobMemoryLimit;
+ if ((limit_info.BasicLimitInformation.LimitFlags & JOB_OBJECT_LIMIT_PROCESS_MEMORY) != 0)
+ job_process_memory_limit = limit_info.ProcessMemoryLimit;
+ if ((limit_info.BasicLimitInformation.LimitFlags & JOB_OBJECT_LIMIT_WORKINGSET) != 0)
+ job_workingset_limit = limit_info.BasicLimitInformation.MaximumWorkingSetSize;
+
+ job_physical_memory_limit = min (job_memory_limit, job_process_memory_limit);
+ job_physical_memory_limit = min (job_physical_memory_limit, job_workingset_limit);
+
+ MEMORYSTATUSEX ms;
+ ::GetProcessMemoryLoad(&ms);
+
+ // A sanity check in case someone set a larger limit than there is actual physical memory.
+ job_physical_memory_limit = (size_t) min (job_physical_memory_limit, ms.ullTotalPhys);
+ }
+ }
+
+exit:
+ if (job_physical_memory_limit == (size_t)UINTPTR_MAX)
+ {
+ job_physical_memory_limit = 0;
+
+ FreeLibrary(hinstKernel32);
+ }
+
+ VolatileStore(&g_RestrictedPhysicalMemoryLimit, job_physical_memory_limit);
+ return g_RestrictedPhysicalMemoryLimit;
+}
+
+} // anonymous namespace
+
+// Initialize the interface implementation
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::Initialize()
+{
+ SYSTEM_INFO systemInfo;
+ GetSystemInfo(&systemInfo);
+
+ g_SystemInfo.dwNumberOfProcessors = systemInfo.dwNumberOfProcessors;
+ g_SystemInfo.dwPageSize = systemInfo.dwPageSize;
+ g_SystemInfo.dwAllocationGranularity = systemInfo.dwAllocationGranularity;
+
+ return true;
+}
+
+// Shutdown the interface implementation
+void GCToOSInterface::Shutdown()
+{
+ // nothing to do.
+}
+
+// Get numeric id of the current thread if possible on the
+// current platform. It is intended for logging purposes only.
+// Return:
+// Numeric id of the current thread, or 0 if it cannot be obtained on the current platform.
+uint64_t GCToOSInterface::GetCurrentThreadIdForLogging()
+{
+ return ::GetCurrentThreadId();
+}
+
+// Get id of the process
+uint32_t GCToOSInterface::GetCurrentProcessId()
+{
+ return ::GetCurrentProcessId();
+}
+
+// Set ideal affinity for the current thread
+// Parameters:
+// affinity - ideal processor affinity for the thread
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::SetCurrentThreadIdealAffinity(GCThreadAffinity* affinity)
+{
+ bool success = true;
+
+#if !defined(FEATURE_CORESYSTEM)
+ SetThreadIdealProcessor(GetCurrentThread(), (DWORD)affinity->Processor);
+#else
+ PROCESSOR_NUMBER proc;
+
+ if (affinity->Group != -1)
+ {
+ proc.Group = (WORD)affinity->Group;
+ proc.Number = (BYTE)affinity->Processor;
+ proc.Reserved = 0;
+
+ success = !!SetThreadIdealProcessorEx(GetCurrentThread(), &proc, NULL);
+ }
+ else
+ {
+ if (GetThreadIdealProcessorEx(GetCurrentThread(), &proc))
+ {
+ proc.Number = affinity->Processor;
+ success = !!SetThreadIdealProcessorEx(GetCurrentThread(), &proc, NULL);
+ }
+ }
+#endif
+
+ return success;
+}
+
+// Get the number of the current processor
+uint32_t GCToOSInterface::GetCurrentProcessorNumber()
+{
+ assert(GCToOSInterface::CanGetCurrentProcessorNumber());
+ return ::GetCurrentProcessorNumber();
+}
+
+// Check if the OS supports getting current processor number
+bool GCToOSInterface::CanGetCurrentProcessorNumber()
+{
+ // This API exists on all Windows platforms we support.
+ return true;
+}
+
+// Flush write buffers of processors that are executing threads of the current process
+void GCToOSInterface::FlushProcessWriteBuffers()
+{
+ ::FlushProcessWriteBuffers();
+}
+
+// Break into a debugger
+void GCToOSInterface::DebugBreak()
+{
+ ::DebugBreak();
+}
+
+// Get number of logical processors
+uint32_t GCToOSInterface::GetLogicalCpuCount()
+{
+ // TODO(segilles) processor detection
+ return 1;
+}
+
+// Causes the calling thread to sleep for the specified number of milliseconds
+// Parameters:
+// sleepMSec - time to sleep before switching to another thread
+void GCToOSInterface::Sleep(uint32_t sleepMSec)
+{
+ // TODO(segilles) CLR implementation of __SwitchToThread spins for short sleep durations
+ // to avoid context switches - is that interesting or useful here?
+ if (sleepMSec > 0)
+ {
+ ::SleepEx(sleepMSec, FALSE);
+ }
+}
+
+// Causes the calling thread to yield execution to another thread that is ready to run on the current processor.
+// Parameters:
+// switchCount - number of times the YieldThread was called in a loop
+void GCToOSInterface::YieldThread(uint32_t switchCount)
+{
+ UNREFERENCED_PARAMETER(switchCount);
+ SwitchToThread();
+}
+
+// Reserve virtual memory range.
+// Parameters:
+// address - starting virtual address, it can be NULL to let the function choose the starting address
+// size - size of the virtual memory range
+// alignment - requested memory alignment, 0 means no specific alignment requested
+// flags - flags to control special settings like write watching
+// Return:
+// Starting virtual address of the reserved range
+void* GCToOSInterface::VirtualReserve(size_t size, size_t alignment, uint32_t flags)
+{
+ // Windows already ensures 64kb alignment on VirtualAlloc. The current CLR
+ // implementation ignores it on Windows, other than making some sanity checks on it.
+ UNREFERENCED_PARAMETER(alignment);
+ assert((alignment & (alignment - 1)) == 0);
+ assert(alignment <= 0x10000);
+ DWORD memFlags = (flags & VirtualReserveFlags::WriteWatch) ? (MEM_RESERVE | MEM_WRITE_WATCH) : MEM_RESERVE;
+ return ::VirtualAlloc(nullptr, size, memFlags, PAGE_READWRITE);
+}
+
+// Release virtual memory range previously reserved using VirtualReserve
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualRelease(void* address, size_t size)
+{
+ return !!::VirtualFree(address, 0, MEM_RELEASE);
+}
+
+// Commit virtual memory range. It must be part of a range reserved using VirtualReserve.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualCommit(void* address, size_t size)
+{
+ return ::VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE) != nullptr;
+}
+
+// Decommit virtual memory range.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualDecommit(void* address, size_t size)
+{
+ return !!::VirtualFree(address, size, MEM_DECOMMIT);
+}
+
+// Reset virtual memory range. Indicates that data in the memory range specified by address and size is no
+// longer of interest, but it should not be decommitted.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// unlock - true if the memory range should also be unlocked
+// Return:
+// true if it has succeeded, false if it has failed. Returns false also if
+// unlocking was requested but the unlock failed.
+bool GCToOSInterface::VirtualReset(void * address, size_t size, bool unlock)
+{
+ bool success = ::VirtualAlloc(address, size, MEM_RESET, PAGE_READWRITE) != nullptr;
+ if (success && unlock)
+ {
+ ::VirtualUnlock(address, size);
+ }
+
+ return success;
+}
+
+// Check if the OS supports write watching
+bool GCToOSInterface::SupportsWriteWatch()
+{
+ void* mem = GCToOSInterface::VirtualReserve(g_SystemInfo.dwAllocationGranularity, 0, VirtualReserveFlags::WriteWatch);
+ if (mem != nullptr)
+ {
+ GCToOSInterface::VirtualRelease(mem, g_SystemInfo.dwAllocationGranularity);
+ return true;
+ }
+
+ return false;
+}
+
+// Reset the write tracking state for the specified virtual memory range.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+void GCToOSInterface::ResetWriteWatch(void* address, size_t size)
+{
+ ::ResetWriteWatch(address, size);
+}
+
+// Retrieve addresses of the pages that are written to in a region of virtual memory
+// Parameters:
+// resetState - true indicates to reset the write tracking state
+// address - starting virtual address
+// size - size of the virtual memory range
+// pageAddresses - buffer that receives an array of page addresses in the memory region
+// pageAddressesCount - on input, size of the pageAddresses array, in array elements
+// on output, the number of page addresses that are returned in the array.
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::GetWriteWatch(bool resetState, void* address, size_t size, void** pageAddresses, uintptr_t* pageAddressesCount)
+{
+ uint32_t flags = resetState ? 1 : 0;
+ ULONG granularity;
+
+ bool success = ::GetWriteWatch(flags, address, size, pageAddresses, (ULONG_PTR*)pageAddressesCount, &granularity) == 0;
+ if (success)
+ {
+ assert(granularity == OS_PAGE_SIZE);
+ }
+
+ return success;
+}
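Taken together, the reserve/commit and write-watch entry points above compose as in the following sketch; the probe size, buffer size, and function name are illustrative choices, not values the GC itself uses.

    // Illustration only: reserve a write-watched region, commit it, dirty a page,
    // then ask the OS which pages were written, using only the functions above.
    static void WriteWatchExample()
    {
        size_t size = g_SystemInfo.dwAllocationGranularity;
        void* region = GCToOSInterface::VirtualReserve(size, 0, VirtualReserveFlags::WriteWatch);
        if (region == nullptr)
            return;

        if (GCToOSInterface::VirtualCommit(region, size))
        {
            *(uint8_t*)region = 1; // write to the first page so it shows up as dirty

            void* pages[16];
            uintptr_t pageCount = sizeof(pages) / sizeof(pages[0]);
            if (GCToOSInterface::GetWriteWatch(true /* resetState */, region, size, pages, &pageCount))
            {
                // pageCount now holds how many dirtied page addresses were returned in 'pages'.
            }
        }

        GCToOSInterface::VirtualRelease(region, size);
    }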
+
+// Get size of the largest cache on the processor die
+// Parameters:
+// trueSize - true to return true cache size, false to return scaled up size based on
+// the processor architecture
+// Return:
+// Size of the cache
+size_t GCToOSInterface::GetLargestOnDieCacheSize(bool trueSize)
+{
+ // TODO(segilles) processor detection (see src/vm/util.cpp:1935)
+ return 0;
+}
+
+// Get affinity mask of the current process
+// Parameters:
+// processMask - affinity mask for the specified process
+// systemMask - affinity mask for the system
+// Return:
+// true if it has succeeded, false if it has failed
+// Remarks:
+// A process affinity mask is a bit vector in which each bit represents the processors that
+// a process is allowed to run on. A system affinity mask is a bit vector in which each bit
+// represents the processors that are configured into a system.
+// A process affinity mask is a subset of the system affinity mask. A process is only allowed
+// to run on the processors configured into a system. Therefore, the process affinity mask cannot
+// specify a 1 bit for a processor when the system affinity mask specifies a 0 bit for that processor.
+bool GCToOSInterface::GetCurrentProcessAffinityMask(uintptr_t* processMask, uintptr_t* systemMask)
+{
+ return !!::GetProcessAffinityMask(::GetCurrentProcess(), (PDWORD_PTR)processMask, (PDWORD_PTR)systemMask);
+}
+
+// Get number of processors assigned to the current process
+// Return:
+// The number of processors
+uint32_t GCToOSInterface::GetCurrentProcessCpuCount()
+{
+ // TODO(segilles) this does not take into account process affinity
+ return g_SystemInfo.dwNumberOfProcessors;
+}
+
+// Return the size of the user-mode portion of the virtual address space of this process.
+// Return:
+// non zero if it has succeeded, 0 if it has failed
+size_t GCToOSInterface::GetVirtualMemoryLimit()
+{
+ MEMORYSTATUSEX memStatus;
+ memStatus.dwLength = sizeof(MEMORYSTATUSEX);
+ if (::GlobalMemoryStatusEx(&memStatus))
+ {
+ return (size_t)memStatus.ullAvailVirtual;
+ }
+
+ return 0;
+}
+
+// Get the physical memory that this process can use.
+// Return:
+// non zero if it has succeeded, 0 if it has failed
+// Remarks:
+// If a process runs with a restricted memory limit, it returns the limit. If there's no limit
+// specified, it returns amount of actual physical memory.
+uint64_t GCToOSInterface::GetPhysicalMemoryLimit()
+{
+ size_t restricted_limit = GetRestrictedPhysicalMemoryLimit();
+ if (restricted_limit != 0)
+ return restricted_limit;
+
+ MEMORYSTATUSEX memStatus;
+ memStatus.dwLength = sizeof(MEMORYSTATUSEX);
+ if (::GlobalMemoryStatusEx(&memStatus))
+ {
+ return memStatus.ullTotalPhys;
+ }
+
+ return 0;
+}
+
+// Get memory status
+// Parameters:
+// memory_load - A number between 0 and 100 that specifies the approximate percentage of physical memory
+// that is in use (0 indicates no memory use and 100 indicates full memory use).
+// available_physical - The amount of physical memory currently available, in bytes.
+// available_page_file - The maximum amount of memory the current process can commit, in bytes.
+void GCToOSInterface::GetMemoryStatus(uint32_t* memory_load, uint64_t* available_physical, uint64_t* available_page_file)
+{
+ uint64_t restricted_limit = GetRestrictedPhysicalMemoryLimit();
+ if (restricted_limit != 0)
+ {
+ PROCESS_MEMORY_COUNTERS pmc;
+ if (GCGetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc)))
+ {
+ if (memory_load)
+ *memory_load = (uint32_t)((float)pmc.WorkingSetSize * 100.0 / (float)restricted_limit);
+ if (available_physical)
+ *available_physical = restricted_limit - pmc.WorkingSetSize;
+ // Available page file doesn't mean much when physical memory is restricted since
+ // we don't know how much of it is available to this process so we are not going to
+ // bother to make another OS call for it.
+ if (available_page_file)
+ *available_page_file = 0;
+
+ return;
+ }
+ }
+
+ MEMORYSTATUSEX ms;
+ ::GetProcessMemoryLoad(&ms);
+
+ if (memory_load != nullptr)
+ *memory_load = ms.dwMemoryLoad;
+ if (available_physical != nullptr)
+ *available_physical = ms.ullAvailPhys;
+ if (available_page_file != nullptr)
+ *available_page_file = ms.ullAvailPageFile;
+}
+
+// Get a high precision performance counter
+// Return:
+// The counter value
+int64_t GCToOSInterface::QueryPerformanceCounter()
+{
+ LARGE_INTEGER ts;
+ if (!::QueryPerformanceCounter(&ts))
+ {
+ assert(false && "Failed to query performance counter");
+ }
+
+ return ts.QuadPart;
+}
+
+// Get a frequency of the high precision performance counter
+// Return:
+// The counter frequency
+int64_t GCToOSInterface::QueryPerformanceFrequency()
+{
+ LARGE_INTEGER ts;
+ if (!::QueryPerformanceFrequency(&ts))
+ {
+ assert(false && "Failed to query performance counter frequency");
+ }
+
+ return ts.QuadPart;
+}
+
+// Get a time stamp with a low precision
+// Return:
+// Time stamp in milliseconds
+uint32_t GCToOSInterface::GetLowPrecisionTimeStamp()
+{
+ return ::GetTickCount();
+}
+
+// Parameters of the GC thread stub
+struct GCThreadStubParam
+{
+ GCThreadFunction GCThreadFunction;
+ void* GCThreadParam;
+};
+
+// GC thread stub to convert GC thread function to an OS specific thread function
+static DWORD GCThreadStub(void* param)
+{
+ GCThreadStubParam *stubParam = (GCThreadStubParam*)param;
+ GCThreadFunction function = stubParam->GCThreadFunction;
+ void* threadParam = stubParam->GCThreadParam;
+
+ delete stubParam;
+
+ function(threadParam);
+
+ return 0;
+}
+
+
+// Create a new thread for GC use
+// Parameters:
+// function - the function to be executed by the thread
+// param - parameters of the thread
+// affinity - processor affinity of the thread
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::CreateThread(GCThreadFunction function, void* param, GCThreadAffinity* affinity)
+{
+ uint32_t thread_id;
+
+ std::unique_ptr<GCThreadStubParam> stubParam(new (std::nothrow) GCThreadStubParam());
+ if (!stubParam)
+ {
+ return false;
+ }
+
+ stubParam->GCThreadFunction = function;
+ stubParam->GCThreadParam = param;
+
+ HANDLE gc_thread = ::CreateThread(
+ nullptr,
+ 512 * 1024 /* Thread::StackSize_Medium */,
+ (LPTHREAD_START_ROUTINE)GCThreadStub,
+ stubParam.get(),
+ CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION,
+ (DWORD*)&thread_id);
+
+ if (!gc_thread)
+ {
+ return false;
+ }
+
+ stubParam.release();
+ bool result = !!::SetThreadPriority(gc_thread, /* THREAD_PRIORITY_ABOVE_NORMAL );*/ THREAD_PRIORITY_HIGHEST );
+ assert(result && "failed to set thread priority");
+
+ if (affinity->Group != GCThreadAffinity::None)
+ {
+ assert(affinity->Processor != GCThreadAffinity::None);
+ GROUP_AFFINITY ga;
+ ga.Group = (WORD)affinity->Group;
+ ga.Reserved[0] = 0; // Reserved must be zero-filled,
+ ga.Reserved[1] = 0; // otherwise the call may fail
+ ga.Reserved[2] = 0;
+ ga.Mask = (size_t)1 << affinity->Processor;
+
+ bool result = !!::SetThreadGroupAffinity(gc_thread, &ga, nullptr);
+ assert(result && "failed to set thread affinity");
+ }
+ else if (affinity->Processor != GCThreadAffinity::None)
+ {
+ ::SetThreadAffinityMask(gc_thread, (DWORD_PTR)1 << affinity->Processor);
+ }
+
+ return true;
+}
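A caller that wants no particular affinity would use the function above roughly as follows; the worker function and launcher are made up for the example.

    // Illustration only: start a GC worker thread with no specific affinity.
    static void ExampleGCWorker(void* param)
    {
        UNREFERENCED_PARAMETER(param);
        // ... background GC work would happen here ...
    }

    static bool LaunchExampleWorker()
    {
        GCThreadAffinity affinity;
        affinity.Group = GCThreadAffinity::None;
        affinity.Processor = GCThreadAffinity::None;
        return GCToOSInterface::CreateThread(ExampleGCWorker, nullptr, &affinity);
    }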
+
+// Initialize the critical section
+void CLRCriticalSection::Initialize()
+{
+ ::InitializeCriticalSection(&m_cs);
+}
+
+// Destroy the critical section
+void CLRCriticalSection::Destroy()
+{
+ ::DeleteCriticalSection(&m_cs);
+}
+
+// Enter the critical section. Blocks until the section can be entered.
+void CLRCriticalSection::Enter()
+{
+ ::EnterCriticalSection(&m_cs);
+}
+
+// Leave the critical section
+void CLRCriticalSection::Leave()
+{
+ ::LeaveCriticalSection(&m_cs);
+}
diff --git a/src/gc/gcimpl.h b/src/gc/gcimpl.h
index 6a4ee86cd8..7e3a13a743 100644
--- a/src/gc/gcimpl.h
+++ b/src/gc/gcimpl.h
@@ -36,29 +36,10 @@ inline void checkGCWriteBarrier() {}
void GCProfileWalkHeap();
-class GCHeap;
class gc_heap;
class CFinalize;
-// TODO : it would be easier to make this an ORed value
-enum gc_reason
-{
- reason_alloc_soh = 0,
- reason_induced = 1,
- reason_lowmemory = 2,
- reason_empty = 3,
- reason_alloc_loh = 4,
- reason_oos_soh = 5,
- reason_oos_loh = 6,
- reason_induced_noforce = 7, // it's an induced GC and doesn't have to be blocking.
- reason_gcstress = 8, // this turns into reason_induced & gc_mechanisms.stress_induced = true
- reason_lowmemory_blocking = 9,
- reason_induced_compacting = 10,
- reason_lowmemory_host = 11,
- reason_max
-};
-
-class GCHeap : public ::GCHeap
+class GCHeap : public IGCHeapInternal
{
protected:
@@ -96,7 +77,7 @@ public:
size_t GetLastGCDuration(int generation);
size_t GetNow();
- void TraceGCSegments ();
+ void DiagTraceGCSegments ();
void PublishObject(uint8_t* obj);
BOOL IsGCInProgressHelper (BOOL bConsiderGCStart = FALSE);
@@ -111,17 +92,15 @@ public:
//flags can be GC_ALLOC_CONTAINS_REF GC_ALLOC_FINALIZE
Object* Alloc (size_t size, uint32_t flags);
-#ifdef FEATURE_64BIT_ALIGNMENT
Object* AllocAlign8 (size_t size, uint32_t flags);
- Object* AllocAlign8 (alloc_context* acontext, size_t size, uint32_t flags);
+ Object* AllocAlign8 (gc_alloc_context* acontext, size_t size, uint32_t flags);
private:
Object* AllocAlign8Common (void* hp, alloc_context* acontext, size_t size, uint32_t flags);
public:
-#endif // FEATURE_64BIT_ALIGNMENT
Object* AllocLHeap (size_t size, uint32_t flags);
- Object* Alloc (alloc_context* acontext, size_t size, uint32_t flags);
+ Object* Alloc (gc_alloc_context* acontext, size_t size, uint32_t flags);
- void FixAllocContext (alloc_context* acontext,
+ void FixAllocContext (gc_alloc_context* acontext,
BOOL lockp, void* arg, void *heap);
Object* GetContainingObject(void *pInteriorPtr);
@@ -132,7 +111,7 @@ public:
#endif //MULTIPLE_HEAPS
int GetHomeHeapNumber ();
- bool IsThreadUsingAllocationContextHeap(alloc_context* acontext, int thread_number);
+ bool IsThreadUsingAllocationContextHeap(gc_alloc_context* acontext, int thread_number);
int GetNumberOfHeaps ();
void HideAllocContext(alloc_context*);
void RevealAllocContext(alloc_context*);
@@ -176,9 +155,7 @@ public:
BOOL IsEphemeral (Object* object);
BOOL IsHeapPointer (void* object, BOOL small_heap_only = FALSE);
-#ifdef VERIFY_HEAP
void ValidateObjectMember (Object *obj);
-#endif //_DEBUG
PER_HEAP size_t ApproxTotalBytesInUse(BOOL small_heap_only = FALSE);
PER_HEAP size_t ApproxFreeBytes();
@@ -199,8 +176,6 @@ public:
int StartNoGCRegion(uint64_t totalSize, BOOL lohSizeKnown, uint64_t lohSize, BOOL disallowFullBlockingGC);
int EndNoGCRegion();
-
- PER_HEAP_ISOLATED unsigned GetMaxGeneration();
unsigned GetGcCount();
@@ -224,9 +199,7 @@ public:
BOOL ShouldRestartFinalizerWatchDog();
void SetCardsAfterBulkCopy( Object**, size_t);
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- void WalkObject (Object* obj, walk_fn fn, void* context);
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ void DiagWalkObject (Object* obj, walk_fn fn, void* context);
public: // FIX
@@ -249,11 +222,9 @@ public: // FIX
// Interface with gc_heap
size_t GarbageCollectTry (int generation, BOOL low_memory_p=FALSE, int mode=collection_blocking);
-#ifdef FEATURE_BASICFREEZE
// frozen segment management functions
virtual segment_handle RegisterFrozenSegment(segment_info *pseginfo);
virtual void UnregisterFrozenSegment(segment_handle seg);
-#endif // FEATURE_BASICFREEZE
void WaitUntilConcurrentGCComplete (); // Use in managd threads
#ifndef DACCESS_COMPILE
@@ -281,11 +252,12 @@ private:
// the condition here may have to change as well.
return g_TrapReturningThreads == 0;
}
-#ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
-#ifdef STRESS_HEAP
public:
//return TRUE if GC actually happens, otherwise FALSE
- BOOL StressHeap(alloc_context * acontext = 0);
+ BOOL StressHeap(gc_alloc_context * acontext = 0);
+
+#ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
+#ifdef STRESS_HEAP
protected:
// only used in BACKGROUND_GC, but the symbol is not defined yet...
@@ -300,17 +272,25 @@ protected:
#endif // STRESS_HEAP
#endif // FEATURE_REDHAWK
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- virtual void DescrGenerationsToProfiler (gen_walk_fn fn, void *context);
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ virtual void DiagDescrGenerations (gen_walk_fn fn, void *context);
+
+ virtual void DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, size_t diag_context, walk_surv_type type);
+
+ virtual void DiagWalkFinalizeQueue (void* gc_context, fq_walk_fn fn);
+
+ virtual void DiagScanFinalizeQueue (fq_scan_fn fn, ScanContext* context);
+
+ virtual void DiagScanHandles (handle_scan_fn fn, int gen_number, ScanContext* context);
+
+ virtual void DiagScanDependentHandles (handle_scan_fn fn, int gen_number, ScanContext* context);
+
+ virtual void DiagWalkHeap(walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
-#ifdef VERIFY_HEAP
public:
Object * NextObj (Object * object);
-#ifdef FEATURE_BASICFREEZE
+#if defined (FEATURE_BASICFREEZE) && defined (VERIFY_HEAP)
BOOL IsInFrozenSegment (Object * object);
-#endif //FEATURE_BASICFREEZE
-#endif //VERIFY_HEAP
+#endif // defined (FEATURE_BASICFREEZE) && defined (VERIFY_HEAP)
};
#endif // GCIMPL_H_
diff --git a/src/gc/gcinterface.ee.h b/src/gc/gcinterface.ee.h
new file mode 100644
index 0000000000..c5f87ef031
--- /dev/null
+++ b/src/gc/gcinterface.ee.h
@@ -0,0 +1,133 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#ifndef _GCINTERFACE_EE_H_
+#define _GCINTERFACE_EE_H_
+
+// This is the interface the GC uses to speak to the rest
+// of the execution engine. Everything the GC does that requires the EE
+// to be informed or that requires EE action must go through this interface.
+//
+// When FEATURE_STANDALONE_GC is defined, this class is named IGCToCLR and is
+// an abstract class. The EE will provide a class that fulfills this interface,
+// and the GC will dispatch virtually on it to call into the EE. When FEATURE_STANDALONE_GC
+// is not defined, this class is named GCToEEInterface and the GC will dispatch statically on it.
+class IGCToCLR {
+public:
+ // Suspends the EE for the given reason.
+ virtual
+ void SuspendEE(SUSPEND_REASON reason) = 0;
+
+ // Resumes all paused threads, with a boolean indicating
+ // if the EE is being restarted because a GC is complete.
+ virtual
+ void RestartEE(bool bFinishedGC) = 0;
+
+ // Performs a stack walk of all managed threads and invokes the given promote_func
+ // on all GC roots encountered on the stack. Depending on the condemned generation,
+ // this function may also enumerate all static GC refs if necessary.
+ virtual
+ void GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc) = 0;
+
+ // Callback from the GC informing the EE that it is preparing to start working.
+ virtual
+ void GcStartWork(int condemned, int max_gen) = 0;
+
+ // Callback from the GC informing the EE that it has completed the managed stack
+ // scan. User threads are still suspended at this point.
+ virtual
+ void AfterGcScanRoots(int condemned, int max_gen, ScanContext* sc) = 0;
+
+ // Callback from the GC informing the EE that the background sweep phase of a BGC is
+ // about to begin.
+ virtual
+ void GcBeforeBGCSweepWork() = 0;
+
+ // Callback from the GC informing the EE that a GC has completed.
+ virtual
+ void GcDone(int condemned) = 0;
+
+ // Predicate for the GC to query whether or not a given refcounted handle should
+ // be promoted.
+ virtual
+ bool RefCountedHandleCallbacks(Object * pObject) = 0;
+
+ // Performs a weak pointer scan of the sync block cache.
+ virtual
+ void SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2) = 0;
+
+ // Indicates to the EE that the GC intends to demote objects in the sync block cache.
+ virtual
+ void SyncBlockCacheDemote(int max_gen) = 0;
+
+ // Indicates to the EE that the GC has granted promotion to objects in the sync block cache.
+ virtual
+ void SyncBlockCachePromotionsGranted(int max_gen) = 0;
+
+ // Queries whether or not the given thread has preemptive GC disabled.
+ virtual
+ bool IsPreemptiveGCDisabled(Thread * pThread) = 0;
+
+ // Enables preemptive GC on the given thread.
+ virtual
+ void EnablePreemptiveGC(Thread * pThread) = 0;
+
+ // Disables preemptive GC on the given thread.
+ virtual
+ void DisablePreemptiveGC(Thread * pThread) = 0;
+
+ // Retrieves the alloc context associated with a given thread.
+ virtual
+ gc_alloc_context * GetAllocContext(Thread * pThread) = 0;
+
+ // Returns true if this thread is waiting to reach a safe point.
+ virtual
+ bool CatchAtSafePoint(Thread * pThread) = 0;
+
+ // Calls the given enum_alloc_context_func with every active alloc context.
+ virtual
+ void GcEnumAllocContexts(enum_alloc_context_func* fn, void* param) = 0;
+
+ // Creates and returns a new background thread.
+ virtual
+ Thread* CreateBackgroundThread(GCBackgroundThreadFunction threadStart, void* arg) = 0;
+
+ // When a GC starts, gives the diagnostics code a chance to run.
+ virtual
+ void DiagGCStart(int gen, bool isInduced) = 0;
+
+ // When GC heap segments change, gives the diagnostics code a chance to run.
+ virtual
+ void DiagUpdateGenerationBounds() = 0;
+
+ // When a GC ends, gives the diagnostics code a chance to run.
+ virtual
+ void DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent) = 0;
+
+ // During a GC after we discover what objects' finalizers should run, gives the diagnostics code a chance to run.
+ virtual
+ void DiagWalkFReachableObjects(void* gcContext) = 0;
+
+ // During a GC after we discover the survivors and the relocation info,
+ // gives the diagnostics code a chance to run. This includes LOH if we are
+ // compacting LOH.
+ virtual
+ void DiagWalkSurvivors(void* gcContext) = 0;
+
+ // During a full GC after we discover what objects to survive on LOH,
+ // gives the diagnostics code a chance to run.
+ virtual
+ void DiagWalkLOHSurvivors(void* gcContext) = 0;
+
+ // At the end of a background GC, gives the diagnostics code a chance to run.
+ virtual
+ void DiagWalkBGCSurvivors(void* gcContext) = 0;
+
+ // Informs the EE of changes to the location of the card table, updating the write
+ // barrier if necessary.
+ virtual
+ void StompWriteBarrier(WriteBarrierParameters* args) = 0;
+};
+
+#endif // _GCINTERFACE_EE_H_
diff --git a/src/gc/gcinterface.h b/src/gc/gcinterface.h
new file mode 100644
index 0000000000..1457848992
--- /dev/null
+++ b/src/gc/gcinterface.h
@@ -0,0 +1,622 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#ifndef _GC_INTERFACE_H_
+#define _GC_INTERFACE_H_
+
+struct ScanContext;
+struct gc_alloc_context;
+class CrawlFrame;
+
+// Callback passed to GcScanRoots.
+typedef void promote_func(PTR_PTR_Object, ScanContext*, uint32_t);
+
+// Callback passed to GcEnumAllocContexts.
+typedef void enum_alloc_context_func(gc_alloc_context*, void*);
+
+// Callback passed to CreateBackgroundThread.
+typedef uint32_t (__stdcall *GCBackgroundThreadFunction)(void* param);
+
+// Struct often used as a parameter to callbacks.
+typedef struct
+{
+ promote_func* f;
+ ScanContext* sc;
+ CrawlFrame * cf;
+} GCCONTEXT;
+
+// SUSPEND_REASON is the reason why the GC wishes to suspend the EE,
+// used as an argument to IGCToCLR::SuspendEE.
+typedef enum
+{
+ SUSPEND_FOR_GC = 1,
+ SUSPEND_FOR_GC_PREP = 6
+} SUSPEND_REASON;
+
+typedef enum
+{
+ walk_for_gc = 1,
+ walk_for_bgc = 2,
+ walk_for_loh = 3
+} walk_surv_type;
+
+// Different operations that can be done by GCToEEInterface::StompWriteBarrier
+enum class WriteBarrierOp
+{
+ StompResize,
+ StompEphemeral,
+ Initialize
+};
+
+// Arguments to GCToEEInterface::StompWriteBarrier
+struct WriteBarrierParameters
+{
+ // The operation that StompWriteBarrier will perform.
+ WriteBarrierOp operation;
+
+ // Whether or not the runtime is currently suspended. If it is not,
+ // the EE will need to suspend it before bashing the write barrier.
+ // Used for all operations.
+ bool is_runtime_suspended;
+
+ // Whether or not the GC has moved the ephemeral generation to no longer
+ // be at the top of the heap. When the ephemeral generation is at the top
+ // of the heap, and the write barrier observes that a pointer is greater than
+ // g_ephemeral_low, it does not need to check that the pointer is less than
+ // g_ephemeral_high because there is nothing in the GC heap above the ephemeral
+ // generation. When this is not the case, however, the GC must inform the EE
+ // so that the EE can switch to a write barrier that checks that a pointer
+ // is both greater than g_ephemeral_low and less than g_ephemeral_high.
+ // Used for WriteBarrierOp::StompResize.
+ bool requires_upper_bounds_check;
+
+ // The new card table location. May or may not be the same as the previous
+ // card table. Used for WriteBarrierOp::Initialize and WriteBarrierOp::StompResize.
+ uint32_t* card_table;
+
+ // The heap's new low boundary. May or may not be the same as the previous
+ // value. Used for WriteBarrierOp::Initialize and WriteBarrierOp::StompResize.
+ uint8_t* lowest_address;
+
+ // The heap's new high boundary. May or may not be the same as the previous
+ // value. Used for WriteBarrierOp::Initialize and WriteBarrierOp::StompResize.
+ uint8_t* highest_address;
+
+ // The new start of the ephemeral generation.
+ // Used for WriteBarrierOp::StompEphemeral.
+ uint8_t* ephemeral_lo;
+
+ // The new end of the ephemeral generation.
+ // Used for WriteBarrierOp::StompEphemeral.
+ uint8_t* ephemeral_hi;
+};
+
+#include "gcinterface.ee.h"
+
+// The allocation context must be known to the VM for use in the allocation
+// fast path and known to the GC for performing the allocation. Every Thread
+// has its own allocation context that it hands to the GC when allocating.
+struct gc_alloc_context
+{
+ uint8_t* alloc_ptr;
+ uint8_t* alloc_limit;
+ int64_t alloc_bytes; //Number of bytes allocated on SOH by this context
+ int64_t alloc_bytes_loh; //Number of bytes allocated on LOH by this context
+ // These two fields are deliberately not exposed past the EE-GC interface.
+ void* gc_reserved_1;
+ void* gc_reserved_2;
+ int alloc_count;
+public:
+
+ void init()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ alloc_ptr = 0;
+ alloc_limit = 0;
+ alloc_bytes = 0;
+ alloc_bytes_loh = 0;
+ gc_reserved_1 = 0;
+ gc_reserved_2 = 0;
+ alloc_count = 0;
+ }
+};
+
+// stub type to abstract a heap segment
+struct gc_heap_segment_stub;
+typedef gc_heap_segment_stub *segment_handle;
+
+struct segment_info
+{
+ void * pvMem; // base of the allocation, not the first object (must add ibFirstObject)
+ size_t ibFirstObject; // offset to the base of the first object in the segment
+ size_t ibAllocated; // limit of allocated memory in the segment (>= firstobject)
+    size_t ibCommit; // limit of committed memory in the segment (>= allocated)
+ size_t ibReserved; // limit of reserved memory in the segment (>= commit)
+};
+
+#ifdef PROFILING_SUPPORTED
+#define GC_PROFILING //Turn on profiling
+#endif // PROFILING_SUPPORTED
+
+#define LARGE_OBJECT_SIZE ((size_t)(85000))
+
+// The minimum size of an object is three pointers wide: one for the syncblock,
+// one for the object header, and one for the first field in the object.
+#define min_obj_size ((sizeof(uint8_t*) + sizeof(uintptr_t) + sizeof(size_t)))
+
+#define max_generation 2
+
+class Object;
+class IGCHeap;
+
+// Initializes the garbage collector. Should only be called
+// once, during EE startup.
+IGCHeap* InitializeGarbageCollector(IGCToCLR* clrToGC);
+
+// The runtime needs to know whether we're using workstation or server GC
+// long before the GCHeap is created. This function sets the type of
+// heap that will be created, before InitializeGarbageCollector is called
+// and the heap is actually created.
+void InitializeHeapType(bool bServerHeap);
+
+#ifdef WRITE_BARRIER_CHECK
+//always defined, but should be 0 in Server GC
+extern uint8_t* g_GCShadow;
+extern uint8_t* g_GCShadowEnd;
+// saves the g_lowest_address in between GCs to verify the consistency of the shadow segment
+extern uint8_t* g_shadow_lowest_address;
+#endif
+
+// For low memory notification from host
+extern int32_t g_bLowMemoryFromHost;
+
+extern VOLATILE(int32_t) m_GCLock;
+
+// !!!!!!!!!!!!!!!!!!!!!!!
+// make sure you change the def in bcl\system\gc.cs
+// if you change this!
+enum collection_mode
+{
+ collection_non_blocking = 0x00000001,
+ collection_blocking = 0x00000002,
+ collection_optimized = 0x00000004,
+ collection_compacting = 0x00000008
+#ifdef STRESS_HEAP
+ , collection_gcstress = 0x80000000
+#endif // STRESS_HEAP
+};
+
+// !!!!!!!!!!!!!!!!!!!!!!!
+// make sure you change the def in bcl\system\gc.cs
+// if you change this!
+enum wait_full_gc_status
+{
+ wait_full_gc_success = 0,
+ wait_full_gc_failed = 1,
+ wait_full_gc_cancelled = 2,
+ wait_full_gc_timeout = 3,
+ wait_full_gc_na = 4
+};
+
+// !!!!!!!!!!!!!!!!!!!!!!!
+// make sure you change the def in bcl\system\gc.cs
+// if you change this!
+enum start_no_gc_region_status
+{
+ start_no_gc_success = 0,
+ start_no_gc_no_memory = 1,
+ start_no_gc_too_large = 2,
+ start_no_gc_in_progress = 3
+};
+
+enum end_no_gc_region_status
+{
+ end_no_gc_success = 0,
+ end_no_gc_not_in_progress = 1,
+ end_no_gc_induced = 2,
+ end_no_gc_alloc_exceeded = 3
+};
+
+typedef BOOL (* walk_fn)(Object*, void*);
+typedef void (* gen_walk_fn)(void* context, int generation, uint8_t* range_start, uint8_t* range_end, uint8_t* range_reserved);
+typedef void (* record_surv_fn)(uint8_t* begin, uint8_t* end, ptrdiff_t reloc, size_t context, BOOL compacting_p, BOOL bgc_p);
+typedef void (* fq_walk_fn)(BOOL, void*);
+typedef void (* fq_scan_fn)(Object** ppObject, ScanContext *pSC, uint32_t dwFlags);
+typedef void (* handle_scan_fn)(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, BOOL isDependent);
+
+// IGCHeap is the interface that the VM will use when interacting with the GC.
+class IGCHeap {
+public:
+ /*
+ ===========================================================================
+ Hosting APIs. These are used by GC hosting. The code that
+ calls these methods may possibly be moved behind the interface -
+ today, the VM handles the setting of segment size and max gen 0 size.
+ (See src/vm/corehost.cpp)
+ ===========================================================================
+ */
+
+ // Returns whether or not the given size is a valid segment size.
+ virtual BOOL IsValidSegmentSize(size_t size) = 0;
+
+ // Returns whether or not the given size is a valid gen 0 max size.
+ virtual BOOL IsValidGen0MaxSize(size_t size) = 0;
+
+ // Gets a valid segment size.
+ virtual size_t GetValidSegmentSize(BOOL large_seg = FALSE) = 0;
+
+ // Sets the limit for reserved virtual memory.
+ virtual void SetReservedVMLimit(size_t vmlimit) = 0;
+
+ /*
+ ===========================================================================
+ Concurrent GC routines. These are used in various places in the VM
+ to synchronize with the GC, when the VM wants to update something that
+ the GC is potentially using, if it's doing a background GC.
+
+ Concrete examples of this are moving async pinned handles across appdomains
+ and profiling/ETW scenarios.
+ ===========================================================================
+ */
+
+ // Blocks until any running concurrent GCs complete.
+ virtual void WaitUntilConcurrentGCComplete() = 0;
+
+ // Returns true if a concurrent GC is in progress, false otherwise.
+ virtual BOOL IsConcurrentGCInProgress() = 0;
+
+ // Temporarily enables concurrent GC, used during profiling.
+ virtual void TemporaryEnableConcurrentGC() = 0;
+
+ // Temporarily disables concurrent GC, used during profiling.
+ virtual void TemporaryDisableConcurrentGC() = 0;
+
+ // Returns whether or not Concurrent GC is enabled.
+ virtual BOOL IsConcurrentGCEnabled() = 0;
+
+ // Wait for a concurrent GC to complete if one is in progress, with the given timeout.
+    virtual HRESULT WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout) = 0;    // For use on native threads; the HRESULT indicates success, failure, or timeout.
+
+
+ /*
+ ===========================================================================
+ Finalization routines. These are used by the finalizer thread to communicate
+ with the GC.
+ ===========================================================================
+ */
+
+ // Finalizes an app domain by finalizing objects within that app domain.
+ virtual BOOL FinalizeAppDomain(AppDomain* pDomain, BOOL fRunFinalizers) = 0;
+
+ // Finalizes all registered objects for shutdown, even if they are still reachable.
+ virtual void SetFinalizeQueueForShutdown(BOOL fHasLock) = 0;
+
+ // Gets the number of finalizable objects.
+ virtual size_t GetNumberOfFinalizable() = 0;
+
+ // Traditionally used by the finalizer thread on shutdown to determine
+ // whether or not to time out. Returns true if the GC lock has not been taken.
+ virtual BOOL ShouldRestartFinalizerWatchDog() = 0;
+
+ // Gets the next finalizable object.
+ virtual Object* GetNextFinalizable() = 0;
+
+ /*
+ ===========================================================================
+ BCL routines. These are routines that are directly exposed by mscorlib
+ as a part of the `System.GC` class. These routines behave in the same
+ manner as the functions on `System.GC`.
+ ===========================================================================
+ */
+
+ // Gets the current GC latency mode.
+ virtual int GetGcLatencyMode() = 0;
+
+ // Sets the current GC latency mode. newLatencyMode has already been
+ // verified by mscorlib to be valid.
+ virtual int SetGcLatencyMode(int newLatencyMode) = 0;
+
+ // Gets the current LOH compaction mode.
+ virtual int GetLOHCompactionMode() = 0;
+
+ // Sets the current LOH compaction mode. newLOHCompactionMode has
+ // already been verified by mscorlib to be valid.
+ virtual void SetLOHCompactionMode(int newLOHCompactionMode) = 0;
+
+ // Registers for a full GC notification, raising a notification if the gen 2 or
+ // LOH object heap thresholds are exceeded.
+ virtual BOOL RegisterForFullGCNotification(uint32_t gen2Percentage, uint32_t lohPercentage) = 0;
+
+ // Cancels a full GC notification that was requested by `RegisterForFullGCNotification`.
+ virtual BOOL CancelFullGCNotification() = 0;
+
+ // Returns the status of a registered notification for determining whether a blocking
+ // Gen 2 collection is about to be initiated, with the given timeout.
+ virtual int WaitForFullGCApproach(int millisecondsTimeout) = 0;
+
+ // Returns the status of a registered notification for determining whether a blocking
+ // Gen 2 collection has completed, with the given timeout.
+ virtual int WaitForFullGCComplete(int millisecondsTimeout) = 0;
+
+ // Returns the generation in which obj is found. Also used by the VM
+ // in some places, in particular syncblk code.
+ virtual unsigned WhichGeneration(Object* obj) = 0;
+
+ // Returns the number of GCs that have transpired in the given generation
+ // since the beginning of the life of the process. Also used by the VM
+ // for debug code and app domains.
+ virtual int CollectionCount(int generation, int get_bgc_fgc_coutn = 0) = 0;
+
+ // Begins a no-GC region, returning a code indicating whether entering the no-GC
+ // region was successful.
+ virtual int StartNoGCRegion(uint64_t totalSize, BOOL lohSizeKnown, uint64_t lohSize, BOOL disallowFullBlockingGC) = 0;
+
+ // Exits a no-GC region.
+ virtual int EndNoGCRegion() = 0;
+
+ // Gets the total number of bytes in use.
+ virtual size_t GetTotalBytesInUse() = 0;
+
+ // Forces a garbage collection of the given generation. Also used extensively
+ // throughout the VM.
+ virtual HRESULT GarbageCollect(int generation = -1, BOOL low_memory_p = FALSE, int mode = collection_blocking) = 0;
+
+ // Gets the largest GC generation. Also used extensively throughout the VM.
+ virtual unsigned GetMaxGeneration() = 0;
+
+ // Indicates that an object's finalizer should not be run upon the object's collection.
+ virtual void SetFinalizationRun(Object* obj) = 0;
+
+ // Indicates that an object's finalizer should be run upon the object's collection.
+ virtual bool RegisterForFinalization(int gen, Object* obj) = 0;
+
+ /*
+ ===========================================================================
+ Miscellaneous routines used by the VM.
+ ===========================================================================
+ */
+
+ // Initializes the GC heap, returning whether or not the initialization
+ // was successful.
+ virtual HRESULT Initialize() = 0;
+
+    // Returns whether or not this object was promoted by the last GC.
+ virtual BOOL IsPromoted(Object* object) = 0;
+
+ // Returns true if this pointer points into a GC heap, false otherwise.
+ virtual BOOL IsHeapPointer(void* object, BOOL small_heap_only = FALSE) = 0;
+
+ // Return the generation that has been condemned by the current GC.
+ virtual unsigned GetCondemnedGeneration() = 0;
+
+ // Returns whether or not a GC is in progress.
+ virtual BOOL IsGCInProgressHelper(BOOL bConsiderGCStart = FALSE) = 0;
+
+    // Returns the number of GCs that have occurred. Mainly used for
+    // sanity checks asserting that a GC has not occurred.
+ virtual unsigned GetGcCount() = 0;
+
+ // Sets cards after an object has been memmoved.
+ virtual void SetCardsAfterBulkCopy(Object** obj, size_t length) = 0;
+
+ // Gets whether or not the home heap of this alloc context matches the heap
+ // associated with this thread.
+ virtual bool IsThreadUsingAllocationContextHeap(gc_alloc_context* acontext, int thread_number) = 0;
+
+ // Returns whether or not this object resides in an ephemeral generation.
+ virtual BOOL IsEphemeral(Object* object) = 0;
+
+ // Blocks until a GC is complete, returning a code indicating the wait was successful.
+ virtual uint32_t WaitUntilGCComplete(BOOL bConsiderGCStart = FALSE) = 0;
+
+ // "Fixes" an allocation context by binding its allocation pointer to a
+ // location on the heap.
+ virtual void FixAllocContext(gc_alloc_context* acontext, BOOL lockp, void* arg, void* heap) = 0;
+
+ // Gets the total survived size plus the total allocated bytes on the heap.
+ virtual size_t GetCurrentObjSize() = 0;
+
+ // Sets whether or not a GC is in progress.
+ virtual void SetGCInProgress(BOOL fInProgress) = 0;
+
+ /*
+ ============================================================================
+ Add/RemoveMemoryPressure support routines. These are on the interface
+ for now, but we should move Add/RemoveMemoryPressure from the VM to the GC.
+ When that occurs, these three routines can be removed from the interface.
+ ============================================================================
+ */
+
+    // Gets the timestamp corresponding to the last GC that occurred for the
+ // given generation.
+ virtual size_t GetLastGCStartTime(int generation) = 0;
+
+    // Gets the duration of the last GC that occurred for the given generation.
+ virtual size_t GetLastGCDuration(int generation) = 0;
+
+ // Gets a timestamp for the current moment in time.
+ virtual size_t GetNow() = 0;
+
+ /*
+ ===========================================================================
+ Allocation routines. These all call into the GC's allocator and may trigger a garbage
+ collection. All allocation routines return NULL when the allocation request
+ couldn't be serviced due to being out of memory.
+ ===========================================================================
+ */
+
+ // Allocates an object on the given allocation context with the given size and flags.
+ virtual Object* Alloc(gc_alloc_context* acontext, size_t size, uint32_t flags) = 0;
+
+ // Allocates an object on the default allocation context with the given size and flags.
+ virtual Object* Alloc(size_t size, uint32_t flags) = 0;
+
+ // Allocates an object on the large object heap with the given size and flags.
+ virtual Object* AllocLHeap(size_t size, uint32_t flags) = 0;
+
+ // Allocates an object on the default allocation context, aligned to 64 bits,
+ // with the given size and flags.
+ virtual Object* AllocAlign8 (size_t size, uint32_t flags) = 0;
+
+ // Allocates an object on the given allocation context, aligned to 64 bits,
+ // with the given size and flags.
+ virtual Object* AllocAlign8 (gc_alloc_context* acontext, size_t size, uint32_t flags) = 0;
+
+ // This is for the allocator to indicate it's done allocating a large object during a
+ // background GC as the BGC threads also need to walk LOH.
+ virtual void PublishObject(uint8_t* obj) = 0;
+
+ // Gets the event that suspended threads will use to wait for the
+ // end of a GC.
+ virtual CLREventStatic* GetWaitForGCEvent() = 0;
+
+ /*
+ ===========================================================================
+ Heap verification routines. These are used during heap verification only.
+ ===========================================================================
+ */
+ // Returns whether or not this object is in the fixed heap.
+ virtual BOOL IsObjectInFixedHeap(Object* pObj) = 0;
+
+ // Walks an object and validates its members.
+ virtual void ValidateObjectMember(Object* obj) = 0;
+
+ // Retrieves the next object after the given object. When the EE
+ // is not suspended, the result is not accurate - if the input argument
+ // is in Gen0, the function could return zeroed out memory as the next object.
+ virtual Object* NextObj(Object* object) = 0;
+
+ // Given an interior pointer, return a pointer to the object
+ // containing that pointer. This is safe to call only when the EE is suspended.
+ virtual Object* GetContainingObject(void* pInteriorPtr) = 0;
+
+ /*
+ ===========================================================================
+ Profiling routines. Used for event tracing and profiling to broadcast
+ information regarding the heap.
+ ===========================================================================
+ */
+
+ // Walks an object, invoking a callback on each member.
+ virtual void DiagWalkObject(Object* obj, walk_fn fn, void* context) = 0;
+
+ // Walk the heap object by object.
+ virtual void DiagWalkHeap(walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p) = 0;
+
+ // Walks the survivors and get the relocation information if objects have moved.
+ virtual void DiagWalkSurvivorsWithType(void* gc_context, record_surv_fn fn, size_t diag_context, walk_surv_type type) = 0;
+
+ // Walks the finalization queue.
+ virtual void DiagWalkFinalizeQueue(void* gc_context, fq_walk_fn fn) = 0;
+
+ // Scan roots on finalizer queue. This is a generic function.
+ virtual void DiagScanFinalizeQueue(fq_scan_fn fn, ScanContext* context) = 0;
+
+ // Scan handles for profiling or ETW.
+ virtual void DiagScanHandles(handle_scan_fn fn, int gen_number, ScanContext* context) = 0;
+
+ // Scan dependent handles for profiling or ETW.
+ virtual void DiagScanDependentHandles(handle_scan_fn fn, int gen_number, ScanContext* context) = 0;
+
+ // Describes all generations to the profiler, invoking a callback on each generation.
+ virtual void DiagDescrGenerations(gen_walk_fn fn, void* context) = 0;
+
+ // Traces all GC segments and fires ETW events with information on them.
+ virtual void DiagTraceGCSegments() = 0;
+
+ /*
+ ===========================================================================
+ GC Stress routines. Used only when running under GC Stress.
+ ===========================================================================
+ */
+
+ // Returns TRUE if GC actually happens, otherwise FALSE
+ virtual BOOL StressHeap(gc_alloc_context* acontext = 0) = 0;
+
+ /*
+ ===========================================================================
+ Routines to register read only segments for frozen objects.
+ Only valid if FEATURE_BASICFREEZE is defined.
+ ===========================================================================
+ */
+
+ // Registers a frozen segment with the GC.
+ virtual segment_handle RegisterFrozenSegment(segment_info *pseginfo) = 0;
+
+ // Unregisters a frozen segment.
+ virtual void UnregisterFrozenSegment(segment_handle seg) = 0;
+
+ IGCHeap() {}
+ virtual ~IGCHeap() {}
+
+ typedef enum
+ {
+ GC_HEAP_INVALID = 0,
+ GC_HEAP_WKS = 1,
+ GC_HEAP_SVR = 2
+ } GC_HEAP_TYPE;
+
+#ifdef FEATURE_SVR_GC
+ SVAL_DECL(uint32_t, gcHeapType);
+#endif
+
+ SVAL_DECL(uint32_t, maxGeneration);
+};
+
+#ifdef WRITE_BARRIER_CHECK
+void updateGCShadow(Object** ptr, Object* val);
+#endif
+
+//constants for the flags parameter to the gc call back
+
+#define GC_CALL_INTERIOR 0x1
+#define GC_CALL_PINNED 0x2
+#define GC_CALL_CHECK_APP_DOMAIN 0x4
+
+//flags for IGCHeapAlloc(...)
+#define GC_ALLOC_FINALIZE 0x1
+#define GC_ALLOC_CONTAINS_REF 0x2
+#define GC_ALLOC_ALIGN8_BIAS 0x4
+#define GC_ALLOC_ALIGN8 0x8
+
+struct ScanContext
+{
+ Thread* thread_under_crawl;
+ int thread_number;
+ uintptr_t stack_limit; // Lowest point on the thread stack that the scanning logic is permitted to read
+ BOOL promotion; //TRUE: Promotion, FALSE: Relocation.
+ BOOL concurrent; //TRUE: concurrent scanning
+#if CHECK_APP_DOMAIN_LEAKS || defined (FEATURE_APPDOMAIN_RESOURCE_MONITORING) || defined (DACCESS_COMPILE)
+ AppDomain *pCurrentDomain;
+#endif //CHECK_APP_DOMAIN_LEAKS || FEATURE_APPDOMAIN_RESOURCE_MONITORING || DACCESS_COMPILE
+
+#ifndef FEATURE_REDHAWK
+#if defined(GC_PROFILING) || defined (DACCESS_COMPILE)
+ MethodDesc *pMD;
+#endif //GC_PROFILING || DACCESS_COMPILE
+#endif // FEATURE_REDHAWK
+#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ EtwGCRootKind dwEtwRootKind;
+#endif // GC_PROFILING || FEATURE_EVENT_TRACE
+
+ ScanContext()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ thread_under_crawl = 0;
+ thread_number = -1;
+ stack_limit = 0;
+ promotion = FALSE;
+ concurrent = FALSE;
+#ifdef GC_PROFILING
+ pMD = NULL;
+#endif //GC_PROFILING
+#ifdef FEATURE_EVENT_TRACE
+ dwEtwRootKind = kEtwGCRootKindOther;
+#endif // FEATURE_EVENT_TRACE
+ }
+};
+
+#endif // _GC_INTERFACE_H_
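For illustration only (not part of this change): the comment on gc_alloc_context above describes the split between the VM's allocation fast path and the GC's allocator. A minimal sketch of that fast path follows; the helper name is an assumption, and object-header/method-table setup is omitted for brevity:

    // Sketch: bump-pointer allocation inside a thread's gc_alloc_context,
    // falling back to IGCHeap::Alloc when the context is exhausted.
    Object* SampleFastAlloc(IGCHeap* heap, gc_alloc_context* acontext, size_t size, uint32_t flags)
    {
        uint8_t* result = acontext->alloc_ptr;
        uint8_t* newPtr = result + size;
        if (newPtr <= acontext->alloc_limit)
        {
            acontext->alloc_ptr = newPtr;                    // common case: no GC involvement
            return reinterpret_cast<Object*>(result);
        }
        // Slow path: the GC refills the context or collects; NULL means out of memory.
        return heap->Alloc(acontext, size, flags);
    }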
diff --git a/src/gc/gcpriv.h b/src/gc/gcpriv.h
index 03a23454a0..3bed8c2cf8 100644
--- a/src/gc/gcpriv.h
+++ b/src/gc/gcpriv.h
@@ -24,7 +24,9 @@
inline void FATAL_GC_ERROR()
{
+#ifndef DACCESS_COMPILE
GCToOSInterface::DebugBreak();
+#endif // DACCESS_COMPILE
_ASSERTE(!"Fatal Error in GC.");
EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
}
@@ -1073,9 +1075,6 @@ enum interesting_data_point
};
//class definition of the internal class
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-extern void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw);
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
class gc_heap
{
friend struct ::_DacGlobals;
@@ -1225,7 +1224,7 @@ public:
static
gc_heap* balance_heaps_loh (alloc_context* acontext, size_t size);
static
- void __stdcall gc_thread_stub (void* arg);
+ void gc_thread_stub (void* arg);
#endif //MULTIPLE_HEAPS
CObjectHeader* try_fast_alloc (size_t jsize);
@@ -1283,35 +1282,48 @@ public:
protected:
- PER_HEAP
+ PER_HEAP_ISOLATED
void walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
+ PER_HEAP
+ void walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
+
struct walk_relocate_args
{
uint8_t* last_plug;
BOOL is_shortened;
mark* pinned_plug_entry;
+ size_t profiling_context;
+ record_surv_fn fn;
};
PER_HEAP
+ void walk_survivors (record_surv_fn fn, size_t context, walk_surv_type type);
+
+ PER_HEAP
void walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p,
- walk_relocate_args* args, size_t profiling_context);
+ walk_relocate_args* args);
PER_HEAP
- void walk_relocation (int condemned_gen_number,
- uint8_t* first_condemned_address, size_t profiling_context);
+ void walk_relocation (size_t profiling_context, record_surv_fn fn);
PER_HEAP
- void walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args, size_t profiling_context);
+ void walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args);
-#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
PER_HEAP
- void walk_relocation_for_bgc(size_t profiling_context);
+ void walk_finalize_queue (fq_walk_fn fn);
+#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
PER_HEAP
- void make_free_lists_for_profiler_for_bgc();
+ void walk_survivors_for_bgc (size_t profiling_context, record_surv_fn fn);
#endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
+ // used in blocking GCs after plan phase so this walks the plugs.
+ PER_HEAP
+ void walk_survivors_relocation (size_t profiling_context, record_surv_fn fn);
+ PER_HEAP
+ void walk_survivors_for_loh (size_t profiling_context, record_surv_fn fn);
+
PER_HEAP
int generation_to_condemn (int n,
BOOL* blocking_collection_p,
@@ -2148,10 +2160,8 @@ protected:
PER_HEAP
void relocate_in_loh_compact();
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
PER_HEAP
- void walk_relocation_loh (size_t profiling_context);
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ void walk_relocation_for_loh (size_t profiling_context, record_surv_fn fn);
PER_HEAP
BOOL loh_enque_pinned_plug (uint8_t* plug, size_t len);
@@ -2547,14 +2557,8 @@ protected:
PER_HEAP
void descr_generations (BOOL begin_gc_p);
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
PER_HEAP_ISOLATED
void descr_generations_to_profiler (gen_walk_fn fn, void *context);
- PER_HEAP
- void record_survived_for_profiler(int condemned_gen_number, uint8_t * first_condemned_address);
- PER_HEAP
- void notify_profiler_of_surviving_large_objects ();
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
/*------------ Multiple non isolated heaps ----------------*/
#ifdef MULTIPLE_HEAPS
@@ -2978,7 +2982,7 @@ protected:
PER_HEAP
VOLATILE(int) alloc_context_count;
#else //MULTIPLE_HEAPS
-#define vm_heap ((GCHeap*) g_pGCHeap)
+#define vm_heap ((GCHeap*) g_theGCHeap)
#define heap_number (0)
#endif //MULTIPLE_HEAPS
@@ -3763,9 +3767,7 @@ public:
Object* GetNextFinalizableObject (BOOL only_non_critical=FALSE);
BOOL ScanForFinalization (promote_func* fn, int gen,BOOL mark_only_p, gc_heap* hp);
void RelocateFinalizationData (int gen, gc_heap* hp);
-#ifdef GC_PROFILING
- void WalkFReachableObjects (gc_heap* hp);
-#endif //GC_PROFILING
+ void WalkFReachableObjects (fq_walk_fn fn);
void GcScanRoots (promote_func* fn, int hn, ScanContext *pSC);
void UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p);
size_t GetPromotedCount();
@@ -4073,8 +4075,6 @@ size_t generation_unusable_fragmentation (generation* inst)
}
#define plug_skew sizeof(ObjHeader)
-#define min_obj_size (sizeof(uint8_t*)+plug_skew+sizeof(size_t))//syncblock + vtable+ first field
-//Note that this encodes the fact that plug_skew is a multiple of uint8_t*.
// We always use USE_PADDING_TAIL when fitting so items on the free list should be
// twice the min_obj_size.
#define min_free_list (2*min_obj_size)
@@ -4319,9 +4319,6 @@ dynamic_data* gc_heap::dynamic_data_of (int gen_number)
return &dynamic_data_table [ gen_number ];
}
-extern "C" uint8_t* g_ephemeral_low;
-extern "C" uint8_t* g_ephemeral_high;
-
#define card_word_width ((size_t)32)
//
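For illustration only (not part of this change): the new walk_survivors* entry points above take a record_surv_fn (declared in gcinterface.h) instead of talking to the profiler directly. A minimal sketch of such a callback, with a placeholder body:

    // Sketch: a record_surv_fn that a diagnostics layer might pass through
    // IGCHeap::DiagWalkSurvivorsWithType down into walk_survivors.
    static void SampleRecordSurvivor(uint8_t* begin, uint8_t* end, ptrdiff_t reloc,
                                     size_t context, BOOL compacting_p, BOOL bgc_p)
    {
        // [begin, end) bounds a surviving range; when compacting_p is TRUE,
        // begin + reloc is where the range will live after relocation.
        // A real callback would raise the corresponding profiler/ETW events here.
        (void)begin; (void)end; (void)reloc; (void)context; (void)compacting_p; (void)bgc_p;
    }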
diff --git a/src/gc/gcrecord.h b/src/gc/gcrecord.h
index 8c95ad04d3..fff1fc5c8b 100644
--- a/src/gc/gcrecord.h
+++ b/src/gc/gcrecord.h
@@ -13,7 +13,7 @@ Module Name:
#ifndef __gc_record_h__
#define __gc_record_h__
-#define max_generation 2
+//#define max_generation 2
// We pack the dynamic tuning for deciding which gen to condemn in a uint32_t.
// We assume that 2 bits are enough to represent the generation.
diff --git a/src/gc/gcscan.cpp b/src/gc/gcscan.cpp
index 42989e0414..b4e6352dd6 100644
--- a/src/gc/gcscan.cpp
+++ b/src/gc/gcscan.cpp
@@ -129,7 +129,7 @@ static void CALLBACK CheckPromoted(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t * /*
LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Checking referent of Weak-", pObjRef, "to ", *pObjRef)));
Object **pRef = (Object **)pObjRef;
- if (!GCHeap::GetGCHeap()->IsPromoted(*pRef))
+ if (!g_theGCHeap->IsPromoted(*pRef))
{
LOG((LF_GC, LL_INFO100, LOG_HANDLE_OBJECT_CLASS("Severing Weak-", pObjRef, "to unreachable ", *pObjRef)));
@@ -192,33 +192,32 @@ void GCScan::GcScanHandles (promote_func* fn, int condemned, int max_gen,
}
}
-
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-
/*
* Scan all handle roots in this 'namespace' for profiling
*/
-void GCScan::GcScanHandlesForProfilerAndETW (int max_gen, ScanContext* sc)
+void GCScan::GcScanHandlesForProfilerAndETW (int max_gen, ScanContext* sc, handle_scan_fn fn)
{
LIMITED_METHOD_CONTRACT;
+#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
LOG((LF_GC|LF_GCROOTS, LL_INFO10, "Profiler Root Scan Phase, Handles\n"));
- Ref_ScanPointersForProfilerAndETW(max_gen, (uintptr_t)sc);
+ Ref_ScanHandlesForProfilerAndETW(max_gen, (uintptr_t)sc, fn);
+#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
}
/*
* Scan dependent handles in this 'namespace' for profiling
*/
-void GCScan::GcScanDependentHandlesForProfilerAndETW (int max_gen, ProfilingScanContext* sc)
+void GCScan::GcScanDependentHandlesForProfilerAndETW (int max_gen, ScanContext* sc, handle_scan_fn fn)
{
LIMITED_METHOD_CONTRACT;
+#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
LOG((LF_GC|LF_GCROOTS, LL_INFO10, "Profiler Root Scan Phase, DependentHandles\n"));
- Ref_ScanDependentHandlesForProfilerAndETW(max_gen, sc);
-}
-
+ Ref_ScanDependentHandlesForProfilerAndETW(max_gen, sc, fn);
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+}
void GCScan::GcRuntimeStructuresValid (BOOL bValid)
{
@@ -240,14 +239,14 @@ void GCScan::GcRuntimeStructuresValid (BOOL bValid)
void GCScan::GcDemote (int condemned, int max_gen, ScanContext* sc)
{
Ref_RejuvenateHandles (condemned, max_gen, (uintptr_t)sc);
- if (!GCHeap::IsServerHeap() || sc->thread_number == 0)
+ if (!IsServerHeap() || sc->thread_number == 0)
GCToEEInterface::SyncBlockCacheDemote(max_gen);
}
void GCScan::GcPromotionsGranted (int condemned, int max_gen, ScanContext* sc)
{
Ref_AgeHandles(condemned, max_gen, (uintptr_t)sc);
- if (!GCHeap::IsServerHeap() || sc->thread_number == 0)
+ if (!IsServerHeap() || sc->thread_number == 0)
GCToEEInterface::SyncBlockCachePromotionsGranted(max_gen);
}
diff --git a/src/gc/gcscan.h b/src/gc/gcscan.h
index 3515b8e1b6..362370fa4a 100644
--- a/src/gc/gcscan.h
+++ b/src/gc/gcscan.h
@@ -52,10 +52,8 @@ class GCScan
static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
#endif // DACCESS_COMPILE
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- static void GcScanHandlesForProfilerAndETW (int max_gen, ScanContext* sc);
- static void GcScanDependentHandlesForProfilerAndETW (int max_gen, ProfilingScanContext* sc);
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ static void GcScanHandlesForProfilerAndETW (int max_gen, ScanContext* sc, handle_scan_fn fn);
+ static void GcScanDependentHandlesForProfilerAndETW (int max_gen, ScanContext* sc, handle_scan_fn fn);
// scan for dead weak pointers
static void GcWeakPtrScan (promote_func* fn, int condemned, int max_gen, ScanContext*sc );
diff --git a/src/gc/handletable.cpp b/src/gc/handletable.cpp
index 43b43ffcea..29ee435b51 100644
--- a/src/gc/handletable.cpp
+++ b/src/gc/handletable.cpp
@@ -755,7 +755,7 @@ void HndLogSetEvent(OBJECTHANDLE handle, _UNCHECKED_OBJECTREF value)
uint32_t hndType = HandleFetchType(handle);
ADIndex appDomainIndex = HndGetHandleADIndex(handle);
AppDomain* pAppDomain = SystemDomain::GetAppDomainAtIndex(appDomainIndex);
- uint32_t generation = value != 0 ? GCHeap::GetGCHeap()->WhichGeneration(value) : 0;
+ uint32_t generation = value != 0 ? g_theGCHeap->WhichGeneration(value) : 0;
FireEtwSetGCHandle((void*) handle, value, hndType, generation, (int64_t) pAppDomain, GetClrInstanceId());
FireEtwPrvSetGCHandle((void*) handle, value, hndType, generation, (int64_t) pAppDomain, GetClrInstanceId());
@@ -774,14 +774,14 @@ void HndLogSetEvent(OBJECTHANDLE handle, _UNCHECKED_OBJECTREF value)
for (size_t i = 0; i < num; i ++)
{
value = ppObj[i];
- uint32_t generation = value != 0 ? GCHeap::GetGCHeap()->WhichGeneration(value) : 0;
+ uint32_t generation = value != 0 ? g_theGCHeap->WhichGeneration(value) : 0;
FireEtwSetGCHandle(overlapped, value, HNDTYPE_PINNED, generation, (int64_t) pAppDomain, GetClrInstanceId());
}
}
else
{
value = OBJECTREF_TO_UNCHECKED_OBJECTREF(overlapped->m_userObject);
- uint32_t generation = value != 0 ? GCHeap::GetGCHeap()->WhichGeneration(value) : 0;
+ uint32_t generation = value != 0 ? g_theGCHeap->WhichGeneration(value) : 0;
FireEtwSetGCHandle(overlapped, value, HNDTYPE_PINNED, generation, (int64_t) pAppDomain, GetClrInstanceId());
}
}
@@ -838,7 +838,7 @@ void HndWriteBarrier(OBJECTHANDLE handle, OBJECTREF objref)
if (*pClumpAge != 0) // Perf optimization: if clumpAge is 0, nothing more to do
{
// find out generation
- int generation = GCHeap::GetGCHeap()->WhichGeneration(value);
+ int generation = g_theGCHeap->WhichGeneration(value);
uint32_t uType = HandleFetchType(handle);
#ifndef FEATURE_REDHAWK
diff --git a/src/gc/handletablecache.cpp b/src/gc/handletablecache.cpp
index b2af40c829..aaf3370bd6 100644
--- a/src/gc/handletablecache.cpp
+++ b/src/gc/handletablecache.cpp
@@ -15,6 +15,12 @@
#include "gcenv.h"
+#ifdef Sleep // TODO(segilles)
+#undef Sleep
+#endif // Sleep
+
+#include "env/gcenv.os.h"
+
#include "handletablepriv.h"
/****************************************************************************
diff --git a/src/gc/handletablecore.cpp b/src/gc/handletablecore.cpp
index 5e077de8a2..5776c26ace 100644
--- a/src/gc/handletablecore.cpp
+++ b/src/gc/handletablecore.cpp
@@ -14,6 +14,7 @@
#include "common.h"
#include "gcenv.h"
+#include "gc.h"
#ifndef FEATURE_REDHAWK
#include "nativeoverlapped.h"
@@ -610,7 +611,7 @@ TableSegment *SegmentAlloc(HandleTable *pTable)
_ASSERTE(HANDLE_SEGMENT_ALIGNMENT >= HANDLE_SEGMENT_SIZE);
_ASSERTE(HANDLE_SEGMENT_ALIGNMENT == 0x10000);
- pSegment = (TableSegment *)GCToOSInterface::VirtualReserve(NULL, HANDLE_SEGMENT_SIZE, HANDLE_SEGMENT_ALIGNMENT, VirtualReserveFlags::None);
+ pSegment = (TableSegment *)GCToOSInterface::VirtualReserve(HANDLE_SEGMENT_SIZE, HANDLE_SEGMENT_ALIGNMENT, VirtualReserveFlags::None);
_ASSERTE(((size_t)pSegment % HANDLE_SEGMENT_ALIGNMENT) == 0);
// bail out if we couldn't get any memory
@@ -1111,13 +1112,13 @@ SLOW_PATH:
// we have the lock held but the part we care about (the async table scan) takes the table lock during
// a preparation step so we'll be able to complete our segment moves before the async scan has a
// chance to interfere with us (or vice versa).
- if (GCHeap::GetGCHeap()->IsConcurrentGCInProgress())
+ if (g_theGCHeap->IsConcurrentGCInProgress())
{
// A concurrent GC is in progress so someone might be scanning our segments asynchronously.
// Release the lock, wait for the GC to complete and try again. The order is important; if we wait
// before releasing the table lock we can deadlock with an async table scan.
ch.Release();
- GCHeap::GetGCHeap()->WaitUntilConcurrentGCComplete();
+ g_theGCHeap->WaitUntilConcurrentGCComplete();
continue;
}
diff --git a/src/gc/handletablescan.cpp b/src/gc/handletablescan.cpp
index 863b5a52b0..86ce62d5b1 100644
--- a/src/gc/handletablescan.cpp
+++ b/src/gc/handletablescan.cpp
@@ -818,7 +818,7 @@ void BlockResetAgeMapForBlocksWorker(uint32_t *pdwGen, uint32_t dwClumpMask, Sca
{
if (!HndIsNullOrDestroyedHandle(*pValue))
{
- int thisAge = GCHeap::GetGCHeap()->WhichGeneration(*pValue);
+ int thisAge = g_theGCHeap->WhichGeneration(*pValue);
if (minAge > thisAge)
minAge = thisAge;
@@ -830,7 +830,7 @@ void BlockResetAgeMapForBlocksWorker(uint32_t *pdwGen, uint32_t dwClumpMask, Sca
if (pOverlapped->m_userObject != NULL)
{
Object * pUserObject = OBJECTREFToObject(pOverlapped->m_userObject);
- thisAge = GCHeap::GetGCHeap()->WhichGeneration(pUserObject);
+ thisAge = g_theGCHeap->WhichGeneration(pUserObject);
if (minAge > thisAge)
minAge = thisAge;
if (pOverlapped->m_isArray)
@@ -840,7 +840,7 @@ void BlockResetAgeMapForBlocksWorker(uint32_t *pdwGen, uint32_t dwClumpMask, Sca
size_t num = pUserArrayObject->GetNumComponents();
for (size_t i = 0; i < num; i ++)
{
- thisAge = GCHeap::GetGCHeap()->WhichGeneration(pObj[i]);
+ thisAge = g_theGCHeap->WhichGeneration(pObj[i]);
if (minAge > thisAge)
minAge = thisAge;
}
@@ -925,10 +925,10 @@ static void VerifyObjectAndAge(_UNCHECKED_OBJECTREF *pValue, _UNCHECKED_OBJECTRE
UNREFERENCED_PARAMETER(pValue);
VerifyObject(from, obj);
- int thisAge = GCHeap::GetGCHeap()->WhichGeneration(obj);
+ int thisAge = g_theGCHeap->WhichGeneration(obj);
//debugging code
- //if (minAge > thisAge && thisAge < GCHeap::GetGCHeap()->GetMaxGeneration())
+ //if (minAge > thisAge && thisAge < g_theGCHeap->GetMaxGeneration())
//{
// if ((*pValue) == obj)
// printf("Handle (age %u) %p -> %p (age %u)", minAge, pValue, obj, thisAge);
@@ -946,7 +946,7 @@ static void VerifyObjectAndAge(_UNCHECKED_OBJECTREF *pValue, _UNCHECKED_OBJECTRE
// }
//}
- if (minAge >= GEN_MAX_AGE || (minAge > thisAge && thisAge < static_cast<int>(GCHeap::GetGCHeap()->GetMaxGeneration())))
+ if (minAge >= GEN_MAX_AGE || (minAge > thisAge && thisAge < static_cast<int>(g_theGCHeap->GetMaxGeneration())))
{
_ASSERTE(!"Fatal Error in HandleTable.");
EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
diff --git a/src/gc/objecthandle.cpp b/src/gc/objecthandle.cpp
index 74a8a71c5e..e8eed93006 100644
--- a/src/gc/objecthandle.cpp
+++ b/src/gc/objecthandle.cpp
@@ -95,7 +95,7 @@ void CALLBACK PromoteRefCounted(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtra
Object *pOldObj = pObj;
#endif
- if (!HndIsNullOrDestroyedHandle(pObj) && !GCHeap::GetGCHeap()->IsPromoted(pObj))
+ if (!HndIsNullOrDestroyedHandle(pObj) && !g_theGCHeap->IsPromoted(pObj))
{
if (GCToEEInterface::RefCountedHandleCallbacks(pObj))
{
@@ -110,6 +110,21 @@ void CALLBACK PromoteRefCounted(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtra
}
#endif // FEATURE_COMINTEROP || FEATURE_REDHAWK
+
+// Only used by profiling/ETW.
+//----------------------------------------------------------------------------
+
+/*
+ * struct DIAG_DEPSCANINFO
+ *
+ * used when tracing dependent handles for profiling/ETW.
+ */
+struct DIAG_DEPSCANINFO
+{
+ HANDLESCANPROC pfnTrace; // tracing function to use
+ uintptr_t pfnProfilingOrETW;
+};
+
void CALLBACK TraceDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
WRAPPER_NO_CONTRACT;
@@ -122,14 +137,15 @@ void CALLBACK TraceDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pEx
// object should also be non-NULL.
_ASSERTE(*pExtraInfo == NULL || *pObjRef != NULL);
- // lp2 is a HANDLESCANPROC
- HANDLESCANPROC pfnTrace = (HANDLESCANPROC) lp2;
+ struct DIAG_DEPSCANINFO *pInfo = (struct DIAG_DEPSCANINFO*)lp2;
+
+ HANDLESCANPROC pfnTrace = pInfo->pfnTrace;
// is the handle's secondary object non-NULL?
if ((*pObjRef != NULL) && (*pExtraInfo != 0))
{
// yes - call the tracing function for this handle
- pfnTrace(pObjRef, NULL, lp1, *pExtraInfo);
+ pfnTrace(pObjRef, NULL, lp1, (uintptr_t)(pInfo->pfnProfilingOrETW));
}
}
@@ -186,9 +202,9 @@ void CALLBACK PromoteDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *p
ScanContext *sc = (ScanContext*)lp1;
DhContext *pDhContext = Ref_GetDependentHandleContext(sc);
- if (*pObjRef && GCHeap::GetGCHeap()->IsPromoted(*pPrimaryRef))
+ if (*pObjRef && g_theGCHeap->IsPromoted(*pPrimaryRef))
{
- if (!GCHeap::GetGCHeap()->IsPromoted(*pSecondaryRef))
+ if (!g_theGCHeap->IsPromoted(*pSecondaryRef))
{
LOG((LF_GC|LF_ENC, LL_INFO10000, "\tPromoting secondary " LOG_OBJECT_CLASS(*pSecondaryRef)));
_ASSERTE(lp2);
@@ -221,7 +237,7 @@ void CALLBACK ClearDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pEx
LOG((LF_GC|LF_ENC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("\tPrimary:\t", pPrimaryRef, "to ", *pPrimaryRef)));
LOG((LF_GC|LF_ENC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("\tSecondary\t", pSecondaryRef, "to ", *pSecondaryRef)));
- if (!GCHeap::GetGCHeap()->IsPromoted(*pPrimaryRef))
+ if (!g_theGCHeap->IsPromoted(*pPrimaryRef))
{
LOG((LF_GC|LF_ENC, LL_INFO1000, "\tunreachable ", LOG_OBJECT_CLASS(*pPrimaryRef)));
LOG((LF_GC|LF_ENC, LL_INFO1000, "\tunreachable ", LOG_OBJECT_CLASS(*pSecondaryRef)));
@@ -230,7 +246,7 @@ void CALLBACK ClearDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pEx
}
else
{
- _ASSERTE(GCHeap::GetGCHeap()->IsPromoted(*pSecondaryRef));
+ _ASSERTE(g_theGCHeap->IsPromoted(*pSecondaryRef));
LOG((LF_GC|LF_ENC, LL_INFO10000, "\tPrimary is reachable " LOG_OBJECT_CLASS(*pPrimaryRef)));
LOG((LF_GC|LF_ENC, LL_INFO10000, "\tSecondary is reachable " LOG_OBJECT_CLASS(*pSecondaryRef)));
}
@@ -330,7 +346,7 @@ void CALLBACK CheckPromoted(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo
LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Checking referent of Weak-", pObjRef, "to ", *pObjRef)));
Object **ppRef = (Object **)pObjRef;
- if (!GCHeap::GetGCHeap()->IsPromoted(*ppRef))
+ if (!g_theGCHeap->IsPromoted(*ppRef))
{
LOG((LF_GC, LL_INFO100, LOG_HANDLE_OBJECT_CLASS("Severing Weak-", pObjRef, "to unreachable ", *pObjRef)));
@@ -355,9 +371,9 @@ void CALLBACK CalculateSizedRefSize(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pE
ScanContext* sc = (ScanContext *)lp1;
promote_func* callback = (promote_func*) lp2;
- size_t sizeBegin = GCHeap::GetGCHeap()->GetPromotedBytes(sc->thread_number);
+ size_t sizeBegin = g_theGCHeap->GetPromotedBytes(sc->thread_number);
callback(ppSizedRef, (ScanContext *)lp1, 0);
- size_t sizeEnd = GCHeap::GetGCHeap()->GetPromotedBytes(sc->thread_number);
+ size_t sizeEnd = g_theGCHeap->GetPromotedBytes(sc->thread_number);
*pSize = sizeEnd - sizeBegin;
}
@@ -414,7 +430,7 @@ void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, uintpt
CONTRACTL_END;
#endif // FEATURE_REDHAWK
UNREFERENCED_PARAMETER(pExtraInfo);
- UNREFERENCED_PARAMETER(lp2);
+ handle_scan_fn fn = (handle_scan_fn)lp2;
LOG((LF_GC | LF_CORPROF, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Notifying profiler of ", pObjRef, "to ", *pObjRef)));
@@ -422,7 +438,7 @@ void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, uintpt
Object **pRef = (Object **)pObjRef;
// Get a hold of the heap ID that's tacked onto the end of the scancontext struct.
- ProfilingScanContext *pSC = (ProfilingScanContext *)lp1;
+ ScanContext *pSC = (ScanContext *)lp1;
uint32_t rootFlags = 0;
BOOL isDependent = FALSE;
@@ -487,60 +503,15 @@ void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, uintpt
_UNCHECKED_OBJECTREF pSec = NULL;
-#ifdef GC_PROFILING
- // Give the profiler the objectref.
- if (pSC->fProfilerPinned)
+ if (isDependent)
{
- if (!isDependent)
- {
- BEGIN_PIN_PROFILER(CORProfilerTrackGC());
- g_profControlBlock.pProfInterface->RootReference2(
- (uint8_t *)*pRef,
- kEtwGCRootKindHandle,
- (EtwGCRootFlags)rootFlags,
- pRef,
- &pSC->pHeapId);
- END_PIN_PROFILER();
- }
- else
- {
- BEGIN_PIN_PROFILER(CORProfilerTrackConditionalWeakTableElements());
- pSec = (_UNCHECKED_OBJECTREF)HndGetHandleExtraInfo(handle);
- g_profControlBlock.pProfInterface->ConditionalWeakTableElementReference(
- (uint8_t*)*pRef,
- (uint8_t*)pSec,
- pRef,
- &pSC->pHeapId);
- END_PIN_PROFILER();
- }
+ pSec = (_UNCHECKED_OBJECTREF)HndGetHandleExtraInfo(handle);
}
-#endif // GC_PROFILING
-
-#if defined(FEATURE_EVENT_TRACE)
- // Notify ETW of the handle
- if (ETW::GCLog::ShouldWalkHeapRootsForEtw())
- {
- if (isDependent && (pSec == NULL))
- {
- pSec = (_UNCHECKED_OBJECTREF)HndGetHandleExtraInfo(handle);
- }
-
- ETW::GCLog::RootReference(
- handle,
- *pRef, // object being rooted
- pSec, // pSecondaryNodeForDependentHandle
- isDependent,
- pSC,
- 0, // dwGCFlags,
- rootFlags); // ETW handle flags
- }
-#endif // defined(FEATURE_EVENT_TRACE)
+ fn(pRef, pSec, rootFlags, pSC, isDependent);
}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-
-
/*
* Scan callback for updating pointers.
*
@@ -583,10 +554,10 @@ int getNumberOfSlots()
{
WRAPPER_NO_CONTRACT;
- // when Ref_Initialize called, GCHeap::GetNumberOfHeaps() is still 0, so use #procs as a workaround
+    // when Ref_Initialize is called, IGCHeap::GetNumberOfHeaps() is still 0, so use #procs as a workaround
// it is legal since even if later #heaps < #procs we create handles by thread home heap
// and just have extra unused slots in HandleTableBuckets, which does not take a lot of space
- if (!GCHeap::IsServerHeap())
+ if (!IsServerHeap())
return 1;
#ifdef FEATURE_REDHAWK
@@ -874,7 +845,7 @@ int getSlotNumber(ScanContext* sc)
{
WRAPPER_NO_CONTRACT;
- return (GCHeap::IsServerHeap() ? sc->thread_number : 0);
+ return (IsServerHeap() ? sc->thread_number : 0);
}
// <TODO> - reexpress as complete only like hndtable does now!!! -fmh</REVISIT_TODO>
@@ -1152,7 +1123,7 @@ void Ref_TraceNormalRoots(uint32_t condemned, uint32_t maxgen, ScanContext* sc,
// promote objects pointed to by strong handles
// during ephemeral GCs we also want to promote the ones pointed to by sizedref handles.
uint32_t types[2] = {HNDTYPE_STRONG, HNDTYPE_SIZEDREF};
- uint32_t uTypeCount = (((condemned >= maxgen) && !GCHeap::GetGCHeap()->IsConcurrentGCInProgress()) ? 1 : _countof(types));
+ uint32_t uTypeCount = (((condemned >= maxgen) && !g_theGCHeap->IsConcurrentGCInProgress()) ? 1 : _countof(types));
uint32_t flags = (sc->concurrent) ? HNDGCF_ASYNC : HNDGCF_NORMAL;
HandleTableMap *walk = &g_HandleTableMap;
@@ -1417,13 +1388,15 @@ void Ref_ScanDependentHandlesForRelocation(uint32_t condemned, uint32_t maxgen,
/*
loop scan version of TraceVariableHandles for single-thread-managed Ref_* functions
should be kept in sync with the code above
+ Only used by profiling/ETW.
*/
-void TraceDependentHandlesBySingleThread(HANDLESCANPROC pfnTrace, uintptr_t lp1, uint32_t condemned, uint32_t maxgen, uint32_t flags)
+void TraceDependentHandlesBySingleThread(HANDLESCANPROC pfnTrace, uintptr_t lp1, uintptr_t lp2, uint32_t condemned, uint32_t maxgen, uint32_t flags)
{
WRAPPER_NO_CONTRACT;
// set up to scan variable handles with the specified mask and trace function
uint32_t type = HNDTYPE_DEPENDENT;
+ struct DIAG_DEPSCANINFO info = { pfnTrace, lp2 };
HandleTableMap *walk = &g_HandleTableMap;
while (walk) {
@@ -1436,14 +1409,13 @@ void TraceDependentHandlesBySingleThread(HANDLESCANPROC pfnTrace, uintptr_t lp1,
HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex];
if (hTable)
HndScanHandlesForGC(hTable, TraceDependentHandle,
- lp1, (uintptr_t)pfnTrace, &type, 1, condemned, maxgen, HNDGCF_EXTRAINFO | flags);
+ lp1, (uintptr_t)&info, &type, 1, condemned, maxgen, HNDGCF_EXTRAINFO | flags);
}
}
walk = walk->pNext;
}
}
-
// We scan handle tables by their buckets (ie, AD index). We could get into the situation where
// the AD indices are not very compacted (for example if we have just unloaded ADs and their
// indices haven't been reused yet) and we could be scanning them in an unbalanced fashion.
@@ -1454,7 +1426,7 @@ void ScanSizedRefByAD(uint32_t maxgen, HANDLESCANPROC scanProc, ScanContext* sc,
HandleTableMap *walk = &g_HandleTableMap;
uint32_t type = HNDTYPE_SIZEDREF;
int uCPUindex = getSlotNumber(sc);
- int n_slots = GCHeap::GetGCHeap()->GetNumberOfHeaps();
+ int n_slots = g_theGCHeap->GetNumberOfHeaps();
while (walk)
{
@@ -1574,11 +1546,11 @@ void Ref_UpdatePointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Re
// @TODO cwb: wait for compelling performance measurements.</REVISIT_TODO>
BOOL bDo = TRUE;
- if (GCHeap::IsServerHeap())
+ if (IsServerHeap())
{
bDo = (Interlocked::Increment(&uCount) == 1);
- Interlocked::CompareExchange (&uCount, 0, GCHeap::GetGCHeap()->GetNumberOfHeaps());
- _ASSERTE (uCount <= GCHeap::GetGCHeap()->GetNumberOfHeaps());
+ Interlocked::CompareExchange (&uCount, 0, g_theGCHeap->GetNumberOfHeaps());
+ _ASSERTE (uCount <= g_theGCHeap->GetNumberOfHeaps());
}
if (bDo)
@@ -1623,7 +1595,7 @@ void Ref_UpdatePointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Re
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
// Please update this if you change the Ref_UpdatePointers function above.
-void Ref_ScanPointersForProfilerAndETW(uint32_t maxgen, uintptr_t lp1)
+void Ref_ScanHandlesForProfilerAndETW(uint32_t maxgen, uintptr_t lp1, handle_scan_fn fn)
{
WRAPPER_NO_CONTRACT;
@@ -1662,16 +1634,16 @@ void Ref_ScanPointersForProfilerAndETW(uint32_t maxgen, uintptr_t lp1)
{
HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex];
if (hTable)
- HndScanHandlesForGC(hTable, &ScanPointerForProfilerAndETW, lp1, 0, types, _countof(types), maxgen, maxgen, flags);
+ HndScanHandlesForGC(hTable, &ScanPointerForProfilerAndETW, lp1, (uintptr_t)fn, types, _countof(types), maxgen, maxgen, flags);
}
walk = walk->pNext;
}
// update pointers in variable handles whose dynamic type is VHT_WEAK_SHORT, VHT_WEAK_LONG or VHT_STRONG
- TraceVariableHandlesBySingleThread(&ScanPointerForProfilerAndETW, lp1, 0, VHT_WEAK_SHORT | VHT_WEAK_LONG | VHT_STRONG, maxgen, maxgen, flags);
+ TraceVariableHandlesBySingleThread(&ScanPointerForProfilerAndETW, lp1, (uintptr_t)fn, VHT_WEAK_SHORT | VHT_WEAK_LONG | VHT_STRONG, maxgen, maxgen, flags);
}
-void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t maxgen, ProfilingScanContext * SC)
+void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t maxgen, ScanContext * SC, handle_scan_fn fn)
{
WRAPPER_NO_CONTRACT;
@@ -1680,12 +1652,7 @@ void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t maxgen, ProfilingScanCon
uint32_t flags = HNDGCF_NORMAL;
uintptr_t lp1 = (uintptr_t)SC;
- // we'll re-use pHeapId (which was either unused (0) or freed by EndRootReferences2
- // (-1)), so reset it to NULL
- _ASSERTE((*((size_t *)(&SC->pHeapId)) == (size_t)(-1)) ||
- (*((size_t *)(&SC->pHeapId)) == (size_t)(0)));
- SC->pHeapId = NULL;
- TraceDependentHandlesBySingleThread(&ScanPointerForProfilerAndETW, lp1, maxgen, maxgen, flags);
+ TraceDependentHandlesBySingleThread(&ScanPointerForProfilerAndETW, lp1, (uintptr_t)fn, maxgen, maxgen, flags);
}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
@@ -1906,9 +1873,9 @@ int GetCurrentThreadHomeHeapNumber()
{
WRAPPER_NO_CONTRACT;
- if (!GCHeap::IsGCHeapInitialized())
+ if (g_theGCHeap == nullptr)
return 0;
- return GCHeap::GetGCHeap()->GetHomeHeapNumber();
+ return g_theGCHeap->GetHomeHeapNumber();
}
bool HandleTableBucket::Contains(OBJECTHANDLE handle)
@@ -1921,7 +1888,7 @@ bool HandleTableBucket::Contains(OBJECTHANDLE handle)
}
HHANDLETABLE hTable = HndGetHandleTable(handle);
- for (int uCPUindex=0; uCPUindex < GCHeap::GetGCHeap()->GetNumberOfHeaps(); uCPUindex++)
+ for (int uCPUindex=0; uCPUindex < g_theGCHeap->GetNumberOfHeaps(); uCPUindex++)
{
if (hTable == this->pTable[uCPUindex])
{
diff --git a/src/gc/objecthandle.h b/src/gc/objecthandle.h
index 89365267d6..34c2a0e321 100644
--- a/src/gc/objecthandle.h
+++ b/src/gc/objecthandle.h
@@ -652,7 +652,6 @@ BOOL Ref_ContainHandle(HandleTableBucket *pBucket, OBJECTHANDLE handle);
*/
struct ScanContext;
struct DhContext;
-struct ProfilingScanContext;
void Ref_BeginSynchronousGC (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration);
void Ref_EndSynchronousGC (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration);
@@ -672,10 +671,12 @@ void Ref_ScanSizedRefHandles(uint32_t condemned, uint32_t maxgen, ScanContext* s
void Ref_ScanPointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn);
#endif
+typedef void (* handle_scan_fn)(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, BOOL isDependent);
+
void Ref_CheckReachable (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1);
void Ref_CheckAlive (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1);
-void Ref_ScanPointersForProfilerAndETW(uint32_t uMaxGeneration, uintptr_t lp1);
-void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t uMaxGeneration, ProfilingScanContext * SC);
+void Ref_ScanHandlesForProfilerAndETW(uint32_t uMaxGeneration, uintptr_t lp1, handle_scan_fn fn);
+void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t uMaxGeneration, ScanContext * SC, handle_scan_fn fn);
void Ref_AgeHandles (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1);
void Ref_RejuvenateHandles(uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1);
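For illustration only (not part of this change): the handle_scan_fn typedef added above is what the EE now passes into Ref_ScanHandlesForProfilerAndETW and Ref_ScanDependentHandlesForProfilerAndETW. A minimal sketch of such a callback, with a placeholder body:

    // Sketch: a handle_scan_fn the diagnostics layer could supply.
    static void SampleHandleScanCallback(Object** pRef, Object* pSec, uint32_t flags,
                                         ScanContext* context, BOOL isDependent)
    {
        // For dependent handles, pSec is the secondary object; otherwise it is NULL.
        // A real callback would forward pRef/pSec and the root flags to the
        // profiler or to ETW, typically using context->dwEtwRootKind.
        (void)pRef; (void)pSec; (void)flags; (void)context; (void)isDependent;
    }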
diff --git a/src/gc/sample/CMakeLists.txt b/src/gc/sample/CMakeLists.txt
index 572fba371f..9552cc51e2 100644
--- a/src/gc/sample/CMakeLists.txt
+++ b/src/gc/sample/CMakeLists.txt
@@ -22,11 +22,11 @@ set(SOURCES
if(WIN32)
list(APPEND SOURCES
- gcenv.windows.cpp)
+ ../gcenv.windows.cpp)
add_definitions(-DUNICODE=1)
else()
list(APPEND SOURCES
- gcenv.unix.cpp)
+ ../gcenv.unix.cpp)
endif()
_add_executable(gcsample
diff --git a/src/gc/sample/GCSample.cpp b/src/gc/sample/GCSample.cpp
index 7e07834ced..664dc38e94 100644
--- a/src/gc/sample/GCSample.cpp
+++ b/src/gc/sample/GCSample.cpp
@@ -68,7 +68,7 @@ Object * AllocateObject(MethodTable * pMT)
}
else
{
- pObject = GCHeap::GetGCHeap()->Alloc(acontext, size, 0);
+ pObject = g_theGCHeap->Alloc(acontext, size, 0);
if (pObject == NULL)
return NULL;
}
@@ -91,14 +91,14 @@ inline void ErectWriteBarrier(Object ** dst, Object * ref)
{
// if the dst is outside of the heap (unboxed value classes) then we
// simply exit
- if (((uint8_t*)dst < g_lowest_address) || ((uint8_t*)dst >= g_highest_address))
+ if (((uint8_t*)dst < g_gc_lowest_address) || ((uint8_t*)dst >= g_gc_highest_address))
return;
- if((uint8_t*)ref >= g_ephemeral_low && (uint8_t*)ref < g_ephemeral_high)
+ if((uint8_t*)ref >= g_gc_ephemeral_low && (uint8_t*)ref < g_gc_ephemeral_high)
{
// volatile is used here to prevent fetch of g_card_table from being reordered
// with g_lowest/highest_address check above. See comment in code:gc_heap::grow_brick_card_tables.
- uint8_t* pCardByte = (uint8_t *)*(volatile uint8_t **)(&g_card_table) + card_byte((uint8_t *)dst);
+ uint8_t* pCardByte = (uint8_t *)*(volatile uint8_t **)(&g_gc_card_table) + card_byte((uint8_t *)dst);
if(*pCardByte != 0xFF)
*pCardByte = 0xFF;
}
@@ -137,7 +137,7 @@ int __cdecl main(int argc, char* argv[])
//
// Initialize GC heap
//
- GCHeap *pGCHeap = GCHeap::CreateGCHeap();
+ IGCHeap *pGCHeap = InitializeGarbageCollector(nullptr);
if (!pGCHeap)
return -1;
diff --git a/src/gc/sample/GCSample.vcxproj b/src/gc/sample/GCSample.vcxproj
index b196e1f34c..1716f462ee 100644
--- a/src/gc/sample/GCSample.vcxproj
+++ b/src/gc/sample/GCSample.vcxproj
@@ -84,10 +84,12 @@
</ItemGroup>
<ItemGroup>
<ClCompile Include="gcenv.ee.cpp" />
- <ClCompile Include="gcenv.windows.cpp" />
<ClCompile Include="GCSample.cpp" />
<ClCompile Include="..\gccommon.cpp" />
<ClCompile Include="..\gceewks.cpp" />
+ <ClCompile Include="..\gcenv.windows.cpp">
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ </ClCompile>
<ClCompile Include="..\gcscan.cpp" />
<ClCompile Include="..\gcwks.cpp" />
<ClCompile Include="..\handletable.cpp" />
@@ -96,8 +98,7 @@
<ClCompile Include="..\handletablescan.cpp" />
<ClCompile Include="..\objecthandle.cpp" />
<ClCompile Include="..\env\common.cpp">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Create</PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Create</PrecompiledHeader>
+ <PrecompiledHeader>Create</PrecompiledHeader>
</ClCompile>
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
diff --git a/src/gc/sample/GCSample.vcxproj.filters b/src/gc/sample/GCSample.vcxproj.filters
index e46c054565..f6aacfd0c7 100644
--- a/src/gc/sample/GCSample.vcxproj.filters
+++ b/src/gc/sample/GCSample.vcxproj.filters
@@ -59,7 +59,7 @@
<ClCompile Include="gcenv.ee.cpp">
<Filter>Source Files</Filter>
</ClCompile>
- <ClCompile Include="gcenv.windows.cpp">
+ <ClCompile Include="..\gcenv.windows.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
diff --git a/src/gc/sample/gcenv.ee.cpp b/src/gc/sample/gcenv.ee.cpp
index 330564a380..ac227b4823 100644
--- a/src/gc/sample/gcenv.ee.cpp
+++ b/src/gc/sample/gcenv.ee.cpp
@@ -9,6 +9,12 @@
#include "gcenv.h"
#include "gc.h"
+MethodTable * g_pFreeObjectMethodTable;
+
+int32_t g_TrapReturningThreads;
+
+bool g_fFinalizerRunOnShutDown;
+
EEConfig * g_pConfig;
bool CLREventStatic::CreateManualEventNoThrow(bool bInitialState)
@@ -129,9 +135,9 @@ void ThreadStore::AttachCurrentThread()
g_pThreadList = pThread;
}
-void GCToEEInterface::SuspendEE(GCToEEInterface::SUSPEND_REASON reason)
+void GCToEEInterface::SuspendEE(SUSPEND_REASON reason)
{
- GCHeap::GetGCHeap()->SetGCInProgress(TRUE);
+ g_theGCHeap->SetGCInProgress(TRUE);
// TODO: Implement
}
@@ -140,7 +146,7 @@ void GCToEEInterface::RestartEE(bool bFinishedGC)
{
// TODO: Implement
- GCHeap::GetGCHeap()->SetGCInProgress(FALSE);
+ g_theGCHeap->SetGCInProgress(FALSE);
}
void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
@@ -184,7 +190,7 @@ void GCToEEInterface::DisablePreemptiveGC(Thread * pThread)
pThread->DisablePreemptiveGC();
}
-alloc_context * GCToEEInterface::GetAllocContext(Thread * pThread)
+gc_alloc_context * GCToEEInterface::GetAllocContext(Thread * pThread)
{
return pThread->GetAllocContext();
}
@@ -221,6 +227,38 @@ Thread* GCToEEInterface::CreateBackgroundThread(GCBackgroundThreadFunction threa
return NULL;
}
+void GCToEEInterface::DiagGCStart(int gen, bool isInduced)
+{
+}
+
+void GCToEEInterface::DiagUpdateGenerationBounds()
+{
+}
+
+void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent)
+{
+}
+
+void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext)
+{
+}
+
+void GCToEEInterface::DiagWalkSurvivors(void* gcContext)
+{
+}
+
+void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext)
+{
+}
+
+void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext)
+{
+}
+
+void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args)
+{
+}
+
void FinalizerThread::EnableFinalization()
{
// Signal to finalizer thread that there are objects to finalize
@@ -238,14 +276,6 @@ bool IsGCSpecialThread()
return false;
}
-void StompWriteBarrierEphemeral(bool /* isRuntimeSuspended */)
-{
-}
-
-void StompWriteBarrierResize(bool /* isRuntimeSuspended */, bool /*bReqUpperBoundsCheck*/)
-{
-}
-
bool IsGCThread()
{
return false;
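For illustration only (not part of this change): the sample keeps GCToEEInterface::StompWriteBarrier empty, but an EE with a real barrier would dispatch on WriteBarrierParameters::operation. A minimal sketch, where the g_sample_* globals and the function name are assumptions for the example:

    // Sketch: updating hypothetical write-barrier globals from the parameters
    // delivered by the GC.
    static uint32_t* g_sample_card_table;
    static uint8_t*  g_sample_lowest_address;
    static uint8_t*  g_sample_highest_address;
    static uint8_t*  g_sample_ephemeral_low;
    static uint8_t*  g_sample_ephemeral_high;

    void SampleStompWriteBarrier(WriteBarrierParameters* args)
    {
        switch (args->operation)
        {
        case WriteBarrierOp::Initialize:
        case WriteBarrierOp::StompResize:
            g_sample_card_table      = args->card_table;
            g_sample_lowest_address  = args->lowest_address;
            g_sample_highest_address = args->highest_address;
            break;
        case WriteBarrierOp::StompEphemeral:
            g_sample_ephemeral_low   = args->ephemeral_lo;
            g_sample_ephemeral_high  = args->ephemeral_hi;
            break;
        }
    }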
diff --git a/src/gc/sample/gcenv.h b/src/gc/sample/gcenv.h
index d560789751..4505f1af30 100644
--- a/src/gc/sample/gcenv.h
+++ b/src/gc/sample/gcenv.h
@@ -2,6 +2,12 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
+// The sample is to be kept simple, so building the sample
+// in tandem with a standalone GC is currently not supported.
+#ifdef FEATURE_STANDALONE_GC
+#undef FEATURE_STANDALONE_GC
+#endif // FEATURE_STANDALONE_GC
+
#if defined(_DEBUG)
#ifndef _DEBUG_IMPL
#define _DEBUG_IMPL 1
@@ -17,12 +23,12 @@
#include "gcenv.structs.h"
#include "gcenv.base.h"
-#include "gcenv.ee.h"
#include "gcenv.os.h"
#include "gcenv.interlocked.h"
#include "gcenv.interlocked.inl"
#include "gcenv.object.h"
#include "gcenv.sync.h"
+#include "gcenv.ee.h"
#define MAX_LONGPATH 1024
@@ -64,6 +70,9 @@
#define LOG(x)
+#define SVAL_IMPL_INIT(type, cls, var, init) \
+ type cls::var = init
+
//
// Thread
//
@@ -177,8 +186,6 @@ public:
int GetGCTrimCommit() const { return 0; }
int GetGCLOHCompactionMode() const { return 0; }
- bool GetGCAllowVeryLargeObjects() const { return false; }
-
bool GetGCConservative() const { return true; }
};
diff --git a/src/gc/sample/gcenv.unix.cpp b/src/gc/sample/gcenv.unix.cpp
deleted file mode 100644
index a5e9e83ee2..0000000000
--- a/src/gc/sample/gcenv.unix.cpp
+++ /dev/null
@@ -1,14 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-
-//
-// Implementation of the GC environment
-//
-
-#include "common.h"
-
-#include "gcenv.h"
-#include "gc.h"
-
-// TODO: Implement
diff --git a/src/gc/softwarewritewatch.cpp b/src/gc/softwarewritewatch.cpp
index 519744900b..fa14a04897 100644
--- a/src/gc/softwarewritewatch.cpp
+++ b/src/gc/softwarewritewatch.cpp
@@ -6,6 +6,7 @@
#include "softwarewritewatch.h"
#include "gcenv.h"
+#include "env/gcenv.os.h"
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
#ifndef DACCESS_COMPILE