author    Maoni0 <maonis@microsoft.com>    2016-11-18 00:45:29 -0800
committer Maoni0 <maonis@microsoft.com>    2016-11-22 20:10:16 -0800
commit 1af571470c91528c31c0caf7a1726428c21a2be0 (patch)
tree   bfb5f05ab36a5acdab3a7c5c2b755e49765e419c /src/gc
parent 204f6e8859e3676114c85f49b8d2c311458ac715 (diff)
This separates the diagnostics code out from gc.cpp (except the GC's own logging), gcee.cpp, and objecthandle.cpp. The rule is that the GC calls the diagnostics functions at various points to communicate information, and these diagnostics functions may call back into the GC when they require intimate knowledge of it to obtain certain information. This way gc.cpp does not need to know the details of the diagnostics components (e.g., the profiling context), and the diagnostics components do not need to know the details of the GC. Accordingly, ProfilingScanContext was removed from gcinterface.h and scanning functions are now passed to the GC and the handle table. The per-heap iteration was also removed, as it is not something diagnostics should care about; this includes walking stack roots per heap (the GC only does that for scalability, whereas profiling walks them on a single thread), which also makes it faster. The gc_low/gc_high checks in ProfScanRootsHelper were removed as well, since that is knowledge profiling shouldn't have; those checks were also incorrectly excluding some interior-pointer stack roots.
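
To make the new contract concrete, below is a minimal, self-contained C++ sketch of the pattern this change introduces; it is illustrative only, not CoreCLR code. MockGcHeap, OnSurvivor, and DiagWalkSurvivorsSketch are invented names; only the callback shape mirrors the record_surv_fn typedef added to gcinterface.h (which uses BOOL rather than bool).

// Illustrative sketch only -- not CoreCLR code. Names marked mock/sketch are
// invented; the callback shape mirrors record_surv_fn in gcinterface.h
// (the real typedef uses BOOL instead of bool).
#include <cstddef>
#include <cstdint>
#include <cstdio>

typedef void (*record_surv_fn)(uint8_t* begin, uint8_t* end, ptrdiff_t reloc,
                               size_t context, bool compacting_p, bool bgc_p);

// Stand-in for gc_heap: only the GC knows where surviving plugs live.
struct MockGcHeap
{
    // Analogous to gc_heap::walk_survivors: the GC walks its own structures
    // and reports each survivor through the callback, so diagnostics never
    // needs intimate knowledge of GC internals.
    void walk_survivors(record_surv_fn fn, size_t context)
    {
        static uint8_t heap[64];
        fn(heap,      heap + 16, /*reloc*/ 8, context, /*compacting*/ true,  /*bgc*/ false);
        fn(heap + 32, heap + 48, /*reloc*/ 0, context, /*compacting*/ false, /*bgc*/ false);
    }
};

// Diagnostics-side callback: this is where the profiling/ETW plumbing would
// hang, invisible to gc.cpp.
static void OnSurvivor(uint8_t* begin, uint8_t* end, ptrdiff_t reloc,
                       size_t context, bool compacting_p, bool bgc_p)
{
    printf("survivor [%p, %p) reloc=%td compacting=%d bgc=%d ctx=%zu\n",
           (void*)begin, (void*)end, reloc, (int)compacting_p, (int)bgc_p, context);
}

// Analogous to GCToEEInterface::DiagWalkSurvivors: the GC hands diagnostics
// an opaque gc_heap pointer, and diagnostics calls back in with a scan
// function plus a context value that is opaque to the GC.
static void DiagWalkSurvivorsSketch(void* gcContext)
{
    size_t diag_context = 0; // meaningful to diagnostics, opaque to the GC
    ((MockGcHeap*)gcContext)->walk_survivors(&OnSurvivor, diag_context);
}

int main()
{
    MockGcHeap heap;
    DiagWalkSurvivorsSketch(&heap); // what gc.cpp now does at the end of plan_phase
    return 0;
}

The same inversion drives the other hooks in this change: DiagWalkHeap, DiagWalkFinalizeQueue, and the handle scans all take a function pointer from the diagnostics side instead of exposing profiling state to gc.cpp.
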
Diffstat (limited to 'src/gc')
-rw-r--r--  src/gc/env/gcenv.ee.h           |   9
-rw-r--r--  src/gc/gc.cpp                   | 412
-rw-r--r--  src/gc/gcee.cpp                 | 207
-rw-r--r--  src/gc/gcenv.ee.standalone.inl  |  41
-rw-r--r--  src/gc/gcimpl.h                 |  18
-rw-r--r--  src/gc/gcinterface.ee.h         |  31
-rw-r--r--  src/gc/gcinterface.h            |  63
-rw-r--r--  src/gc/gcpriv.h                 |  46
-rw-r--r--  src/gc/gcrecord.h               |   2
-rw-r--r--  src/gc/gcscan.cpp               |  17
-rw-r--r--  src/gc/gcscan.h                 |   6
-rw-r--r--  src/gc/objecthandle.cpp         |  99
-rw-r--r--  src/gc/objecthandle.h           |   7
-rw-r--r--  src/gc/sample/gcenv.ee.cpp      |  28
14 files changed, 397 insertions, 589 deletions
diff --git a/src/gc/env/gcenv.ee.h b/src/gc/env/gcenv.ee.h
index dc6c1d84bb..5588698806 100644
--- a/src/gc/env/gcenv.ee.h
+++ b/src/gc/env/gcenv.ee.h
@@ -56,6 +56,15 @@ public:
static void GcEnumAllocContexts(enum_alloc_context_func* fn, void* param);
static Thread* CreateBackgroundThread(GCBackgroundThreadFunction threadStart, void* arg);
+
+ // Diagnostics methods.
+ static void DiagGCStart(int gen, bool isInduced);
+ static void DiagUpdateGenerationBounds();
+ static void DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent);
+ static void DiagWalkFReachableObjects(void* gcContext);
+ static void DiagWalkSurvivors(void* gcContext);
+ static void DiagWalkLOHSurvivors(void* gcContext);
+ static void DiagWalkBGCSurvivors(void* gcContext);
};
#endif // __GCENV_EE_H__
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index 28e19aa04d..991cdf1288 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -21,22 +21,6 @@
#define USE_INTROSORT
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-inline BOOL ShouldTrackMovementForProfilerOrEtw()
-{
-#ifdef GC_PROFILING
- if (CORProfilerTrackGC())
- return true;
-#endif
-
-#ifdef FEATURE_EVENT_TRACE
- if (ETW::GCLog::ShouldTrackMovementForEtw())
- return true;
-#endif
-
- return false;
-}
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
BOOL bgc_heap_walk_for_etw_p = FALSE;
@@ -4703,10 +4687,7 @@ heap_segment* gc_heap::get_segment_for_loh (size_t size
FireEtwGCCreateSegment_V1((size_t)heap_segment_mem(res), (size_t)(heap_segment_reserved (res) - heap_segment_mem(res)), ETW::GCLog::ETW_GC_INFO::LARGE_OBJECT_HEAP, GetClrInstanceId());
-#ifdef GC_PROFILING
- if (CORProfilerTrackGC())
- UpdateGenerationBounds();
-#endif // GC_PROFILING
+ GCToEEInterface::DiagUpdateGenerationBounds();
#ifdef MULTIPLE_HEAPS
hp->thread_loh_segment (res);
@@ -5523,7 +5504,6 @@ public:
saved_post_plug_reloc = temp;
}
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
void swap_pre_plug_and_saved_for_profiler()
{
gap_reloc_pair temp;
@@ -5539,7 +5519,6 @@ public:
memcpy (saved_post_plug_info_start, &saved_post_plug, sizeof (saved_post_plug));
saved_post_plug = temp;
}
-#endif //GC_PROFILING || //FEATURE_EVENT_TRACE
// We should think about whether it's really necessary to have to copy back the pre plug
// info since it was already copied during compacting plugs. But if a plug doesn't move
@@ -10360,6 +10339,7 @@ gc_heap::init_gc_heap (int h_number)
(size_t)(heap_segment_reserved (lseg) - heap_segment_mem(lseg)),
ETW::GCLog::ETW_GC_INFO::LARGE_OBJECT_HEAP,
GetClrInstanceId());
+
#ifdef SEG_MAPPING_TABLE
seg_mapping_table_add_segment (lseg, __this);
#else //SEG_MAPPING_TABLE
@@ -13043,12 +13023,12 @@ int gc_heap::try_allocate_more_space (alloc_context* acontext, size_t size,
if (can_allocate)
{
- //ETW trace for allocation tick
size_t alloc_context_bytes = acontext->alloc_limit + Align (min_obj_size, align_const) - acontext->alloc_ptr;
int etw_allocation_index = ((gen_number == 0) ? 0 : 1);
etw_allocation_running_amount[etw_allocation_index] += alloc_context_bytes;
+
if (etw_allocation_running_amount[etw_allocation_index] > etw_allocation_tick)
{
#ifdef FEATURE_REDHAWK
@@ -15475,7 +15455,15 @@ void gc_heap::gc1()
#ifdef FEATURE_EVENT_TRACE
if (bgc_heap_walk_for_etw_p && settings.concurrent)
{
- make_free_lists_for_profiler_for_bgc();
+ GCToEEInterface::DiagWalkBGCSurvivors(__this);
+
+#ifdef MULTIPLE_HEAPS
+ bgc_t_join.join(this, gc_join_after_profiler_heap_walk);
+ if (bgc_t_join.joined())
+ {
+ bgc_t_join.restart();
+ }
+#endif // MULTIPLE_HEAPS
}
#endif // FEATURE_EVENT_TRACE
#endif //BACKGROUND_GC
@@ -16414,89 +16402,56 @@ int gc_heap::garbage_collect (int n)
#endif //MULTIPLE_HEAPS
- BOOL should_evaluate_elevation = FALSE;
- BOOL should_do_blocking_collection = FALSE;
+ BOOL should_evaluate_elevation = FALSE;
+ BOOL should_do_blocking_collection = FALSE;
#ifdef MULTIPLE_HEAPS
- int gen_max = condemned_generation_num;
- for (int i = 0; i < n_heaps; i++)
- {
- if (gen_max < g_heaps[i]->condemned_generation_num)
- gen_max = g_heaps[i]->condemned_generation_num;
- if ((!should_evaluate_elevation) && (g_heaps[i]->elevation_requested))
- should_evaluate_elevation = TRUE;
- if ((!should_do_blocking_collection) && (g_heaps[i]->blocking_collection))
- should_do_blocking_collection = TRUE;
- }
+ int gen_max = condemned_generation_num;
+ for (int i = 0; i < n_heaps; i++)
+ {
+ if (gen_max < g_heaps[i]->condemned_generation_num)
+ gen_max = g_heaps[i]->condemned_generation_num;
+ if ((!should_evaluate_elevation) && (g_heaps[i]->elevation_requested))
+ should_evaluate_elevation = TRUE;
+ if ((!should_do_blocking_collection) && (g_heaps[i]->blocking_collection))
+ should_do_blocking_collection = TRUE;
+ }
- settings.condemned_generation = gen_max;
-//logically continues after GC_PROFILING.
+ settings.condemned_generation = gen_max;
#else //MULTIPLE_HEAPS
- settings.condemned_generation = generation_to_condemn (n,
- &blocking_collection,
- &elevation_requested,
- FALSE);
- should_evaluate_elevation = elevation_requested;
- should_do_blocking_collection = blocking_collection;
-#endif //MULTIPLE_HEAPS
+ settings.condemned_generation = generation_to_condemn (n,
+ &blocking_collection,
+ &elevation_requested,
+ FALSE);
+ should_evaluate_elevation = elevation_requested;
+ should_do_blocking_collection = blocking_collection;
+#endif //MULTIPLE_HEAPS
+
+ settings.condemned_generation = joined_generation_to_condemn (
+ should_evaluate_elevation,
+ settings.condemned_generation,
+ &should_do_blocking_collection
+ STRESS_HEAP_ARG(n)
+ );
- settings.condemned_generation = joined_generation_to_condemn (
- should_evaluate_elevation,
- settings.condemned_generation,
- &should_do_blocking_collection
- STRESS_HEAP_ARG(n)
- );
+ STRESS_LOG1(LF_GCROOTS|LF_GC|LF_GCALLOC, LL_INFO10,
+ "condemned generation num: %d\n", settings.condemned_generation);
- STRESS_LOG1(LF_GCROOTS|LF_GC|LF_GCALLOC, LL_INFO10,
- "condemned generation num: %d\n", settings.condemned_generation);
+ record_gcs_during_no_gc();
- record_gcs_during_no_gc();
-
- if (settings.condemned_generation > 1)
- settings.promotion = TRUE;
+ if (settings.condemned_generation > 1)
+ settings.promotion = TRUE;
#ifdef HEAP_ANALYZE
- // At this point we've decided what generation is condemned
- // See if we've been requested to analyze survivors after the mark phase
- if (AnalyzeSurvivorsRequested(settings.condemned_generation))
- {
- heap_analyze_enabled = TRUE;
- }
-#endif // HEAP_ANALYZE
-
-#ifdef GC_PROFILING
-
- // If we're tracking GCs, then we need to walk the first generation
- // before collection to track how many items of each class has been
- // allocated.
- UpdateGenerationBounds();
- GarbageCollectionStartedCallback(settings.condemned_generation, settings.reason == reason_induced);
+ // At this point we've decided what generation is condemned
+ // See if we've been requested to analyze survivors after the mark phase
+ if (AnalyzeSurvivorsRequested(settings.condemned_generation))
{
- BEGIN_PIN_PROFILER(CORProfilerTrackGC());
- size_t profiling_context = 0;
-
-#ifdef MULTIPLE_HEAPS
- int hn = 0;
- for (hn = 0; hn < gc_heap::n_heaps; hn++)
- {
- gc_heap* hp = gc_heap::g_heaps [hn];
-
- // When we're walking objects allocated by class, then we don't want to walk the large
- // object heap because then it would count things that may have been around for a while.
- hp->walk_heap (&AllocByClassHelper, (void *)&profiling_context, 0, FALSE);
- }
-#else
- // When we're walking objects allocated by class, then we don't want to walk the large
- // object heap because then it would count things that may have been around for a while.
- gc_heap::walk_heap (&AllocByClassHelper, (void *)&profiling_context, 0, FALSE);
-#endif //MULTIPLE_HEAPS
-
- // Notify that we've reached the end of the Gen 0 scan
- g_profControlBlock.pProfInterface->EndAllocByClass(&profiling_context);
- END_PIN_PROFILER();
+ heap_analyze_enabled = TRUE;
}
+#endif // HEAP_ANALYZE
-#endif // GC_PROFILING
+ GCToEEInterface::DiagGCStart(settings.condemned_generation, settings.reason == reason_induced);
#ifdef BACKGROUND_GC
if ((settings.condemned_generation == max_generation) &&
@@ -19628,12 +19583,7 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
dprintf (3, ("Finalize marking"));
finalize_queue->ScanForFinalization (GCHeap::Promote, condemned_gen_number, mark_only_p, __this);
-#ifdef GC_PROFILING
- if (CORProfilerTrackGC())
- {
- finalize_queue->WalkFReachableObjects (__this);
- }
-#endif //GC_PROFILING
+ GCToEEInterface::DiagWalkFReachableObjects(__this);
#endif // FEATURE_PREMORTEM_FINALIZATION
// Scan dependent handles again to promote any secondaries associated with primaries that were promoted
@@ -21108,8 +21058,7 @@ void gc_heap::relocate_in_loh_compact()
generation_free_obj_space (gen)));
}
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-void gc_heap::walk_relocation_loh (size_t profiling_context)
+void gc_heap::walk_relocation_for_loh (size_t profiling_context, record_surv_fn fn)
{
generation* gen = large_object_generation;
heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
@@ -21139,14 +21088,7 @@ void gc_heap::walk_relocation_loh (size_t profiling_context)
STRESS_LOG_PLUG_MOVE(o, (o + size), -reloc);
- {
- ETW::GCLog::MovedReference(
- o,
- (o + size),
- reloc,
- profiling_context,
- settings.compaction);
- }
+ fn (o, (o + size), reloc, profiling_context, settings.compaction, FALSE);
o = o + size;
if (o < heap_segment_allocated (seg))
@@ -21163,7 +21105,6 @@ void gc_heap::walk_relocation_loh (size_t profiling_context)
}
}
}
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
BOOL gc_heap::loh_object_p (uint8_t* o)
{
@@ -22321,10 +22262,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
if (!loh_compacted_p)
#endif //FEATURE_LOH_COMPACTION
{
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- if (ShouldTrackMovementForProfilerOrEtw())
- notify_profiler_of_surviving_large_objects();
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ GCToEEInterface::DiagWalkLOHSurvivors(__this);
sweep_large_objects();
}
}
@@ -22526,12 +22464,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
assert (generation_allocation_segment (consing_gen) ==
ephemeral_heap_segment);
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- if (ShouldTrackMovementForProfilerOrEtw())
- {
- record_survived_for_profiler(condemned_gen_number, first_condemned_address);
- }
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ GCToEEInterface::DiagWalkSurvivors(__this);
relocate_phase (condemned_gen_number, first_condemned_address);
compact_phase (condemned_gen_number, first_condemned_address,
@@ -22741,12 +22674,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
fix_older_allocation_area (older_gen);
}
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- if (ShouldTrackMovementForProfilerOrEtw())
- {
- record_survived_for_profiler(condemned_gen_number, first_condemned_address);
- }
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ GCToEEInterface::DiagWalkSurvivors(__this);
gen0_big_free_spaces = 0;
make_free_lists (condemned_gen_number);
@@ -23952,8 +23880,7 @@ void gc_heap::relocate_survivors (int condemned_gen_number,
}
}
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, walk_relocate_args* args, size_t profiling_context)
+void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, walk_relocate_args* args)
{
if (check_last_object_p)
{
@@ -23973,15 +23900,10 @@ void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, w
}
ptrdiff_t last_plug_relocation = node_relocation_distance (plug);
- ptrdiff_t reloc = settings.compaction ? last_plug_relocation : 0;
-
STRESS_LOG_PLUG_MOVE(plug, (plug + size), -last_plug_relocation);
+ ptrdiff_t reloc = settings.compaction ? last_plug_relocation : 0;
- ETW::GCLog::MovedReference(plug,
- (plug + size),
- reloc,
- profiling_context,
- settings.compaction);
+ (args->fn) (plug, (plug + size), reloc, args->profiling_context, settings.compaction, FALSE);
if (check_last_object_p)
{
@@ -23998,12 +23920,12 @@ void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, w
}
}
-void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args, size_t profiling_context)
+void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args)
{
assert ((tree != NULL));
if (node_left_child (tree))
{
- walk_relocation_in_brick (tree + node_left_child (tree), args, profiling_context);
+ walk_relocation_in_brick (tree + node_left_child (tree), args);
}
uint8_t* plug = tree;
@@ -24032,7 +23954,7 @@ void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args,
assert (last_plug_size >= Align (min_obj_size));
}
- walk_plug (args->last_plug, last_plug_size, check_last_object_p, args, profiling_context);
+ walk_plug (args->last_plug, last_plug_size, check_last_object_p, args);
}
else
{
@@ -24045,18 +23967,14 @@ void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args,
if (node_right_child (tree))
{
- walk_relocation_in_brick (tree + node_right_child (tree), args, profiling_context);
-
+ walk_relocation_in_brick (tree + node_right_child (tree), args);
}
}
-void gc_heap::walk_relocation (int condemned_gen_number,
- uint8_t* first_condemned_address,
- size_t profiling_context)
-
+void gc_heap::walk_relocation (size_t profiling_context, record_surv_fn fn)
{
- generation* condemned_gen = generation_of (condemned_gen_number);
- uint8_t* start_address = first_condemned_address;
+ generation* condemned_gen = generation_of (settings.condemned_generation);
+ uint8_t* start_address = generation_allocation_start (condemned_gen);
size_t current_brick = brick_of (start_address);
heap_segment* current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen));
@@ -24069,6 +23987,8 @@ void gc_heap::walk_relocation (int condemned_gen_number,
args.is_shortened = FALSE;
args.pinned_plug_entry = 0;
args.last_plug = 0;
+ args.profiling_context = profiling_context;
+ args.fn = fn;
while (1)
{
@@ -24078,8 +23998,8 @@ void gc_heap::walk_relocation (int condemned_gen_number,
{
walk_plug (args.last_plug,
(heap_segment_allocated (current_heap_segment) - args.last_plug),
- args.is_shortened,
- &args, profiling_context);
+ args.is_shortened,
+ &args);
args.last_plug = 0;
}
if (heap_segment_next_rw (current_heap_segment))
@@ -24100,16 +24020,29 @@ void gc_heap::walk_relocation (int condemned_gen_number,
{
walk_relocation_in_brick (brick_address (current_brick) +
brick_entry - 1,
- &args,
- profiling_context);
+ &args);
}
}
current_brick++;
}
}
+void gc_heap::walk_survivors (record_surv_fn fn, size_t context, walk_surv_type type)
+{
+ if (type == walk_for_gc)
+ walk_survivors_relocation (context, fn);
#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
-void gc_heap::walk_relocation_for_bgc(size_t profiling_context)
+ else if (type == walk_for_bgc)
+ walk_survivors_for_bgc (context, fn);
+#endif //BACKGROUND_GC && FEATURE_EVENT_TRACE
+ else if (type == walk_for_loh)
+ walk_survivors_for_loh (context, fn);
+ else
+ assert (!"unknown type!");
+}
+
+#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
+void gc_heap::walk_survivors_for_bgc (size_t profiling_context, record_surv_fn fn)
{
// This should only be called for BGCs
assert(settings.concurrent);
@@ -24143,8 +24076,7 @@ void gc_heap::walk_relocation_for_bgc(size_t profiling_context)
uint8_t* end = heap_segment_allocated (seg);
while (o < end)
- {
-
+ {
if (method_table(o) == g_pFreeObjectMethodTable)
{
o += Align (size (o), align_const);
@@ -24167,51 +24099,18 @@ void gc_heap::walk_relocation_for_bgc(size_t profiling_context)
uint8_t* plug_end = o;
- // Note on last parameter: since this is for bgc, only ETW
- // should be sending these events so that existing profapi profilers
- // don't get confused.
- ETW::GCLog::MovedReference(
- plug_start,
+ fn (plug_start,
plug_end,
0, // Reloc distance == 0 as this is non-compacting
profiling_context,
FALSE, // Non-compacting
- FALSE); // fAllowProfApiNotification
+ TRUE); // BGC
}
seg = heap_segment_next (seg);
}
}
-
-void gc_heap::make_free_lists_for_profiler_for_bgc ()
-{
- assert(settings.concurrent);
-
- size_t profiling_context = 0;
- ETW::GCLog::BeginMovedReferences(&profiling_context);
-
- // This provides the profiler with information on what blocks of
- // memory are moved during a gc.
-
- walk_relocation_for_bgc(profiling_context);
-
- // Notify the EE-side profiling code that all the references have been traced for
- // this heap, and that it needs to flush all cached data it hasn't sent to the
- // profiler and release resources it no longer needs. Since this is for bgc, only
- // ETW should be sending these events so that existing profapi profilers don't get confused.
- ETW::GCLog::EndMovedReferences(profiling_context, FALSE /* fAllowProfApiNotification */);
-
-#ifdef MULTIPLE_HEAPS
- bgc_t_join.join(this, gc_join_after_profiler_heap_walk);
- if (bgc_t_join.joined())
- {
- bgc_t_join.restart();
- }
-#endif // MULTIPLE_HEAPS
-}
-
#endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
void gc_heap::relocate_phase (int condemned_gen_number,
uint8_t* first_condemned_address)
@@ -30622,35 +30521,21 @@ BOOL gc_heap::large_object_marked (uint8_t* o, BOOL clearp)
return m;
}
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-void gc_heap::record_survived_for_profiler(int condemned_gen_number, uint8_t * start_address)
+void gc_heap::walk_survivors_relocation (size_t profiling_context, record_surv_fn fn)
{
- size_t profiling_context = 0;
-
- ETW::GCLog::BeginMovedReferences(&profiling_context);
-
// Now walk the portion of memory that is actually being relocated.
- walk_relocation(condemned_gen_number, start_address, profiling_context);
+ walk_relocation (profiling_context, fn);
#ifdef FEATURE_LOH_COMPACTION
if (loh_compacted_p)
{
- walk_relocation_loh (profiling_context);
+ walk_relocation_for_loh (profiling_context, fn);
}
#endif //FEATURE_LOH_COMPACTION
-
- // Notify the EE-side profiling code that all the references have been traced for
- // this heap, and that it needs to flush all cached data it hasn't sent to the
- // profiler and release resources it no longer needs.
- ETW::GCLog::EndMovedReferences(profiling_context);
}
-void gc_heap::notify_profiler_of_surviving_large_objects ()
+void gc_heap::walk_survivors_for_loh (size_t profiling_context, record_surv_fn fn)
{
- size_t profiling_context = 0;
-
- ETW::GCLog::BeginMovedReferences(&profiling_context);
-
generation* gen = large_object_generation;
heap_segment* seg = heap_segment_rw (generation_start_segment (gen));;
@@ -30660,13 +30545,6 @@ void gc_heap::notify_profiler_of_surviving_large_objects ()
uint8_t* plug_end = o;
uint8_t* plug_start = o;
- // Generally, we can only get here if this is TRUE:
- // (CORProfilerTrackGC() || ETW::GCLog::ShouldTrackMovementForEtw())
- // But we can't always assert that, as races could theoretically cause GC profiling
- // or ETW to turn off just before we get here. This is harmless (we do checks later
- // on, under appropriate locks, before actually calling into profilers), though it's
- // a slowdown to determine these plugs for nothing.
-
while (1)
{
if (o >= heap_segment_allocated (seg))
@@ -30694,12 +30572,7 @@ void gc_heap::notify_profiler_of_surviving_large_objects ()
plug_end = o;
- ETW::GCLog::MovedReference(
- plug_start,
- plug_end,
- 0,
- profiling_context,
- FALSE);
+ fn (plug_start, plug_end, 0, profiling_context, FALSE, FALSE);
}
else
{
@@ -30709,9 +30582,7 @@ void gc_heap::notify_profiler_of_surviving_large_objects ()
}
}
}
- ETW::GCLog::EndMovedReferences(profiling_context);
}
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
#ifdef BACKGROUND_GC
@@ -31943,7 +31814,6 @@ void gc_heap::descr_card_table ()
void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context)
{
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
#ifdef MULTIPLE_HEAPS
int n_heaps = g_theGCHeap->GetNumberOfHeaps ();
for (int i = 0; i < n_heaps; i++)
@@ -32021,7 +31891,6 @@ void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context)
curr_gen_number0--;
}
}
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
}
#ifdef TRACE_GC
@@ -33560,10 +33429,7 @@ HRESULT GCHeap::Initialize ()
{
GCScan::GcRuntimeStructuresValid (TRUE);
-#ifdef GC_PROFILING
- if (CORProfilerTrackGC())
- UpdateGenerationBounds();
-#endif // GC_PROFILING
+ GCToEEInterface::DiagUpdateGenerationBounds();
}
return hr;
@@ -34972,7 +34838,6 @@ void gc_heap::do_post_gc()
{
if (!settings.concurrent)
{
- GCProfileWalkHeap();
initGCShadow();
}
@@ -34992,13 +34857,10 @@ void gc_heap::do_post_gc()
GCToEEInterface::GcDone(settings.condemned_generation);
-#ifdef GC_PROFILING
- if (!settings.concurrent)
- {
- UpdateGenerationBounds();
- GarbageCollectionFinishedCallback();
- }
-#endif // GC_PROFILING
+ GCToEEInterface::DiagGCEnd(VolatileLoad(&settings.gc_index),
+ (uint32_t)settings.condemned_generation,
+ (uint32_t)settings.reason,
+ !!settings.concurrent);
//dprintf (1, (" ****end of Garbage Collection**** %d(gen0:%d)(%d)",
dprintf (1, ("*EGC* %d(gen0:%d)(%d)(%s)",
@@ -36281,21 +36143,17 @@ CFinalize::GcScanRoots (promote_func* fn, int hn, ScanContext *pSC)
}
}
-#ifdef GC_PROFILING
-void CFinalize::WalkFReachableObjects (gc_heap* hp)
+void CFinalize::WalkFReachableObjects (fq_walk_fn fn)
{
- BEGIN_PIN_PROFILER(CORProfilerPresent());
Object** startIndex = SegQueue (CriticalFinalizerListSeg);
Object** stopCriticalIndex = SegQueueLimit (CriticalFinalizerListSeg);
Object** stopIndex = SegQueueLimit (FinalizerListSeg);
for (Object** po = startIndex; po < stopIndex; po++)
{
//report *po
- g_profControlBlock.pProfInterface->FinalizeableObjectQueued(po < stopCriticalIndex, (ObjectID)*po);
+ fn(po < stopCriticalIndex, *po);
}
- END_PIN_PROFILER();
}
-#endif //GC_PROFILING
BOOL
CFinalize::ScanForFinalization (promote_func* pfn, int gen, BOOL mark_only_p,
@@ -36531,8 +36389,7 @@ void CFinalize::CheckFinalizerObjects()
// End of VM specific support
//
//------------------------------------------------------------------------------
-
-void gc_heap::walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p)
+void gc_heap::walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p)
{
generation* gen = gc_heap::generation_of (gen_number);
heap_segment* seg = generation_start_segment (gen);
@@ -36588,9 +36445,29 @@ void gc_heap::walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_la
}
}
-void GCHeap::WalkObject (Object* obj, walk_fn fn, void* context)
+void gc_heap::walk_finalize_queue (fq_walk_fn fn)
+{
+#ifdef FEATURE_PREMORTEM_FINALIZATION
+ finalize_queue->WalkFReachableObjects (fn);
+#endif //FEATURE_PREMORTEM_FINALIZATION
+}
+
+void gc_heap::walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p)
+{
+#ifdef MULTIPLE_HEAPS
+ for (int hn = 0; hn < gc_heap::n_heaps; hn++)
+ {
+ gc_heap* hp = gc_heap::g_heaps [hn];
+
+ hp->walk_heap_per_heap (fn, context, gen_number, walk_large_object_heap_p);
+ }
+#else
+ walk_heap_per_heap(fn, context, gen_number, walk_large_object_heap_p);
+#endif //MULTIPLE_HEAPS
+}
+
+void GCHeap::DiagWalkObject (Object* obj, walk_fn fn, void* context)
{
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
uint8_t* o = (uint8_t*)obj;
if (o)
{
@@ -36605,7 +36482,46 @@ void GCHeap::WalkObject (Object* obj, walk_fn fn, void* context)
}
);
}
-#endif //defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+}
+
+void GCHeap::DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, size_t diag_context, walk_surv_type type)
+{
+ gc_heap* hp = (gc_heap*)gc_context;
+ hp->walk_survivors (fn, diag_context, type);
+}
+
+void GCHeap::DiagWalkHeap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p)
+{
+ gc_heap::walk_heap (fn, context, gen_number, walk_large_object_heap_p);
+}
+
+void GCHeap::DiagWalkFinalizeQueue (void* gc_context, fq_walk_fn fn)
+{
+ gc_heap* hp = (gc_heap*)gc_context;
+ hp->walk_finalize_queue (fn);
+}
+
+void GCHeap::DiagScanFinalizeQueue (fq_scan_fn fn, ScanContext* sc)
+{
+#ifdef MULTIPLE_HEAPS
+ for (int hn = 0; hn < gc_heap::n_heaps; hn++)
+ {
+ gc_heap* hp = gc_heap::g_heaps [hn];
+ hp->finalize_queue->GcScanRoots(fn, hn, sc);
+ }
+#else
+ pGenGCHeap->finalize_queue->GcScanRoots(fn, 0, sc);
+#endif //MULTIPLE_HEAPS
+}
+
+void GCHeap::DiagScanHandles (handle_scan_fn fn, int gen_number, ScanContext* context)
+{
+ GCScan::GcScanHandlesForProfilerAndETW (max_generation, context, fn);
+}
+
+void GCHeap::DiagScanDependentHandles (handle_scan_fn fn, int gen_number, ScanContext* context)
+{
+ GCScan::GcScanDependentHandlesForProfilerAndETW (max_generation, context, fn);
}
// Go through and touch (read) each page straddled by a memory block.
diff --git a/src/gc/gcee.cpp b/src/gc/gcee.cpp
index 58a553661e..c93cc91b57 100644
--- a/src/gc/gcee.cpp
+++ b/src/gc/gcee.cpp
@@ -381,209 +381,6 @@ size_t GCHeap::GetNow()
return GetHighPrecisionTimeStamp();
}
-void ProfScanRootsHelper(Object** ppObject, ScanContext *pSC, uint32_t dwFlags)
-{
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- Object *pObj = *ppObject;
-#ifdef INTERIOR_POINTERS
- if (dwFlags & GC_CALL_INTERIOR)
- {
- uint8_t *o = (uint8_t*)pObj;
- gc_heap* hp = gc_heap::heap_of (o);
-
- if ((o < hp->gc_low) || (o >= hp->gc_high))
- {
- return;
- }
- pObj = (Object*) hp->find_object(o, hp->gc_low);
- }
-#endif //INTERIOR_POINTERS
- ScanRootsHelper(pObj, ppObject, pSC, dwFlags);
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-}
-
-// TODO - at some point we would like to completely decouple profiling
-// from ETW tracing using a pattern similar to this, where the
-// ProfilingScanContext has flags about whether or not certain things
-// should be tracked, and each one of these ProfilerShouldXYZ functions
-// will check these flags and determine what to do based upon that.
-// GCProfileWalkHeapWorker can, in turn, call those methods without fear
-// of things being ifdef'd out.
-
-// Returns TRUE if GC profiling is enabled and the profiler
-// should scan dependent handles, FALSE otherwise.
-BOOL ProfilerShouldTrackConditionalWeakTableElements()
-{
-#if defined(GC_PROFILING)
- return CORProfilerTrackConditionalWeakTableElements();
-#else
- return FALSE;
-#endif // defined (GC_PROFILING)
-}
-
-// If GC profiling is enabled, informs the profiler that we are done
-// tracing dependent handles.
-void ProfilerEndConditionalWeakTableElementReferences(void* heapId)
-{
-#if defined (GC_PROFILING)
- g_profControlBlock.pProfInterface->EndConditionalWeakTableElementReferences(heapId);
-#else
- UNREFERENCED_PARAMETER(heapId);
-#endif // defined (GC_PROFILING)
-}
-
-// If GC profiling is enabled, informs the profiler that we are done
-// tracing root references.
-void ProfilerEndRootReferences2(void* heapId)
-{
-#if defined (GC_PROFILING)
- g_profControlBlock.pProfInterface->EndRootReferences2(heapId);
-#else
- UNREFERENCED_PARAMETER(heapId);
-#endif // defined (GC_PROFILING)
-}
-
-// This is called only if we've determined that either:
-// a) The Profiling API wants to do a walk of the heap, and it has pinned the
-// profiler in place (so it cannot be detached), and it's thus safe to call into the
-// profiler, OR
-// b) ETW infrastructure wants to do a walk of the heap either to log roots,
-// objects, or both.
-// This can also be called to do a single walk for BOTH a) and b) simultaneously. Since
-// ETW can ask for roots, but not objects
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-
-void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw)
-{
- {
- ProfilingScanContext SC(fProfilerPinned);
-
- // **** Scan roots: Only scan roots if profiling API wants them or ETW wants them.
- if (fProfilerPinned || fShouldWalkHeapRootsForEtw)
- {
-#ifdef MULTIPLE_HEAPS
- int hn;
-
- // Must emulate each GC thread number so we can hit each
- // heap for enumerating the roots.
- for (hn = 0; hn < gc_heap::n_heaps; hn++)
- {
- // Ask the vm to go over all of the roots for this specific
- // heap.
- gc_heap* hp = gc_heap::g_heaps [hn];
- SC.thread_number = hn;
- GCScan::GcScanRoots(&ProfScanRootsHelper, max_generation, max_generation, &SC);
-
- // The finalizer queue is also a source of roots
- SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
- hp->finalize_queue->GcScanRoots(&ProfScanRootsHelper, hn, &SC);
- }
-#else
- // Ask the vm to go over all of the roots
- GCScan::GcScanRoots(&ProfScanRootsHelper, max_generation, max_generation, &SC);
-
- // The finalizer queue is also a source of roots
- SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
- pGenGCHeap->finalize_queue->GcScanRoots(&ProfScanRootsHelper, 0, &SC);
-
-#endif // MULTIPLE_HEAPS
- // Handles are kept independent of wks/svr/concurrent builds
- SC.dwEtwRootKind = kEtwGCRootKindHandle;
- GCScan::GcScanHandlesForProfilerAndETW(max_generation, &SC);
-
- // indicate that regular handle scanning is over, so we can flush the buffered roots
- // to the profiler. (This is for profapi only. ETW will flush after the
- // entire heap was is complete, via ETW::GCLog::EndHeapDump.)
- if (fProfilerPinned)
- {
- ProfilerEndRootReferences2(&SC.pHeapId);
- }
- }
-
- // **** Scan dependent handles: only if the profiler supports it or ETW wants roots
- if ((fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements()) ||
- fShouldWalkHeapRootsForEtw)
- {
- // GcScanDependentHandlesForProfiler double-checks
- // CORProfilerTrackConditionalWeakTableElements() before calling into the profiler
-
- GCScan::GcScanDependentHandlesForProfilerAndETW(max_generation, &SC);
-
- // indicate that dependent handle scanning is over, so we can flush the buffered roots
- // to the profiler. (This is for profapi only. ETW will flush after the
- // entire heap was is complete, via ETW::GCLog::EndHeapDump.)
- if (fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements())
- {
- ProfilerEndConditionalWeakTableElementReferences(&SC.pHeapId);
- }
- }
-
- ProfilerWalkHeapContext profilerWalkHeapContext(fProfilerPinned, SC.pvEtwContext);
-
- // **** Walk objects on heap: only if profiling API wants them or ETW wants them.
- if (fProfilerPinned || fShouldWalkHeapObjectsForEtw)
- {
-#ifdef MULTIPLE_HEAPS
- int hn;
-
- // Walk the heap and provide the objref to the profiler
- for (hn = 0; hn < gc_heap::n_heaps; hn++)
- {
- gc_heap* hp = gc_heap::g_heaps [hn];
- hp->walk_heap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, TRUE /* walk the large object heap */);
- }
-#else
- gc_heap::walk_heap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, TRUE);
-#endif //MULTIPLE_HEAPS
- }
-
-#ifdef FEATURE_EVENT_TRACE
- // **** Done! Indicate to ETW helpers that the heap walk is done, so any buffers
- // should be flushed into the ETW stream
- if (fShouldWalkHeapObjectsForEtw || fShouldWalkHeapRootsForEtw)
- {
- ETW::GCLog::EndHeapDump(&profilerWalkHeapContext);
- }
-#endif // FEATURE_EVENT_TRACE
- }
-}
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-
-void GCProfileWalkHeap()
-{
- BOOL fWalkedHeapForProfiler = FALSE;
-
-#ifdef FEATURE_EVENT_TRACE
- if (ETW::GCLog::ShouldWalkStaticsAndCOMForEtw())
- ETW::GCLog::WalkStaticsAndCOMForETW();
-
- BOOL fShouldWalkHeapRootsForEtw = ETW::GCLog::ShouldWalkHeapRootsForEtw();
- BOOL fShouldWalkHeapObjectsForEtw = ETW::GCLog::ShouldWalkHeapObjectsForEtw();
-#else // !FEATURE_EVENT_TRACE
- BOOL fShouldWalkHeapRootsForEtw = FALSE;
- BOOL fShouldWalkHeapObjectsForEtw = FALSE;
-#endif // FEATURE_EVENT_TRACE
-
-#if defined (GC_PROFILING)
- {
- BEGIN_PIN_PROFILER(CORProfilerTrackGC());
- GCProfileWalkHeapWorker(TRUE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
- fWalkedHeapForProfiler = TRUE;
- END_PIN_PROFILER();
- }
-#endif // defined (GC_PROFILING)
-
-#if defined (GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- // we need to walk the heap if one of GC_PROFILING or FEATURE_EVENT_TRACE
- // is defined, since both of them make use of the walk heap worker.
- if (!fWalkedHeapForProfiler &&
- (fShouldWalkHeapRootsForEtw || fShouldWalkHeapObjectsForEtw))
- {
- GCProfileWalkHeapWorker(FALSE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
- }
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-}
-
BOOL GCHeap::IsGCInProgressHelper (BOOL bConsiderGCStart)
{
return GcInProgress || (bConsiderGCStart? VolatileLoad(&gc_heap::gc_started) : FALSE);
@@ -786,7 +583,7 @@ IGCHeapInternal* CreateGCHeap() {
return new(nothrow) GCHeap(); // we return wks or svr
}
-void GCHeap::TraceGCSegments()
+void GCHeap::DiagTraceGCSegments()
{
#ifdef FEATURE_EVENT_TRACE
heap_segment* seg = 0;
@@ -823,7 +620,7 @@ void GCHeap::TraceGCSegments()
#endif // FEATURE_EVENT_TRACE
}
-void GCHeap::DescrGenerationsToProfiler (gen_walk_fn fn, void *context)
+void GCHeap::DiagDescrGenerations (gen_walk_fn fn, void *context)
{
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
pGenGCHeap->descr_generations_to_profiler(fn, context);
diff --git a/src/gc/gcenv.ee.standalone.inl b/src/gc/gcenv.ee.standalone.inl
index 2ecc6fc83d..a4f86e152b 100644
--- a/src/gc/gcenv.ee.standalone.inl
+++ b/src/gc/gcenv.ee.standalone.inl
@@ -125,4 +125,45 @@ inline Thread* GCToEEInterface::CreateBackgroundThread(GCBackgroundThreadFunctio
return g_theGCToCLR->CreateBackgroundThread(threadStart, arg);
}
+inline void GCToEEInterface::DiagGCStart(int gen, bool isInduced)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->DiagGCStart(gen, isInduced);
+}
+
+inline void GCToEEInterface::DiagUpdateGenerationBounds()
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->DiagUpdateGenerationBounds();
+}
+
+inline void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->DiagGCEnd(index, gen, reason, fConcurrent);
+}
+
+inline void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->DiagWalkFReachableObjects(gcContext);
+}
+
+inline void GCToEEInterface::DiagWalkSurvivors(void* gcContext)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->DiagWalkSurvivors(gcContext);
+}
+
+inline void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->DiagWalkLOHSurvivors(gcContext);
+}
+
+inline void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext)
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->DiagWalkBGCSurvivors(gcContext);
+}
#endif // __GCTOENV_EE_STANDALONE_INL__
diff --git a/src/gc/gcimpl.h b/src/gc/gcimpl.h
index d7393c3575..7e3a13a743 100644
--- a/src/gc/gcimpl.h
+++ b/src/gc/gcimpl.h
@@ -77,7 +77,7 @@ public:
size_t GetLastGCDuration(int generation);
size_t GetNow();
- void TraceGCSegments ();
+ void DiagTraceGCSegments ();
void PublishObject(uint8_t* obj);
BOOL IsGCInProgressHelper (BOOL bConsiderGCStart = FALSE);
@@ -199,7 +199,7 @@ public:
BOOL ShouldRestartFinalizerWatchDog();
void SetCardsAfterBulkCopy( Object**, size_t);
- void WalkObject (Object* obj, walk_fn fn, void* context);
+ void DiagWalkObject (Object* obj, walk_fn fn, void* context);
public: // FIX
@@ -272,7 +272,19 @@ protected:
#endif // STRESS_HEAP
#endif // FEATURE_REDHAWK
- virtual void DescrGenerationsToProfiler (gen_walk_fn fn, void *context);
+ virtual void DiagDescrGenerations (gen_walk_fn fn, void *context);
+
+ virtual void DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, size_t diag_context, walk_surv_type type);
+
+ virtual void DiagWalkFinalizeQueue (void* gc_context, fq_walk_fn fn);
+
+ virtual void DiagScanFinalizeQueue (fq_scan_fn fn, ScanContext* context);
+
+ virtual void DiagScanHandles (handle_scan_fn fn, int gen_number, ScanContext* context);
+
+ virtual void DiagScanDependentHandles (handle_scan_fn fn, int gen_number, ScanContext* context);
+
+ virtual void DiagWalkHeap(walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
public:
Object * NextObj (Object * object);
diff --git a/src/gc/gcinterface.ee.h b/src/gc/gcinterface.ee.h
index 36d20c7195..3e6631bb64 100644
--- a/src/gc/gcinterface.ee.h
+++ b/src/gc/gcinterface.ee.h
@@ -92,6 +92,37 @@ public:
// Creates and returns a new background thread.
virtual
Thread* CreateBackgroundThread(GCBackgroundThreadFunction threadStart, void* arg) = 0;
+
+ // When a GC starts, gives the diagnostics code a chance to run.
+ virtual
+ void DiagGCStart(int gen, bool isInduced) = 0;
+
+ // When GC heap segments change, gives the diagnostics code a chance to run.
+ virtual
+ void DiagUpdateGenerationBounds() = 0;
+
+ // When a GC ends, gives the diagnostics code a chance to run.
+ virtual
+ void DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent) = 0;
+
+ // During a GC after we discover what objects' finalizers should run, gives the diagnostics code a chance to run.
+ virtual
+ void DiagWalkFReachableObjects(void* gcContext) = 0;
+
+ // During a GC after we discover the survivors and the relocation info,
+ // gives the diagnostics code a chance to run. This includes LOH if we are
+ // compacting LOH.
+ virtual
+ void DiagWalkSurvivors(void* gcContext) = 0;
+
+ // During a full GC after we discover what objects to survive on LOH,
+ // gives the diagnostics code a chance to run.
+ virtual
+ void DiagWalkLOHSurvivors(void* gcContext) = 0;
+
+ // At the end of a background GC, gives the diagnostics code a chance to run.
+ virtual
+ void DiagWalkBGCSurvivors(void* gcContext) = 0;
};
#endif // _GCINTERFACE_EE_H_
diff --git a/src/gc/gcinterface.h b/src/gc/gcinterface.h
index ac14332dcf..1f668099e5 100644
--- a/src/gc/gcinterface.h
+++ b/src/gc/gcinterface.h
@@ -34,6 +34,13 @@ typedef enum
SUSPEND_FOR_GC_PREP = 6
} SUSPEND_REASON;
+typedef enum
+{
+ walk_for_gc = 1,
+ walk_for_bgc = 2,
+ walk_for_loh = 3
+} walk_surv_type;
+
#include "gcinterface.ee.h"
// The allocation context must be known to the VM for use in the allocation
@@ -88,6 +95,8 @@ struct segment_info
// one for the object header, and one for the first field in the object.
#define min_obj_size ((sizeof(uint8_t*) + sizeof(uintptr_t) + sizeof(size_t)))
+#define max_generation 2
+
class Object;
class IGCHeap;
@@ -174,6 +183,10 @@ enum end_no_gc_region_status
typedef BOOL (* walk_fn)(Object*, void*);
typedef void (* gen_walk_fn)(void* context, int generation, uint8_t* range_start, uint8_t* range_end, uint8_t* range_reserved);
+typedef void (* record_surv_fn)(uint8_t* begin, uint8_t* end, ptrdiff_t reloc, size_t context, BOOL compacting_p, BOOL bgc_p);
+typedef void (* fq_walk_fn)(BOOL, void*);
+typedef void (* fq_scan_fn)(Object** ppObject, ScanContext *pSC, uint32_t dwFlags);
+typedef void (* handle_scan_fn)(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, BOOL isDependent);
// IGCHeap is the interface that the VM will use when interacting with the GC.
class IGCHeap {
@@ -413,8 +426,8 @@ public:
// with the given size and flags.
virtual Object* AllocAlign8 (gc_alloc_context* acontext, size_t size, uint32_t flags) = 0;
- // If allocating on the LOH, blocks if a BGC is in a position (concurrent mark)
- // where the LOH allocator can't allocate.
+ // This is for the allocator to indicate it's done allocating a large object during a
+ // background GC as the BGC threads also need to walk LOH.
virtual void PublishObject(uint8_t* obj) = 0;
// Gets the event that suspended threads will use to wait for the
@@ -449,13 +462,31 @@ public:
*/
// Walks an object, invoking a callback on each member.
- virtual void WalkObject(Object* obj, walk_fn fn, void* context) = 0;
+ virtual void DiagWalkObject(Object* obj, walk_fn fn, void* context) = 0;
+
+ // Walk the heap object by object.
+ virtual void DiagWalkHeap(walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p) = 0;
+
+ // Walks the survivors and get the relocation information if objects have moved.
+ virtual void DiagWalkSurvivorsWithType(void* gc_context, record_surv_fn fn, size_t diag_context, walk_surv_type type) = 0;
+
+ // Walks the finalization queue.
+ virtual void DiagWalkFinalizeQueue(void* gc_context, fq_walk_fn fn) = 0;
+
+ // Scan roots on finalizer queue. This is a generic function.
+ virtual void DiagScanFinalizeQueue(fq_scan_fn fn, ScanContext* context) = 0;
+
+ // Scan handles for profiling or ETW.
+ virtual void DiagScanHandles(handle_scan_fn fn, int gen_number, ScanContext* context) = 0;
+
+ // Scan dependent handles for profiling or ETW.
+ virtual void DiagScanDependentHandles(handle_scan_fn fn, int gen_number, ScanContext* context) = 0;
// Describes all generations to the profiler, invoking a callback on each generation.
- virtual void DescrGenerationsToProfiler(gen_walk_fn fn, void* context) = 0;
+ virtual void DiagDescrGenerations(gen_walk_fn fn, void* context) = 0;
// Traces all GC segments and fires ETW events with information on them.
- virtual void TraceGCSegments() = 0;
+ virtual void DiagTraceGCSegments() = 0;
/*
===========================================================================
@@ -550,26 +581,4 @@ struct ScanContext
}
};
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-struct ProfilingScanContext : ScanContext
-{
- BOOL fProfilerPinned;
- void * pvEtwContext;
- void *pHeapId;
-
- ProfilingScanContext(BOOL fProfilerPinnedParam) : ScanContext()
- {
- LIMITED_METHOD_CONTRACT;
-
- pHeapId = NULL;
- fProfilerPinned = fProfilerPinnedParam;
- pvEtwContext = NULL;
-#ifdef FEATURE_CONSERVATIVE_GC
- // To not confuse GCScan::GcScanRoots
- promotion = g_pConfig->GetGCConservative();
-#endif
- }
-};
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-
#endif // _GC_INTERFACE_H_
diff --git a/src/gc/gcpriv.h b/src/gc/gcpriv.h
index 364b0045a4..3c9e2234a8 100644
--- a/src/gc/gcpriv.h
+++ b/src/gc/gcpriv.h
@@ -1075,9 +1075,6 @@ enum interesting_data_point
};
//class definition of the internal class
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-extern void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw);
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
class gc_heap
{
friend struct ::_DacGlobals;
@@ -1285,35 +1282,48 @@ public:
protected:
- PER_HEAP
+ PER_HEAP_ISOLATED
void walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
+ PER_HEAP
+ void walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
+
struct walk_relocate_args
{
uint8_t* last_plug;
BOOL is_shortened;
mark* pinned_plug_entry;
+ size_t profiling_context;
+ record_surv_fn fn;
};
PER_HEAP
+ void walk_survivors (record_surv_fn fn, size_t context, walk_surv_type type);
+
+ PER_HEAP
void walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p,
- walk_relocate_args* args, size_t profiling_context);
+ walk_relocate_args* args);
PER_HEAP
- void walk_relocation (int condemned_gen_number,
- uint8_t* first_condemned_address, size_t profiling_context);
+ void walk_relocation (size_t profiling_context, record_surv_fn fn);
PER_HEAP
- void walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args, size_t profiling_context);
+ void walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args);
-#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
PER_HEAP
- void walk_relocation_for_bgc(size_t profiling_context);
+ void walk_finalize_queue (fq_walk_fn fn);
+#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
PER_HEAP
- void make_free_lists_for_profiler_for_bgc();
+ void walk_survivors_for_bgc (size_t profiling_context, record_surv_fn fn);
#endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
+ // used in blocking GCs after plan phase so this walks the plugs.
+ PER_HEAP
+ void walk_survivors_relocation (size_t profiling_context, record_surv_fn fn);
+ PER_HEAP
+ void walk_survivors_for_loh (size_t profiling_context, record_surv_fn fn);
+
PER_HEAP
int generation_to_condemn (int n,
BOOL* blocking_collection_p,
@@ -2150,10 +2160,8 @@ protected:
PER_HEAP
void relocate_in_loh_compact();
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
PER_HEAP
- void walk_relocation_loh (size_t profiling_context);
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ void walk_relocation_for_loh (size_t profiling_context, record_surv_fn fn);
PER_HEAP
BOOL loh_enque_pinned_plug (uint8_t* plug, size_t len);
@@ -2551,12 +2559,6 @@ protected:
PER_HEAP_ISOLATED
void descr_generations_to_profiler (gen_walk_fn fn, void *context);
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- PER_HEAP
- void record_survived_for_profiler(int condemned_gen_number, uint8_t * first_condemned_address);
- PER_HEAP
- void notify_profiler_of_surviving_large_objects ();
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
/*------------ Multiple non isolated heaps ----------------*/
#ifdef MULTIPLE_HEAPS
@@ -3765,9 +3767,7 @@ public:
Object* GetNextFinalizableObject (BOOL only_non_critical=FALSE);
BOOL ScanForFinalization (promote_func* fn, int gen,BOOL mark_only_p, gc_heap* hp);
void RelocateFinalizationData (int gen, gc_heap* hp);
-#ifdef GC_PROFILING
- void WalkFReachableObjects (gc_heap* hp);
-#endif //GC_PROFILING
+ void WalkFReachableObjects (fq_walk_fn fn);
void GcScanRoots (promote_func* fn, int hn, ScanContext *pSC);
void UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p);
size_t GetPromotedCount();
diff --git a/src/gc/gcrecord.h b/src/gc/gcrecord.h
index 8c95ad04d3..fff1fc5c8b 100644
--- a/src/gc/gcrecord.h
+++ b/src/gc/gcrecord.h
@@ -13,7 +13,7 @@ Module Name:
#ifndef __gc_record_h__
#define __gc_record_h__
-#define max_generation 2
+//#define max_generation 2
// We pack the dynamic tuning for deciding which gen to condemn in a uint32_t.
// We assume that 2 bits are enough to represent the generation.
diff --git a/src/gc/gcscan.cpp b/src/gc/gcscan.cpp
index f021554fd6..b4e6352dd6 100644
--- a/src/gc/gcscan.cpp
+++ b/src/gc/gcscan.cpp
@@ -192,33 +192,32 @@ void GCScan::GcScanHandles (promote_func* fn, int condemned, int max_gen,
}
}
-
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-
/*
* Scan all handle roots in this 'namespace' for profiling
*/
-void GCScan::GcScanHandlesForProfilerAndETW (int max_gen, ScanContext* sc)
+void GCScan::GcScanHandlesForProfilerAndETW (int max_gen, ScanContext* sc, handle_scan_fn fn)
{
LIMITED_METHOD_CONTRACT;
+#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
LOG((LF_GC|LF_GCROOTS, LL_INFO10, "Profiler Root Scan Phase, Handles\n"));
- Ref_ScanPointersForProfilerAndETW(max_gen, (uintptr_t)sc);
+ Ref_ScanHandlesForProfilerAndETW(max_gen, (uintptr_t)sc, fn);
+#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
}
/*
* Scan dependent handles in this 'namespace' for profiling
*/
-void GCScan::GcScanDependentHandlesForProfilerAndETW (int max_gen, ProfilingScanContext* sc)
+void GCScan::GcScanDependentHandlesForProfilerAndETW (int max_gen, ScanContext* sc, handle_scan_fn fn)
{
LIMITED_METHOD_CONTRACT;
+#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
LOG((LF_GC|LF_GCROOTS, LL_INFO10, "Profiler Root Scan Phase, DependentHandles\n"));
- Ref_ScanDependentHandlesForProfilerAndETW(max_gen, sc);
-}
-
+ Ref_ScanDependentHandlesForProfilerAndETW(max_gen, sc, fn);
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+}
void GCScan::GcRuntimeStructuresValid (BOOL bValid)
{
diff --git a/src/gc/gcscan.h b/src/gc/gcscan.h
index 3515b8e1b6..362370fa4a 100644
--- a/src/gc/gcscan.h
+++ b/src/gc/gcscan.h
@@ -52,10 +52,8 @@ class GCScan
static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
#endif // DACCESS_COMPILE
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- static void GcScanHandlesForProfilerAndETW (int max_gen, ScanContext* sc);
- static void GcScanDependentHandlesForProfilerAndETW (int max_gen, ProfilingScanContext* sc);
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ static void GcScanHandlesForProfilerAndETW (int max_gen, ScanContext* sc, handle_scan_fn fn);
+ static void GcScanDependentHandlesForProfilerAndETW (int max_gen, ScanContext* sc, handle_scan_fn fn);
// scan for dead weak pointers
static void GcWeakPtrScan (promote_func* fn, int condemned, int max_gen, ScanContext*sc );
diff --git a/src/gc/objecthandle.cpp b/src/gc/objecthandle.cpp
index d8834b72f0..e8eed93006 100644
--- a/src/gc/objecthandle.cpp
+++ b/src/gc/objecthandle.cpp
@@ -110,6 +110,21 @@ void CALLBACK PromoteRefCounted(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtra
}
#endif // FEATURE_COMINTEROP || FEATURE_REDHAWK
+
+// Only used by profiling/ETW.
+//----------------------------------------------------------------------------
+
+/*
+ * struct DIAG_DEPSCANINFO
+ *
+ * used when tracing dependent handles for profiling/ETW.
+ */
+struct DIAG_DEPSCANINFO
+{
+ HANDLESCANPROC pfnTrace; // tracing function to use
+ uintptr_t pfnProfilingOrETW;
+};
+
void CALLBACK TraceDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
WRAPPER_NO_CONTRACT;
@@ -122,14 +137,15 @@ void CALLBACK TraceDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pEx
// object should also be non-NULL.
_ASSERTE(*pExtraInfo == NULL || *pObjRef != NULL);
- // lp2 is a HANDLESCANPROC
- HANDLESCANPROC pfnTrace = (HANDLESCANPROC) lp2;
+ struct DIAG_DEPSCANINFO *pInfo = (struct DIAG_DEPSCANINFO*)lp2;
+
+ HANDLESCANPROC pfnTrace = pInfo->pfnTrace;
// is the handle's secondary object non-NULL?
if ((*pObjRef != NULL) && (*pExtraInfo != 0))
{
// yes - call the tracing function for this handle
- pfnTrace(pObjRef, NULL, lp1, *pExtraInfo);
+ pfnTrace(pObjRef, NULL, lp1, (uintptr_t)(pInfo->pfnProfilingOrETW));
}
}
@@ -414,7 +430,7 @@ void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, uintpt
CONTRACTL_END;
#endif // FEATURE_REDHAWK
UNREFERENCED_PARAMETER(pExtraInfo);
- UNREFERENCED_PARAMETER(lp2);
+ handle_scan_fn fn = (handle_scan_fn)lp2;
LOG((LF_GC | LF_CORPROF, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Notifying profiler of ", pObjRef, "to ", *pObjRef)));
@@ -422,7 +438,7 @@ void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, uintpt
Object **pRef = (Object **)pObjRef;
// Get a hold of the heap ID that's tacked onto the end of the scancontext struct.
- ProfilingScanContext *pSC = (ProfilingScanContext *)lp1;
+ ScanContext *pSC = (ScanContext *)lp1;
uint32_t rootFlags = 0;
BOOL isDependent = FALSE;
@@ -487,60 +503,15 @@ void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, uintpt
_UNCHECKED_OBJECTREF pSec = NULL;
-#ifdef GC_PROFILING
- // Give the profiler the objectref.
- if (pSC->fProfilerPinned)
+ if (isDependent)
{
- if (!isDependent)
- {
- BEGIN_PIN_PROFILER(CORProfilerTrackGC());
- g_profControlBlock.pProfInterface->RootReference2(
- (uint8_t *)*pRef,
- kEtwGCRootKindHandle,
- (EtwGCRootFlags)rootFlags,
- pRef,
- &pSC->pHeapId);
- END_PIN_PROFILER();
- }
- else
- {
- BEGIN_PIN_PROFILER(CORProfilerTrackConditionalWeakTableElements());
- pSec = (_UNCHECKED_OBJECTREF)HndGetHandleExtraInfo(handle);
- g_profControlBlock.pProfInterface->ConditionalWeakTableElementReference(
- (uint8_t*)*pRef,
- (uint8_t*)pSec,
- pRef,
- &pSC->pHeapId);
- END_PIN_PROFILER();
- }
+ pSec = (_UNCHECKED_OBJECTREF)HndGetHandleExtraInfo(handle);
}
-#endif // GC_PROFILING
-
-#if defined(FEATURE_EVENT_TRACE)
- // Notify ETW of the handle
- if (ETW::GCLog::ShouldWalkHeapRootsForEtw())
- {
- if (isDependent && (pSec == NULL))
- {
- pSec = (_UNCHECKED_OBJECTREF)HndGetHandleExtraInfo(handle);
- }
-
- ETW::GCLog::RootReference(
- handle,
- *pRef, // object being rooted
- pSec, // pSecondaryNodeForDependentHandle
- isDependent,
- pSC,
- 0, // dwGCFlags,
- rootFlags); // ETW handle flags
- }
-#endif // defined(FEATURE_EVENT_TRACE)
+ fn(pRef, pSec, rootFlags, pSC, isDependent);
}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-
-
/*
* Scan callback for updating pointers.
*
@@ -1417,13 +1388,15 @@ void Ref_ScanDependentHandlesForRelocation(uint32_t condemned, uint32_t maxgen,
/*
loop scan version of TraceVariableHandles for single-thread-managed Ref_* functions
should be kept in sync with the code above
+ Only used by profiling/ETW.
*/
-void TraceDependentHandlesBySingleThread(HANDLESCANPROC pfnTrace, uintptr_t lp1, uint32_t condemned, uint32_t maxgen, uint32_t flags)
+void TraceDependentHandlesBySingleThread(HANDLESCANPROC pfnTrace, uintptr_t lp1, uintptr_t lp2, uint32_t condemned, uint32_t maxgen, uint32_t flags)
{
WRAPPER_NO_CONTRACT;
// set up to scan variable handles with the specified mask and trace function
uint32_t type = HNDTYPE_DEPENDENT;
+ struct DIAG_DEPSCANINFO info = { pfnTrace, lp2 };
HandleTableMap *walk = &g_HandleTableMap;
while (walk) {
@@ -1436,14 +1409,13 @@ void TraceDependentHandlesBySingleThread(HANDLESCANPROC pfnTrace, uintptr_t lp1,
HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex];
if (hTable)
HndScanHandlesForGC(hTable, TraceDependentHandle,
- lp1, (uintptr_t)pfnTrace, &type, 1, condemned, maxgen, HNDGCF_EXTRAINFO | flags);
+ lp1, (uintptr_t)&info, &type, 1, condemned, maxgen, HNDGCF_EXTRAINFO | flags);
}
}
walk = walk->pNext;
}
}
-
// We scan handle tables by their buckets (ie, AD index). We could get into the situation where
// the AD indices are not very compacted (for example if we have just unloaded ADs and their
// indices haven't been reused yet) and we could be scanning them in an unbalanced fashion.
@@ -1623,7 +1595,7 @@ void Ref_UpdatePointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Re
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
// Please update this if you change the Ref_UpdatePointers function above.
-void Ref_ScanPointersForProfilerAndETW(uint32_t maxgen, uintptr_t lp1)
+void Ref_ScanHandlesForProfilerAndETW(uint32_t maxgen, uintptr_t lp1, handle_scan_fn fn)
{
WRAPPER_NO_CONTRACT;
@@ -1662,16 +1634,16 @@ void Ref_ScanPointersForProfilerAndETW(uint32_t maxgen, uintptr_t lp1)
{
HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex];
if (hTable)
- HndScanHandlesForGC(hTable, &ScanPointerForProfilerAndETW, lp1, 0, types, _countof(types), maxgen, maxgen, flags);
+ HndScanHandlesForGC(hTable, &ScanPointerForProfilerAndETW, lp1, (uintptr_t)fn, types, _countof(types), maxgen, maxgen, flags);
}
walk = walk->pNext;
}
// update pointers in variable handles whose dynamic type is VHT_WEAK_SHORT, VHT_WEAK_LONG or VHT_STRONG
- TraceVariableHandlesBySingleThread(&ScanPointerForProfilerAndETW, lp1, 0, VHT_WEAK_SHORT | VHT_WEAK_LONG | VHT_STRONG, maxgen, maxgen, flags);
+ TraceVariableHandlesBySingleThread(&ScanPointerForProfilerAndETW, lp1, (uintptr_t)fn, VHT_WEAK_SHORT | VHT_WEAK_LONG | VHT_STRONG, maxgen, maxgen, flags);
}
-void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t maxgen, ProfilingScanContext * SC)
+void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t maxgen, ScanContext * SC, handle_scan_fn fn)
{
WRAPPER_NO_CONTRACT;
@@ -1680,12 +1652,7 @@ void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t maxgen, ProfilingScanCon
uint32_t flags = HNDGCF_NORMAL;
uintptr_t lp1 = (uintptr_t)SC;
- // we'll re-use pHeapId (which was either unused (0) or freed by EndRootReferences2
- // (-1)), so reset it to NULL
- _ASSERTE((*((size_t *)(&SC->pHeapId)) == (size_t)(-1)) ||
- (*((size_t *)(&SC->pHeapId)) == (size_t)(0)));
- SC->pHeapId = NULL;
- TraceDependentHandlesBySingleThread(&ScanPointerForProfilerAndETW, lp1, maxgen, maxgen, flags);
+ TraceDependentHandlesBySingleThread(&ScanPointerForProfilerAndETW, lp1, (uintptr_t)fn, maxgen, maxgen, flags);
}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
diff --git a/src/gc/objecthandle.h b/src/gc/objecthandle.h
index 89365267d6..34c2a0e321 100644
--- a/src/gc/objecthandle.h
+++ b/src/gc/objecthandle.h
@@ -652,7 +652,6 @@ BOOL Ref_ContainHandle(HandleTableBucket *pBucket, OBJECTHANDLE handle);
*/
struct ScanContext;
struct DhContext;
-struct ProfilingScanContext;
void Ref_BeginSynchronousGC (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration);
void Ref_EndSynchronousGC (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration);
@@ -672,10 +671,12 @@ void Ref_ScanSizedRefHandles(uint32_t condemned, uint32_t maxgen, ScanContext* s
void Ref_ScanPointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn);
#endif
+typedef void (* handle_scan_fn)(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, BOOL isDependent);
+
void Ref_CheckReachable (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1);
void Ref_CheckAlive (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1);
-void Ref_ScanPointersForProfilerAndETW(uint32_t uMaxGeneration, uintptr_t lp1);
-void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t uMaxGeneration, ProfilingScanContext * SC);
+void Ref_ScanHandlesForProfilerAndETW(uint32_t uMaxGeneration, uintptr_t lp1, handle_scan_fn fn);
+void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t uMaxGeneration, ScanContext * SC, handle_scan_fn fn);
void Ref_AgeHandles (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1);
void Ref_RejuvenateHandles(uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1);
diff --git a/src/gc/sample/gcenv.ee.cpp b/src/gc/sample/gcenv.ee.cpp
index 25d829e79e..779d0340ce 100644
--- a/src/gc/sample/gcenv.ee.cpp
+++ b/src/gc/sample/gcenv.ee.cpp
@@ -221,6 +221,34 @@ Thread* GCToEEInterface::CreateBackgroundThread(GCBackgroundThreadFunction threa
return NULL;
}
+void GCToEEInterface::DiagGCStart(int gen, bool isInduced)
+{
+}
+
+void GCToEEInterface::DiagUpdateGenerationBounds()
+{
+}
+
+void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent)
+{
+}
+
+void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext)
+{
+}
+
+void GCToEEInterface::DiagWalkSurvivors(void* gcContext)
+{
+}
+
+void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext)
+{
+}
+
+void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext)
+{
+}
+
void FinalizerThread::EnableFinalization()
{
// Signal to finalizer thread that there are objects to finalize