diff options
author | Maoni Stephens <Maoni0@users.noreply.github.com> | 2016-07-01 10:49:06 -0700 |
---|---|---|
committer | GitHub <noreply@github.com> | 2016-07-01 10:49:06 -0700 |
commit | fc8689e2486d09cd17f40538ac06ce5aafd2d87d (patch) | |
tree | 6d080983ee733b0030c4adaf881c5bcd97d12e94 | |
parent | 01d4bb6e1898ffb4d52084fe2c81347cedac27a0 (diff) | |
parent | fa2ff58d3198122cbbc216e0fd664d9e09120669 (diff) | |
download | coreclr-fc8689e2486d09cd17f40538ac06ce5aafd2d87d.tar.gz coreclr-fc8689e2486d09cd17f40538ac06ce5aafd2d87d.tar.bz2 coreclr-fc8689e2486d09cd17f40538ac06ce5aafd2d87d.zip |
Merge pull request #6084 from Maoni0/gc_etw
GC ETW fixes
-rw-r--r-- | src/gc/gc.cpp | 32 | ||||
-rwxr-xr-x | src/gc/gcee.cpp | 246 | ||||
-rw-r--r-- | src/gc/gcpriv.h | 15 | ||||
-rw-r--r-- | src/gc/handletable.cpp | 20 | ||||
-rw-r--r-- | src/vm/syncblk.cpp | 6 | ||||
-rw-r--r-- | src/vm/syncblk.h | 5 | ||||
-rw-r--r-- | src/vm/vars.cpp | 4 | ||||
-rw-r--r-- | src/vm/vars.hpp | 6 |
8 files changed, 190 insertions, 144 deletions
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp index dd4bcb3416..45888786b6 100644 --- a/src/gc/gc.cpp +++ b/src/gc/gc.cpp @@ -2457,6 +2457,10 @@ BOOL gc_heap::verify_pinned_queue_p = FALSE; uint8_t* gc_heap::oldest_pinned_plug = 0; +#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE) +size_t gc_heap::num_pinned_objects = 0; +#endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE + #ifdef FEATURE_LOH_COMPACTION size_t gc_heap::loh_pinned_queue_tos = 0; @@ -16360,6 +16364,10 @@ int gc_heap::garbage_collect (int n) settings.reason = gc_trigger_reason; verify_pinned_queue_p = FALSE; +#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE) + num_pinned_objects = 0; +#endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE + #ifdef STRESS_HEAP if (settings.reason == reason_gcstress) { @@ -19869,10 +19877,30 @@ void gc_heap::pin_object (uint8_t* o, uint8_t** ppObject, uint8_t* low, uint8_t* { fire_etw_pin_object_event(o, ppObject); } -#endif // FEATURE_EVENT_TRACE - COUNTER_ONLY(GetPerfCounters().m_GC.cPinnedObj ++); +#endif // FEATURE_EVENT_TRACE + +#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE) + num_pinned_objects++; +#endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE + } +} + +#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE) +size_t gc_heap::get_total_pinned_objects() +{ +#ifdef MULTIPLE_HEAPS + size_t total_num_pinned_objects = 0; + for (int i = 0; i < gc_heap::n_heaps; i++) + { + gc_heap* hp = gc_heap::g_heaps[i]; + total_num_pinned_objects += hp->num_pinned_objects; } + return total_num_pinned_objects; +#else //MULTIPLE_HEAPS + return num_pinned_objects; +#endif //MULTIPLE_HEAPS } +#endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE void gc_heap::reset_mark_stack () { diff --git a/src/gc/gcee.cpp b/src/gc/gcee.cpp index 3578b6d7f5..bb0a6536f4 100755 --- a/src/gc/gcee.cpp +++ b/src/gc/gcee.cpp @@ -15,6 +15,11 @@ COUNTER_ONLY(PERF_COUNTER_TIMER_PRECISION g_TotalTimeInGC = 0); 
COUNTER_ONLY(PERF_COUNTER_TIMER_PRECISION g_TotalTimeSinceLastGCEnd = 0); +#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE) +size_t g_GenerationSizes[NUMBERGENERATIONS]; +size_t g_GenerationPromotedSizes[NUMBERGENERATIONS]; +#endif // ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE + void GCHeap::UpdatePreGCCounters() { #if defined(ENABLE_PERF_COUNTERS) @@ -56,7 +61,6 @@ void GCHeap::UpdatePreGCCounters() GetPerfCounters().m_GC.cbAlloc += allocation_0; GetPerfCounters().m_GC.cbAlloc += allocation_3; GetPerfCounters().m_GC.cbLargeAlloc += allocation_3; - GetPerfCounters().m_GC.cPinnedObj = 0; #ifdef _PREFAST_ // prefix complains about us dereferencing hp in wks build even though we only access static members @@ -106,9 +110,13 @@ void GCHeap::UpdatePreGCCounters() void GCHeap::UpdatePostGCCounters() { -#ifdef FEATURE_EVENT_TRACE - // Use of temporary variables to avoid rotor build warnings - ETW::GCLog::ETW_GC_INFO Info; + totalSurvivedSize = gc_heap::get_total_survived_size(); + + // + // The following is for instrumentation. + // + // Calculate the common ones for ETW and perf counters. +#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE) #ifdef MULTIPLE_HEAPS //take the first heap.... gc_mechanisms *pSettings = &gc_heap::g_heaps[0]->settings; @@ -117,147 +125,151 @@ void GCHeap::UpdatePostGCCounters() #endif //MULTIPLE_HEAPS int condemned_gen = pSettings->condemned_generation; - Info.GCEnd.Depth = condemned_gen; - Info.GCEnd.Count = (uint32_t)pSettings->gc_index; - ETW::GCLog::FireGcEndAndGenerationRanges(Info.GCEnd.Count, Info.GCEnd.Depth); - int xGen; - ETW::GCLog::ETW_GC_INFO HeapInfo; - ZeroMemory(&HeapInfo, sizeof(HeapInfo)); - size_t youngest_gen_size = 0; + memset (g_GenerationSizes, 0, sizeof (g_GenerationSizes)); + memset (g_GenerationPromotedSizes, 0, sizeof (g_GenerationPromotedSizes)); -#ifdef MULTIPLE_HEAPS - //take the first heap.... 
- gc_heap* hp1 = gc_heap::g_heaps[0]; -#else - gc_heap* hp1 = pGenGCHeap; -#endif //MULTIPLE_HEAPS + size_t total_num_gc_handles = g_dwHandles; + uint32_t total_num_sync_blocks = SyncBlockCache::GetSyncBlockCache()->GetActiveCount(); + + // Note this is however for perf counter only, for legacy reasons. What we showed + // in perf counters for "gen0 size" was really the gen0 budget which made + // sense (somewhat) at the time. For backward compatibility we are keeping + // this calculated the same way. For ETW we use the true gen0 size (and + // gen0 budget is also reported in an event). + size_t youngest_budget = 0; size_t promoted_finalization_mem = 0; + size_t total_num_pinned_objects = gc_heap::get_total_pinned_objects(); - totalSurvivedSize = gc_heap::get_total_survived_size(); +#ifndef FEATURE_REDHAWK + // if a max gen garbage collection was performed, resync the GC Handle counter; + // if threads are currently suspended, we do not need to obtain a lock on each handle table + if (condemned_gen == max_generation) + total_num_gc_handles = HndCountAllHandles(!GCHeap::IsGCInProgress()); +#endif //FEATURE_REDHAWK - for (xGen = 0; xGen <= (max_generation+1); xGen++) + // per generation calculation. + for (int gen_index = 0; gen_index <= (max_generation+1); gen_index++) { - size_t gensize = 0; - size_t promoted_mem = 0; - #ifdef MULTIPLE_HEAPS int hn = 0; - for (hn = 0; hn < gc_heap::n_heaps; hn++) { - gc_heap* hp2 = gc_heap::g_heaps [hn]; - dynamic_data* dd2 = hp2->dynamic_data_of (xGen); - - // Generation 0 is empty (if there isn't demotion) so its size is 0 - // It is more interesting to report the desired size before next collection. - // Gen 1 is also more accurate if desired is reported due to sampling intervals. 
- if (xGen == 0) + gc_heap* hp = gc_heap::g_heaps[hn]; +#else + gc_heap* hp = pGenGCHeap; { - youngest_gen_size += dd_desired_allocation (hp2->dynamic_data_of (xGen)); - } +#endif //MULTIPLE_HEAPS + dynamic_data* dd = hp->dynamic_data_of (gen_index); - gensize += hp2->generation_size(xGen); + if (gen_index == 0) + { + youngest_budget += dd_desired_allocation (hp->dynamic_data_of (gen_index)); + } - if (xGen <= condemned_gen) - { - promoted_mem += dd_promoted_size (dd2); - } + g_GenerationSizes[gen_index] += hp->generation_size (gen_index); - if ((xGen == (max_generation+1)) && (condemned_gen == max_generation)) - { - promoted_mem += dd_promoted_size (dd2); - } + if (gen_index <= condemned_gen) + { + g_GenerationPromotedSizes[gen_index] += dd_promoted_size (dd); + } - if (xGen == 0) - { - promoted_finalization_mem += dd_freach_previous_promotion (dd2); + if ((gen_index == (max_generation+1)) && (condemned_gen == max_generation)) + { + g_GenerationPromotedSizes[gen_index] += dd_promoted_size (dd); + } + + if (gen_index == 0) + { + promoted_finalization_mem += dd_freach_previous_promotion (dd); + } +#ifdef MULTIPLE_HEAPS } - } #else - if (xGen == 0) - { - youngest_gen_size = dd_desired_allocation (hp1->dynamic_data_of (xGen)); - } - - gensize = hp1->generation_size(xGen); - if (xGen <= condemned_gen) - { - promoted_mem = dd_promoted_size (hp1->dynamic_data_of (xGen)); } +#endif //MULTIPLE_HEAPS + } +#endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE - if ((xGen == (max_generation+1)) && (condemned_gen == max_generation)) - { - promoted_mem = dd_promoted_size (hp1->dynamic_data_of (max_generation+1)); - } +#ifdef FEATURE_EVENT_TRACE + ETW::GCLog::ETW_GC_INFO Info; - if (xGen == 0) - { - promoted_finalization_mem = dd_freach_previous_promotion (hp1->dynamic_data_of (xGen)); - } + Info.GCEnd.Depth = condemned_gen; + Info.GCEnd.Count = (uint32_t)pSettings->gc_index; + ETW::GCLog::FireGcEndAndGenerationRanges(Info.GCEnd.Count, Info.GCEnd.Depth); -#endif //MULTIPLE_HEAPS 
+ ETW::GCLog::ETW_GC_INFO HeapInfo; + ZeroMemory(&HeapInfo, sizeof(HeapInfo)); - HeapInfo.HeapStats.GenInfo[xGen].GenerationSize = gensize; - HeapInfo.HeapStats.GenInfo[xGen].TotalPromotedSize = promoted_mem; + for (int gen_index = 0; gen_index <= (max_generation+1); gen_index++) + { + HeapInfo.HeapStats.GenInfo[gen_index].GenerationSize = g_GenerationSizes[gen_index]; + HeapInfo.HeapStats.GenInfo[gen_index].TotalPromotedSize = g_GenerationPromotedSizes[gen_index]; } - { #ifdef SIMPLE_DPRINTF - dprintf (2, ("GC#%d: 0: %Id(%Id); 1: %Id(%Id); 2: %Id(%Id); 3: %Id(%Id)", - Info.GCEnd.Count, - HeapInfo.HeapStats.GenInfo[0].GenerationSize, - HeapInfo.HeapStats.GenInfo[0].TotalPromotedSize, - HeapInfo.HeapStats.GenInfo[1].GenerationSize, - HeapInfo.HeapStats.GenInfo[1].TotalPromotedSize, - HeapInfo.HeapStats.GenInfo[2].GenerationSize, - HeapInfo.HeapStats.GenInfo[2].TotalPromotedSize, - HeapInfo.HeapStats.GenInfo[3].GenerationSize, - HeapInfo.HeapStats.GenInfo[3].TotalPromotedSize)); + dprintf (2, ("GC#%d: 0: %Id(%Id); 1: %Id(%Id); 2: %Id(%Id); 3: %Id(%Id)", + Info.GCEnd.Count, + HeapInfo.HeapStats.GenInfo[0].GenerationSize, + HeapInfo.HeapStats.GenInfo[0].TotalPromotedSize, + HeapInfo.HeapStats.GenInfo[1].GenerationSize, + HeapInfo.HeapStats.GenInfo[1].TotalPromotedSize, + HeapInfo.HeapStats.GenInfo[2].GenerationSize, + HeapInfo.HeapStats.GenInfo[2].TotalPromotedSize, + HeapInfo.HeapStats.GenInfo[3].GenerationSize, + HeapInfo.HeapStats.GenInfo[3].TotalPromotedSize)); #endif //SIMPLE_DPRINTF - } HeapInfo.HeapStats.FinalizationPromotedSize = promoted_finalization_mem; HeapInfo.HeapStats.FinalizationPromotedCount = GetFinalizablePromotedCount(); + HeapInfo.HeapStats.PinnedObjectCount = (uint32_t)total_num_pinned_objects; + HeapInfo.HeapStats.SinkBlockCount = total_num_sync_blocks; + HeapInfo.HeapStats.GCHandleCount = (uint32_t)total_num_gc_handles; -#if defined(ENABLE_PERF_COUNTERS) - - // if a max gen garbage collection was performed, resync the GC Handle counter; - // if 
threads are currently suspended, we do not need to obtain a lock on each handle table - if (condemned_gen == max_generation) - GetPerfCounters().m_GC.cHandles = HndCountAllHandles(!GCHeap::IsGCInProgress()); + FireEtwGCHeapStats_V1(HeapInfo.HeapStats.GenInfo[0].GenerationSize, HeapInfo.HeapStats.GenInfo[0].TotalPromotedSize, + HeapInfo.HeapStats.GenInfo[1].GenerationSize, HeapInfo.HeapStats.GenInfo[1].TotalPromotedSize, + HeapInfo.HeapStats.GenInfo[2].GenerationSize, HeapInfo.HeapStats.GenInfo[2].TotalPromotedSize, + HeapInfo.HeapStats.GenInfo[3].GenerationSize, HeapInfo.HeapStats.GenInfo[3].TotalPromotedSize, + HeapInfo.HeapStats.FinalizationPromotedSize, + HeapInfo.HeapStats.FinalizationPromotedCount, + HeapInfo.HeapStats.PinnedObjectCount, + HeapInfo.HeapStats.SinkBlockCount, + HeapInfo.HeapStats.GCHandleCount, + GetClrInstanceId()); +#endif // FEATURE_EVENT_TRACE - for (xGen = 0; xGen <= (max_generation+1); xGen++) +#if defined(ENABLE_PERF_COUNTERS) + for (int gen_index = 0; gen_index <= (max_generation+1); gen_index++) { - _ASSERTE(FitsIn<size_t>(HeapInfo.HeapStats.GenInfo[xGen].GenerationSize)); - _ASSERTE(FitsIn<size_t>(HeapInfo.HeapStats.GenInfo[xGen].TotalPromotedSize)); + _ASSERTE(FitsIn<size_t>(g_GenerationSizes[gen_index])); + _ASSERTE(FitsIn<size_t>(g_GenerationPromotedSizes[gen_index])); - if (xGen == (max_generation+1)) + if (gen_index == (max_generation+1)) { - GetPerfCounters().m_GC.cLrgObjSize = static_cast<size_t>(HeapInfo.HeapStats.GenInfo[xGen].GenerationSize); + GetPerfCounters().m_GC.cLrgObjSize = static_cast<size_t>(g_GenerationSizes[gen_index]); } else { - GetPerfCounters().m_GC.cGenHeapSize[xGen] = ((xGen == 0) ? - youngest_gen_size : - static_cast<size_t>(HeapInfo.HeapStats.GenInfo[xGen].GenerationSize)); + GetPerfCounters().m_GC.cGenHeapSize[gen_index] = ((gen_index == 0) ? + youngest_budget : + static_cast<size_t>(g_GenerationSizes[gen_index])); } // the perf counters only count the promoted size for gen0 and gen1. 
- if (xGen < max_generation) + if (gen_index < max_generation) { - GetPerfCounters().m_GC.cbPromotedMem[xGen] = static_cast<size_t>(HeapInfo.HeapStats.GenInfo[xGen].TotalPromotedSize); + GetPerfCounters().m_GC.cbPromotedMem[gen_index] = static_cast<size_t>(g_GenerationPromotedSizes[gen_index]); } - if (xGen <= max_generation) + if (gen_index <= max_generation) { - GetPerfCounters().m_GC.cGenCollections[xGen] = - dd_collection_count (hp1->dynamic_data_of (xGen)); + GetPerfCounters().m_GC.cGenCollections[gen_index] = + dd_collection_count (hp1->dynamic_data_of (gen_index)); } } - //Committed memory + // Committed and reserved memory { size_t committed_mem = 0; size_t reserved_mem = 0; @@ -265,24 +277,20 @@ void GCHeap::UpdatePostGCCounters() int hn = 0; for (hn = 0; hn < gc_heap::n_heaps; hn++) { - gc_heap* hp2 = gc_heap::g_heaps [hn]; + gc_heap* hp = gc_heap::g_heaps [hn]; #else - gc_heap* hp2 = hp1; + gc_heap* hp = pGenGCHeap; { #endif //MULTIPLE_HEAPS - heap_segment* seg = - generation_start_segment (hp2->generation_of (max_generation)); + heap_segment* seg = generation_start_segment (hp->generation_of (max_generation)); while (seg) { - committed_mem += heap_segment_committed (seg) - - heap_segment_mem (seg); - reserved_mem += heap_segment_reserved (seg) - - heap_segment_mem (seg); + committed_mem += heap_segment_committed (seg) - heap_segment_mem (seg); + reserved_mem += heap_segment_reserved (seg) - heap_segment_mem (seg); seg = heap_segment_next (seg); } //same for large segments - seg = - generation_start_segment (hp2->generation_of (max_generation + 1)); + seg = generation_start_segment (hp->generation_of (max_generation + 1)); while (seg) { committed_mem += heap_segment_committed (seg) - @@ -297,10 +305,8 @@ void GCHeap::UpdatePostGCCounters() } #endif //MULTIPLE_HEAPS - GetPerfCounters().m_GC.cTotalCommittedBytes = - committed_mem; - GetPerfCounters().m_GC.cTotalReservedBytes = - reserved_mem; + GetPerfCounters().m_GC.cTotalCommittedBytes = committed_mem; + 
GetPerfCounters().m_GC.cTotalReservedBytes = reserved_mem; } _ASSERTE(FitsIn<size_t>(HeapInfo.HeapStats.FinalizationPromotedSize)); @@ -333,22 +339,10 @@ void GCHeap::UpdatePostGCCounters() g_TotalTimeSinceLastGCEnd = _currentPerfCounterTimer; - HeapInfo.HeapStats.PinnedObjectCount = (uint32_t)(GetPerfCounters().m_GC.cPinnedObj); - HeapInfo.HeapStats.SinkBlockCount = (uint32_t)(GetPerfCounters().m_GC.cSinkBlocks); - HeapInfo.HeapStats.GCHandleCount = (uint32_t)(GetPerfCounters().m_GC.cHandles); + GetPerfCounters().m_GC.cPinnedObj = total_num_pinned_objects; + GetPerfCounters().m_GC.cHandles = total_num_gc_handles; + GetPerfCounters().m_GC.cSinkBlocks = total_num_sync_blocks; #endif //ENABLE_PERF_COUNTERS - - FireEtwGCHeapStats_V1(HeapInfo.HeapStats.GenInfo[0].GenerationSize, HeapInfo.HeapStats.GenInfo[0].TotalPromotedSize, - HeapInfo.HeapStats.GenInfo[1].GenerationSize, HeapInfo.HeapStats.GenInfo[1].TotalPromotedSize, - HeapInfo.HeapStats.GenInfo[2].GenerationSize, HeapInfo.HeapStats.GenInfo[2].TotalPromotedSize, - HeapInfo.HeapStats.GenInfo[3].GenerationSize, HeapInfo.HeapStats.GenInfo[3].TotalPromotedSize, - HeapInfo.HeapStats.FinalizationPromotedSize, - HeapInfo.HeapStats.FinalizationPromotedCount, - HeapInfo.HeapStats.PinnedObjectCount, - HeapInfo.HeapStats.SinkBlockCount, - HeapInfo.HeapStats.GCHandleCount, - GetClrInstanceId()); -#endif // FEATURE_EVENT_TRACE } size_t GCHeap::GetCurrentObjSize() diff --git a/src/gc/gcpriv.h b/src/gc/gcpriv.h index 703059a636..4a2e929620 100644 --- a/src/gc/gcpriv.h +++ b/src/gc/gcpriv.h @@ -2072,6 +2072,12 @@ protected: PER_HEAP void pin_object (uint8_t* o, uint8_t** ppObject, uint8_t* low, uint8_t* high); + +#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE) + PER_HEAP_ISOLATED + size_t get_total_pinned_objects(); +#endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE + PER_HEAP void reset_mark_stack (); PER_HEAP @@ -3006,10 +3012,15 @@ protected: mark* mark_stack_array; PER_HEAP - BOOL verify_pinned_queue_p; + 
BOOL verify_pinned_queue_p; + + PER_HEAP + uint8_t* oldest_pinned_plug; +#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE) PER_HEAP - uint8_t* oldest_pinned_plug; + size_t num_pinned_objects; +#endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE #ifdef FEATURE_LOH_COMPACTION PER_HEAP diff --git a/src/gc/handletable.cpp b/src/gc/handletable.cpp index a1e86f898e..43b43ffcea 100644 --- a/src/gc/handletable.cpp +++ b/src/gc/handletable.cpp @@ -353,8 +353,9 @@ OBJECTHANDLE HndCreateHandle(HHANDLETABLE hTable, uint32_t uType, OBJECTREF obje // store the reference HndAssignHandle(handle, object); - // update perf-counters: track number of handles - COUNTER_ONLY(GetPerfCounters().m_GC.cHandles ++); +#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE) + g_dwHandles++; +#endif // ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE #ifdef GC_PROFILING { @@ -503,8 +504,9 @@ void HndDestroyHandle(HHANDLETABLE hTable, uint32_t uType, OBJECTHANDLE handle) } #endif //GC_PROFILING - // update perf-counters: track number of handles - COUNTER_ONLY(GetPerfCounters().m_GC.cHandles --); +#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE) + g_dwHandles--; +#endif // ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE // sanity check the type index _ASSERTE(uType < pTable->uTypeCount); @@ -583,8 +585,9 @@ uint32_t HndCreateHandles(HHANDLETABLE hTable, uint32_t uType, OBJECTHANDLE *pHa uSatisfied += TableAllocHandlesFromCache(pTable, uType, pHandles + uSatisfied, uCount - uSatisfied); } - // update perf-counters: track number of handles - COUNTER_ONLY(GetPerfCounters().m_GC.cHandles += uSatisfied); +#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE) + g_dwHandles += uSatisfied; +#endif // ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE #ifdef GC_PROFILING { @@ -629,8 +632,9 @@ void HndDestroyHandles(HHANDLETABLE hTable, uint32_t uType, const OBJECTHANDLE * } #endif - // update perf-counters: track number of handles - 
COUNTER_ONLY(GetPerfCounters().m_GC.cHandles -= uCount); +#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE) + g_dwHandles -= uCount; +#endif // ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE // is this a small number of handles? if (uCount <= SMALL_ALLOC_COUNT) diff --git a/src/vm/syncblk.cpp b/src/vm/syncblk.cpp index 8fded72f30..3975542d98 100644 --- a/src/vm/syncblk.cpp +++ b/src/vm/syncblk.cpp @@ -1055,7 +1055,6 @@ SyncBlock *SyncBlockCache::GetNextFreeSyncBlock() SyncBlock *psb; SLink *plst = m_FreeBlockList; - COUNTER_ONLY(GetPerfCounters().m_GC.cSinkBlocks ++); m_ActiveCount++; if (plst) @@ -1311,8 +1310,6 @@ void SyncBlockCache::DeleteSyncBlockMemory(SyncBlock *psb) } CONTRACTL_END - COUNTER_ONLY(GetPerfCounters().m_GC.cSinkBlocks --); - m_ActiveCount--; m_FreeCount++; @@ -1337,9 +1334,6 @@ void SyncBlockCache::GCDeleteSyncBlock(SyncBlock *psb) // operator delete). delete psb; - COUNTER_ONLY(GetPerfCounters().m_GC.cSinkBlocks --); - - m_ActiveCount--; m_FreeCount++; diff --git a/src/vm/syncblk.h b/src/vm/syncblk.h index 82930dd636..6d32e3eafa 100644 --- a/src/vm/syncblk.h +++ b/src/vm/syncblk.h @@ -1028,6 +1028,11 @@ class SyncBlockCache return m_bSyncBlockCleanupInProgress; } + DWORD GetActiveCount() + { + return m_ActiveCount; + } + // Encapsulate a CrstHolder, so that clients of our lock don't have to know // the details of our implementation. 
class LockHolder : public CrstHolder diff --git a/src/vm/vars.cpp b/src/vm/vars.cpp index e4543bf3cf..b737e66cd5 100644 --- a/src/vm/vars.cpp +++ b/src/vm/vars.cpp @@ -110,6 +110,10 @@ GPTR_IMPL(Thread,g_pSuspensionThread); // Global SyncBlock cache GPTR_IMPL(SyncTableEntry,g_pSyncTable); +#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE) +DWORD g_dwHandles = 0; +#endif // ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE + #ifdef STRESS_LOG GPTR_IMPL_INIT(StressLog, g_pStressLog, &StressLog::theLog); #endif diff --git a/src/vm/vars.hpp b/src/vm/vars.hpp index 576f0061db..d197e0559d 100644 --- a/src/vm/vars.hpp +++ b/src/vm/vars.hpp @@ -472,6 +472,12 @@ GPTR_DECL(Thread,g_pSuspensionThread); typedef DPTR(SyncTableEntry) PTR_SyncTableEntry; GPTR_DECL(SyncTableEntry, g_pSyncTable); +#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE) +// Note this is not updated in a thread safe way so the value may not be accurate. We get +// it accurately in full GCs if the handle count is requested. +extern DWORD g_dwHandles; +#endif // ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE + #ifdef FEATURE_COMINTEROP // Global RCW cleanup list typedef DPTR(RCWCleanupList) PTR_RCWCleanupList; |