summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSean Gillespie <segilles@microsoft.com>2016-09-08 11:27:24 -0700
committerSean Gillespie <segilles@microsoft.com>2016-09-08 11:27:24 -0700
commit6519911fbccbec049f9592484f69c213b4b78264 (patch)
treecfb37485afcc96a1780e106a4f99d192ff42ab2a
parentcda9b0fc9a5f206b391b1bd104e43bd6c29aad7f (diff)
downloadcoreclr-6519911fbccbec049f9592484f69c213b4b78264.tar.gz
coreclr-6519911fbccbec049f9592484f69c213b4b78264.tar.bz2
coreclr-6519911fbccbec049f9592484f69c213b4b78264.zip
Introduce an interface separating the GC and the VM,
modifying the VM to utilize this interface. Introduce an interface separating the GC and the rest of the VM Remove static members of both IGCHeap and IGCHeapInternal and move the management of the singular GC heap to the VM. Rename uses of IGCHeap in the VM to GCHeapHolder, as well as other misc. renames throughout the VM and GC. Split each interface function into categories, document them, use consistent formatting across the interface Undo some accidental find/replace collateral damage Remove all ifdefs from the GC interface Deduplicate function declarations between IGCHeap and IGCHeapInternal, expose AllocAlign8 through the interface and the reference to alloc_context to repair the ARM build Paper cut: false -> nullptr Repair the ARM and x86 builds Rename GCHeapHolder -> GCHeapUtilities and address documentation feedback Rebase against master Rename gcholder.h/cpp -> gcheaputilities.h/cpp Fix an uninitialized field on alloc_context causing test failures on clang Rename the include guard for gcheaputilities.h Un-breaks SOS by making the following changes: 1) Instructs the DAC to look for IGCHeap::gcHeapType by name, instead of assuming that it exists near g_pGCHeap, 2) Eliminate all virtual calls on IGCHeap in the DAC, since we cannot dispatch on an object in another process, 3) Because of 2, expose the number of generations past the GC interface using a static variable on IGCHeap that the DAC can read directly. repair the Windows build
-rw-r--r--src/classlibnative/bcltype/arraynative.cpp2
-rw-r--r--src/classlibnative/bcltype/system.cpp2
-rw-r--r--src/debug/daccess/daccess.cpp6
-rw-r--r--src/debug/daccess/dacdbiimpl.cpp8
-rw-r--r--src/debug/daccess/enummem.cpp6
-rw-r--r--src/debug/daccess/request.cpp76
-rw-r--r--src/debug/daccess/request_svr.cpp4
-rw-r--r--src/gc/env/gcenv.ee.h5
-rw-r--r--src/gc/gc.cpp168
-rw-r--r--src/gc/gc.h531
-rw-r--r--src/gc/gccommon.cpp43
-rw-r--r--src/gc/gcee.cpp8
-rw-r--r--src/gc/gcimpl.h52
-rw-r--r--src/gc/gcinterface.h509
-rw-r--r--src/gc/gcpriv.h4
-rw-r--r--src/gc/gcscan.cpp6
-rw-r--r--src/gc/handletable.cpp8
-rw-r--r--src/gc/handletablecore.cpp5
-rw-r--r--src/gc/handletablescan.cpp12
-rw-r--r--src/gc/objecthandle.cpp38
-rw-r--r--src/gc/sample/GCSample.cpp4
-rw-r--r--src/gc/sample/gcenv.ee.cpp6
-rw-r--r--src/gc/sample/gcenv.h3
-rw-r--r--src/inc/dacvars.h5
-rw-r--r--src/inc/stresslog.h2
-rw-r--r--src/strongname/api/common.h2
-rw-r--r--src/vm/CMakeLists.txt1
-rw-r--r--src/vm/amd64/asmconstants.h4
-rw-r--r--src/vm/amd64/excepamd64.cpp2
-rw-r--r--src/vm/amd64/jitinterfaceamd64.cpp2
-rw-r--r--src/vm/appdomain.cpp26
-rw-r--r--src/vm/appdomain.hpp11
-rw-r--r--src/vm/arm/asmconstants.h4
-rw-r--r--src/vm/arm/stubs.cpp2
-rw-r--r--src/vm/ceeload.cpp6
-rw-r--r--src/vm/ceemain.cpp9
-rw-r--r--src/vm/classcompat.cpp2
-rw-r--r--src/vm/codeman.cpp6
-rw-r--r--src/vm/commemoryfailpoint.cpp2
-rw-r--r--src/vm/common.h2
-rw-r--r--src/vm/comutilnative.cpp54
-rw-r--r--src/vm/corhost.cpp10
-rw-r--r--src/vm/crossgencompile.cpp4
-rw-r--r--src/vm/crst.cpp4
-rw-r--r--src/vm/debugdebugger.cpp2
-rw-r--r--src/vm/domainfile.cpp4
-rw-r--r--src/vm/dwreport.cpp2
-rw-r--r--src/vm/eetoprofinterfaceimpl.cpp20
-rw-r--r--src/vm/eventtrace.cpp20
-rw-r--r--src/vm/excep.cpp2
-rw-r--r--src/vm/finalizerthread.cpp28
-rw-r--r--src/vm/frames.cpp2
-rw-r--r--src/vm/gccover.cpp12
-rw-r--r--src/vm/gcenv.ee.cpp6
-rw-r--r--src/vm/gcheaputilities.cpp9
-rw-r--r--src/vm/gcheaputilities.h110
-rw-r--r--src/vm/gchelpers.cpp32
-rw-r--r--src/vm/gchost.cpp12
-rw-r--r--src/vm/gcinterface.h (renamed from src/vm/gc.h)2
-rw-r--r--src/vm/gcstress.h12
-rw-r--r--src/vm/hash.cpp4
-rw-r--r--src/vm/i386/excepx86.cpp2
-rw-r--r--src/vm/i386/jitinterfacex86.cpp14
-rw-r--r--src/vm/i386/virtualcallstubcpu.hpp2
-rw-r--r--src/vm/interoputil.cpp2
-rw-r--r--src/vm/interpreter.cpp2
-rw-r--r--src/vm/jithelpers.cpp20
-rw-r--r--src/vm/jitinterface.cpp2
-rw-r--r--src/vm/jitinterfacegen.cpp2
-rw-r--r--src/vm/marshalnative.cpp2
-rw-r--r--src/vm/mdaassistants.cpp4
-rw-r--r--src/vm/memberload.cpp2
-rw-r--r--src/vm/message.cpp2
-rw-r--r--src/vm/methodtable.cpp8
-rw-r--r--src/vm/nativeoverlapped.h2
-rw-r--r--src/vm/object.cpp30
-rw-r--r--src/vm/prestub.cpp2
-rw-r--r--src/vm/profattach.cpp2
-rw-r--r--src/vm/profilinghelper.cpp2
-rw-r--r--src/vm/proftoeeinterfaceimpl.cpp14
-rw-r--r--src/vm/rcwwalker.cpp4
-rw-r--r--src/vm/runtimecallablewrapper.cpp6
-rw-r--r--src/vm/safehandle.cpp4
-rw-r--r--src/vm/siginfo.cpp2
-rw-r--r--src/vm/stubhelpers.cpp12
-rw-r--r--src/vm/syncblk.cpp16
-rw-r--r--src/vm/syncclean.cpp2
-rw-r--r--src/vm/testhookmgr.cpp2
-rw-r--r--src/vm/threadpoolrequest.cpp4
-rw-r--r--src/vm/threads.cpp14
-rw-r--r--src/vm/threads.h8
-rw-r--r--src/vm/threadsuspend.cpp44
-rw-r--r--src/vm/vars.hpp2
-rw-r--r--src/vm/win32threadpool.cpp14
94 files changed, 1257 insertions, 938 deletions
diff --git a/src/classlibnative/bcltype/arraynative.cpp b/src/classlibnative/bcltype/arraynative.cpp
index b1aa6f8751..f247fc46c5 100644
--- a/src/classlibnative/bcltype/arraynative.cpp
+++ b/src/classlibnative/bcltype/arraynative.cpp
@@ -961,7 +961,7 @@ void memmoveGCRefs(void *dest, const void *src, size_t len)
}
}
- GCHeap::GetGCHeap()->SetCardsAfterBulkCopy((Object**)dest, len);
+ GCHeapUtilities::GetGCHeap()->SetCardsAfterBulkCopy((Object**)dest, len);
}
void ArrayNative::ArrayCopyNoTypeCheck(BASEARRAYREF pSrc, unsigned int srcIndex, BASEARRAYREF pDest, unsigned int destIndex, unsigned int length)
diff --git a/src/classlibnative/bcltype/system.cpp b/src/classlibnative/bcltype/system.cpp
index e902734b23..bb3e1792b4 100644
--- a/src/classlibnative/bcltype/system.cpp
+++ b/src/classlibnative/bcltype/system.cpp
@@ -673,7 +673,7 @@ FCIMPL0(FC_BOOL_RET, SystemNative::IsServerGC)
{
FCALL_CONTRACT;
- FC_RETURN_BOOL(GCHeap::IsServerHeap());
+ FC_RETURN_BOOL(GCHeapUtilities::IsServerHeap());
}
FCIMPLEND
diff --git a/src/debug/daccess/daccess.cpp b/src/debug/daccess/daccess.cpp
index ba3995b1f7..aebfa43a91 100644
--- a/src/debug/daccess/daccess.cpp
+++ b/src/debug/daccess/daccess.cpp
@@ -7974,7 +7974,7 @@ HRESULT DacHandleWalker::Init(ClrDataAccess *dac, UINT types[], UINT typeCount,
{
SUPPORTS_DAC;
- if (gen < 0 || gen > (int)GCHeap::GetMaxGeneration())
+ if (gen < 0 || gen > (int)GCHeapUtilities::GetMaxGeneration())
return E_INVALIDARG;
mGenerationFilter = gen;
@@ -8033,7 +8033,7 @@ bool DacHandleWalker::FetchMoreHandles(HANDLESCANPROC callback)
int max_slots = 1;
#ifdef FEATURE_SVR_GC
- if (GCHeap::IsServerHeap())
+ if (GCHeapUtilities::IsServerHeap())
max_slots = GCHeapCount();
#endif // FEATURE_SVR_GC
@@ -8089,7 +8089,7 @@ bool DacHandleWalker::FetchMoreHandles(HANDLESCANPROC callback)
HndScanHandlesForGC(hTable, callback,
(LPARAM)&param, 0,
&handleType, 1,
- mGenerationFilter, GCHeap::GetMaxGeneration(), 0);
+ mGenerationFilter, GCHeapUtilities::GetMaxGeneration(), 0);
else
HndEnumHandles(hTable, &handleType, 1, callback, (LPARAM)&param, 0, FALSE);
}
diff --git a/src/debug/daccess/dacdbiimpl.cpp b/src/debug/daccess/dacdbiimpl.cpp
index 9b17f4cd46..cb9c0310cc 100644
--- a/src/debug/daccess/dacdbiimpl.cpp
+++ b/src/debug/daccess/dacdbiimpl.cpp
@@ -6517,7 +6517,7 @@ HRESULT DacHeapWalker::Init(CORDB_ADDRESS start, CORDB_ADDRESS end)
if (thread == NULL)
continue;
- alloc_context *ctx = thread->GetAllocContext();
+ gc_alloc_context *ctx = thread->GetAllocContext();
if (ctx == NULL)
continue;
@@ -6533,7 +6533,7 @@ HRESULT DacHeapWalker::Init(CORDB_ADDRESS start, CORDB_ADDRESS end)
}
#ifdef FEATURE_SVR_GC
- HRESULT hr = GCHeap::IsServerHeap() ? InitHeapDataSvr(mHeaps, mHeapCount) : InitHeapDataWks(mHeaps, mHeapCount);
+ HRESULT hr = GCHeapUtilities::IsServerHeap() ? InitHeapDataSvr(mHeaps, mHeapCount) : InitHeapDataWks(mHeaps, mHeapCount);
#else
HRESULT hr = InitHeapDataWks(mHeaps, mHeapCount);
#endif
@@ -6777,7 +6777,7 @@ HRESULT DacDbiInterfaceImpl::GetHeapSegments(OUT DacDbiArrayList<COR_SEGMENT> *p
HeapData *heaps = 0;
#ifdef FEATURE_SVR_GC
- HRESULT hr = GCHeap::IsServerHeap() ? DacHeapWalker::InitHeapDataSvr(heaps, heapCount) : DacHeapWalker::InitHeapDataWks(heaps, heapCount);
+ HRESULT hr = GCHeapUtilities::IsServerHeap() ? DacHeapWalker::InitHeapDataSvr(heaps, heapCount) : DacHeapWalker::InitHeapDataWks(heaps, heapCount);
#else
HRESULT hr = DacHeapWalker::InitHeapDataWks(heaps, heapCount);
#endif
@@ -7171,7 +7171,7 @@ void DacDbiInterfaceImpl::GetGCHeapInformation(COR_HEAPINFO * pHeapInfo)
pHeapInfo->areGCStructuresValid = GCScan::GetGcRuntimeStructuresValid();
#ifdef FEATURE_SVR_GC
- if (GCHeap::IsServerHeap())
+ if (GCHeapUtilities::IsServerHeap())
{
pHeapInfo->gcType = CorDebugServerGC;
pHeapInfo->numHeaps = DacGetNumHeaps();
diff --git a/src/debug/daccess/enummem.cpp b/src/debug/daccess/enummem.cpp
index 068c2f2b13..cc3ae4f606 100644
--- a/src/debug/daccess/enummem.cpp
+++ b/src/debug/daccess/enummem.cpp
@@ -250,9 +250,9 @@ HRESULT ClrDataAccess::EnumMemCLRStatic(IN CLRDataEnumMemoryFlags flags)
ReportMem(m_globalBase + g_dacGlobals.SharedDomain__m_pSharedDomain,
sizeof(SharedDomain));
- // We need GCHeap pointer to make EEVersion work
+ // We need IGCHeap pointer to make EEVersion work
ReportMem(m_globalBase + g_dacGlobals.dac__g_pGCHeap,
- sizeof(GCHeap *));
+ sizeof(IGCHeap *));
// see synblk.cpp, the pointer is pointed to a static byte[]
SyncBlockCache::s_pSyncBlockCache.EnumMem();
@@ -316,7 +316,7 @@ HRESULT ClrDataAccess::EnumMemCLRStatic(IN CLRDataEnumMemoryFlags flags)
#ifdef FEATURE_SVR_GC
CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED
(
- GCHeap::gcHeapType.EnumMem();
+ IGCHeap::gcHeapType.EnumMem();
);
#endif // FEATURE_SVR_GC
diff --git a/src/debug/daccess/request.cpp b/src/debug/daccess/request.cpp
index a30ec37eac..9de09e7f85 100644
--- a/src/debug/daccess/request.cpp
+++ b/src/debug/daccess/request.cpp
@@ -725,7 +725,7 @@ ClrDataAccess::GetHeapAllocData(unsigned int count, struct DacpGenerationAllocDa
SOSDacEnter();
#if defined(FEATURE_SVR_GC)
- if (GCHeap::IsServerHeap())
+ if (GCHeapUtilities::IsServerHeap())
{
hr = GetServerAllocData(count, data, pNeeded);
}
@@ -2809,7 +2809,7 @@ ClrDataAccess::GetGCHeapDetails(CLRDATA_ADDRESS heap, struct DacpGcHeapDetails *
SOSDacEnter();
// doesn't make sense to call this on WKS mode
- if (!GCHeap::IsServerHeap())
+ if (!GCHeapUtilities::IsServerHeap())
hr = E_INVALIDARG;
else
#ifdef FEATURE_SVR_GC
@@ -2884,7 +2884,7 @@ ClrDataAccess::GetHeapSegmentData(CLRDATA_ADDRESS seg, struct DacpHeapSegmentDat
SOSDacEnter();
- if (GCHeap::IsServerHeap())
+ if (GCHeapUtilities::IsServerHeap())
{
#if !defined(FEATURE_SVR_GC)
_ASSERTE(0);
@@ -2924,7 +2924,7 @@ ClrDataAccess::GetGCHeapList(unsigned int count, CLRDATA_ADDRESS heaps[], unsign
SOSDacEnter();
// make sure we called this in appropriate circumstances (i.e., we have multiple heaps)
- if (GCHeap::IsServerHeap())
+ if (GCHeapUtilities::IsServerHeap())
{
#if !defined(FEATURE_SVR_GC)
_ASSERTE(0);
@@ -2960,45 +2960,53 @@ ClrDataAccess::GetGCHeapData(struct DacpGcHeapData *gcheapData)
SOSDacEnter();
- // Now get the heap type. The first data member of the GCHeap class is the GC_HEAP_TYPE, which has
- // three possible values:
- // GC_HEAP_INVALID = 0,
- // GC_HEAP_WKS = 1,
- // GC_HEAP_SVR = 2
-
- TADDR gcHeapLocation = g_pGCHeap.GetAddrRaw (); // get the starting address of the global GCHeap instance
- size_t gcHeapValue = 0; // this will hold the heap type
+ size_t gcHeapValue = 0;
ULONG32 returned = 0;
+ TADDR gcHeapTypeLocation = m_globalBase + g_dacGlobals.IGCHeap__gcHeapType;
// @todo Microsoft: we should probably be capturing the HRESULT from ReadVirtual. We could
// provide a more informative error message. E_FAIL is a wretchedly vague thing to return.
- hr = m_pTarget->ReadVirtual(gcHeapLocation, (PBYTE)&gcHeapValue, sizeof(gcHeapValue), &returned);
+ hr = m_pTarget->ReadVirtual(gcHeapTypeLocation, (PBYTE)&gcHeapValue, sizeof(gcHeapValue), &returned);
+ if (!SUCCEEDED(hr))
+ {
+ goto cleanup;
+ }
+
+ // GC_HEAP_TYPE has three possible values:
+ // GC_HEAP_INVALID = 0,
+ // GC_HEAP_WKS = 1,
+ // GC_HEAP_SVR = 2
+ // If we get something other than that, we probably read the wrong location.
+ _ASSERTE(gcHeapValue >= 0 && gcHeapValue <= 2);
//@todo Microsoft: We have an enumerated type, we probably should use the symbolic name
// we have GC_HEAP_INVALID if gcHeapValue == 0, so we're done
if (SUCCEEDED(hr) && ((returned != sizeof(gcHeapValue)) || (gcHeapValue == 0)))
+ {
hr = E_FAIL;
+ goto cleanup;
+ }
- if (SUCCEEDED(hr))
+ // Now we can get other important information about the heap
+ gcheapData->g_max_generation = GCHeapUtilities::GetMaxGeneration();
+ gcheapData->bServerMode = GCHeapUtilities::IsServerHeap();
+ gcheapData->bGcStructuresValid = GCScan::GetGcRuntimeStructuresValid();
+ if (GCHeapUtilities::IsServerHeap())
{
- // Now we can get other important information about the heap
- gcheapData->g_max_generation = GCHeap::GetMaxGeneration();
- gcheapData->bServerMode = GCHeap::IsServerHeap();
- gcheapData->bGcStructuresValid = GCScan::GetGcRuntimeStructuresValid();
- if (GCHeap::IsServerHeap())
- {
#if !defined (FEATURE_SVR_GC)
- _ASSERTE(0);
- gcheapData->HeapCount = 1;
+ _ASSERTE(0);
+ gcheapData->HeapCount = 1;
#else // !defined (FEATURE_SVR_GC)
- gcheapData->HeapCount = GCHeapCount();
+ gcheapData->HeapCount = GCHeapCount();
#endif // !defined (FEATURE_SVR_GC)
- }
- else
- {
- gcheapData->HeapCount = 1;
- }
}
+ else
+ {
+ gcheapData->HeapCount = 1;
+ }
+
+cleanup:
+ ;
SOSDacLeave();
return hr;
@@ -3014,7 +3022,7 @@ ClrDataAccess::GetOOMStaticData(struct DacpOomData *oomData)
memset(oomData, 0, sizeof(DacpOomData));
- if (!GCHeap::IsServerHeap())
+ if (!GCHeapUtilities::IsServerHeap())
{
oom_history* pOOMInfo = &(WKS::gc_heap::oom_info);
oomData->reason = pOOMInfo->reason;
@@ -3043,7 +3051,7 @@ ClrDataAccess::GetOOMData(CLRDATA_ADDRESS oomAddr, struct DacpOomData *data)
SOSDacEnter();
memset(data, 0, sizeof(DacpOomData));
- if (!GCHeap::IsServerHeap())
+ if (!GCHeapUtilities::IsServerHeap())
hr = E_FAIL; // doesn't make sense to call this on WKS mode
#ifdef FEATURE_SVR_GC
@@ -3090,7 +3098,7 @@ ClrDataAccess::GetGCInterestingInfoStaticData(struct DacpGCInterestingInfoData *
SOSDacEnter();
memset(data, 0, sizeof(DacpGCInterestingInfoData));
- if (!GCHeap::IsServerHeap())
+ if (!GCHeapUtilities::IsServerHeap())
{
for (int i = 0; i < NUM_GC_DATA_POINTS; i++)
data->interestingDataPoints[i] = WKS::interesting_data_per_heap[i];
@@ -3123,7 +3131,7 @@ ClrDataAccess::GetGCInterestingInfoData(CLRDATA_ADDRESS interestingInfoAddr, str
SOSDacEnter();
memset(data, 0, sizeof(DacpGCInterestingInfoData));
- if (!GCHeap::IsServerHeap())
+ if (!GCHeapUtilities::IsServerHeap())
hr = E_FAIL; // doesn't make sense to call this on WKS mode
#ifdef FEATURE_SVR_GC
@@ -3149,7 +3157,7 @@ ClrDataAccess::GetHeapAnalyzeData(CLRDATA_ADDRESS addr, struct DacpGcHeapAnalyz
SOSDacEnter();
- if (!GCHeap::IsServerHeap())
+ if (!GCHeapUtilities::IsServerHeap())
hr = E_FAIL; // doesn't make sense to call this on WKS mode
#ifdef FEATURE_SVR_GC
@@ -3856,7 +3864,7 @@ ClrDataAccess::EnumWksGlobalMemoryRegions(CLRDataEnumMemoryFlags flags)
// enumerating the generations from max (which is normally gen2) to max+1 gives you
// the segment list for all the normal segements plus the large heap segment (max+1)
// this is the convention in the GC so it is repeated here
- for (ULONG i = GCHeap::GetMaxGeneration(); i <= GCHeap::GetMaxGeneration()+1; i++)
+ for (ULONG i = GCHeapUtilities::GetMaxGeneration(); i <= GCHeapUtilities::GetMaxGeneration()+1; i++)
{
__DPtr<WKS::heap_segment> seg = dac_cast<TADDR>(WKS::generation_table[i].start_segment);
while (seg)
diff --git a/src/debug/daccess/request_svr.cpp b/src/debug/daccess/request_svr.cpp
index 429f30020f..1fe20e2b60 100644
--- a/src/debug/daccess/request_svr.cpp
+++ b/src/debug/daccess/request_svr.cpp
@@ -256,7 +256,7 @@ ClrDataAccess::EnumSvrGlobalMemoryRegions(CLRDataEnumMemoryFlags flags)
// enumerating the generations from max (which is normally gen2) to max+1 gives you
// the segment list for all the normal segements plus the large heap segment (max+1)
// this is the convention in the GC so it is repeated here
- for (ULONG i = GCHeap::GetMaxGeneration(); i <= GCHeap::GetMaxGeneration()+1; i++)
+ for (ULONG i = GCHeapUtilities::GetMaxGeneration(); i <= GCHeapUtilities::GetMaxGeneration()+1; i++)
{
__DPtr<SVR::heap_segment> seg = dac_cast<TADDR>(pHeap->generation_table[i].start_segment);
while (seg)
@@ -271,7 +271,7 @@ ClrDataAccess::EnumSvrGlobalMemoryRegions(CLRDataEnumMemoryFlags flags)
DWORD DacGetNumHeaps()
{
- if (GCHeap::IsServerHeap())
+ if (GCHeapUtilities::IsServerHeap())
return (DWORD)SVR::gc_heap::n_heaps;
// workstation gc
diff --git a/src/gc/env/gcenv.ee.h b/src/gc/env/gcenv.ee.h
index 0c1fd4988a..f71380e1a1 100644
--- a/src/gc/env/gcenv.ee.h
+++ b/src/gc/env/gcenv.ee.h
@@ -9,10 +9,11 @@
struct ScanContext;
class CrawlFrame;
+struct gc_alloc_context;
typedef void promote_func(PTR_PTR_Object, ScanContext*, uint32_t);
-typedef void enum_alloc_context_func(alloc_context*, void*);
+typedef void enum_alloc_context_func(gc_alloc_context*, void*);
typedef struct
{
@@ -74,7 +75,7 @@ public:
static void EnablePreemptiveGC(Thread * pThread);
static void DisablePreemptiveGC(Thread * pThread);
- static alloc_context * GetAllocContext(Thread * pThread);
+ static gc_alloc_context * GetAllocContext(Thread * pThread);
static bool CatchAtSafePoint(Thread * pThread);
static void GcEnumAllocContexts(enum_alloc_context_func* fn, void* param);
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index dad71331cd..198fd32a88 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -1544,7 +1544,7 @@ void WaitLongerNoInstru (int i)
}
else if (g_TrapReturningThreads)
{
- GCHeap::GetGCHeap()->WaitUntilGCComplete();
+ g_theGcHeap->WaitUntilGCComplete();
}
}
@@ -1573,7 +1573,7 @@ retry:
unsigned int i = 0;
while (VolatileLoad(lock) >= 0)
{
- if ((++i & 7) && !GCHeap::IsGCInProgress())
+ if ((++i & 7) && !IsGCInProgress())
{
if (g_SystemInfo.dwNumberOfProcessors > 1)
{
@@ -1584,11 +1584,11 @@ retry:
#endif //!MULTIPLE_HEAPS
for (int j = 0; j < spin_count; j++)
{
- if (VolatileLoad(lock) < 0 || GCHeap::IsGCInProgress())
+ if (VolatileLoad(lock) < 0 || IsGCInProgress())
break;
YieldProcessor(); // indicate to the processor that we are spining
}
- if (VolatileLoad(lock) >= 0 && !GCHeap::IsGCInProgress())
+ if (VolatileLoad(lock) >= 0 && !IsGCInProgress())
{
safe_switch_to_thread();
}
@@ -3743,9 +3743,9 @@ public:
BOOL fSmallObjectHeapPtr = FALSE, fLargeObjectHeapPtr = FALSE;
if (!noRangeChecks)
{
- fSmallObjectHeapPtr = GCHeap::GetGCHeap()->IsHeapPointer(this, TRUE);
+ fSmallObjectHeapPtr = g_theGcHeap->IsHeapPointer(this, TRUE);
if (!fSmallObjectHeapPtr)
- fLargeObjectHeapPtr = GCHeap::GetGCHeap()->IsHeapPointer(this);
+ fLargeObjectHeapPtr = g_theGcHeap->IsHeapPointer(this);
_ASSERTE(fSmallObjectHeapPtr || fLargeObjectHeapPtr);
}
@@ -3763,14 +3763,14 @@ public:
#ifdef VERIFY_HEAP
if (bDeep && (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC))
- GCHeap::GetGCHeap()->ValidateObjectMember(this);
+ g_theGcHeap->ValidateObjectMember(this);
#endif
if (fSmallObjectHeapPtr)
{
#ifdef FEATURE_BASICFREEZE
- _ASSERTE(!GCHeap::GetGCHeap()->IsLargeObject(pMT) || GCHeap::GetGCHeap()->IsInFrozenSegment(this));
+ _ASSERTE(!g_theGcHeap->IsLargeObject(pMT) || g_theGcHeap->IsInFrozenSegment(this));
#else
- _ASSERTE(!GCHeap::GetGCHeap()->IsLargeObject(pMT));
+ _ASSERTE(!g_theGcHeap->IsLargeObject(pMT));
#endif
}
}
@@ -4361,7 +4361,7 @@ static size_t get_valid_segment_size (BOOL large_seg=FALSE)
// if seg_size is small but not 0 (0 is default if config not set)
// then set the segment to the minimum size
- if (!GCHeap::IsValidSegmentSize(seg_size))
+ if (!g_theGcHeap->IsValidSegmentSize(seg_size))
{
// if requested size is between 1 byte and 4MB, use min
if ((seg_size >> 1) && !(seg_size >> 22))
@@ -5775,7 +5775,7 @@ void gc_heap::fix_allocation_context (alloc_context* acontext, BOOL for_gc_p,
//used by the heap verification for concurrent gc.
//it nulls out the words set by fix_allocation_context for heap_verification
-void repair_allocation (alloc_context* acontext, void*)
+void repair_allocation (gc_alloc_context* acontext, void*)
{
uint8_t* point = acontext->alloc_ptr;
@@ -5788,7 +5788,7 @@ void repair_allocation (alloc_context* acontext, void*)
}
}
-void void_allocation (alloc_context* acontext, void*)
+void void_allocation (gc_alloc_context* acontext, void*)
{
uint8_t* point = acontext->alloc_ptr;
@@ -5818,10 +5818,10 @@ struct fix_alloc_context_args
void* heap;
};
-void fix_alloc_context(alloc_context* acontext, void* param)
+void fix_alloc_context(gc_alloc_context* acontext, void* param)
{
fix_alloc_context_args* args = (fix_alloc_context_args*)param;
- GCHeap::GetGCHeap()->FixAllocContext(acontext, FALSE, (void*)(size_t)(args->for_gc_p), args->heap);
+ g_theGcHeap->FixAllocContext(acontext, FALSE, (void*)(size_t)(args->for_gc_p), args->heap);
}
void gc_heap::fix_allocation_contexts(BOOL for_gc_p)
@@ -10384,10 +10384,10 @@ gc_heap::init_gc_heap (int h_number)
heap_segment_heap (lseg) = this;
//initialize the alloc context heap
- generation_alloc_context (generation_of (0))->alloc_heap = vm_heap;
+ generation_alloc_context (generation_of (0))->set_alloc_heap(vm_heap);
//initialize the alloc context heap
- generation_alloc_context (generation_of (max_generation+1))->alloc_heap = vm_heap;
+ generation_alloc_context (generation_of (max_generation+1))->set_alloc_heap(vm_heap);
#endif //MULTIPLE_HEAPS
@@ -13080,10 +13080,10 @@ void gc_heap::balance_heaps (alloc_context* acontext)
{
if (acontext->alloc_count == 0)
{
- acontext->home_heap = GCHeap::GetHeap( heap_select::select_heap(acontext, 0) );
- gc_heap* hp = acontext->home_heap->pGenGCHeap;
+ acontext->set_home_heap(GCHeap::GetHeap( heap_select::select_heap(acontext, 0) ));
+ gc_heap* hp = acontext->get_home_heap()->pGenGCHeap;
dprintf (3, ("First allocation for context %Ix on heap %d\n", (size_t)acontext, (size_t)hp->heap_number));
- acontext->alloc_heap = acontext->home_heap;
+ acontext->set_alloc_heap(acontext->get_home_heap());
hp->alloc_context_count++;
}
}
@@ -13094,9 +13094,9 @@ void gc_heap::balance_heaps (alloc_context* acontext)
if (heap_select::can_find_heap_fast())
{
- if (acontext->home_heap != NULL)
- hint = acontext->home_heap->pGenGCHeap->heap_number;
- if (acontext->home_heap != GCHeap::GetHeap(hint = heap_select::select_heap(acontext, hint)) || ((acontext->alloc_count & 15) == 0))
+ if (acontext->get_home_heap() != NULL)
+ hint = acontext->get_home_heap()->pGenGCHeap->heap_number;
+ if (acontext->get_home_heap() != GCHeap::GetHeap(hint = heap_select::select_heap(acontext, hint)) || ((acontext->alloc_count & 15) == 0))
{
set_home_heap = TRUE;
}
@@ -13122,7 +13122,7 @@ void gc_heap::balance_heaps (alloc_context* acontext)
else
*/
{
- gc_heap* org_hp = acontext->alloc_heap->pGenGCHeap;
+ gc_heap* org_hp = acontext->get_alloc_heap()->pGenGCHeap;
dynamic_data* dd = org_hp->dynamic_data_of (0);
ptrdiff_t org_size = dd_new_allocation (dd);
@@ -13141,9 +13141,9 @@ try_again:
{
max_hp = org_hp;
max_size = org_size + delta;
- acontext->home_heap = GCHeap::GetHeap( heap_select::select_heap(acontext, hint) );
+ acontext->set_home_heap(GCHeap::GetHeap( heap_select::select_heap(acontext, hint) ));
- if (org_hp == acontext->home_heap->pGenGCHeap)
+ if (org_hp == acontext->get_home_heap()->pGenGCHeap)
max_size = max_size + delta;
org_alloc_context_count = org_hp->alloc_context_count;
@@ -13156,7 +13156,7 @@ try_again:
gc_heap* hp = GCHeap::GetHeap(i%n_heaps)->pGenGCHeap;
dd = hp->dynamic_data_of (0);
ptrdiff_t size = dd_new_allocation (dd);
- if (hp == acontext->home_heap->pGenGCHeap)
+ if (hp == acontext->get_home_heap()->pGenGCHeap)
size = size + delta;
int hp_alloc_context_count = hp->alloc_context_count;
if (hp_alloc_context_count > 0)
@@ -13183,7 +13183,7 @@ try_again:
{
org_hp->alloc_context_count--;
max_hp->alloc_context_count++;
- acontext->alloc_heap = GCHeap::GetHeap(max_hp->heap_number);
+ acontext->set_alloc_heap(GCHeap::GetHeap(max_hp->heap_number));
#if !defined(FEATURE_PAL)
if (CPUGroupInfo::CanEnableGCCPUGroups())
{ //only set ideal processor when max_hp and org_hp are in the same cpu
@@ -13221,7 +13221,7 @@ try_again:
#endif // !FEATURE_PAL
dprintf (3, ("Switching context %p (home heap %d) ",
acontext,
- acontext->home_heap->pGenGCHeap->heap_number));
+ acontext->get_home_heap()->pGenGCHeap->heap_number));
dprintf (3, (" from heap %d (%Id free bytes, %d contexts) ",
org_hp->heap_number,
org_size,
@@ -13239,7 +13239,7 @@ try_again:
gc_heap* gc_heap::balance_heaps_loh (alloc_context* acontext, size_t /*size*/)
{
- gc_heap* org_hp = acontext->alloc_heap->pGenGCHeap;
+ gc_heap* org_hp = acontext->get_alloc_heap()->pGenGCHeap;
//dprintf (1, ("LA: %Id", size));
//if (size > 128*1024)
@@ -13316,7 +13316,7 @@ BOOL gc_heap::allocate_more_space(alloc_context* acontext, size_t size,
if (alloc_generation_number == 0)
{
balance_heaps (acontext);
- status = acontext->alloc_heap->pGenGCHeap->try_allocate_more_space (acontext, size, alloc_generation_number);
+ status = acontext->get_alloc_heap()->pGenGCHeap->try_allocate_more_space (acontext, size, alloc_generation_number);
}
else
{
@@ -22250,7 +22250,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
#endif //TIME_GC
// We may update write barrier code. We assume here EE has been suspended if we are on a GC thread.
- assert(GCHeap::IsGCInProgress());
+ assert(IsGCInProgress());
BOOL should_expand = FALSE;
BOOL should_compact= FALSE;
@@ -30469,7 +30469,7 @@ CObjectHeader* gc_heap::allocate_large_object (size_t jsize, int64_t& alloc_byte
acontext.alloc_limit = 0;
acontext.alloc_bytes = 0;
#ifdef MULTIPLE_HEAPS
- acontext.alloc_heap = vm_heap;
+ acontext.set_alloc_heap(vm_heap);
#endif //MULTIPLE_HEAPS
#ifdef MARK_ARRAY
@@ -31946,11 +31946,11 @@ void gc_heap::descr_card_table ()
#endif //TRACE_GC
}
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context)
{
+#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
#ifdef MULTIPLE_HEAPS
- int n_heaps = GCHeap::GetGCHeap()->GetNumberOfHeaps ();
+ int n_heaps = g_theGcHeap->GetNumberOfHeaps ();
for (int i = 0; i < n_heaps; i++)
{
gc_heap* hp = GCHeap::GetHeap(i)->pGenGCHeap;
@@ -32026,8 +32026,8 @@ void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context)
curr_gen_number0--;
}
}
-}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+}
#ifdef TRACE_GC
// Note that when logging is on it can take a long time to go through the free items.
@@ -33306,8 +33306,12 @@ gc_heap::verify_heap (BOOL begin_gc_p)
#endif //BACKGROUND_GC
}
+#endif //VERIFY_HEAP
+
+
void GCHeap::ValidateObjectMember (Object* obj)
{
+#ifdef VERIFY_HEAP
size_t s = size (obj);
uint8_t* o = (uint8_t*)obj;
@@ -33325,9 +33329,8 @@ void GCHeap::ValidateObjectMember (Object* obj)
}
}
} );
-
+#endif // VERIFY_HEAP
}
-#endif //VERIFY_HEAP
void DestructObject (CObjectHeader* hdr)
{
@@ -33640,12 +33643,12 @@ BOOL GCHeap::IsEphemeral (Object* object)
return hp->ephemeral_pointer_p (o);
}
-#ifdef VERIFY_HEAP
// Return NULL if can't find next object. When EE is not suspended,
// the result is not accurate: if the input arg is in gen0, the function could
// return zeroed out memory as next object
Object * GCHeap::NextObj (Object * object)
{
+#ifdef VERIFY_HEAP
uint8_t* o = (uint8_t*)object;
#ifndef FEATURE_BASICFREEZE
@@ -33687,8 +33690,13 @@ Object * GCHeap::NextObj (Object * object)
}
return (Object *)nextobj;
+#else
+ return nullptr;
+#endif // VERIFY_HEAP
}
+#ifdef VERIFY_HEAP
+
#ifdef FEATURE_BASICFREEZE
BOOL GCHeap::IsInFrozenSegment (Object * object)
{
@@ -33918,11 +33926,16 @@ int StressRNG(int iMaxValue)
int randValue = (((lHoldrand = lHoldrand * 214013L + 2531011L) >> 16) & 0x7fff);
return randValue % iMaxValue;
}
+#endif // STRESS_HEAP
+#endif // !FEATURE_REDHAWK
// free up object so that things will move and then do a GC
//return TRUE if GC actually happens, otherwise FALSE
-BOOL GCHeap::StressHeap(alloc_context * acontext)
+BOOL GCHeap::StressHeap(gc_alloc_context * context)
{
+#if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
+ alloc_context* acontext = static_cast<alloc_context*>(context);
+
// if GC stress was dynamically disabled during this run we return FALSE
if (!GCStressPolicy::IsEnabled())
return FALSE;
@@ -34102,11 +34115,11 @@ BOOL GCHeap::StressHeap(alloc_context * acontext)
}
return TRUE;
+#else
+ return FALSE;
+#endif // defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
}
-#endif // STRESS_HEAP
-#endif // FEATURE_REDHAWK
-
#ifdef FEATURE_PREMORTEM_FINALIZATION
#define REGISTER_FOR_FINALIZATION(_object, _size) \
@@ -34164,8 +34177,6 @@ GCHeap::Alloc( size_t size, uint32_t flags REQD_ALIGN_DCL)
TRIGGERSGC();
- assert (!GCHeap::UseAllocationContexts());
-
Object* newAlloc = NULL;
#ifdef TRACE_GC
@@ -34237,11 +34248,11 @@ GCHeap::Alloc( size_t size, uint32_t flags REQD_ALIGN_DCL)
return newAlloc;
}
-#ifdef FEATURE_64BIT_ALIGNMENT
// Allocate small object with an alignment requirement of 8-bytes. Non allocation context version.
Object *
GCHeap::AllocAlign8( size_t size, uint32_t flags)
{
+#ifdef FEATURE_64BIT_ALIGNMENT
CONTRACTL {
#ifdef FEATURE_REDHAWK
// Under Redhawk NULL is returned on failure.
@@ -34252,8 +34263,6 @@ GCHeap::AllocAlign8( size_t size, uint32_t flags)
GC_TRIGGERS;
} CONTRACTL_END;
- assert (!GCHeap::UseAllocationContexts());
-
Object* newAlloc = NULL;
{
@@ -34270,12 +34279,17 @@ GCHeap::AllocAlign8( size_t size, uint32_t flags)
}
return newAlloc;
+#else
+ assert(!"should not call GCHeap::AllocAlign8 without FEATURE_64BIT_ALIGNMENT defined!");
+ return nullptr;
+#endif //FEATURE_64BIT_ALIGNMENT
}
// Allocate small object with an alignment requirement of 8-bytes. Allocation context version.
Object*
-GCHeap::AllocAlign8(alloc_context* acontext, size_t size, uint32_t flags )
+GCHeap::AllocAlign8(gc_alloc_context* ctx, size_t size, uint32_t flags )
{
+#ifdef FEATURE_64BIT_ALIGNMENT
CONTRACTL {
#ifdef FEATURE_REDHAWK
// Under Redhawk NULL is returned on failure.
@@ -34286,25 +34300,32 @@ GCHeap::AllocAlign8(alloc_context* acontext, size_t size, uint32_t flags )
GC_TRIGGERS;
} CONTRACTL_END;
+ alloc_context* acontext = static_cast<alloc_context*>(ctx);
+
#ifdef MULTIPLE_HEAPS
- if (acontext->alloc_heap == 0)
+ if (acontext->get_alloc_heap() == 0)
{
AssignHeap (acontext);
- assert (acontext->alloc_heap);
+ assert (acontext->get_alloc_heap());
}
- gc_heap* hp = acontext->alloc_heap->pGenGCHeap;
+ gc_heap* hp = acontext->get_alloc_heap()->pGenGCHeap;
#else
gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS
return AllocAlign8Common(hp, acontext, size, flags);
+#else
+ assert(!"should not call GCHeap::AllocAlign8 without FEATURE_64BIT_ALIGNMENT defined!");
+ return nullptr;
+#endif //FEATURE_64BIT_ALIGNMENT
}
// Common code used by both variants of AllocAlign8 above.
Object*
GCHeap::AllocAlign8Common(void* _hp, alloc_context* acontext, size_t size, uint32_t flags)
{
+#ifdef FEATURE_64BIT_ALIGNMENT
CONTRACTL {
#ifdef FEATURE_REDHAWK
// Under Redhawk NULL is returned on failure.
@@ -34424,8 +34445,11 @@ GCHeap::AllocAlign8Common(void* _hp, alloc_context* acontext, size_t size, uint3
AllocCount++;
#endif //TRACE_GC
return newAlloc;
-}
+#else
+ assert(!"Should not call GCHeap::AllocAlign8Common without FEATURE_64BIT_ALIGNMENT defined!");
+ return nullptr;
#endif // FEATURE_64BIT_ALIGNMENT
+}
Object *
GCHeap::AllocLHeap( size_t size, uint32_t flags REQD_ALIGN_DCL)
@@ -34499,7 +34523,7 @@ GCHeap::AllocLHeap( size_t size, uint32_t flags REQD_ALIGN_DCL)
}
Object*
-GCHeap::Alloc(alloc_context* acontext, size_t size, uint32_t flags REQD_ALIGN_DCL)
+GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_DCL)
{
CONTRACTL {
#ifdef FEATURE_REDHAWK
@@ -34522,6 +34546,7 @@ GCHeap::Alloc(alloc_context* acontext, size_t size, uint32_t flags REQD_ALIGN_DC
TRIGGERSGC();
Object* newAlloc = NULL;
+ alloc_context* acontext = static_cast<alloc_context*>(context);
#ifdef TRACE_GC
#ifdef COUNT_CYCLES
@@ -34534,10 +34559,10 @@ GCHeap::Alloc(alloc_context* acontext, size_t size, uint32_t flags REQD_ALIGN_DC
#endif //TRACE_GC
#ifdef MULTIPLE_HEAPS
- if (acontext->alloc_heap == 0)
+ if (acontext->get_alloc_heap() == 0)
{
AssignHeap (acontext);
- assert (acontext->alloc_heap);
+ assert (acontext->get_alloc_heap());
}
#endif //MULTIPLE_HEAPS
@@ -34546,7 +34571,7 @@ GCHeap::Alloc(alloc_context* acontext, size_t size, uint32_t flags REQD_ALIGN_DC
#endif // FEATURE_REDHAWK
#ifdef MULTIPLE_HEAPS
- gc_heap* hp = acontext->alloc_heap->pGenGCHeap;
+ gc_heap* hp = acontext->get_alloc_heap()->pGenGCHeap;
#else
gc_heap* hp = pGenGCHeap;
#ifdef _PREFAST_
@@ -34591,8 +34616,9 @@ GCHeap::Alloc(alloc_context* acontext, size_t size, uint32_t flags REQD_ALIGN_DC
}
void
-GCHeap::FixAllocContext (alloc_context* acontext, BOOL lockp, void* arg, void *heap)
+GCHeap::FixAllocContext (gc_alloc_context* context, BOOL lockp, void* arg, void *heap)
{
+ alloc_context* acontext = static_cast<alloc_context*>(context);
#ifdef MULTIPLE_HEAPS
if (arg != 0)
@@ -35383,8 +35409,8 @@ size_t GCHeap::ApproxTotalBytesInUse(BOOL small_heap_only)
void GCHeap::AssignHeap (alloc_context* acontext)
{
// Assign heap based on processor
- acontext->alloc_heap = GetHeap(heap_select::select_heap(acontext, 0));
- acontext->home_heap = acontext->alloc_heap;
+ acontext->set_alloc_heap(GetHeap(heap_select::select_heap(acontext, 0)));
+ acontext->set_home_heap(acontext->get_alloc_heap());
}
GCHeap* GCHeap::GetHeap (int n)
{
@@ -35393,11 +35419,12 @@ GCHeap* GCHeap::GetHeap (int n)
}
#endif //MULTIPLE_HEAPS
-bool GCHeap::IsThreadUsingAllocationContextHeap(alloc_context* acontext, int thread_number)
+bool GCHeap::IsThreadUsingAllocationContextHeap(gc_alloc_context* context, int thread_number)
{
+ alloc_context* acontext = static_cast<alloc_context*>(context);
#ifdef MULTIPLE_HEAPS
- return ((acontext->home_heap == GetHeap(thread_number)) ||
- ((acontext->home_heap == 0) && (thread_number == 0)));
+ return ((acontext->get_home_heap() == GetHeap(thread_number)) ||
+ ((acontext->get_home_heap() == 0) && (thread_number == 0)));
#else
UNREFERENCED_PARAMETER(acontext);
UNREFERENCED_PARAMETER(thread_number);
@@ -35427,7 +35454,8 @@ int GCHeap::GetHomeHeapNumber ()
{
if (pThread)
{
- GCHeap *hp = GCToEEInterface::GetAllocContext(pThread)->home_heap;
+ gc_alloc_context* ctx = GCToEEInterface::GetAllocContext(pThread);
+ GCHeap *hp = static_cast<alloc_context*>(ctx)->get_home_heap();
if (hp == gc_heap::g_heaps[i]->vm_heap) return i;
}
}
@@ -35639,7 +35667,7 @@ size_t GCHeap::GetValidGen0MaxSize(size_t seg_size)
{
size_t gen0size = g_pConfig->GetGCgen0size();
- if ((gen0size == 0) || !GCHeap::IsValidGen0MaxSize(gen0size))
+ if ((gen0size == 0) || !g_theGcHeap->IsValidGen0MaxSize(gen0size))
{
#ifdef SERVER_GC
// performance data seems to indicate halving the size results
@@ -35869,7 +35897,7 @@ GCHeap::SetCardsAfterBulkCopy( Object **StartPoint, size_t len )
#ifdef BACKGROUND_GC
(!gc_heap::settings.concurrent) &&
#endif //BACKGROUND_GC
- (GCHeap::GetGCHeap()->WhichGeneration( (Object*) StartPoint ) == 0))
+ (g_theGcHeap->WhichGeneration( (Object*) StartPoint ) == 0))
return;
rover = StartPoint;
@@ -36374,7 +36402,7 @@ CFinalize::ScanForFinalization (promote_func* pfn, int gen, BOOL mark_only_p,
{
CObjectHeader* obj = (CObjectHeader*)*i;
dprintf (3, ("scanning: %Ix", (size_t)obj));
- if (!GCHeap::GetGCHeap()->IsPromoted (obj))
+ if (!g_theGcHeap->IsPromoted (obj))
{
dprintf (3, ("freacheable: %Ix", (size_t)obj));
@@ -36507,7 +36535,7 @@ CFinalize::UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p)
for (Object** po = startIndex;
po < SegQueueLimit (gen_segment(i)); po++)
{
- int new_gen = GCHeap::GetGCHeap()->WhichGeneration (*po);
+ int new_gen = g_theGcHeap->WhichGeneration (*po);
if (new_gen != i)
{
if (new_gen > i)
@@ -36567,7 +36595,7 @@ void CFinalize::CheckFinalizerObjects()
for (Object **po = startIndex; po < stopIndex; po++)
{
- if ((int)GCHeap::GetGCHeap()->WhichGeneration (*po) < i)
+ if ((int)g_theGcHeap->WhichGeneration (*po) < i)
FATAL_GC_ERROR ();
((CObjectHeader*)*po)->Validate();
}
@@ -36640,9 +36668,9 @@ void gc_heap::walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_la
}
}
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
void GCHeap::WalkObject (Object* obj, walk_fn fn, void* context)
{
+#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
uint8_t* o = (uint8_t*)obj;
if (o)
{
@@ -36657,8 +36685,8 @@ void GCHeap::WalkObject (Object* obj, walk_fn fn, void* context)
}
);
}
-}
#endif //defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+}
// Go through and touch (read) each page straddled by a memory block.
void TouchPages(void * pStart, size_t cb)
diff --git a/src/gc/gc.h b/src/gc/gc.h
index 14c6baee83..7f6e956515 100644
--- a/src/gc/gc.h
+++ b/src/gc/gc.h
@@ -14,9 +14,8 @@ Module Name:
#ifndef __GC_H
#define __GC_H
-#ifdef PROFILING_SUPPORTED
-#define GC_PROFILING //Turn on profiling
-#endif // PROFILING_SUPPORTED
+#include "gcinterface.h"
+
/*
* Promotion Function Prototypes
@@ -80,6 +79,24 @@ enum oom_reason
oom_unproductive_full_gc = 6
};
+// TODO : it would be easier to make this an ORed value
+enum gc_reason
+{
+ reason_alloc_soh = 0,
+ reason_induced = 1,
+ reason_lowmemory = 2,
+ reason_empty = 3,
+ reason_alloc_loh = 4,
+ reason_oos_soh = 5,
+ reason_oos_loh = 6,
+ reason_induced_noforce = 7, // it's an induced GC and doesn't have to be blocking.
+ reason_gcstress = 8, // this turns into reason_induced & gc_mechanisms.stress_induced = true
+ reason_lowmemory_blocking = 9,
+ reason_induced_compacting = 10,
+ reason_lowmemory_host = 11,
+ reason_max
+};
+
struct oom_history
{
oom_reason reason;
@@ -97,28 +114,16 @@ struct oom_history
class CObjectHeader;
class Object;
-class GCHeap;
+class IGCHeapInternal;
/* misc defines */
#define LARGE_OBJECT_SIZE ((size_t)(85000))
-GPTR_DECL(GCHeap, g_pGCHeap);
-
#ifdef GC_CONFIG_DRIVEN
#define MAX_GLOBAL_GC_MECHANISMS_COUNT 6
GARY_DECL(size_t, gc_global_mechanisms, MAX_GLOBAL_GC_MECHANISMS_COUNT);
#endif //GC_CONFIG_DRIVEN
-#ifndef DACCESS_COMPILE
-extern "C" {
-#endif
-GPTR_DECL(uint8_t,g_lowest_address);
-GPTR_DECL(uint8_t,g_highest_address);
-GPTR_DECL(uint32_t,g_card_table);
-#ifndef DACCESS_COMPILE
-}
-#endif
-
#ifdef DACCESS_COMPILE
class DacHeapWalker;
#endif
@@ -127,137 +132,22 @@ class DacHeapWalker;
#define _LOGALLOC
#endif
-#ifdef WRITE_BARRIER_CHECK
-//always defined, but should be 0 in Server GC
-extern uint8_t* g_GCShadow;
-extern uint8_t* g_GCShadowEnd;
-// saves the g_lowest_address in between GCs to verify the consistency of the shadow segment
-extern uint8_t* g_shadow_lowest_address;
-#endif
-
#define MP_LOCKS
-extern "C" uint8_t* g_ephemeral_low;
-extern "C" uint8_t* g_ephemeral_high;
-
namespace WKS {
- ::GCHeap* CreateGCHeap();
+ ::IGCHeapInternal* CreateGCHeap();
class GCHeap;
class gc_heap;
}
#if defined(FEATURE_SVR_GC)
namespace SVR {
- ::GCHeap* CreateGCHeap();
+ ::IGCHeapInternal* CreateGCHeap();
class GCHeap;
class gc_heap;
}
#endif // defined(FEATURE_SVR_GC)
-/*
- * Ephemeral Garbage Collected Heap Interface
- */
-
-
-struct alloc_context
-{
- friend class WKS::gc_heap;
-#if defined(FEATURE_SVR_GC)
- friend class SVR::gc_heap;
- friend class SVR::GCHeap;
-#endif // defined(FEATURE_SVR_GC)
- friend struct ClassDumpInfo;
-
- uint8_t* alloc_ptr;
- uint8_t* alloc_limit;
- int64_t alloc_bytes; //Number of bytes allocated on SOH by this context
- int64_t alloc_bytes_loh; //Number of bytes allocated on LOH by this context
-#if defined(FEATURE_SVR_GC)
- SVR::GCHeap* alloc_heap;
- SVR::GCHeap* home_heap;
-#endif // defined(FEATURE_SVR_GC)
- int alloc_count;
-public:
-
- void init()
- {
- LIMITED_METHOD_CONTRACT;
-
- alloc_ptr = 0;
- alloc_limit = 0;
- alloc_bytes = 0;
- alloc_bytes_loh = 0;
-#if defined(FEATURE_SVR_GC)
- alloc_heap = 0;
- home_heap = 0;
-#endif // defined(FEATURE_SVR_GC)
- alloc_count = 0;
- }
-};
-
-struct ScanContext
-{
- Thread* thread_under_crawl;
- int thread_number;
- uintptr_t stack_limit; // Lowest point on the thread stack that the scanning logic is permitted to read
- BOOL promotion; //TRUE: Promotion, FALSE: Relocation.
- BOOL concurrent; //TRUE: concurrent scanning
-#if CHECK_APP_DOMAIN_LEAKS || defined (FEATURE_APPDOMAIN_RESOURCE_MONITORING) || defined (DACCESS_COMPILE)
- AppDomain *pCurrentDomain;
-#endif //CHECK_APP_DOMAIN_LEAKS || FEATURE_APPDOMAIN_RESOURCE_MONITORING || DACCESS_COMPILE
-
-#ifndef FEATURE_REDHAWK
-#if defined(GC_PROFILING) || defined (DACCESS_COMPILE)
- MethodDesc *pMD;
-#endif //GC_PROFILING || DACCESS_COMPILE
-#endif // FEATURE_REDHAWK
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- EtwGCRootKind dwEtwRootKind;
-#endif // GC_PROFILING || FEATURE_EVENT_TRACE
-
- ScanContext()
- {
- LIMITED_METHOD_CONTRACT;
-
- thread_under_crawl = 0;
- thread_number = -1;
- stack_limit = 0;
- promotion = FALSE;
- concurrent = FALSE;
-#ifdef GC_PROFILING
- pMD = NULL;
-#endif //GC_PROFILING
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- dwEtwRootKind = kEtwGCRootKindOther;
-#endif // GC_PROFILING || FEATURE_EVENT_TRACE
- }
-};
-
-typedef BOOL (* walk_fn)(Object*, void*);
-typedef void (* gen_walk_fn)(void *context, int generation, uint8_t *range_start, uint8_t * range_end, uint8_t *range_reserved);
-
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-struct ProfilingScanContext : ScanContext
-{
- BOOL fProfilerPinned;
- void * pvEtwContext;
- void *pHeapId;
-
- ProfilingScanContext(BOOL fProfilerPinnedParam) : ScanContext()
- {
- LIMITED_METHOD_CONTRACT;
-
- pHeapId = NULL;
- fProfilerPinned = fProfilerPinnedParam;
- pvEtwContext = NULL;
-#ifdef FEATURE_CONSERVATIVE_GC
- // To not confuse GCScan::GcScanRoots
- promotion = g_pConfig->GetGCConservative();
-#endif
- }
-};
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-
#ifdef STRESS_HEAP
#define IN_STRESS_HEAP(x) x
#define STRESS_HEAP_ARG(x) ,x
@@ -266,7 +156,6 @@ struct ProfilingScanContext : ScanContext
#define STRESS_HEAP_ARG(x)
#endif // STRESS_HEAP
-
//dynamic data interface
struct gc_counters
{
@@ -275,51 +164,6 @@ struct gc_counters
size_t collection_count;
};
-// !!!!!!!!!!!!!!!!!!!!!!!
-// make sure you change the def in bcl\system\gc.cs
-// if you change this!
-enum collection_mode
-{
- collection_non_blocking = 0x00000001,
- collection_blocking = 0x00000002,
- collection_optimized = 0x00000004,
- collection_compacting = 0x00000008
-#ifdef STRESS_HEAP
- , collection_gcstress = 0x80000000
-#endif // STRESS_HEAP
-};
-
-// !!!!!!!!!!!!!!!!!!!!!!!
-// make sure you change the def in bcl\system\gc.cs
-// if you change this!
-enum wait_full_gc_status
-{
- wait_full_gc_success = 0,
- wait_full_gc_failed = 1,
- wait_full_gc_cancelled = 2,
- wait_full_gc_timeout = 3,
- wait_full_gc_na = 4
-};
-
-// !!!!!!!!!!!!!!!!!!!!!!!
-// make sure you change the def in bcl\system\gc.cs
-// if you change this!
-enum start_no_gc_region_status
-{
- start_no_gc_success = 0,
- start_no_gc_no_memory = 1,
- start_no_gc_too_large = 2,
- start_no_gc_in_progress = 3
-};
-
-enum end_no_gc_region_status
-{
- end_no_gc_success = 0,
- end_no_gc_not_in_progress = 1,
- end_no_gc_induced = 2,
- end_no_gc_alloc_exceeded = 3
-};
-
enum bgc_state
{
bgc_not_in_process = 0,
@@ -352,276 +196,72 @@ void record_changed_seg (uint8_t* start, uint8_t* end,
void record_global_mechanism (int mech_index);
#endif //GC_CONFIG_DRIVEN
-//constants for the flags parameter to the gc call back
-
-#define GC_CALL_INTERIOR 0x1
-#define GC_CALL_PINNED 0x2
-#define GC_CALL_CHECK_APP_DOMAIN 0x4
-
-//flags for GCHeap::Alloc(...)
-#define GC_ALLOC_FINALIZE 0x1
-#define GC_ALLOC_CONTAINS_REF 0x2
-#define GC_ALLOC_ALIGN8_BIAS 0x4
-#define GC_ALLOC_ALIGN8 0x8
-
-class GCHeap {
- friend struct ::_DacGlobals;
-#ifdef DACCESS_COMPILE
- friend class ClrDataAccess;
-#endif
-
-public:
-
- virtual ~GCHeap() {}
-
- static GCHeap *GetGCHeap()
+struct alloc_context : gc_alloc_context
+{
+#ifdef FEATURE_SVR_GC
+ inline SVR::GCHeap* get_alloc_heap()
{
- LIMITED_METHOD_CONTRACT;
-
- _ASSERTE(g_pGCHeap != NULL);
- return g_pGCHeap;
+ return static_cast<SVR::GCHeap*>(gc_reserved_1);
}
-#ifndef DACCESS_COMPILE
- static BOOL IsGCInProgress(BOOL bConsiderGCStart = FALSE)
- {
- WRAPPER_NO_CONTRACT;
-
- return (IsGCHeapInitialized() ? GetGCHeap()->IsGCInProgressHelper(bConsiderGCStart) : false);
- }
-#endif
-
- static BOOL IsGCHeapInitialized()
+ inline void set_alloc_heap(SVR::GCHeap* heap)
{
- LIMITED_METHOD_CONTRACT;
-
- return (g_pGCHeap != NULL);
+ gc_reserved_1 = heap;
}
- static void WaitForGCCompletion(BOOL bConsiderGCStart = FALSE)
- {
- WRAPPER_NO_CONTRACT;
-
- if (IsGCHeapInitialized())
- GetGCHeap()->WaitUntilGCComplete(bConsiderGCStart);
- }
-
- // The runtime needs to know whether we're using workstation or server GC
- // long before the GCHeap is created. So IsServerHeap cannot be a virtual
- // method on GCHeap. Instead we make it a static method and initialize
- // gcHeapType before any of the calls to IsServerHeap. Note that this also
- // has the advantage of getting the answer without an indirection
- // (virtual call), which is important for perf critical codepaths.
-
- #ifndef DACCESS_COMPILE
- static void InitializeHeapType(bool bServerHeap)
- {
- LIMITED_METHOD_CONTRACT;
-#ifdef FEATURE_SVR_GC
- gcHeapType = bServerHeap ? GC_HEAP_SVR : GC_HEAP_WKS;
-#ifdef WRITE_BARRIER_CHECK
- if (gcHeapType == GC_HEAP_SVR)
- {
- g_GCShadow = 0;
- g_GCShadowEnd = 0;
- }
-#endif
-#else // FEATURE_SVR_GC
- UNREFERENCED_PARAMETER(bServerHeap);
- CONSISTENCY_CHECK(bServerHeap == false);
-#endif // FEATURE_SVR_GC
- }
- #endif
-
- static BOOL IsValidSegmentSize(size_t cbSize)
+ inline SVR::GCHeap* get_home_heap()
{
- //Must be aligned on a Mb and greater than 4Mb
- return (((cbSize & (1024*1024-1)) ==0) && (cbSize >> 22));
+ return static_cast<SVR::GCHeap*>(gc_reserved_2);
}
- static BOOL IsValidGen0MaxSize(size_t cbSize)
+ inline void set_home_heap(SVR::GCHeap* heap)
{
- return (cbSize >= 64*1024);
+ gc_reserved_2 = heap;
}
-
- inline static bool IsServerHeap()
- {
- LIMITED_METHOD_CONTRACT;
-#ifdef FEATURE_SVR_GC
- _ASSERTE(gcHeapType != GC_HEAP_INVALID);
- return (gcHeapType == GC_HEAP_SVR);
-#else // FEATURE_SVR_GC
- return false;
#endif // FEATURE_SVR_GC
- }
+};
- inline static bool UseAllocationContexts()
- {
- WRAPPER_NO_CONTRACT;
-#ifdef FEATURE_REDHAWK
- // SIMPLIFY: only use allocation contexts
- return true;
-#else
-#if defined(_TARGET_ARM_) || defined(FEATURE_PAL)
- return true;
-#else
- return ((IsServerHeap() ? true : (g_SystemInfo.dwNumberOfProcessors >= 2)));
+class IGCHeapInternal : public IGCHeap {
+ friend struct ::_DacGlobals;
+#ifdef DACCESS_COMPILE
+ friend class ClrDataAccess;
#endif
-#endif
- }
-
- inline static bool MarkShouldCompeteForStatics()
- {
- WRAPPER_NO_CONTRACT;
-
- return IsServerHeap() && g_SystemInfo.dwNumberOfProcessors >= 2;
- }
-#ifndef DACCESS_COMPILE
- static GCHeap * CreateGCHeap()
- {
- WRAPPER_NO_CONTRACT;
-
- GCHeap * pGCHeap;
-
-#if defined(FEATURE_SVR_GC)
- pGCHeap = (IsServerHeap() ? SVR::CreateGCHeap() : WKS::CreateGCHeap());
-#else
- pGCHeap = WKS::CreateGCHeap();
-#endif // defined(FEATURE_SVR_GC)
+public:
- g_pGCHeap = pGCHeap;
- return pGCHeap;
- }
-#endif // DACCESS_COMPILE
+ virtual ~IGCHeapInternal() {}
private:
- typedef enum
- {
- GC_HEAP_INVALID = 0,
- GC_HEAP_WKS = 1,
- GC_HEAP_SVR = 2
- } GC_HEAP_TYPE;
-
-#ifdef FEATURE_SVR_GC
- SVAL_DECL(uint32_t,gcHeapType);
-#endif // FEATURE_SVR_GC
-
+ virtual Object* AllocAlign8Common (void* hp, alloc_context* acontext, size_t size, uint32_t flags) = 0;
public:
- // TODO Synchronization, should be moved out
- virtual BOOL IsGCInProgressHelper (BOOL bConsiderGCStart = FALSE) = 0;
- virtual uint32_t WaitUntilGCComplete (BOOL bConsiderGCStart = FALSE) = 0;
- virtual void SetGCInProgress(BOOL fInProgress) = 0;
- virtual CLREventStatic * GetWaitForGCEvent() = 0;
-
- virtual void SetFinalizationRun (Object* obj) = 0;
- virtual Object* GetNextFinalizable() = 0;
- virtual size_t GetNumberOfFinalizable() = 0;
-
- virtual void SetFinalizeQueueForShutdown(BOOL fHasLock) = 0;
- virtual BOOL FinalizeAppDomain(AppDomain *pDomain, BOOL fRunFinalizers) = 0;
- virtual BOOL ShouldRestartFinalizerWatchDog() = 0;
-
- //wait for concurrent GC to finish
- virtual void WaitUntilConcurrentGCComplete () = 0; // Use in managed threads
-#ifndef DACCESS_COMPILE
- virtual HRESULT WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout) = 0; // Use in native threads. TRUE if succeed. FALSE if failed or timeout
-#endif
- virtual BOOL IsConcurrentGCInProgress() = 0;
-
- // Enable/disable concurrent GC
- virtual void TemporaryEnableConcurrentGC() = 0;
- virtual void TemporaryDisableConcurrentGC() = 0;
- virtual BOOL IsConcurrentGCEnabled() = 0;
-
- virtual void FixAllocContext (alloc_context* acontext, BOOL lockp, void* arg, void *heap) = 0;
- virtual Object* Alloc (alloc_context* acontext, size_t size, uint32_t flags) = 0;
-
- // This is safe to call only when EE is suspended.
- virtual Object* GetContainingObject(void *pInteriorPtr) = 0;
-
- // TODO Should be folded into constructor
- virtual HRESULT Initialize () = 0;
-
- virtual HRESULT GarbageCollect (int generation = -1, BOOL low_memory_p=FALSE, int mode = collection_blocking) = 0;
- virtual Object* Alloc (size_t size, uint32_t flags) = 0;
-#ifdef FEATURE_64BIT_ALIGNMENT
- virtual Object* AllocAlign8 (size_t size, uint32_t flags) = 0;
- virtual Object* AllocAlign8 (alloc_context* acontext, size_t size, uint32_t flags) = 0;
-private:
- virtual Object* AllocAlign8Common (void* hp, alloc_context* acontext, size_t size, uint32_t flags) = 0;
-public:
-#endif // FEATURE_64BIT_ALIGNMENT
- virtual Object* AllocLHeap (size_t size, uint32_t flags) = 0;
- virtual void SetReservedVMLimit (size_t vmlimit) = 0;
- virtual void SetCardsAfterBulkCopy( Object**, size_t ) = 0;
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- virtual void WalkObject (Object* obj, walk_fn fn, void* context) = 0;
-#endif //defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-
- virtual bool IsThreadUsingAllocationContextHeap(alloc_context* acontext, int thread_number) = 0;
+ virtual void SetReservedVMLimit (size_t vmlimit) = 0;
virtual int GetNumberOfHeaps () = 0;
virtual int GetHomeHeapNumber () = 0;
-
- virtual int CollectionCount (int generation, int get_bgc_fgc_count = 0) = 0;
-
- // Finalizer queue stuff (should stay)
- virtual bool RegisterForFinalization (int gen, Object* obj) = 0;
-
- // General queries to the GC
- virtual BOOL IsPromoted (Object *object) = 0;
- virtual unsigned WhichGeneration (Object* object) = 0;
- virtual BOOL IsEphemeral (Object* object) = 0;
- virtual BOOL IsHeapPointer (void* object, BOOL small_heap_only = FALSE) = 0;
-
- virtual unsigned GetCondemnedGeneration() = 0;
- virtual int GetGcLatencyMode() = 0;
- virtual int SetGcLatencyMode(int newLatencyMode) = 0;
-
- virtual int GetLOHCompactionMode() = 0;
- virtual void SetLOHCompactionMode(int newLOHCompactionyMode) = 0;
-
- virtual BOOL RegisterForFullGCNotification(uint32_t gen2Percentage,
- uint32_t lohPercentage) = 0;
- virtual BOOL CancelFullGCNotification() = 0;
- virtual int WaitForFullGCApproach(int millisecondsTimeout) = 0;
- virtual int WaitForFullGCComplete(int millisecondsTimeout) = 0;
-
- virtual int StartNoGCRegion(uint64_t totalSize, BOOL lohSizeKnown, uint64_t lohSize, BOOL disallowFullBlockingGC) = 0;
- virtual int EndNoGCRegion() = 0;
+ virtual size_t GetPromotedBytes(int heap_index) = 0;
- virtual BOOL IsObjectInFixedHeap(Object *pObj) = 0;
- virtual size_t GetTotalBytesInUse () = 0;
- virtual size_t GetCurrentObjSize() = 0;
- virtual size_t GetLastGCStartTime(int generation) = 0;
- virtual size_t GetLastGCDuration(int generation) = 0;
- virtual size_t GetNow() = 0;
- virtual unsigned GetGcCount() = 0;
- virtual void TraceGCSegments() = 0;
+ unsigned GetMaxGeneration()
+ {
+ return IGCHeap::maxGeneration;
+ }
- virtual void PublishObject(uint8_t* obj) = 0;
+ BOOL IsValidSegmentSize(size_t cbSize)
+ {
+ //Must be aligned on a Mb and greater than 4Mb
+ return (((cbSize & (1024*1024-1)) ==0) && (cbSize >> 22));
+ }
- // static if since restricting for all heaps is fine
- virtual size_t GetValidSegmentSize(BOOL large_seg = FALSE) = 0;
+ BOOL IsValidGen0MaxSize(size_t cbSize)
+ {
+ return (cbSize >= 64*1024);
+ }
- static BOOL IsLargeObject(MethodTable *mt) {
+ BOOL IsLargeObject(MethodTable *mt)
+ {
WRAPPER_NO_CONTRACT;
return mt->GetBaseSize() >= LARGE_OBJECT_SIZE;
}
- static unsigned GetMaxGeneration() {
- LIMITED_METHOD_DAC_CONTRACT;
- return max_generation;
- }
-
- virtual size_t GetPromotedBytes(int heap_index) = 0;
-
-private:
- enum {
- max_generation = 2,
- };
-
public:
#ifdef FEATURE_BASICFREEZE
@@ -630,43 +270,17 @@ public:
virtual void UnregisterFrozenSegment(segment_handle seg) = 0;
#endif //FEATURE_BASICFREEZE
- // debug support
-#ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
-#ifdef STRESS_HEAP
- //return TRUE if GC actually happens, otherwise FALSE
- virtual BOOL StressHeap(alloc_context * acontext = 0) = 0;
-#endif
-#endif // FEATURE_REDHAWK
-#ifdef VERIFY_HEAP
- virtual void ValidateObjectMember (Object *obj) = 0;
-#endif
-
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- virtual void DescrGenerationsToProfiler (gen_walk_fn fn, void *context) = 0;
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-
protected:
-#ifdef VERIFY_HEAP
public:
- // Return NULL if can't find next object. When EE is not suspended,
- // the result is not accurate: if the input arg is in gen0, the function could
- // return zeroed out memory as next object
- virtual Object * NextObj (Object * object) = 0;
-#ifdef FEATURE_BASICFREEZE
+#if defined(FEATURE_BASICFREEZE) && defined(VERIFY_HEAP)
// Return TRUE if object lives in frozen segment
virtual BOOL IsInFrozenSegment (Object * object) = 0;
-#endif //FEATURE_BASICFREEZE
-#endif //VERIFY_HEAP
+#endif // defined(FEATURE_BASICFREEZE) && defined(VERIFY_HEAP)
};
-extern VOLATILE(int32_t) m_GCLock;
-
// Go through and touch (read) each page straddled by a memory block.
void TouchPages(void * pStart, size_t cb);
-// For low memory notification from host
-extern int32_t g_bLowMemoryFromHost;
-
#ifdef WRITE_BARRIER_CHECK
void updateGCShadow(Object** ptr, Object* val);
#endif
@@ -677,4 +291,27 @@ extern MethodTable *pWeakReferenceMT;
extern MethodTable *pWeakReferenceOfTCanonMT;
extern void FinalizeWeakReference(Object * obj);
+// The single GC heap instance, shared with the VM.
+extern IGCHeapInternal* g_theGcHeap;
+
+#ifndef DACCESS_COMPILE
+inline BOOL IsGCInProgress(bool bConsiderGCStart = FALSE)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return g_theGcHeap != nullptr ? g_theGcHeap->IsGCInProgressHelper(bConsiderGCStart) : false;
+}
+#endif // DACCESS_COMPILE
+
+inline BOOL IsServerHeap()
+{
+ LIMITED_METHOD_CONTRACT;
+#ifdef FEATURE_SVR_GC
+ _ASSERTE(IGCHeap::gcHeapType != IGCHeap::GC_HEAP_INVALID);
+ return (IGCHeap::gcHeapType == IGCHeap::GC_HEAP_SVR);
+#else // FEATURE_SVR_GC
+ return false;
+#endif // FEATURE_SVR_GC
+}
+
#endif // __GC_H
diff --git a/src/gc/gccommon.cpp b/src/gc/gccommon.cpp
index 779aac7296..6bcdf3957b 100644
--- a/src/gc/gccommon.cpp
+++ b/src/gc/gccommon.cpp
@@ -15,10 +15,12 @@
#include "gc.h"
#ifdef FEATURE_SVR_GC
-SVAL_IMPL_INIT(uint32_t,GCHeap,gcHeapType,GCHeap::GC_HEAP_INVALID);
+SVAL_IMPL_INIT(uint32_t,IGCHeap,gcHeapType,IGCHeap::GC_HEAP_INVALID);
#endif // FEATURE_SVR_GC
-GPTR_IMPL(GCHeap,g_pGCHeap);
+SVAL_IMPL_INIT(uint32_t,IGCHeap,maxGeneration,2);
+
+IGCHeapInternal* g_theGcHeap;
/* global versions of the card table and brick table */
GPTR_IMPL(uint32_t,g_card_table);
@@ -112,4 +114,41 @@ void record_changed_seg (uint8_t* start, uint8_t* end,
}
}
+// The runtime needs to know whether we're using workstation or server GC
+// long before the GCHeap is created.
+void InitializeHeapType(bool bServerHeap)
+{
+ LIMITED_METHOD_CONTRACT;
+#ifdef FEATURE_SVR_GC
+ IGCHeap::gcHeapType = bServerHeap ? IGCHeap::GC_HEAP_SVR : IGCHeap::GC_HEAP_WKS;
+#ifdef WRITE_BARRIER_CHECK
+ if (IGCHeap::gcHeapType == IGCHeap::GC_HEAP_SVR)
+ {
+ g_GCShadow = 0;
+ g_GCShadowEnd = 0;
+ }
+#endif // WRITE_BARRIER_CHECK
+#else // FEATURE_SVR_GC
+ UNREFERENCED_PARAMETER(bServerHeap);
+ CONSISTENCY_CHECK(bServerHeap == false);
+#endif // FEATURE_SVR_GC
+}
+
+IGCHeap* InitializeGarbageCollector(IGCToCLR* clrToGC)
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREFERENCED_PARAMETER(clrToGC);
+
+ IGCHeapInternal* heap;
+#ifdef FEATURE_SVR_GC
+ assert(IGCHeap::gcHeapType != IGCHeap::GC_HEAP_INVALID);
+ heap = IGCHeap::gcHeapType == IGCHeap::GC_HEAP_SVR ? SVR::CreateGCHeap() : WKS::CreateGCHeap();
+#else
+ heap = WKS::CreateGCHeap();
+#endif
+
+ g_theGcHeap = heap;
+ return heap;
+}
+
#endif // !DACCESS_COMPILE
diff --git a/src/gc/gcee.cpp b/src/gc/gcee.cpp
index d37eaf4de9..e2015eef0c 100644
--- a/src/gc/gcee.cpp
+++ b/src/gc/gcee.cpp
@@ -148,7 +148,7 @@ void GCHeap::UpdatePostGCCounters()
// if a max gen garbage collection was performed, resync the GC Handle counter;
// if threads are currently suspended, we do not need to obtain a lock on each handle table
if (condemned_gen == max_generation)
- total_num_gc_handles = HndCountAllHandles(!GCHeap::IsGCInProgress());
+ total_num_gc_handles = HndCountAllHandles(!IsGCInProgress());
#endif //FEATURE_REDHAWK
// per generation calculation.
@@ -782,7 +782,7 @@ void gc_heap::background_gc_wait_lh (alloc_wait_reason awr)
/******************************************************************************/
-::GCHeap* CreateGCHeap() {
+IGCHeapInternal* CreateGCHeap() {
return new(nothrow) GCHeap(); // we return wks or svr
}
@@ -823,12 +823,12 @@ void GCHeap::TraceGCSegments()
#endif // FEATURE_EVENT_TRACE
}
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
void GCHeap::DescrGenerationsToProfiler (gen_walk_fn fn, void *context)
{
+#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
pGenGCHeap->descr_generations_to_profiler(fn, context);
-}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+}
#ifdef FEATURE_BASICFREEZE
segment_handle GCHeap::RegisterFrozenSegment(segment_info *pseginfo)
diff --git a/src/gc/gcimpl.h b/src/gc/gcimpl.h
index 6a4ee86cd8..38392b8229 100644
--- a/src/gc/gcimpl.h
+++ b/src/gc/gcimpl.h
@@ -36,29 +36,10 @@ inline void checkGCWriteBarrier() {}
void GCProfileWalkHeap();
-class GCHeap;
class gc_heap;
class CFinalize;
-// TODO : it would be easier to make this an ORed value
-enum gc_reason
-{
- reason_alloc_soh = 0,
- reason_induced = 1,
- reason_lowmemory = 2,
- reason_empty = 3,
- reason_alloc_loh = 4,
- reason_oos_soh = 5,
- reason_oos_loh = 6,
- reason_induced_noforce = 7, // it's an induced GC and doesn't have to be blocking.
- reason_gcstress = 8, // this turns into reason_induced & gc_mechanisms.stress_induced = true
- reason_lowmemory_blocking = 9,
- reason_induced_compacting = 10,
- reason_lowmemory_host = 11,
- reason_max
-};
-
-class GCHeap : public ::GCHeap
+class GCHeap : public IGCHeapInternal
{
protected:
@@ -111,17 +92,15 @@ public:
//flags can be GC_ALLOC_CONTAINS_REF GC_ALLOC_FINALIZE
Object* Alloc (size_t size, uint32_t flags);
-#ifdef FEATURE_64BIT_ALIGNMENT
Object* AllocAlign8 (size_t size, uint32_t flags);
- Object* AllocAlign8 (alloc_context* acontext, size_t size, uint32_t flags);
+ Object* AllocAlign8 (gc_alloc_context* acontext, size_t size, uint32_t flags);
private:
Object* AllocAlign8Common (void* hp, alloc_context* acontext, size_t size, uint32_t flags);
public:
-#endif // FEATURE_64BIT_ALIGNMENT
Object* AllocLHeap (size_t size, uint32_t flags);
- Object* Alloc (alloc_context* acontext, size_t size, uint32_t flags);
+ Object* Alloc (gc_alloc_context* acontext, size_t size, uint32_t flags);
- void FixAllocContext (alloc_context* acontext,
+ void FixAllocContext (gc_alloc_context* acontext,
BOOL lockp, void* arg, void *heap);
Object* GetContainingObject(void *pInteriorPtr);
@@ -132,7 +111,7 @@ public:
#endif //MULTIPLE_HEAPS
int GetHomeHeapNumber ();
- bool IsThreadUsingAllocationContextHeap(alloc_context* acontext, int thread_number);
+ bool IsThreadUsingAllocationContextHeap(gc_alloc_context* acontext, int thread_number);
int GetNumberOfHeaps ();
void HideAllocContext(alloc_context*);
void RevealAllocContext(alloc_context*);
@@ -176,9 +155,7 @@ public:
BOOL IsEphemeral (Object* object);
BOOL IsHeapPointer (void* object, BOOL small_heap_only = FALSE);
-#ifdef VERIFY_HEAP
void ValidateObjectMember (Object *obj);
-#endif //_DEBUG
PER_HEAP size_t ApproxTotalBytesInUse(BOOL small_heap_only = FALSE);
PER_HEAP size_t ApproxFreeBytes();
@@ -199,8 +176,6 @@ public:
int StartNoGCRegion(uint64_t totalSize, BOOL lohSizeKnown, uint64_t lohSize, BOOL disallowFullBlockingGC);
int EndNoGCRegion();
-
- PER_HEAP_ISOLATED unsigned GetMaxGeneration();
unsigned GetGcCount();
@@ -224,9 +199,7 @@ public:
BOOL ShouldRestartFinalizerWatchDog();
void SetCardsAfterBulkCopy( Object**, size_t);
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
void WalkObject (Object* obj, walk_fn fn, void* context);
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
public: // FIX
@@ -281,11 +254,12 @@ private:
// the condition here may have to change as well.
return g_TrapReturningThreads == 0;
}
-#ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
-#ifdef STRESS_HEAP
public:
//return TRUE if GC actually happens, otherwise FALSE
- BOOL StressHeap(alloc_context * acontext = 0);
+ BOOL StressHeap(gc_alloc_context * acontext = 0);
+
+#ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
+#ifdef STRESS_HEAP
protected:
// only used in BACKGROUND_GC, but the symbol is not defined yet...
@@ -300,17 +274,13 @@ protected:
#endif // STRESS_HEAP
#endif // FEATURE_REDHAWK
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
virtual void DescrGenerationsToProfiler (gen_walk_fn fn, void *context);
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-#ifdef VERIFY_HEAP
public:
Object * NextObj (Object * object);
-#ifdef FEATURE_BASICFREEZE
+#if defined (FEATURE_BASICFREEZE) && defined (VERIFY_HEAP)
BOOL IsInFrozenSegment (Object * object);
-#endif //FEATURE_BASICFREEZE
-#endif //VERIFY_HEAP
+#endif // defined (FEATURE_BASICFREEZE) && defined (VERIFY_HEAP)
};
#endif // GCIMPL_H_
diff --git a/src/gc/gcinterface.h b/src/gc/gcinterface.h
new file mode 100644
index 0000000000..a12031bc61
--- /dev/null
+++ b/src/gc/gcinterface.h
@@ -0,0 +1,509 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#ifndef _GC_INTERFACE_H_
+#define _GC_INTERFACE_H_
+
+// The allocation context must be known to the VM for use in the allocation
+// fast path and known to the GC for performing the allocation. Every Thread
+// has its own allocation context that it hands to the GC when allocating.
+struct gc_alloc_context
+{
+ uint8_t* alloc_ptr;
+ uint8_t* alloc_limit;
+ int64_t alloc_bytes; //Number of bytes allocated on SOH by this context
+ int64_t alloc_bytes_loh; //Number of bytes allocated on LOH by this context
+ // These two fields are deliberately not exposed past the EE-GC interface.
+ void* gc_reserved_1;
+ void* gc_reserved_2;
+ int alloc_count;
+public:
+
+ void init()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ alloc_ptr = 0;
+ alloc_limit = 0;
+ alloc_bytes = 0;
+ alloc_bytes_loh = 0;
+ gc_reserved_1 = 0;
+ gc_reserved_2 = 0;
+ alloc_count = 0;
+ }
+};
+
+#ifdef PROFILING_SUPPORTED
+#define GC_PROFILING //Turn on profiling
+#endif // PROFILING_SUPPORTED
+
+#define LARGE_OBJECT_SIZE ((size_t)(85000))
+
+class Object;
+class IGCHeap;
+class IGCToCLR;
+
+// Initializes the garbage collector. Should only be called
+// once, during EE startup.
+IGCHeap* InitializeGarbageCollector(IGCToCLR* clrToGC);
+
+// The runtime needs to know whether we're using workstation or server GC
+// long before the GCHeap is created. This function sets the type of
+// heap that will be created, before InitializeGarbageCollector is called
+// and the heap is actually created.
+void InitializeHeapType(bool bServerHeap);
+
+#ifndef DACCESS_COMPILE
+extern "C" {
+#endif // !DACCESS_COMPILE
+GPTR_DECL(uint8_t,g_lowest_address);
+GPTR_DECL(uint8_t,g_highest_address);
+GPTR_DECL(uint32_t,g_card_table);
+#ifndef DACCESS_COMPILE
+}
+#endif // !DACCESS_COMPILE
+
+extern "C" uint8_t* g_ephemeral_low;
+extern "C" uint8_t* g_ephemeral_high;
+
+#ifdef WRITE_BARRIER_CHECK
+//always defined, but should be 0 in Server GC
+extern uint8_t* g_GCShadow;
+extern uint8_t* g_GCShadowEnd;
+// saves the g_lowest_address in between GCs to verify the consistency of the shadow segment
+extern uint8_t* g_shadow_lowest_address;
+#endif
+
+// For low memory notification from host
+extern int32_t g_bLowMemoryFromHost;
+
+extern VOLATILE(int32_t) m_GCLock;
+
+// !!!!!!!!!!!!!!!!!!!!!!!
+// make sure you change the def in bcl\system\gc.cs
+// if you change this!
+enum collection_mode
+{
+ collection_non_blocking = 0x00000001,
+ collection_blocking = 0x00000002,
+ collection_optimized = 0x00000004,
+ collection_compacting = 0x00000008
+#ifdef STRESS_HEAP
+ , collection_gcstress = 0x80000000
+#endif // STRESS_HEAP
+};
+
+// !!!!!!!!!!!!!!!!!!!!!!!
+// make sure you change the def in bcl\system\gc.cs
+// if you change this!
+enum wait_full_gc_status
+{
+ wait_full_gc_success = 0,
+ wait_full_gc_failed = 1,
+ wait_full_gc_cancelled = 2,
+ wait_full_gc_timeout = 3,
+ wait_full_gc_na = 4
+};
+
+// !!!!!!!!!!!!!!!!!!!!!!!
+// make sure you change the def in bcl\system\gc.cs
+// if you change this!
+enum start_no_gc_region_status
+{
+ start_no_gc_success = 0,
+ start_no_gc_no_memory = 1,
+ start_no_gc_too_large = 2,
+ start_no_gc_in_progress = 3
+};
+
+enum end_no_gc_region_status
+{
+ end_no_gc_success = 0,
+ end_no_gc_not_in_progress = 1,
+ end_no_gc_induced = 2,
+ end_no_gc_alloc_exceeded = 3
+};
+
+typedef BOOL (* walk_fn)(Object*, void*);
+typedef void (* gen_walk_fn)(void* context, int generation, uint8_t* range_start, uint8_t* range_end, uint8_t* range_reserved);
+
+// IGCHeap is the interface that the VM will use when interacting with the GC.
+class IGCHeap {
+public:
+ /*
+ ===========================================================================
+ Hosting APIs. These are used by GC hosting. The code that
+ calls these methods may possibly be moved behind the interface -
+ today, the VM handles the setting of segment size and max gen 0 size.
+ (See src/vm/corehost.cpp)
+ ===========================================================================
+ */
+
+ // Returns whether or not the given size is a valid segment size.
+ virtual BOOL IsValidSegmentSize(size_t size) = 0;
+
+ // Returns whether or not the given size is a valid gen 0 max size.
+ virtual BOOL IsValidGen0MaxSize(size_t size) = 0;
+
+ // Gets a valid segment size.
+ virtual size_t GetValidSegmentSize(BOOL large_seg = FALSE) = 0;
+
+ /*
+ ===========================================================================
+ Concurrent GC routines. These are used in various places in the VM
+ to synchronize with the GC, when the VM wants to update something that
+ the GC is potentially using, if it's doing a background GC.
+
+ Concrete examples of this are moving async pinned handles across appdomains
+ and profiling/ETW scenarios.
+ ===========================================================================
+ */
+
+ // Blocks until any running concurrent GCs complete.
+ virtual void WaitUntilConcurrentGCComplete() = 0;
+
+ // Returns true if a concurrent GC is in progress, false otherwise.
+ virtual BOOL IsConcurrentGCInProgress() = 0;
+
+ // Temporarily enables concurrent GC, used during profiling.
+ virtual void TemporaryEnableConcurrentGC() = 0;
+
+ // Temporarily disables concurrent GC, used during profiling.
+ virtual void TemporaryDisableConcurrentGC() = 0;
+
+ // Returns whether or not Concurrent GC is enabled.
+ virtual BOOL IsConcurrentGCEnabled() = 0;
+
+ // Wait for a concurrent GC to complete if one is in progress, with the given timeout.
+    virtual HRESULT WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout) = 0;    // Use in native threads. Returns a success HRESULT if the wait completed, or a failure HRESULT on error or timeout.
+
+
+ /*
+ ===========================================================================
+ Finalization routines. These are used by the finalizer thread to communicate
+ with the GC.
+ ===========================================================================
+ */
+
+ // Finalizes an app domain by finalizing objects within that app domain.
+ virtual BOOL FinalizeAppDomain(AppDomain* pDomain, BOOL fRunFinalizers) = 0;
+
+ // Finalizes all registered objects for shutdown, even if they are still reachable.
+ virtual void SetFinalizeQueueForShutdown(BOOL fHasLock) = 0;
+
+ // Gets the number of finalizable objects.
+ virtual size_t GetNumberOfFinalizable() = 0;
+
+ // Traditionally used by the finalizer thread on shutdown to determine
+ // whether or not to time out. Returns true if the GC lock has not been taken.
+ virtual BOOL ShouldRestartFinalizerWatchDog() = 0;
+
+ // Gets the next finalizable object.
+ virtual Object* GetNextFinalizable() = 0;
+
+ /*
+ ===========================================================================
+ BCL routines. These are routines that are directly exposed by mscorlib
+ as a part of the `System.GC` class. These routines behave in the same
+ manner as the functions on `System.GC`.
+ ===========================================================================
+ */
+
+ // Gets the current GC latency mode.
+ virtual int GetGcLatencyMode() = 0;
+
+ // Sets the current GC latency mode. newLatencyMode has already been
+ // verified by mscorlib to be valid.
+ virtual int SetGcLatencyMode(int newLatencyMode) = 0;
+
+ // Gets the current LOH compaction mode.
+ virtual int GetLOHCompactionMode() = 0;
+
+ // Sets the current LOH compaction mode. newLOHCompactionMode has
+ // already been verified by mscorlib to be valid.
+ virtual void SetLOHCompactionMode(int newLOHCompactionMode) = 0;
+
+ // Registers for a full GC notification, raising a notification if the gen 2 or
+ // LOH object heap thresholds are exceeded.
+ virtual BOOL RegisterForFullGCNotification(uint32_t gen2Percentage, uint32_t lohPercentage) = 0;
+
+ // Cancels a full GC notification that was requested by `RegisterForFullGCNotification`.
+ virtual BOOL CancelFullGCNotification() = 0;
+
+ // Returns the status of a registered notification for determining whether a blocking
+ // Gen 2 collection is about to be initiated, with the given timeout.
+ virtual int WaitForFullGCApproach(int millisecondsTimeout) = 0;
+
+ // Returns the status of a registered notification for determining whether a blocking
+ // Gen 2 collection has completed, with the given timeout.
+ virtual int WaitForFullGCComplete(int millisecondsTimeout) = 0;
+
+ // Returns the generation in which obj is found. Also used by the VM
+ // in some places, in particular syncblk code.
+ virtual unsigned WhichGeneration(Object* obj) = 0;
+
+ // Returns the number of GCs that have transpired in the given generation
+ // since the beginning of the life of the process. Also used by the VM
+ // for debug code and app domains.
+ virtual int CollectionCount(int generation, int get_bgc_fgc_coutn = 0) = 0;
+
+ // Begins a no-GC region, returning a code indicating whether entering the no-GC
+ // region was successful.
+ virtual int StartNoGCRegion(uint64_t totalSize, BOOL lohSizeKnown, uint64_t lohSize, BOOL disallowFullBlockingGC) = 0;
+
+ // Exits a no-GC region.
+ virtual int EndNoGCRegion() = 0;
+
+ // Gets the total number of bytes in use.
+ virtual size_t GetTotalBytesInUse() = 0;
+
+ // Forces a garbage collection of the given generation. Also used extensively
+ // throughout the VM.
+ virtual HRESULT GarbageCollect(int generation = -1, BOOL low_memory_p = FALSE, int mode = collection_blocking) = 0;
+
+ // Gets the largest GC generation. Also used extensively throughout the VM.
+ virtual unsigned GetMaxGeneration() = 0;
+
+ // Indicates that an object's finalizer should not be run upon the object's collection.
+ virtual void SetFinalizationRun(Object* obj) = 0;
+
+ // Indicates that an object's finalizer should be run upon the object's collection.
+ virtual bool RegisterForFinalization(int gen, Object* obj) = 0;
+
+ /*
+ ===========================================================================
+ Miscellaneous routines used by the VM.
+ ===========================================================================
+ */
+
+ // Initializes the GC heap, returning whether or not the initialization
+ // was successful.
+ virtual HRESULT Initialize() = 0;
+
+    // Returns whether or not this object was promoted by the last GC.
+ virtual BOOL IsPromoted(Object* object) = 0;
+
+ // Returns true if this pointer points into a GC heap, false otherwise.
+ virtual BOOL IsHeapPointer(void* object, BOOL small_heap_only = FALSE) = 0;
+
+ // Return the generation that has been condemned by the current GC.
+ virtual unsigned GetCondemnedGeneration() = 0;
+
+ // Returns whether or not a GC is in progress.
+ virtual BOOL IsGCInProgressHelper(BOOL bConsiderGCStart = FALSE) = 0;
+
+    // Returns the number of GCs that have occurred. Mainly used for
+    // sanity checks asserting that a GC has not occurred.
+ virtual unsigned GetGcCount() = 0;
+
+ // Sets cards after an object has been memmoved.
+ virtual void SetCardsAfterBulkCopy(Object** obj, size_t length) = 0;
+
+ // Gets whether or not the home heap of this alloc context matches the heap
+ // associated with this thread.
+ virtual bool IsThreadUsingAllocationContextHeap(gc_alloc_context* acontext, int thread_number) = 0;
+
+ // Returns whether or not this object resides in an ephemeral generation.
+ virtual BOOL IsEphemeral(Object* object) = 0;
+
+ // Blocks until a GC is complete, returning a code indicating the wait was successful.
+ virtual uint32_t WaitUntilGCComplete(BOOL bConsiderGCStart = FALSE) = 0;
+
+ // "Fixes" an allocation context by binding its allocation pointer to a
+ // location on the heap.
+ virtual void FixAllocContext(gc_alloc_context* acontext, BOOL lockp, void* arg, void* heap) = 0;
+
+ // Gets the total survived size plus the total allocated bytes on the heap.
+ virtual size_t GetCurrentObjSize() = 0;
+
+ // Sets whether or not a GC is in progress.
+ virtual void SetGCInProgress(BOOL fInProgress) = 0;
+
+ /*
+ Add/RemoveMemoryPressure support routines. These are on the interface
+ for now, but we should move Add/RemoveMemoryPressure from the VM to the GC.
+ When that occurs, these three routines can be removed from the interface.
+ */
+
+    // Get the timestamp corresponding to the last GC that occurred for the
+ // given generation.
+ virtual size_t GetLastGCStartTime(int generation) = 0;
+
+    // Gets the duration of the last GC that occurred for the given generation.
+ virtual size_t GetLastGCDuration(int generation) = 0;
+
+ // Gets a timestamp for the current moment in time.
+ virtual size_t GetNow() = 0;
+
+ /*
+ ===========================================================================
+ Allocation routines. These all call into the GC's allocator and may trigger a garbage
+ collection.
+ ===========================================================================
+ */
+
+ // Allocates an object on the given allocation context with the given size and flags.
+ virtual Object* Alloc(gc_alloc_context* acontext, size_t size, uint32_t flags) = 0;
+
+ // Allocates an object on the default allocation context with the given size and flags.
+ virtual Object* Alloc(size_t size, uint32_t flags) = 0;
+
+ // Allocates an object on the large object heap with the given size and flags.
+ virtual Object* AllocLHeap(size_t size, uint32_t flags) = 0;
+
+ // Allocates an object on the default allocation context, aligned to 64 bits,
+ // with the given size and flags.
+ virtual Object* AllocAlign8 (size_t size, uint32_t flags) = 0;
+
+ // Allocates an object on the given allocation context, aligned to 64 bits,
+ // with the given size and flags.
+ virtual Object* AllocAlign8 (gc_alloc_context* acontext, size_t size, uint32_t flags) = 0;
+
+ // If allocating on the LOH, blocks if a BGC is in a position (concurrent mark)
+ // where the LOH allocator can't allocate.
+ virtual void PublishObject(uint8_t* obj) = 0;
+
+ // Gets the event that suspended threads will use to wait for the
+ // end of a GC.
+ virtual CLREventStatic* GetWaitForGCEvent() = 0;
+
+ /*
+ ===========================================================================
+ Heap verification routines. These are used during heap verification only.
+ ===========================================================================
+ */
+ // Returns whether or not this object is in the fixed heap.
+ virtual BOOL IsObjectInFixedHeap(Object* pObj) = 0;
+
+ // Walks an object and validates its members.
+ virtual void ValidateObjectMember(Object* obj) = 0;
+
+ // Retrieves the next object after the given object. When the EE
+ // is not suspended, the result is not accurate - if the input argument
+ // is in Gen0, the function could return zeroed out memory as the next object.
+ virtual Object* NextObj(Object* object) = 0;
+
+ // Given an interior pointer, return a pointer to the object
+ // containing that pointer. This is safe to call only when the EE is suspended.
+ virtual Object* GetContainingObject(void* pInteriorPtr) = 0;
+
+ /*
+ ===========================================================================
+ Profiling routines. Used for event tracing and profiling to broadcast
+ information regarding the heap.
+ ===========================================================================
+ */
+
+ // Walks an object, invoking a callback on each member.
+ virtual void WalkObject(Object* obj, walk_fn fn, void* context) = 0;
+
+ // Describes all generations to the profiler, invoking a callback on each generation.
+ virtual void DescrGenerationsToProfiler(gen_walk_fn fn, void* context) = 0;
+
+ // Traces all GC segments and fires ETW events with information on them.
+ virtual void TraceGCSegments() = 0;
+
+ /*
+ ===========================================================================
+ GC Stress routines. Used only when running under GC Stress.
+ ===========================================================================
+ */
+
+ // Returns TRUE if GC actually happens, otherwise FALSE
+ virtual BOOL StressHeap(gc_alloc_context* acontext = 0) = 0;
+
+ IGCHeap() {}
+ virtual ~IGCHeap() {}
+
+ typedef enum
+ {
+ GC_HEAP_INVALID = 0,
+ GC_HEAP_WKS = 1,
+ GC_HEAP_SVR = 2
+ } GC_HEAP_TYPE;
+
+#ifdef FEATURE_SVR_GC
+ SVAL_DECL(uint32_t, gcHeapType);
+#endif
+
+ SVAL_DECL(uint32_t, maxGeneration);
+};
+
+#ifdef WRITE_BARRIER_CHECK
+void updateGCShadow(Object** ptr, Object* val);
+#endif
+
+//constants for the flags parameter to the gc call back
+
+#define GC_CALL_INTERIOR 0x1
+#define GC_CALL_PINNED 0x2
+#define GC_CALL_CHECK_APP_DOMAIN 0x4
+
+//flags for IGCHeapAlloc(...)
+#define GC_ALLOC_FINALIZE 0x1
+#define GC_ALLOC_CONTAINS_REF 0x2
+#define GC_ALLOC_ALIGN8_BIAS 0x4
+#define GC_ALLOC_ALIGN8 0x8
+
+struct ScanContext
+{
+ Thread* thread_under_crawl;
+ int thread_number;
+ uintptr_t stack_limit; // Lowest point on the thread stack that the scanning logic is permitted to read
+ BOOL promotion; //TRUE: Promotion, FALSE: Relocation.
+ BOOL concurrent; //TRUE: concurrent scanning
+#if CHECK_APP_DOMAIN_LEAKS || defined (FEATURE_APPDOMAIN_RESOURCE_MONITORING) || defined (DACCESS_COMPILE)
+ AppDomain *pCurrentDomain;
+#endif //CHECK_APP_DOMAIN_LEAKS || FEATURE_APPDOMAIN_RESOURCE_MONITORING || DACCESS_COMPILE
+
+#ifndef FEATURE_REDHAWK
+#if defined(GC_PROFILING) || defined (DACCESS_COMPILE)
+ MethodDesc *pMD;
+#endif //GC_PROFILING || DACCESS_COMPILE
+#endif // FEATURE_REDHAWK
+#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ EtwGCRootKind dwEtwRootKind;
+#endif // GC_PROFILING || FEATURE_EVENT_TRACE
+
+ ScanContext()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ thread_under_crawl = 0;
+ thread_number = -1;
+ stack_limit = 0;
+ promotion = FALSE;
+ concurrent = FALSE;
+#ifdef GC_PROFILING
+ pMD = NULL;
+#endif //GC_PROFILING
+#ifdef FEATURE_EVENT_TRACE
+ dwEtwRootKind = kEtwGCRootKindOther;
+#endif // FEATURE_EVENT_TRACE
+ }
+};
+
+#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+struct ProfilingScanContext : ScanContext
+{
+ BOOL fProfilerPinned;
+ void * pvEtwContext;
+ void *pHeapId;
+
+ ProfilingScanContext(BOOL fProfilerPinnedParam) : ScanContext()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ pHeapId = NULL;
+ fProfilerPinned = fProfilerPinnedParam;
+ pvEtwContext = NULL;
+#ifdef FEATURE_CONSERVATIVE_GC
+ // To not confuse GCScan::GcScanRoots
+ promotion = g_pConfig->GetGCConservative();
+#endif
+ }
+};
+#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+
+#endif // _GC_INTERFACE_H_
diff --git a/src/gc/gcpriv.h b/src/gc/gcpriv.h
index 03a23454a0..9fbb289db8 100644
--- a/src/gc/gcpriv.h
+++ b/src/gc/gcpriv.h
@@ -2547,9 +2547,9 @@ protected:
PER_HEAP
void descr_generations (BOOL begin_gc_p);
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
PER_HEAP_ISOLATED
void descr_generations_to_profiler (gen_walk_fn fn, void *context);
+#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
PER_HEAP
void record_survived_for_profiler(int condemned_gen_number, uint8_t * first_condemned_address);
PER_HEAP
@@ -2978,7 +2978,7 @@ protected:
PER_HEAP
VOLATILE(int) alloc_context_count;
#else //MULTIPLE_HEAPS
-#define vm_heap ((GCHeap*) g_pGCHeap)
+#define vm_heap ((GCHeap*) g_theGcHeap)
#define heap_number (0)
#endif //MULTIPLE_HEAPS
diff --git a/src/gc/gcscan.cpp b/src/gc/gcscan.cpp
index 42989e0414..f22840a262 100644
--- a/src/gc/gcscan.cpp
+++ b/src/gc/gcscan.cpp
@@ -129,7 +129,7 @@ static void CALLBACK CheckPromoted(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t * /*
LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Checking referent of Weak-", pObjRef, "to ", *pObjRef)));
Object **pRef = (Object **)pObjRef;
- if (!GCHeap::GetGCHeap()->IsPromoted(*pRef))
+ if (!g_theGcHeap->IsPromoted(*pRef))
{
LOG((LF_GC, LL_INFO100, LOG_HANDLE_OBJECT_CLASS("Severing Weak-", pObjRef, "to unreachable ", *pObjRef)));
@@ -240,14 +240,14 @@ void GCScan::GcRuntimeStructuresValid (BOOL bValid)
void GCScan::GcDemote (int condemned, int max_gen, ScanContext* sc)
{
Ref_RejuvenateHandles (condemned, max_gen, (uintptr_t)sc);
- if (!GCHeap::IsServerHeap() || sc->thread_number == 0)
+ if (!IsServerHeap() || sc->thread_number == 0)
GCToEEInterface::SyncBlockCacheDemote(max_gen);
}
void GCScan::GcPromotionsGranted (int condemned, int max_gen, ScanContext* sc)
{
Ref_AgeHandles(condemned, max_gen, (uintptr_t)sc);
- if (!GCHeap::IsServerHeap() || sc->thread_number == 0)
+ if (!IsServerHeap() || sc->thread_number == 0)
GCToEEInterface::SyncBlockCachePromotionsGranted(max_gen);
}
diff --git a/src/gc/handletable.cpp b/src/gc/handletable.cpp
index 43b43ffcea..fe184bf2ff 100644
--- a/src/gc/handletable.cpp
+++ b/src/gc/handletable.cpp
@@ -755,7 +755,7 @@ void HndLogSetEvent(OBJECTHANDLE handle, _UNCHECKED_OBJECTREF value)
uint32_t hndType = HandleFetchType(handle);
ADIndex appDomainIndex = HndGetHandleADIndex(handle);
AppDomain* pAppDomain = SystemDomain::GetAppDomainAtIndex(appDomainIndex);
- uint32_t generation = value != 0 ? GCHeap::GetGCHeap()->WhichGeneration(value) : 0;
+ uint32_t generation = value != 0 ? g_theGcHeap->WhichGeneration(value) : 0;
FireEtwSetGCHandle((void*) handle, value, hndType, generation, (int64_t) pAppDomain, GetClrInstanceId());
FireEtwPrvSetGCHandle((void*) handle, value, hndType, generation, (int64_t) pAppDomain, GetClrInstanceId());
@@ -774,14 +774,14 @@ void HndLogSetEvent(OBJECTHANDLE handle, _UNCHECKED_OBJECTREF value)
for (size_t i = 0; i < num; i ++)
{
value = ppObj[i];
- uint32_t generation = value != 0 ? GCHeap::GetGCHeap()->WhichGeneration(value) : 0;
+ uint32_t generation = value != 0 ? g_theGcHeap->WhichGeneration(value) : 0;
FireEtwSetGCHandle(overlapped, value, HNDTYPE_PINNED, generation, (int64_t) pAppDomain, GetClrInstanceId());
}
}
else
{
value = OBJECTREF_TO_UNCHECKED_OBJECTREF(overlapped->m_userObject);
- uint32_t generation = value != 0 ? GCHeap::GetGCHeap()->WhichGeneration(value) : 0;
+ uint32_t generation = value != 0 ? g_theGcHeap->WhichGeneration(value) : 0;
FireEtwSetGCHandle(overlapped, value, HNDTYPE_PINNED, generation, (int64_t) pAppDomain, GetClrInstanceId());
}
}
@@ -838,7 +838,7 @@ void HndWriteBarrier(OBJECTHANDLE handle, OBJECTREF objref)
if (*pClumpAge != 0) // Perf optimization: if clumpAge is 0, nothing more to do
{
// find out generation
- int generation = GCHeap::GetGCHeap()->WhichGeneration(value);
+ int generation = g_theGcHeap->WhichGeneration(value);
uint32_t uType = HandleFetchType(handle);
#ifndef FEATURE_REDHAWK
diff --git a/src/gc/handletablecore.cpp b/src/gc/handletablecore.cpp
index 5e077de8a2..c6f569d5fa 100644
--- a/src/gc/handletablecore.cpp
+++ b/src/gc/handletablecore.cpp
@@ -14,6 +14,7 @@
#include "common.h"
#include "gcenv.h"
+#include "gc.h"
#ifndef FEATURE_REDHAWK
#include "nativeoverlapped.h"
@@ -1111,13 +1112,13 @@ SLOW_PATH:
// we have the lock held but the part we care about (the async table scan) takes the table lock during
// a preparation step so we'll be able to complete our segment moves before the async scan has a
// chance to interfere with us (or vice versa).
- if (GCHeap::GetGCHeap()->IsConcurrentGCInProgress())
+ if (g_theGcHeap->IsConcurrentGCInProgress())
{
// A concurrent GC is in progress so someone might be scanning our segments asynchronously.
// Release the lock, wait for the GC to complete and try again. The order is important; if we wait
// before releasing the table lock we can deadlock with an async table scan.
ch.Release();
- GCHeap::GetGCHeap()->WaitUntilConcurrentGCComplete();
+ g_theGcHeap->WaitUntilConcurrentGCComplete();
continue;
}
diff --git a/src/gc/handletablescan.cpp b/src/gc/handletablescan.cpp
index 863b5a52b0..1f58336573 100644
--- a/src/gc/handletablescan.cpp
+++ b/src/gc/handletablescan.cpp
@@ -818,7 +818,7 @@ void BlockResetAgeMapForBlocksWorker(uint32_t *pdwGen, uint32_t dwClumpMask, Sca
{
if (!HndIsNullOrDestroyedHandle(*pValue))
{
- int thisAge = GCHeap::GetGCHeap()->WhichGeneration(*pValue);
+ int thisAge = g_theGcHeap->WhichGeneration(*pValue);
if (minAge > thisAge)
minAge = thisAge;
@@ -830,7 +830,7 @@ void BlockResetAgeMapForBlocksWorker(uint32_t *pdwGen, uint32_t dwClumpMask, Sca
if (pOverlapped->m_userObject != NULL)
{
Object * pUserObject = OBJECTREFToObject(pOverlapped->m_userObject);
- thisAge = GCHeap::GetGCHeap()->WhichGeneration(pUserObject);
+ thisAge = g_theGcHeap->WhichGeneration(pUserObject);
if (minAge > thisAge)
minAge = thisAge;
if (pOverlapped->m_isArray)
@@ -840,7 +840,7 @@ void BlockResetAgeMapForBlocksWorker(uint32_t *pdwGen, uint32_t dwClumpMask, Sca
size_t num = pUserArrayObject->GetNumComponents();
for (size_t i = 0; i < num; i ++)
{
- thisAge = GCHeap::GetGCHeap()->WhichGeneration(pObj[i]);
+ thisAge = g_theGcHeap->WhichGeneration(pObj[i]);
if (minAge > thisAge)
minAge = thisAge;
}
@@ -925,10 +925,10 @@ static void VerifyObjectAndAge(_UNCHECKED_OBJECTREF *pValue, _UNCHECKED_OBJECTRE
UNREFERENCED_PARAMETER(pValue);
VerifyObject(from, obj);
- int thisAge = GCHeap::GetGCHeap()->WhichGeneration(obj);
+ int thisAge = g_theGcHeap->WhichGeneration(obj);
//debugging code
- //if (minAge > thisAge && thisAge < GCHeap::GetGCHeap()->GetMaxGeneration())
+ //if (minAge > thisAge && thisAge < g_theGcHeap->GetMaxGeneration())
//{
// if ((*pValue) == obj)
// printf("Handle (age %u) %p -> %p (age %u)", minAge, pValue, obj, thisAge);
@@ -946,7 +946,7 @@ static void VerifyObjectAndAge(_UNCHECKED_OBJECTREF *pValue, _UNCHECKED_OBJECTRE
// }
//}
- if (minAge >= GEN_MAX_AGE || (minAge > thisAge && thisAge < static_cast<int>(GCHeap::GetGCHeap()->GetMaxGeneration())))
+ if (minAge >= GEN_MAX_AGE || (minAge > thisAge && thisAge < static_cast<int>(g_theGcHeap->GetMaxGeneration())))
{
_ASSERTE(!"Fatal Error in HandleTable.");
EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
diff --git a/src/gc/objecthandle.cpp b/src/gc/objecthandle.cpp
index 74a8a71c5e..231332a395 100644
--- a/src/gc/objecthandle.cpp
+++ b/src/gc/objecthandle.cpp
@@ -95,7 +95,7 @@ void CALLBACK PromoteRefCounted(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtra
Object *pOldObj = pObj;
#endif
- if (!HndIsNullOrDestroyedHandle(pObj) && !GCHeap::GetGCHeap()->IsPromoted(pObj))
+ if (!HndIsNullOrDestroyedHandle(pObj) && !g_theGcHeap->IsPromoted(pObj))
{
if (GCToEEInterface::RefCountedHandleCallbacks(pObj))
{
@@ -186,9 +186,9 @@ void CALLBACK PromoteDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *p
ScanContext *sc = (ScanContext*)lp1;
DhContext *pDhContext = Ref_GetDependentHandleContext(sc);
- if (*pObjRef && GCHeap::GetGCHeap()->IsPromoted(*pPrimaryRef))
+ if (*pObjRef && g_theGcHeap->IsPromoted(*pPrimaryRef))
{
- if (!GCHeap::GetGCHeap()->IsPromoted(*pSecondaryRef))
+ if (!g_theGcHeap->IsPromoted(*pSecondaryRef))
{
LOG((LF_GC|LF_ENC, LL_INFO10000, "\tPromoting secondary " LOG_OBJECT_CLASS(*pSecondaryRef)));
_ASSERTE(lp2);
@@ -221,7 +221,7 @@ void CALLBACK ClearDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pEx
LOG((LF_GC|LF_ENC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("\tPrimary:\t", pPrimaryRef, "to ", *pPrimaryRef)));
LOG((LF_GC|LF_ENC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("\tSecondary\t", pSecondaryRef, "to ", *pSecondaryRef)));
- if (!GCHeap::GetGCHeap()->IsPromoted(*pPrimaryRef))
+ if (!g_theGcHeap->IsPromoted(*pPrimaryRef))
{
LOG((LF_GC|LF_ENC, LL_INFO1000, "\tunreachable ", LOG_OBJECT_CLASS(*pPrimaryRef)));
LOG((LF_GC|LF_ENC, LL_INFO1000, "\tunreachable ", LOG_OBJECT_CLASS(*pSecondaryRef)));
@@ -230,7 +230,7 @@ void CALLBACK ClearDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pEx
}
else
{
- _ASSERTE(GCHeap::GetGCHeap()->IsPromoted(*pSecondaryRef));
+ _ASSERTE(g_theGcHeap->IsPromoted(*pSecondaryRef));
LOG((LF_GC|LF_ENC, LL_INFO10000, "\tPrimary is reachable " LOG_OBJECT_CLASS(*pPrimaryRef)));
LOG((LF_GC|LF_ENC, LL_INFO10000, "\tSecondary is reachable " LOG_OBJECT_CLASS(*pSecondaryRef)));
}
@@ -330,7 +330,7 @@ void CALLBACK CheckPromoted(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo
LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Checking referent of Weak-", pObjRef, "to ", *pObjRef)));
Object **ppRef = (Object **)pObjRef;
- if (!GCHeap::GetGCHeap()->IsPromoted(*ppRef))
+ if (!g_theGcHeap->IsPromoted(*ppRef))
{
LOG((LF_GC, LL_INFO100, LOG_HANDLE_OBJECT_CLASS("Severing Weak-", pObjRef, "to unreachable ", *pObjRef)));
@@ -355,9 +355,9 @@ void CALLBACK CalculateSizedRefSize(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pE
ScanContext* sc = (ScanContext *)lp1;
promote_func* callback = (promote_func*) lp2;
- size_t sizeBegin = GCHeap::GetGCHeap()->GetPromotedBytes(sc->thread_number);
+ size_t sizeBegin = g_theGcHeap->GetPromotedBytes(sc->thread_number);
callback(ppSizedRef, (ScanContext *)lp1, 0);
- size_t sizeEnd = GCHeap::GetGCHeap()->GetPromotedBytes(sc->thread_number);
+ size_t sizeEnd = g_theGcHeap->GetPromotedBytes(sc->thread_number);
*pSize = sizeEnd - sizeBegin;
}
@@ -583,10 +583,10 @@ int getNumberOfSlots()
{
WRAPPER_NO_CONTRACT;
- // when Ref_Initialize called, GCHeap::GetNumberOfHeaps() is still 0, so use #procs as a workaround
+    // when Ref_Initialize is called, IGCHeap::GetNumberOfHeaps() is still 0, so use #procs as a workaround
// it is legal since even if later #heaps < #procs we create handles by thread home heap
// and just have extra unused slots in HandleTableBuckets, which does not take a lot of space
- if (!GCHeap::IsServerHeap())
+ if (!IsServerHeap())
return 1;
#ifdef FEATURE_REDHAWK
@@ -874,7 +874,7 @@ int getSlotNumber(ScanContext* sc)
{
WRAPPER_NO_CONTRACT;
- return (GCHeap::IsServerHeap() ? sc->thread_number : 0);
+ return (IsServerHeap() ? sc->thread_number : 0);
}
// <TODO> - reexpress as complete only like hndtable does now!!! -fmh</REVISIT_TODO>
@@ -1152,7 +1152,7 @@ void Ref_TraceNormalRoots(uint32_t condemned, uint32_t maxgen, ScanContext* sc,
// promote objects pointed to by strong handles
// during ephemeral GCs we also want to promote the ones pointed to by sizedref handles.
uint32_t types[2] = {HNDTYPE_STRONG, HNDTYPE_SIZEDREF};
- uint32_t uTypeCount = (((condemned >= maxgen) && !GCHeap::GetGCHeap()->IsConcurrentGCInProgress()) ? 1 : _countof(types));
+ uint32_t uTypeCount = (((condemned >= maxgen) && !g_theGcHeap->IsConcurrentGCInProgress()) ? 1 : _countof(types));
uint32_t flags = (sc->concurrent) ? HNDGCF_ASYNC : HNDGCF_NORMAL;
HandleTableMap *walk = &g_HandleTableMap;
@@ -1454,7 +1454,7 @@ void ScanSizedRefByAD(uint32_t maxgen, HANDLESCANPROC scanProc, ScanContext* sc,
HandleTableMap *walk = &g_HandleTableMap;
uint32_t type = HNDTYPE_SIZEDREF;
int uCPUindex = getSlotNumber(sc);
- int n_slots = GCHeap::GetGCHeap()->GetNumberOfHeaps();
+ int n_slots = g_theGcHeap->GetNumberOfHeaps();
while (walk)
{
@@ -1574,11 +1574,11 @@ void Ref_UpdatePointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Re
// @TODO cwb: wait for compelling performance measurements.</REVISIT_TODO>
BOOL bDo = TRUE;
- if (GCHeap::IsServerHeap())
+ if (IsServerHeap())
{
bDo = (Interlocked::Increment(&uCount) == 1);
- Interlocked::CompareExchange (&uCount, 0, GCHeap::GetGCHeap()->GetNumberOfHeaps());
- _ASSERTE (uCount <= GCHeap::GetGCHeap()->GetNumberOfHeaps());
+ Interlocked::CompareExchange (&uCount, 0, g_theGcHeap->GetNumberOfHeaps());
+ _ASSERTE (uCount <= g_theGcHeap->GetNumberOfHeaps());
}
if (bDo)
@@ -1906,9 +1906,9 @@ int GetCurrentThreadHomeHeapNumber()
{
WRAPPER_NO_CONTRACT;
- if (!GCHeap::IsGCHeapInitialized())
+ if (g_theGcHeap == nullptr)
return 0;
- return GCHeap::GetGCHeap()->GetHomeHeapNumber();
+ return g_theGcHeap->GetHomeHeapNumber();
}
bool HandleTableBucket::Contains(OBJECTHANDLE handle)
@@ -1921,7 +1921,7 @@ bool HandleTableBucket::Contains(OBJECTHANDLE handle)
}
HHANDLETABLE hTable = HndGetHandleTable(handle);
- for (int uCPUindex=0; uCPUindex < GCHeap::GetGCHeap()->GetNumberOfHeaps(); uCPUindex++)
+ for (int uCPUindex=0; uCPUindex < g_theGcHeap->GetNumberOfHeaps(); uCPUindex++)
{
if (hTable == this->pTable[uCPUindex])
{
diff --git a/src/gc/sample/GCSample.cpp b/src/gc/sample/GCSample.cpp
index 7e07834ced..c242f08cac 100644
--- a/src/gc/sample/GCSample.cpp
+++ b/src/gc/sample/GCSample.cpp
@@ -68,7 +68,7 @@ Object * AllocateObject(MethodTable * pMT)
}
else
{
- pObject = GCHeap::GetGCHeap()->Alloc(acontext, size, 0);
+ pObject = g_theGcHeap->Alloc(acontext, size, 0);
if (pObject == NULL)
return NULL;
}
@@ -137,7 +137,7 @@ int __cdecl main(int argc, char* argv[])
//
// Initialize GC heap
//
- GCHeap *pGCHeap = GCHeap::CreateGCHeap();
+ IGCHeap *pGCHeap = InitializeGarbageCollector(nullptr);
if (!pGCHeap)
return -1;
diff --git a/src/gc/sample/gcenv.ee.cpp b/src/gc/sample/gcenv.ee.cpp
index 330564a380..05359d954c 100644
--- a/src/gc/sample/gcenv.ee.cpp
+++ b/src/gc/sample/gcenv.ee.cpp
@@ -131,7 +131,7 @@ void ThreadStore::AttachCurrentThread()
void GCToEEInterface::SuspendEE(GCToEEInterface::SUSPEND_REASON reason)
{
- GCHeap::GetGCHeap()->SetGCInProgress(TRUE);
+ g_theGcHeap->SetGCInProgress(TRUE);
// TODO: Implement
}
@@ -140,7 +140,7 @@ void GCToEEInterface::RestartEE(bool bFinishedGC)
{
// TODO: Implement
- GCHeap::GetGCHeap()->SetGCInProgress(FALSE);
+ g_theGcHeap->SetGCInProgress(FALSE);
}
void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
@@ -184,7 +184,7 @@ void GCToEEInterface::DisablePreemptiveGC(Thread * pThread)
pThread->DisablePreemptiveGC();
}
-alloc_context * GCToEEInterface::GetAllocContext(Thread * pThread)
+gc_alloc_context * GCToEEInterface::GetAllocContext(Thread * pThread)
{
return pThread->GetAllocContext();
}
diff --git a/src/gc/sample/gcenv.h b/src/gc/sample/gcenv.h
index d560789751..3f43a3ffeb 100644
--- a/src/gc/sample/gcenv.h
+++ b/src/gc/sample/gcenv.h
@@ -64,6 +64,9 @@
#define LOG(x)
+#define SVAL_IMPL_INIT(type, cls, var, init) \
+ type cls::var = init
+
//
// Thread
//
diff --git a/src/inc/dacvars.h b/src/inc/dacvars.h
index fb052b3f5d..c8a12931bf 100644
--- a/src/inc/dacvars.h
+++ b/src/inc/dacvars.h
@@ -125,9 +125,10 @@ DEFINE_DACVAR(ULONG, PTR_Thread, dac__g_pFinalizerThread, ::g_pFinalizerThread)
DEFINE_DACVAR(ULONG, PTR_Thread, dac__g_pSuspensionThread, ::g_pSuspensionThread)
#ifdef FEATURE_SVR_GC
-DEFINE_DACVAR(ULONG, DWORD, GCHeap__gcHeapType, GCHeap::gcHeapType)
+DEFINE_DACVAR(ULONG, DWORD, IGCHeap__gcHeapType, IGCHeap::gcHeapType)
#endif // FEATURE_SVR_GC
+DEFINE_DACVAR(ULONG, DWORD, IGCHeap__maxGeneration, IGCHeap::maxGeneration)
DEFINE_DACVAR(ULONG, PTR_BYTE, WKS__gc_heap__alloc_allocated, WKS::gc_heap::alloc_allocated)
DEFINE_DACVAR(ULONG, UNKNOWN_POINTER_TYPE /*PTR_heap_segment*/, WKS__gc_heap__ephemeral_heap_segment, WKS::gc_heap::ephemeral_heap_segment)
DEFINE_DACVAR(ULONG, UNKNOWN_POINTER_TYPE /*PTR_CFinalize*/, WKS__gc_heap__finalize_queue, WKS::gc_heap::finalize_queue)
@@ -198,7 +199,7 @@ DEFINE_DACVAR(ULONG, PTR_DWORD, dac__g_card_table, ::g_card_table)
DEFINE_DACVAR(ULONG, PTR_BYTE, dac__g_lowest_address, ::g_lowest_address)
DEFINE_DACVAR(ULONG, PTR_BYTE, dac__g_highest_address, ::g_highest_address)
-DEFINE_DACVAR(ULONG, GCHeap, dac__g_pGCHeap, ::g_pGCHeap)
+DEFINE_DACVAR(ULONG, IGCHeap, dac__g_pGCHeap, ::g_pGCHeap)
#ifdef GC_CONFIG_DRIVEN
DEFINE_DACVAR_NO_DUMP(ULONG, SIZE_T, dac__interesting_data_per_heap, WKS::interesting_data_per_heap)
diff --git a/src/inc/stresslog.h b/src/inc/stresslog.h
index 86dee130c4..55fb27a56d 100644
--- a/src/inc/stresslog.h
+++ b/src/inc/stresslog.h
@@ -683,7 +683,7 @@ public:
static const char* gcRootPromoteMsg()
{
STATIC_CONTRACT_LEAF;
- return " GCHeap::Promote: Promote GC Root *%p = %p MT = %pT\n";
+ return " IGCHeap::Promote: Promote GC Root *%p = %p MT = %pT\n";
}
static const char* gcPlugMoveMsg()
diff --git a/src/strongname/api/common.h b/src/strongname/api/common.h
index c83685cc0c..0518c00433 100644
--- a/src/strongname/api/common.h
+++ b/src/strongname/api/common.h
@@ -156,7 +156,7 @@ typedef DPTR(class TypeHandle) PTR_TypeHandle;
typedef VPTR(class VirtualCallStubManager) PTR_VirtualCallStubManager;
typedef VPTR(class VirtualCallStubManagerManager) PTR_VirtualCallStubManagerManager;
#endif
-typedef VPTR(class GCHeap) PTR_GCHeap;
+typedef VPTR(class IGCHeap) PTR_IGCHeap;
//
// _UNCHECKED_OBJECTREF is for code that can't deal with DEBUG OBJECTREFs
diff --git a/src/vm/CMakeLists.txt b/src/vm/CMakeLists.txt
index 6f17a90c1f..e15d08d2d3 100644
--- a/src/vm/CMakeLists.txt
+++ b/src/vm/CMakeLists.txt
@@ -70,6 +70,7 @@ set(VM_SOURCES_DAC_AND_WKS_COMMON
formattype.cpp
fptrstubs.cpp
frames.cpp
+ gcheaputilities.cpp
genericdict.cpp
generics.cpp
hash.cpp
diff --git a/src/vm/amd64/asmconstants.h b/src/vm/amd64/asmconstants.h
index 32b23c83c3..ad90dd17ad 100644
--- a/src/vm/amd64/asmconstants.h
+++ b/src/vm/amd64/asmconstants.h
@@ -164,10 +164,10 @@ ASMCONSTANTS_C_ASSERT(OFFSETOF__Thread__m_ThreadId
== offsetof(Thread, m_ThreadId));
#define OFFSET__Thread__m_alloc_context__alloc_ptr 0x60
-ASMCONSTANTS_C_ASSERT(OFFSET__Thread__m_alloc_context__alloc_ptr == offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_ptr));
+ASMCONSTANTS_C_ASSERT(OFFSET__Thread__m_alloc_context__alloc_ptr == offsetof(Thread, m_alloc_context) + offsetof(gc_alloc_context, alloc_ptr));
#define OFFSET__Thread__m_alloc_context__alloc_limit 0x68
-ASMCONSTANTS_C_ASSERT(OFFSET__Thread__m_alloc_context__alloc_limit == offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_limit));
+ASMCONSTANTS_C_ASSERT(OFFSET__Thread__m_alloc_context__alloc_limit == offsetof(Thread, m_alloc_context) + offsetof(gc_alloc_context, alloc_limit));
#define OFFSETOF__ThreadExceptionState__m_pCurrentTracker 0x000
ASMCONSTANTS_C_ASSERT(OFFSETOF__ThreadExceptionState__m_pCurrentTracker
diff --git a/src/vm/amd64/excepamd64.cpp b/src/vm/amd64/excepamd64.cpp
index 2fc553a987..d4248e7b07 100644
--- a/src/vm/amd64/excepamd64.cpp
+++ b/src/vm/amd64/excepamd64.cpp
@@ -21,7 +21,7 @@
#include "comutilnative.h"
#include "sigformat.h"
#include "siginfo.hpp"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "eedbginterfaceimpl.h" //so we can clearexception in COMPlusThrow
#include "perfcounters.h"
#include "asmconstants.h"
diff --git a/src/vm/amd64/jitinterfaceamd64.cpp b/src/vm/amd64/jitinterfaceamd64.cpp
index 39c2e05c2f..d5dec8e6e8 100644
--- a/src/vm/amd64/jitinterfaceamd64.cpp
+++ b/src/vm/amd64/jitinterfaceamd64.cpp
@@ -390,7 +390,7 @@ bool WriteBarrierManager::NeedDifferentWriteBarrier(bool bReqUpperBoundsCheck, W
}
#endif
- writeBarrierType = GCHeap::IsServerHeap() ? WRITE_BARRIER_SVR64 : WRITE_BARRIER_PREGROW64;
+ writeBarrierType = GCHeapUtilities::IsServerHeap() ? WRITE_BARRIER_SVR64 : WRITE_BARRIER_PREGROW64;
continue;
case WRITE_BARRIER_PREGROW64:
diff --git a/src/vm/appdomain.cpp b/src/vm/appdomain.cpp
index 0ec2c5f2fc..f2d7955a43 100644
--- a/src/vm/appdomain.cpp
+++ b/src/vm/appdomain.cpp
@@ -12,7 +12,7 @@
#include "strongnameinternal.h"
#include "excep.h"
#include "eeconfig.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "eventtrace.h"
#ifdef FEATURE_FUSION
#include "assemblysink.h"
@@ -2652,8 +2652,8 @@ void AppDomain::CreateADUnloadStartEvent()
// If the thread is in cooperative mode, it must have been suspended for the GC so a delete
// can't happen.
- _ASSERTE(GCHeap::IsGCInProgress() &&
- GCHeap::IsServerHeap() &&
+ _ASSERTE(GCHeapUtilities::IsGCInProgress() &&
+ GCHeapUtilities::IsServerHeap() &&
IsGCSpecialThread());
SystemDomain* sysDomain = SystemDomain::System();
@@ -2691,7 +2691,7 @@ void SystemDomain::ResetADSurvivedBytes()
}
CONTRACT_END;
- _ASSERTE(GCHeap::IsGCInProgress());
+ _ASSERTE(GCHeapUtilities::IsGCInProgress());
SystemDomain* sysDomain = SystemDomain::System();
if (sysDomain)
@@ -3824,7 +3824,7 @@ HRESULT SystemDomain::RunDllMain(HINSTANCE hInst, DWORD dwReason, LPVOID lpReser
return S_OK;
// ExitProcess is called while a thread is doing GC.
- if (dwReason == DLL_PROCESS_DETACH && GCHeap::IsGCInProgress())
+ if (dwReason == DLL_PROCESS_DETACH && GCHeapUtilities::IsGCInProgress())
return S_OK;
// ExitProcess is called on a thread that we don't know about
@@ -5107,7 +5107,7 @@ void AppDomain::Init()
// Ref_CreateHandleTableBucket, this is because AD::Init() can race with GC
// and once we add ourselves to the handle table map the GC can start walking
// our handles and calling AD::RecordSurvivedBytes() which touches ARM data.
- if (GCHeap::IsServerHeap())
+ if (GCHeapUtilities::IsServerHeap())
m_dwNumHeaps = CPUGroupInfo::CanEnableGCCPUGroups() ?
CPUGroupInfo::GetNumActiveProcessors() :
GetCurrentProcessCpuCount();
@@ -11110,7 +11110,7 @@ void AppDomain::Unload(BOOL fForceUnload)
}
if(bForceGC)
{
- GCHeap::GetGCHeap()->GarbageCollect();
+ GCHeapUtilities::GetGCHeap()->GarbageCollect();
FinalizerThread::FinalizerThreadWait();
SetStage(STAGE_COLLECTED);
Close(); //NOTHROW!
@@ -11146,7 +11146,7 @@ void AppDomain::Unload(BOOL fForceUnload)
{
// do extra finalizer wait to remove any leftover sb entries
FinalizerThread::FinalizerThreadWait();
- GCHeap::GetGCHeap()->GarbageCollect();
+ GCHeapUtilities::GetGCHeap()->GarbageCollect();
FinalizerThread::FinalizerThreadWait();
LogSpewAlways("Done unload %3.3d\n", unloadCount);
DumpSyncBlockCache();
@@ -11548,7 +11548,7 @@ void AppDomain::ClearGCHandles()
SetStage(STAGE_HANDLETABLE_NOACCESS);
- GCHeap::GetGCHeap()->WaitUntilConcurrentGCComplete();
+ GCHeapUtilities::GetGCHeap()->WaitUntilConcurrentGCComplete();
// Keep async pin handles alive by moving them to default domain
HandleAsyncPinHandles();
@@ -13516,8 +13516,8 @@ void SystemDomain::ProcessDelayedUnloadDomains()
}
CONTRACTL_END;
- int iGCRefPoint=GCHeap::GetGCHeap()->CollectionCount(GCHeap::GetGCHeap()->GetMaxGeneration());
- if (GCHeap::GetGCHeap()->IsConcurrentGCInProgress())
+ int iGCRefPoint=GCHeapUtilities::GetGCHeap()->CollectionCount(GCHeapUtilities::GetGCHeap()->GetMaxGeneration());
+ if (GCHeapUtilities::GetGCHeap()->IsConcurrentGCInProgress())
iGCRefPoint--;
BOOL bAppDomainToCleanup = FALSE;
@@ -13735,8 +13735,8 @@ void AppDomain::EnumStaticGCRefs(promote_func* fn, ScanContext* sc)
}
CONTRACT_END;
- _ASSERTE(GCHeap::IsGCInProgress() &&
- GCHeap::IsServerHeap() &&
+ _ASSERTE(GCHeapUtilities::IsGCInProgress() &&
+ GCHeapUtilities::IsServerHeap() &&
IsGCSpecialThread());
AppDomain::AssemblyIterator asmIterator = IterateAssembliesEx((AssemblyIterationFlags)(kIncludeLoaded | kIncludeExecution));
diff --git a/src/vm/appdomain.hpp b/src/vm/appdomain.hpp
index 97e8438329..d766b3da43 100644
--- a/src/vm/appdomain.hpp
+++ b/src/vm/appdomain.hpp
@@ -30,6 +30,7 @@
#include "fptrstubs.h"
#include "ilstubcache.h"
#include "testhookmgr.h"
+#include "gcheaputilities.h"
#ifdef FEATURE_VERSIONING
#include "../binder/inc/applicationcontext.hpp"
#endif // FEATURE_VERSIONING
@@ -1295,7 +1296,7 @@ public:
{
WRAPPER_NO_CONTRACT;
OBJECTHANDLE h = ::CreateSizedRefHandle(
- m_hHandleTableBucket->pTable[GCHeap::IsServerHeap() ? (m_dwSizedRefHandles % m_iNumberOfProcessors) : GetCurrentThreadHomeHeapNumber()],
+ m_hHandleTableBucket->pTable[GCHeapUtilities::IsServerHeap() ? (m_dwSizedRefHandles % m_iNumberOfProcessors) : GetCurrentThreadHomeHeapNumber()],
object);
InterlockedIncrement((LONG*)&m_dwSizedRefHandles);
return h;
@@ -4533,8 +4534,8 @@ public:
if (m_UnloadIsAsync)
{
pDomain->AddRef();
- int iGCRefPoint=GCHeap::GetGCHeap()->CollectionCount(GCHeap::GetGCHeap()->GetMaxGeneration());
- if (GCHeap::GetGCHeap()->IsGCInProgress())
+ int iGCRefPoint=GCHeapUtilities::GetGCHeap()->CollectionCount(GCHeapUtilities::GetGCHeap()->GetMaxGeneration());
+ if (GCHeapUtilities::IsGCInProgress())
iGCRefPoint++;
pDomain->SetGCRefPoint(iGCRefPoint);
}
@@ -4554,8 +4555,8 @@ public:
pAllocator->m_pLoaderAllocatorDestroyNext=m_pDelayedUnloadListOfLoaderAllocators;
m_pDelayedUnloadListOfLoaderAllocators=pAllocator;
- int iGCRefPoint=GCHeap::GetGCHeap()->CollectionCount(GCHeap::GetGCHeap()->GetMaxGeneration());
- if (GCHeap::GetGCHeap()->IsGCInProgress())
+ int iGCRefPoint=GCHeapUtilities::GetGCHeap()->CollectionCount(GCHeapUtilities::GetGCHeap()->GetMaxGeneration());
+ if (GCHeapUtilities::IsGCInProgress())
iGCRefPoint++;
pAllocator->SetGCRefPoint(iGCRefPoint);
}
diff --git a/src/vm/arm/asmconstants.h b/src/vm/arm/asmconstants.h
index 47ebb2d24d..93af04734e 100644
--- a/src/vm/arm/asmconstants.h
+++ b/src/vm/arm/asmconstants.h
@@ -225,10 +225,10 @@ ASMCONSTANTS_C_ASSERT(UnmanagedToManagedFrame__m_pvDatum == offsetof(UnmanagedTo
#ifndef CROSSGEN_COMPILE
#define Thread__m_alloc_context__alloc_limit 0x44
-ASMCONSTANTS_C_ASSERT(Thread__m_alloc_context__alloc_limit == offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_limit));
+ASMCONSTANTS_C_ASSERT(Thread__m_alloc_context__alloc_limit == offsetof(Thread, m_alloc_context) + offsetof(gc_alloc_context, alloc_limit));
#define Thread__m_alloc_context__alloc_ptr 0x40
-ASMCONSTANTS_C_ASSERT(Thread__m_alloc_context__alloc_ptr == offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_ptr));
+ASMCONSTANTS_C_ASSERT(Thread__m_alloc_context__alloc_ptr == offsetof(Thread, m_alloc_context) + offsetof(gc_alloc_context, alloc_ptr));
#endif // CROSSGEN_COMPILE
#define Thread__m_fPreemptiveGCDisabled 0x08
diff --git a/src/vm/arm/stubs.cpp b/src/vm/arm/stubs.cpp
index 0b069da47e..1309695f73 100644
--- a/src/vm/arm/stubs.cpp
+++ b/src/vm/arm/stubs.cpp
@@ -2650,7 +2650,7 @@ void InitJITHelpers1()
))
{
- _ASSERTE(GCHeap::UseAllocationContexts());
+ _ASSERTE(GCHeapUtilities::UseAllocationContexts());
// If the TLS for Thread is low enough use the super-fast helpers
if (gThreadTLSIndex < TLS_MINIMUM_AVAILABLE)
{
diff --git a/src/vm/ceeload.cpp b/src/vm/ceeload.cpp
index 6dab63eefd..5699a29342 100644
--- a/src/vm/ceeload.cpp
+++ b/src/vm/ceeload.cpp
@@ -3427,8 +3427,8 @@ void Module::EnumRegularStaticGCRefs(AppDomain* pAppDomain, promote_func* fn, Sc
}
CONTRACT_END;
- _ASSERTE(GCHeap::IsGCInProgress() &&
- GCHeap::IsServerHeap() &&
+ _ASSERTE(GCHeapUtilities::IsGCInProgress() &&
+ GCHeapUtilities::IsServerHeap() &&
IsGCSpecialThread());
@@ -6264,7 +6264,7 @@ Module *Module::GetModuleIfLoaded(mdFile kFile, BOOL onlyLoadedInAppDomain, BOOL
#ifndef DACCESS_COMPILE
#if defined(FEATURE_MULTIMODULE_ASSEMBLIES)
// check if actually loaded, unless happens during GC (GC works only with loaded assemblies)
- if (!GCHeap::IsGCInProgress() && onlyLoadedInAppDomain && pModule && !pModule->IsManifest())
+ if (!GCHeapUtilities::IsGCInProgress() && onlyLoadedInAppDomain && pModule && !pModule->IsManifest())
{
DomainModule *pDomainModule = pModule->FindDomainModule(GetAppDomain());
if (pDomainModule == NULL || !pDomainModule->IsLoaded())
diff --git a/src/vm/ceemain.cpp b/src/vm/ceemain.cpp
index 07781261f7..1a19e630e4 100644
--- a/src/vm/ceemain.cpp
+++ b/src/vm/ceemain.cpp
@@ -150,7 +150,7 @@
#include "frames.h"
#include "threads.h"
#include "stackwalk.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "interoputil.h"
#include "security.h"
#include "fieldmarshaler.h"
@@ -640,7 +640,7 @@ void InitializeStartupFlags()
g_fEnableARM = TRUE;
#endif // !FEATURE_CORECLR
- GCHeap::InitializeHeapType((flags & STARTUP_SERVER_GC) != 0);
+ InitializeHeapType((flags & STARTUP_SERVER_GC) != 0);
#ifdef FEATURE_LOADER_OPTIMIZATION
g_dwGlobalSharePolicy = (flags&STARTUP_LOADER_OPTIMIZATION_MASK)>>1;
@@ -3719,7 +3719,8 @@ void InitializeGarbageCollector()
g_pFreeObjectMethodTable->SetBaseSize(ObjSizeOf (ArrayBase));
g_pFreeObjectMethodTable->SetComponentSize(1);
- GCHeap *pGCHeap = GCHeap::CreateGCHeap();
+ IGCHeap *pGCHeap = InitializeGarbageCollector(nullptr);
+ g_pGCHeap = pGCHeap;
if (!pGCHeap)
ThrowOutOfMemory();
@@ -3833,7 +3834,7 @@ BOOL STDMETHODCALLTYPE EEDllMain( // TRUE on success, FALSE on error.
{
// GetThread() may be set to NULL for Win9x during shutdown.
Thread *pThread = GetThread();
- if (GCHeap::IsGCInProgress() &&
+ if (GCHeapUtilities::IsGCInProgress() &&
( (pThread && (pThread != ThreadSuspend::GetSuspensionThread() ))
|| !g_fSuspendOnShutdown))
{
diff --git a/src/vm/classcompat.cpp b/src/vm/classcompat.cpp
index ac819941f9..50c56506a9 100644
--- a/src/vm/classcompat.cpp
+++ b/src/vm/classcompat.cpp
@@ -31,7 +31,7 @@
#include "log.h"
#include "fieldmarshaler.h"
#include "cgensys.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "security.h"
#include "dbginterface.h"
#include "comdelegate.h"
diff --git a/src/vm/codeman.cpp b/src/vm/codeman.cpp
index 89084dbe85..88bfd58e93 100644
--- a/src/vm/codeman.cpp
+++ b/src/vm/codeman.cpp
@@ -3397,7 +3397,7 @@ void ExecutionManager::CleanupCodeHeaps()
}
CONTRACTL_END;
- _ASSERTE (g_fProcessDetach || (GCHeap::IsGCInProgress() && ::IsGCThread()));
+ _ASSERTE (g_fProcessDetach || (GCHeapUtilities::IsGCInProgress() && ::IsGCThread()));
GetEEJitManager()->CleanupCodeHeaps();
}
@@ -3411,7 +3411,7 @@ void EEJitManager::CleanupCodeHeaps()
}
CONTRACTL_END;
- _ASSERTE (g_fProcessDetach || (GCHeap::IsGCInProgress() && ::IsGCThread()));
+ _ASSERTE (g_fProcessDetach || (GCHeapUtilities::IsGCInProgress() && ::IsGCThread()));
CrstHolder ch(&m_CodeHeapCritSec);
@@ -4451,7 +4451,7 @@ RangeSection* ExecutionManager::GetRangeSection(TADDR addr)
// Unless we are on an MP system with many cpus
// where this sort of caching actually diminishes scaling during server GC
// due to many processors writing to a common location
- if (g_SystemInfo.dwNumberOfProcessors < 4 || !GCHeap::IsServerHeap() || !GCHeap::IsGCInProgress())
+ if (g_SystemInfo.dwNumberOfProcessors < 4 || !GCHeapUtilities::IsServerHeap() || !GCHeapUtilities::IsGCInProgress())
pHead->pLastUsed = pLast;
#endif
diff --git a/src/vm/commemoryfailpoint.cpp b/src/vm/commemoryfailpoint.cpp
index 276a9f305a..4d1ed6ef64 100644
--- a/src/vm/commemoryfailpoint.cpp
+++ b/src/vm/commemoryfailpoint.cpp
@@ -26,7 +26,7 @@ FCIMPL2(void, COMMemoryFailPoint::GetMemorySettings, UINT64* pMaxGCSegmentSize,
{
FCALL_CONTRACT;
- GCHeap * pGC = GCHeap::GetGCHeap();
+ IGCHeap * pGC = GCHeapUtilities::GetGCHeap();
size_t segment_size = pGC->GetValidSegmentSize(FALSE);
size_t large_segment_size = pGC->GetValidSegmentSize(TRUE);
_ASSERTE(segment_size < SIZE_T_MAX && large_segment_size < SIZE_T_MAX);
diff --git a/src/vm/common.h b/src/vm/common.h
index 123350334b..9de9f35141 100644
--- a/src/vm/common.h
+++ b/src/vm/common.h
@@ -177,7 +177,7 @@ typedef DPTR(class StringBufferObject) PTR_StringBufferObject;
typedef DPTR(class TypeHandle) PTR_TypeHandle;
typedef VPTR(class VirtualCallStubManager) PTR_VirtualCallStubManager;
typedef VPTR(class VirtualCallStubManagerManager) PTR_VirtualCallStubManagerManager;
-typedef VPTR(class GCHeap) PTR_GCHeap;
+typedef VPTR(class IGCHeap) PTR_IGCHeap;
//
// _UNCHECKED_OBJECTREF is for code that can't deal with DEBUG OBJECTREFs
diff --git a/src/vm/comutilnative.cpp b/src/vm/comutilnative.cpp
index b55c63549d..0f27542e1d 100644
--- a/src/vm/comutilnative.cpp
+++ b/src/vm/comutilnative.cpp
@@ -27,7 +27,7 @@
#include "frames.h"
#include "field.h"
#include "winwrap.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "fcall.h"
#include "invokeutil.h"
#include "eeconfig.h"
@@ -1638,7 +1638,7 @@ FCIMPL0(int, GCInterface::GetGcLatencyMode)
FC_GC_POLL_NOT_NEEDED();
- int result = (INT32)GCHeap::GetGCHeap()->GetGcLatencyMode();
+ int result = (INT32)GCHeapUtilities::GetGCHeap()->GetGcLatencyMode();
return result;
}
FCIMPLEND
@@ -1649,7 +1649,7 @@ FCIMPL1(int, GCInterface::SetGcLatencyMode, int newLatencyMode)
FC_GC_POLL_NOT_NEEDED();
- return GCHeap::GetGCHeap()->SetGcLatencyMode(newLatencyMode);
+ return GCHeapUtilities::GetGCHeap()->SetGcLatencyMode(newLatencyMode);
}
FCIMPLEND
@@ -1659,7 +1659,7 @@ FCIMPL0(int, GCInterface::GetLOHCompactionMode)
FC_GC_POLL_NOT_NEEDED();
- int result = (INT32)GCHeap::GetGCHeap()->GetLOHCompactionMode();
+ int result = (INT32)GCHeapUtilities::GetGCHeap()->GetLOHCompactionMode();
return result;
}
FCIMPLEND
@@ -1670,7 +1670,7 @@ FCIMPL1(void, GCInterface::SetLOHCompactionMode, int newLOHCompactionyMode)
FC_GC_POLL_NOT_NEEDED();
- GCHeap::GetGCHeap()->SetLOHCompactionMode(newLOHCompactionyMode);
+ GCHeapUtilities::GetGCHeap()->SetLOHCompactionMode(newLOHCompactionyMode);
}
FCIMPLEND
@@ -1681,7 +1681,7 @@ FCIMPL2(FC_BOOL_RET, GCInterface::RegisterForFullGCNotification, UINT32 gen2Perc
FC_GC_POLL_NOT_NEEDED();
- FC_RETURN_BOOL(GCHeap::GetGCHeap()->RegisterForFullGCNotification(gen2Percentage, lohPercentage));
+ FC_RETURN_BOOL(GCHeapUtilities::GetGCHeap()->RegisterForFullGCNotification(gen2Percentage, lohPercentage));
}
FCIMPLEND
@@ -1690,7 +1690,7 @@ FCIMPL0(FC_BOOL_RET, GCInterface::CancelFullGCNotification)
FCALL_CONTRACT;
FC_GC_POLL_NOT_NEEDED();
- FC_RETURN_BOOL(GCHeap::GetGCHeap()->CancelFullGCNotification());
+ FC_RETURN_BOOL(GCHeapUtilities::GetGCHeap()->CancelFullGCNotification());
}
FCIMPLEND
@@ -1711,7 +1711,7 @@ FCIMPL1(int, GCInterface::WaitForFullGCApproach, int millisecondsTimeout)
HELPER_METHOD_FRAME_BEGIN_RET_0();
DWORD dwMilliseconds = ((millisecondsTimeout == -1) ? INFINITE : millisecondsTimeout);
- result = GCHeap::GetGCHeap()->WaitForFullGCApproach(dwMilliseconds);
+ result = GCHeapUtilities::GetGCHeap()->WaitForFullGCApproach(dwMilliseconds);
HELPER_METHOD_FRAME_END();
@@ -1736,7 +1736,7 @@ FCIMPL1(int, GCInterface::WaitForFullGCComplete, int millisecondsTimeout)
HELPER_METHOD_FRAME_BEGIN_RET_0();
DWORD dwMilliseconds = ((millisecondsTimeout == -1) ? INFINITE : millisecondsTimeout);
- result = GCHeap::GetGCHeap()->WaitForFullGCComplete(dwMilliseconds);
+ result = GCHeapUtilities::GetGCHeap()->WaitForFullGCComplete(dwMilliseconds);
HELPER_METHOD_FRAME_END();
@@ -1757,7 +1757,7 @@ FCIMPL1(int, GCInterface::GetGeneration, Object* objUNSAFE)
if (objUNSAFE == NULL)
FCThrowArgumentNull(W("obj"));
- int result = (INT32)GCHeap::GetGCHeap()->WhichGeneration(objUNSAFE);
+ int result = (INT32)GCHeapUtilities::GetGCHeap()->WhichGeneration(objUNSAFE);
FC_GC_POLL_RET();
return result;
}
@@ -1777,7 +1777,7 @@ FCIMPL2(int, GCInterface::CollectionCount, INT32 generation, INT32 getSpecialGCC
_ASSERTE(generation >= 0);
//We don't need to check the top end because the GC will take care of that.
- int result = (INT32)GCHeap::GetGCHeap()->CollectionCount(generation, getSpecialGCCount);
+ int result = (INT32)GCHeapUtilities::GetGCHeap()->CollectionCount(generation, getSpecialGCCount);
FC_GC_POLL_RET();
return result;
}
@@ -1793,7 +1793,7 @@ int QCALLTYPE GCInterface::StartNoGCRegion(INT64 totalSize, BOOL lohSizeKnown, I
GCX_COOP();
- retVal = GCHeap::GetGCHeap()->StartNoGCRegion((ULONGLONG)totalSize,
+ retVal = GCHeapUtilities::GetGCHeap()->StartNoGCRegion((ULONGLONG)totalSize,
lohSizeKnown,
(ULONGLONG)lohSize,
disallowFullBlockingGC);
@@ -1811,7 +1811,7 @@ int QCALLTYPE GCInterface::EndNoGCRegion()
BEGIN_QCALL;
- retVal = GCHeap::GetGCHeap()->EndNoGCRegion();
+ retVal = GCHeapUtilities::GetGCHeap()->EndNoGCRegion();
END_QCALL;
@@ -1837,7 +1837,7 @@ FCIMPL1(int, GCInterface::GetGenerationWR, LPVOID handle)
if (temp == NULL)
COMPlusThrowArgumentNull(W("weak handle"));
- iRetVal = (INT32)GCHeap::GetGCHeap()->WhichGeneration(OBJECTREFToObject(temp));
+ iRetVal = (INT32)GCHeapUtilities::GetGCHeap()->WhichGeneration(OBJECTREFToObject(temp));
HELPER_METHOD_FRAME_END();
@@ -1860,7 +1860,7 @@ INT64 QCALLTYPE GCInterface::GetTotalMemory()
BEGIN_QCALL;
GCX_COOP();
- iRetVal = (INT64) GCHeap::GetGCHeap()->GetTotalBytesInUse();
+ iRetVal = (INT64) GCHeapUtilities::GetGCHeap()->GetTotalBytesInUse();
END_QCALL;
@@ -1885,7 +1885,7 @@ void QCALLTYPE GCInterface::Collect(INT32 generation, INT32 mode)
//We don't need to check the top end because the GC will take care of that.
GCX_COOP();
- GCHeap::GetGCHeap()->GarbageCollect(generation, FALSE, mode);
+ GCHeapUtilities::GetGCHeap()->GarbageCollect(generation, FALSE, mode);
END_QCALL;
}
@@ -1918,7 +1918,7 @@ FCIMPL0(int, GCInterface::GetMaxGeneration)
{
FCALL_CONTRACT;
- return(INT32)GCHeap::GetGCHeap()->GetMaxGeneration();
+ return(INT32)GCHeapUtilities::GetGCHeap()->GetMaxGeneration();
}
FCIMPLEND
@@ -1934,7 +1934,7 @@ FCIMPL0(INT64, GCInterface::GetAllocatedBytesForCurrentThread)
INT64 currentAllocated = 0;
Thread *pThread = GetThread();
- alloc_context* ac = pThread->GetAllocContext();
+ gc_alloc_context* ac = pThread->GetAllocContext();
currentAllocated = ac->alloc_bytes + ac->alloc_bytes_loh - (ac->alloc_limit - ac->alloc_ptr);
return currentAllocated;
@@ -1956,7 +1956,7 @@ FCIMPL1(void, GCInterface::SuppressFinalize, Object *obj)
if (!obj->GetMethodTable ()->HasFinalizer())
return;
- GCHeap::GetGCHeap()->SetFinalizationRun(obj);
+ GCHeapUtilities::GetGCHeap()->SetFinalizationRun(obj);
FC_GC_POLL();
}
FCIMPLEND
@@ -1977,7 +1977,7 @@ FCIMPL1(void, GCInterface::ReRegisterForFinalize, Object *obj)
if (obj->GetMethodTable()->HasFinalizer())
{
HELPER_METHOD_FRAME_BEGIN_1(obj);
- GCHeap::GetGCHeap()->RegisterForFinalization(-1, obj);
+ GCHeapUtilities::GetGCHeap()->RegisterForFinalization(-1, obj);
HELPER_METHOD_FRAME_END();
}
}
@@ -2079,7 +2079,7 @@ void GCInterface::AddMemoryPressure(UINT64 bytesAllocated)
m_ulThreshold = (addMethod > multMethod) ? addMethod : multMethod;
for (int i = 0; i <= 1; i++)
{
- if ((GCHeap::GetGCHeap()->CollectionCount(i) / RELATIVE_GC_RATIO) > GCHeap::GetGCHeap()->CollectionCount(i + 1))
+ if ((GCHeapUtilities::GetGCHeap()->CollectionCount(i) / RELATIVE_GC_RATIO) > GCHeapUtilities::GetGCHeap()->CollectionCount(i + 1))
{
gen_collect = i + 1;
break;
@@ -2089,14 +2089,14 @@ void GCInterface::AddMemoryPressure(UINT64 bytesAllocated)
PREFIX_ASSUME(gen_collect <= 2);
- if ((gen_collect == 0) || (m_gc_counts[gen_collect] == GCHeap::GetGCHeap()->CollectionCount(gen_collect)))
+ if ((gen_collect == 0) || (m_gc_counts[gen_collect] == GCHeapUtilities::GetGCHeap()->CollectionCount(gen_collect)))
{
GarbageCollectModeAny(gen_collect);
}
for (int i = 0; i < 3; i++)
{
- m_gc_counts [i] = GCHeap::GetGCHeap()->CollectionCount(i);
+ m_gc_counts [i] = GCHeapUtilities::GetGCHeap()->CollectionCount(i);
}
}
}
@@ -2115,7 +2115,7 @@ void GCInterface::CheckCollectionCount()
{
LIMITED_METHOD_CONTRACT;
- GCHeap * pHeap = GCHeap::GetGCHeap();
+ IGCHeap * pHeap = GCHeapUtilities::GetGCHeap();
if (m_gc_counts[2] != pHeap->CollectionCount(2))
{
@@ -2200,7 +2200,7 @@ void GCInterface::NewAddMemoryPressure(UINT64 bytesAllocated)
// If still over budget, check current managed heap size
if (newMemValue >= budget)
{
- GCHeap *pGCHeap = GCHeap::GetGCHeap();
+ IGCHeap *pGCHeap = GCHeapUtilities::GetGCHeap();
UINT64 heapOver3 = pGCHeap->GetCurrentObjSize() / 3;
if (budget < heapOver3) // Max
@@ -2274,7 +2274,7 @@ void GCInterface::RemoveMemoryPressure(UINT64 bytesAllocated)
for (int i = 0; i < 3; i++)
{
- m_gc_counts [i] = GCHeap::GetGCHeap()->CollectionCount(i);
+ m_gc_counts [i] = GCHeapUtilities::GetGCHeap()->CollectionCount(i);
}
}
}
@@ -2348,7 +2348,7 @@ NOINLINE void GCInterface::GarbageCollectModeAny(int generation)
CONTRACTL_END;
GCX_COOP();
- GCHeap::GetGCHeap()->GarbageCollect(generation, FALSE, collection_non_blocking);
+ GCHeapUtilities::GetGCHeap()->GarbageCollect(generation, FALSE, collection_non_blocking);
}
//
diff --git a/src/vm/corhost.cpp b/src/vm/corhost.cpp
index c229a0ee07..6091bad9e2 100644
--- a/src/vm/corhost.cpp
+++ b/src/vm/corhost.cpp
@@ -5170,7 +5170,7 @@ public:
HRESULT hr = S_OK;
- if (Generation > (int) GCHeap::GetGCHeap()->GetMaxGeneration())
+ if (Generation > (int) GCHeapUtilities::GetGCHeap()->GetMaxGeneration())
hr = E_INVALIDARG;
if (SUCCEEDED(hr))
@@ -5188,7 +5188,7 @@ public:
EX_TRY
{
STRESS_LOG0(LF_GC, LL_INFO100, "Host triggers GC\n");
- hr = GCHeap::GetGCHeap()->GarbageCollect(Generation);
+ hr = GCHeapUtilities::GetGCHeap()->GarbageCollect(Generation);
}
EX_CATCH
{
@@ -5354,7 +5354,7 @@ HRESULT CCLRGCManager::_SetGCSegmentSize(SIZE_T SegmentSize)
HRESULT hr = S_OK;
// Sanity check the value, it must be a power of two and big enough.
- if (!GCHeap::IsValidSegmentSize(SegmentSize))
+ if (!GCHeapUtilities::GetGCHeap()->IsValidSegmentSize(SegmentSize))
{
hr = E_INVALIDARG;
}
@@ -5380,7 +5380,7 @@ HRESULT CCLRGCManager::_SetGCMaxGen0Size(SIZE_T MaxGen0Size)
HRESULT hr = S_OK;
// Sanity check the value is at least large enough.
- if (!GCHeap::IsValidGen0MaxSize(MaxGen0Size))
+ if (!GCHeapUtilities::GetGCHeap()->IsValidGen0MaxSize(MaxGen0Size))
{
hr = E_INVALIDARG;
}
@@ -6408,7 +6408,7 @@ HRESULT CCLRDebugManager::SetConnectionTasks(
}
// Check for Finalizer thread
- if (GCHeap::IsGCHeapInitialized() && (pThread == FinalizerThread::GetFinalizerThread()))
+ if (GCHeapUtilities::IsGCHeapInitialized() && (pThread == FinalizerThread::GetFinalizerThread()))
{
// _ASSERTE(!"Host should not try to schedule user code on our Finalizer Thread");
IfFailGo(E_INVALIDARG);
diff --git a/src/vm/crossgencompile.cpp b/src/vm/crossgencompile.cpp
index 85859c2d82..ffb025adb0 100644
--- a/src/vm/crossgencompile.cpp
+++ b/src/vm/crossgencompile.cpp
@@ -130,7 +130,7 @@ BOOL __SwitchToThread(DWORD, DWORD)
// Globals and misc other
//
-GPTR_IMPL(GCHeap,g_pGCHeap);
+GPTR_IMPL(IGCHeap,g_pGCHeap);
BOOL g_fEEOtherStartup=FALSE;
BOOL g_fEEComActivatedStartup=FALSE;
@@ -138,7 +138,7 @@ BOOL g_fEEComActivatedStartup=FALSE;
GVAL_IMPL_INIT(DWORD, g_fHostConfig, 0);
#ifdef FEATURE_SVR_GC
-SVAL_IMPL_INIT(uint32_t,GCHeap,gcHeapType,GCHeap::GC_HEAP_WKS);
+SVAL_IMPL_INIT(uint32_t,IGCHeap,gcHeapType,IGCHeap::GC_HEAP_WKS);
#endif
void UpdateGCSettingFromHost()
diff --git a/src/vm/crst.cpp b/src/vm/crst.cpp
index a72ec9d3c0..7bf9bd65da 100644
--- a/src/vm/crst.cpp
+++ b/src/vm/crst.cpp
@@ -627,7 +627,7 @@ void CrstBase::PreEnter()
|| (pThread != NULL && pThread->PreemptiveGCDisabled())
// If GC heap has not been initialized yet, there is no need to synchronize with GC.
// This check is mainly for code called from EEStartup.
- || (pThread == NULL && !GCHeap::IsGCHeapInitialized()) );
+ || (pThread == NULL && !GCHeapUtilities::IsGCHeapInitialized()) );
}
if ((pThread != NULL) &&
@@ -910,7 +910,7 @@ BOOL CrstBase::IsSafeToTake()
_ASSERTE(pThread == NULL ||
(pThread->PreemptiveGCDisabled() == ((m_dwFlags & CRST_UNSAFE_COOPGC) != 0)) ||
((m_dwFlags & (CRST_UNSAFE_ANYMODE | CRST_GC_NOTRIGGER_WHEN_TAKEN)) != 0) ||
- (GCHeap::IsGCInProgress() && pThread == ThreadSuspend::GetSuspensionThread()));
+ (GCHeapUtilities::IsGCInProgress() && pThread == ThreadSuspend::GetSuspensionThread()));
END_GETTHREAD_ALLOWED;
if (m_holderthreadid.IsCurrentThread())
diff --git a/src/vm/debugdebugger.cpp b/src/vm/debugdebugger.cpp
index 9ea5427dfe..64870fa419 100644
--- a/src/vm/debugdebugger.cpp
+++ b/src/vm/debugdebugger.cpp
@@ -22,7 +22,7 @@
#include "frames.h"
#include "vars.hpp"
#include "field.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "jitinterface.h"
#include "debugdebugger.h"
#include "dbginterface.h"
diff --git a/src/vm/domainfile.cpp b/src/vm/domainfile.cpp
index 2353712c9e..bfb69cdd48 100644
--- a/src/vm/domainfile.cpp
+++ b/src/vm/domainfile.cpp
@@ -4140,8 +4140,8 @@ void DomainAssembly::EnumStaticGCRefs(promote_func* fn, ScanContext* sc)
}
CONTRACT_END;
- _ASSERTE(GCHeap::IsGCInProgress() &&
- GCHeap::IsServerHeap() &&
+ _ASSERTE(GCHeapUtilities::IsGCInProgress() &&
+ GCHeapUtilities::IsServerHeap() &&
IsGCSpecialThread());
DomainModuleIterator i = IterateModules(kModIterIncludeLoaded);
diff --git a/src/vm/dwreport.cpp b/src/vm/dwreport.cpp
index 77669b2f14..5ae4f84de2 100644
--- a/src/vm/dwreport.cpp
+++ b/src/vm/dwreport.cpp
@@ -3212,7 +3212,7 @@ FaultReportResult DoFaultReport( // Was Watson attempted, successful?
// thread under Coop mode, this will let the new generated DoFaultReportCallBack
// thread trigger a deadlock. So in this case, we should directly abort the fault
// report to avoid the deadlock.
- ((IsGCThread() || pThread->PreemptiveGCDisabled()) && GCHeap::IsGCInProgress()) ||
+ ((IsGCThread() || pThread->PreemptiveGCDisabled()) && GCHeapUtilities::IsGCInProgress()) ||
FAILED(g_pDebugInterface->RequestFavor(DoFaultReportFavorWorker, pData)))
{
// If we can't initialize the debugger helper thread or we are running on the debugger helper
diff --git a/src/vm/eetoprofinterfaceimpl.cpp b/src/vm/eetoprofinterfaceimpl.cpp
index 2ec3812159..fdc725b1d8 100644
--- a/src/vm/eetoprofinterfaceimpl.cpp
+++ b/src/vm/eetoprofinterfaceimpl.cpp
@@ -2294,7 +2294,7 @@ HRESULT EEToProfInterfaceImpl::SetEventMask(DWORD dwEventMask, DWORD dwEventMask
// in this function
if (g_profControlBlock.curProfStatus.Get() == kProfStatusInitializingForAttachLoad)
{
- if (GCHeap::GetGCHeap()->IsConcurrentGCEnabled())
+ if (GCHeapUtilities::GetGCHeap()->IsConcurrentGCEnabled())
{
// We only allow turning off concurrent GC in the profiler attach thread inside
// InitializeForAttach, otherwise we would be vulnerable to weird races such as
@@ -2316,7 +2316,7 @@ HRESULT EEToProfInterfaceImpl::SetEventMask(DWORD dwEventMask, DWORD dwEventMask
// Fail if concurrent GC is enabled
// This should only happen for attach profilers if user didn't turn on COR_PRF_MONITOR_GC
// at attach time
- if (GCHeap::GetGCHeap()->IsConcurrentGCEnabled())
+ if (GCHeapUtilities::GetGCHeap()->IsConcurrentGCEnabled())
{
return CORPROF_E_CONCURRENT_GC_NOT_PROFILABLE;
}
@@ -2384,7 +2384,7 @@ HRESULT EEToProfInterfaceImpl::SetEventMask(DWORD dwEventMask, DWORD dwEventMask
if (fNeedToTurnOffConcurrentGC)
{
// Turn off concurrent GC if it is on so that user can walk the heap safely in GC callbacks
- GCHeap * pGCHeap = GCHeap::GetGCHeap();
+ IGCHeap * pGCHeap = GCHeapUtilities::GetGCHeap();
LOG((LF_CORPROF, LL_INFO10, "**PROF: Turning off concurrent GC at attach.\n"));
@@ -5609,7 +5609,7 @@ HRESULT EEToProfInterfaceImpl::MovedReferences(GCReferencesData *pData)
LL_INFO10000,
"**PROF: MovedReferences.\n"));
- _ASSERTE(!GCHeap::GetGCHeap()->IsConcurrentGCEnabled());
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsConcurrentGCEnabled());
if (pData->curIdx == 0)
{
@@ -5805,7 +5805,7 @@ HRESULT EEToProfInterfaceImpl::ObjectReference(ObjectID objId,
LL_INFO100000,
"**PROF: ObjectReferences.\n"));
- _ASSERTE(!GCHeap::GetGCHeap()->IsConcurrentGCEnabled());
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsConcurrentGCEnabled());
{
// All callbacks are really NOTHROW, but that's enforced partially by the profiler,
@@ -5844,7 +5844,7 @@ HRESULT EEToProfInterfaceImpl::FinalizeableObjectQueued(BOOL isCritical, ObjectI
LL_INFO100,
"**PROF: Notifying profiler of finalizeable object.\n"));
- _ASSERTE(!GCHeap::GetGCHeap()->IsConcurrentGCEnabled());
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsConcurrentGCEnabled());
{
// All callbacks are really NOTHROW, but that's enforced partially by the profiler,
@@ -5883,7 +5883,7 @@ HRESULT EEToProfInterfaceImpl::RootReferences2(GCReferencesData *pData)
LL_INFO10000,
"**PROF: RootReferences2.\n"));
- _ASSERTE(!GCHeap::GetGCHeap()->IsConcurrentGCEnabled());
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsConcurrentGCEnabled());
HRESULT hr = S_OK;
@@ -5948,7 +5948,7 @@ HRESULT EEToProfInterfaceImpl::ConditionalWeakTableElementReferences(GCReference
LL_INFO10000,
"**PROF: ConditionalWeakTableElementReferences.\n"));
- _ASSERTE(!GCHeap::GetGCHeap()->IsConcurrentGCEnabled());
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsConcurrentGCEnabled());
HRESULT hr = S_OK;
@@ -6082,7 +6082,7 @@ HRESULT EEToProfInterfaceImpl::GarbageCollectionStarted(int cGenerations, BOOL g
LL_INFO10000,
"**PROF: GarbageCollectionStarted.\n"));
- _ASSERTE(!GCHeap::GetGCHeap()->IsConcurrentGCEnabled());
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsConcurrentGCEnabled());
{
// All callbacks are really NOTHROW, but that's enforced partially by the profiler,
@@ -6120,7 +6120,7 @@ HRESULT EEToProfInterfaceImpl::GarbageCollectionFinished()
LL_INFO10000,
"**PROF: GarbageCollectionFinished.\n"));
- _ASSERTE(!GCHeap::GetGCHeap()->IsConcurrentGCEnabled());
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsConcurrentGCEnabled());
{
// All callbacks are really NOTHROW, but that's enforced partially by the profiler,
diff --git a/src/vm/eventtrace.cpp b/src/vm/eventtrace.cpp
index d8702a53e1..b126fb07a4 100644
--- a/src/vm/eventtrace.cpp
+++ b/src/vm/eventtrace.cpp
@@ -431,19 +431,19 @@ ETW::SamplingLog::EtwStackWalkStatus ETW::SamplingLog::SaveCurrentStack(int skip
VOID ETW::GCLog::GCSettingsEvent()
{
- if (GCHeap::IsGCHeapInitialized())
+ if (GCHeapUtilities::IsGCHeapInitialized())
{
if (ETW_TRACING_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context,
GCSettings))
{
ETW::GCLog::ETW_GC_INFO Info;
- Info.GCSettings.ServerGC = GCHeap::IsServerHeap ();
- Info.GCSettings.SegmentSize = GCHeap::GetGCHeap()->GetValidSegmentSize (FALSE);
- Info.GCSettings.LargeObjectSegmentSize = GCHeap::GetGCHeap()->GetValidSegmentSize (TRUE);
+ Info.GCSettings.ServerGC = GCHeapUtilities::IsServerHeap ();
+ Info.GCSettings.SegmentSize = GCHeapUtilities::GetGCHeap()->GetValidSegmentSize (FALSE);
+ Info.GCSettings.LargeObjectSegmentSize = GCHeapUtilities::GetGCHeap()->GetValidSegmentSize (TRUE);
FireEtwGCSettings_V1(Info.GCSettings.SegmentSize, Info.GCSettings.LargeObjectSegmentSize, Info.GCSettings.ServerGC, GetClrInstanceId());
}
- GCHeap::GetGCHeap()->TraceGCSegments();
+ GCHeapUtilities::GetGCHeap()->TraceGCSegments();
}
};
@@ -892,7 +892,7 @@ VOID ETW::GCLog::FireGcStartAndGenerationRanges(ETW_GC_INFO * pGcInfo)
// GCStart, then retrieve it
LONGLONG l64ClientSequenceNumberToLog = 0;
if ((s_l64LastClientSequenceNumber != 0) &&
- (pGcInfo->GCStart.Depth == GCHeap::GetMaxGeneration()) &&
+ (pGcInfo->GCStart.Depth == GCHeapUtilities::GetGCHeap()->GetMaxGeneration()) &&
(pGcInfo->GCStart.Reason == ETW_GC_INFO::GC_INDUCED))
{
l64ClientSequenceNumberToLog = InterlockedExchange64(&s_l64LastClientSequenceNumber, 0);
@@ -901,7 +901,7 @@ VOID ETW::GCLog::FireGcStartAndGenerationRanges(ETW_GC_INFO * pGcInfo)
FireEtwGCStart_V2(pGcInfo->GCStart.Count, pGcInfo->GCStart.Depth, pGcInfo->GCStart.Reason, pGcInfo->GCStart.Type, GetClrInstanceId(), l64ClientSequenceNumberToLog);
// Fire an event per range per generation
- GCHeap *hp = GCHeap::GetGCHeap();
+ IGCHeap *hp = GCHeapUtilities::GetGCHeap();
hp->DescrGenerationsToProfiler(FireSingleGenerationRangeEvent, NULL /* context */);
}
}
@@ -928,7 +928,7 @@ VOID ETW::GCLog::FireGcEndAndGenerationRanges(ULONG Count, ULONG Depth)
CLR_GC_KEYWORD))
{
// Fire an event per range per generation
- GCHeap *hp = GCHeap::GetGCHeap();
+ IGCHeap *hp = GCHeapUtilities::GetGCHeap();
hp->DescrGenerationsToProfiler(FireSingleGenerationRangeEvent, NULL /* context */);
// GCEnd
@@ -938,7 +938,7 @@ VOID ETW::GCLog::FireGcEndAndGenerationRanges(ULONG Count, ULONG Depth)
//---------------------------------------------------------------------------------------
//
-// Callback made by GC when we call GCHeap::DescrGenerationsToProfiler(). This is
+// Callback made by GC when we call GCHeapUtilities::GetGCHeap()->DescrGenerationsToProfiler(). This is
// called once per range per generation, and results in a single ETW event per range per
// generation.
//
@@ -1033,7 +1033,7 @@ HRESULT ETW::GCLog::ForceGCForDiagnostics()
ForcedGCHolder forcedGCHolder;
- hr = GCHeap::GetGCHeap()->GarbageCollect(
+ hr = GCHeapUtilities::GetGCHeap()->GarbageCollect(
-1, // all generations should be collected
FALSE, // low_memory_p
collection_blocking);
diff --git a/src/vm/excep.cpp b/src/vm/excep.cpp
index 672f315fcd..d26f2d62d8 100644
--- a/src/vm/excep.cpp
+++ b/src/vm/excep.cpp
@@ -20,7 +20,7 @@
#include "cgensys.h"
#include "comutilnative.h"
#include "siginfo.hpp"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "eedbginterfaceimpl.h" //so we can clearexception in RealCOMPlusThrow
#include "perfcounters.h"
#include "dllimportcallback.h"
diff --git a/src/vm/finalizerthread.cpp b/src/vm/finalizerthread.cpp
index 5d51d33cfb..2f72b07957 100644
--- a/src/vm/finalizerthread.cpp
+++ b/src/vm/finalizerthread.cpp
@@ -295,7 +295,7 @@ Object * FinalizerThread::FinalizeAllObjects(Object* fobj, int bitToCheck)
{
return NULL;
}
- fobj = GCHeap::GetGCHeap()->GetNextFinalizable();
+ fobj = GCHeapUtilities::GetGCHeap()->GetNextFinalizable();
}
Thread *pThread = GetThread();
@@ -320,7 +320,7 @@ Object * FinalizerThread::FinalizeAllObjects(Object* fobj, int bitToCheck)
{
return NULL;
}
- fobj = GCHeap::GetGCHeap()->GetNextFinalizable();
+ fobj = GCHeapUtilities::GetGCHeap()->GetNextFinalizable();
}
else
{
@@ -337,7 +337,7 @@ Object * FinalizerThread::FinalizeAllObjects(Object* fobj, int bitToCheck)
{
return NULL;
}
- fobj = GCHeap::GetGCHeap()->GetNextFinalizable();
+ fobj = GCHeapUtilities::GetGCHeap()->GetNextFinalizable();
}
}
}
@@ -533,7 +533,7 @@ void FinalizerThread::WaitForFinalizerEvent (CLREvent *event)
case (WAIT_OBJECT_0 + kLowMemoryNotification):
//short on memory GC immediately
GetFinalizerThread()->DisablePreemptiveGC();
- GCHeap::GetGCHeap()->GarbageCollect(0, TRUE);
+ GCHeapUtilities::GetGCHeap()->GarbageCollect(0, TRUE);
GetFinalizerThread()->EnablePreemptiveGC();
//wait only on the event for 2s
switch (event->Wait(2000, FALSE))
@@ -584,7 +584,7 @@ void FinalizerThread::WaitForFinalizerEvent (CLREvent *event)
if (WaitForSingleObject(MHandles[kLowMemoryNotification], 0) == WAIT_OBJECT_0) {
//short on memory GC immediately
GetFinalizerThread()->DisablePreemptiveGC();
- GCHeap::GetGCHeap()->GarbageCollect(0, TRUE);
+ GCHeapUtilities::GetGCHeap()->GarbageCollect(0, TRUE);
GetFinalizerThread()->EnablePreemptiveGC();
}
//wait only on the event for 2s
@@ -604,7 +604,7 @@ void FinalizerThread::WaitForFinalizerEvent (CLREvent *event)
if (sLastLowMemoryFromHost != 0)
{
GetFinalizerThread()->DisablePreemptiveGC();
- GCHeap::GetGCHeap()->GarbageCollect(0, TRUE);
+ GCHeapUtilities::GetGCHeap()->GarbageCollect(0, TRUE);
GetFinalizerThread()->EnablePreemptiveGC();
}
}
@@ -677,7 +677,7 @@ VOID FinalizerThread::FinalizerThreadWorker(void *args)
{
s_forcedGCInProgress = true;
GetFinalizerThread()->DisablePreemptiveGC();
- GCHeap::GetGCHeap()->GarbageCollect(2, FALSE, collection_blocking);
+ GCHeapUtilities::GetGCHeap()->GarbageCollect(2, FALSE, collection_blocking);
GetFinalizerThread()->EnablePreemptiveGC();
s_forcedGCInProgress = false;
@@ -710,14 +710,14 @@ VOID FinalizerThread::FinalizerThreadWorker(void *args)
do
{
- last_gc_count = GCHeap::GetGCHeap()->CollectionCount(0);
+ last_gc_count = GCHeapUtilities::GetGCHeap()->CollectionCount(0);
GetFinalizerThread()->m_GCOnTransitionsOK = FALSE;
GetFinalizerThread()->EnablePreemptiveGC();
__SwitchToThread (0, ++dwSwitchCount);
GetFinalizerThread()->DisablePreemptiveGC();
// If no GCs happended, then we assume we are quiescent
GetFinalizerThread()->m_GCOnTransitionsOK = TRUE;
- } while (GCHeap::GetGCHeap()->CollectionCount(0) - last_gc_count > 0);
+ } while (GCHeapUtilities::GetGCHeap()->CollectionCount(0) - last_gc_count > 0);
}
#endif //_DEBUG
@@ -747,7 +747,7 @@ VOID FinalizerThread::FinalizerThreadWorker(void *args)
}
else if (UnloadingAppDomain == NULL)
break;
- else if (!GCHeap::GetGCHeap()->FinalizeAppDomain(UnloadingAppDomain, fRunFinalizersOnUnload))
+ else if (!GCHeapUtilities::GetGCHeap()->FinalizeAppDomain(UnloadingAppDomain, fRunFinalizersOnUnload))
{
break;
}
@@ -916,7 +916,7 @@ DWORD __stdcall FinalizerThread::FinalizerThreadStart(void *args)
if (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_FinalizeOnShutdown) != 0)
{
// Finalize all registered objects during shutdown, even they are still reachable.
- GCHeap::GetGCHeap()->SetFinalizeQueueForShutdown(FALSE);
+ GCHeapUtilities::GetGCHeap()->SetFinalizeQueueForShutdown(FALSE);
// This will apply any policy for swallowing exceptions during normal
// processing, without allowing the finalizer thread to disappear on us.
@@ -1380,7 +1380,7 @@ BOOL FinalizerThread::FinalizerThreadWatchDogHelper()
}
else
{
- prevCount = GCHeap::GetGCHeap()->GetNumberOfFinalizable();
+ prevCount = GCHeapUtilities::GetGCHeap()->GetNumberOfFinalizable();
}
DWORD maxTry = (DWORD)(totalWaitTimeout*1.0/FINALIZER_WAIT_TIMEOUT + 0.5);
@@ -1447,11 +1447,11 @@ BOOL FinalizerThread::FinalizerThreadWatchDogHelper()
}
else
{
- curCount = GCHeap::GetGCHeap()->GetNumberOfFinalizable();
+ curCount = GCHeapUtilities::GetGCHeap()->GetNumberOfFinalizable();
}
if ((prevCount <= curCount)
- && !GCHeap::GetGCHeap()->ShouldRestartFinalizerWatchDog()
+ && !GCHeapUtilities::GetGCHeap()->ShouldRestartFinalizerWatchDog()
&& (pThread == NULL || !(pThread->m_State & (Thread::TS_UserSuspendPending | Thread::TS_DebugSuspendPending)))){
if (nTry == maxTry) {
if (!s_fRaiseExitProcessEvent) {
diff --git a/src/vm/frames.cpp b/src/vm/frames.cpp
index ec7e7be63c..04a1815cf3 100644
--- a/src/vm/frames.cpp
+++ b/src/vm/frames.cpp
@@ -18,7 +18,7 @@
#include "fieldmarshaler.h"
#include "objecthandle.h"
#include "siginfo.hpp"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "dllimportcallback.h"
#include "stackwalk.h"
#include "dbginterface.h"
diff --git a/src/vm/gccover.cpp b/src/vm/gccover.cpp
index 41dc094e94..89f271cdd0 100644
--- a/src/vm/gccover.cpp
+++ b/src/vm/gccover.cpp
@@ -1234,8 +1234,8 @@ void checkAndUpdateReg(DWORD& origVal, DWORD curVal, bool gcHappened) {
// the validation infrastructure has got a bug.
_ASSERTE(gcHappened); // If the register values are different, a GC must have happened
- _ASSERTE(GCHeap::GetGCHeap()->IsHeapPointer((BYTE*) size_t(origVal))); // And the pointers involved are on the GCHeap
- _ASSERTE(GCHeap::GetGCHeap()->IsHeapPointer((BYTE*) size_t(curVal)));
+ _ASSERTE(GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE*) size_t(origVal))); // And the pointers involved are on the GCHeap
+ _ASSERTE(GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE*) size_t(curVal)));
origVal = curVal; // this is now the best estimate of what should be returned.
}
@@ -1478,7 +1478,7 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
if (gcCover->callerThread == 0) {
if (FastInterlockCompareExchangePointer(&gcCover->callerThread, pThread, 0) == 0) {
gcCover->callerRegs = *regs;
- gcCover->gcCount = GCHeap::GetGCHeap()->GetGcCount();
+ gcCover->gcCount = GCHeapUtilities::GetGCHeap()->GetGcCount();
bShouldUpdateProlog = false;
}
}
@@ -1564,13 +1564,13 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
// instruction in the epilog (TODO: fix it for the first instr Case)
_ASSERTE(pThread->PreemptiveGCDisabled()); // Epilogs should be in cooperative mode, no GC can happen right now.
- bool gcHappened = gcCover->gcCount != GCHeap::GetGCHeap()->GetGcCount();
+ bool gcHappened = gcCover->gcCount != GCHeapUtilities::GetGCHeap()->GetGcCount();
checkAndUpdateReg(gcCover->callerRegs.Edi, *regDisp.pEdi, gcHappened);
checkAndUpdateReg(gcCover->callerRegs.Esi, *regDisp.pEsi, gcHappened);
checkAndUpdateReg(gcCover->callerRegs.Ebx, *regDisp.pEbx, gcHappened);
checkAndUpdateReg(gcCover->callerRegs.Ebp, *regDisp.pEbp, gcHappened);
- gcCover->gcCount = GCHeap::GetGCHeap()->GetGcCount();
+ gcCover->gcCount = GCHeapUtilities::GetGCHeap()->GetGcCount();
}
return;
@@ -1777,7 +1777,7 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
// Do the actual stress work
//
- if (!GCHeap::GetGCHeap()->StressHeap())
+ if (!GCHeapUtilities::GetGCHeap()->StressHeap())
UpdateGCStressInstructionWithoutGC ();
// Must flush instruction cache before returning as instruction has been modified.
diff --git a/src/vm/gcenv.ee.cpp b/src/vm/gcenv.ee.cpp
index 2f1e4e8200..8dc1692643 100644
--- a/src/vm/gcenv.ee.cpp
+++ b/src/vm/gcenv.ee.cpp
@@ -550,7 +550,7 @@ void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen,
STRESS_LOG1(LF_GCROOTS, LL_INFO10, "GCScan: Promotion Phase = %d\n", sc->promotion);
// In server GC, we should be competing for marking the statics
- if (GCHeap::MarkShouldCompeteForStatics())
+ if (GCHeapUtilities::MarkShouldCompeteForStatics())
{
if (condemned == max_gen && sc->promotion)
{
@@ -563,7 +563,7 @@ void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen,
{
STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "{ Starting scan of Thread %p ID = %x\n", pThread, pThread->GetThreadId());
- if (GCHeap::GetGCHeap()->IsThreadUsingAllocationContextHeap(
+ if (GCHeapUtilities::GetGCHeap()->IsThreadUsingAllocationContextHeap(
GCToEEInterface::GetAllocContext(pThread), sc->thread_number))
{
sc->thread_under_crawl = pThread;
@@ -693,7 +693,7 @@ void GCToEEInterface::SyncBlockCachePromotionsGranted(int max_gen)
SyncBlockCache::GetSyncBlockCache()->GCDone(FALSE, max_gen);
}
-alloc_context * GCToEEInterface::GetAllocContext(Thread * pThread)
+gc_alloc_context * GCToEEInterface::GetAllocContext(Thread * pThread)
{
WRAPPER_NO_CONTRACT;
return pThread->GetAllocContext();
diff --git a/src/vm/gcheaputilities.cpp b/src/vm/gcheaputilities.cpp
new file mode 100644
index 0000000000..ceadb9ea42
--- /dev/null
+++ b/src/vm/gcheaputilities.cpp
@@ -0,0 +1,9 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include "common.h"
+#include "gcheaputilities.h"
+
+// This is the global GC heap, maintained by the VM.
+GPTR_IMPL(IGCHeap, g_pGCHeap); \ No newline at end of file
diff --git a/src/vm/gcheaputilities.h b/src/vm/gcheaputilities.h
new file mode 100644
index 0000000000..6e08472a2f
--- /dev/null
+++ b/src/vm/gcheaputilities.h
@@ -0,0 +1,110 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#ifndef _GCHEAPUTILITIES_H_
+#define _GCHEAPUTILITIES_H_
+
+#include "gcinterface.h"
+
+// The singular heap instance.
+GPTR_DECL(IGCHeap, g_pGCHeap);
+
+// GCHeapUtilities provides a number of static methods
+// that operate on the global heap instance. It can't be
+// instantiated.
+class GCHeapUtilities {
+public:
+ // Retrieves the GC heap.
+ inline static IGCHeap* GetGCHeap()
+ {
+ assert(g_pGCHeap != nullptr);
+ return g_pGCHeap;
+ }
+
+ // Returns true if the heap has been initialized, false otherwise.
+ inline static bool IsGCHeapInitialized()
+ {
+ return g_pGCHeap != nullptr;
+ }
+
+ // Returns true if the heap is initialized and a garbage collection
+ // is in progress, false otherwise.
+ inline static BOOL IsGCInProgress(BOOL bConsiderGCStart = FALSE)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return (IsGCHeapInitialized() ? GetGCHeap()->IsGCInProgressHelper(bConsiderGCStart) : false);
+ }
+
+ // Returns true if we should be competing for marking statics. This
+ // influences the behavior of `GCToEEInterface::GcScanRoots`.
+ inline static BOOL MarkShouldCompeteForStatics()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return IsServerHeap() && g_SystemInfo.dwNumberOfProcessors >= 2;
+ }
+
+ // Waits until a GC is complete, if the heap has been initialized.
+ inline static void WaitForGCCompletion(BOOL bConsiderGCStart = FALSE)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (IsGCHeapInitialized())
+ GetGCHeap()->WaitUntilGCComplete(bConsiderGCStart);
+ }
+
+ // Returns true if we should be using allocation contexts, false otherwise.
+ inline static bool UseAllocationContexts()
+ {
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_REDHAWK
+ // SIMPLIFY: only use allocation contexts
+ return true;
+#else
+#if defined(_TARGET_ARM_) || defined(FEATURE_PAL)
+ return true;
+#else
+ return ((IsServerHeap() ? true : (g_SystemInfo.dwNumberOfProcessors >= 2)));
+#endif
+#endif
+ }
+
+ // Returns true if the held GC heap is a Server GC heap, false otherwise.
+ inline static bool IsServerHeap()
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifdef FEATURE_SVR_GC
+ _ASSERTE(IGCHeap::gcHeapType != IGCHeap::GC_HEAP_INVALID);
+ return (IGCHeap::gcHeapType == IGCHeap::GC_HEAP_SVR);
+#else // FEATURE_SVR_GC
+ return false;
+#endif // FEATURE_SVR_GC
+ }
+
+ // Gets the maximum generation number by reading the static field
+ // on IGCHeap. This should only be done by the DAC code paths - all other code
+ // should go through IGCHeap::GetMaxGeneration.
+ //
+ // The reason for this is that, while we are in the early stages of
+ // decoupling the GC, the GC and the DAC still remain tightly coupled
+ // and, in particular, the DAC needs to know how many generations the GC
+ // has. However, it is not permitted to invoke virtual methods on g_pGCHeap
+ // while on a DAC code path. Therefore, we need to determine the max generation
+ // non-virtually, while still in a manner consistent with the interface -
+ // therefore, a static field is used.
+ //
+ // This is not without precedent - IGCHeap::gcHeapType is a static field used
+ // for a similar reason (the DAC needs to know what kind of heap it's looking at).
+ inline static unsigned GetMaxGeneration()
+ {
+ return IGCHeap::maxGeneration;
+ }
+
+private:
+ // This class should never be instantiated.
+ GCHeapUtilities() = delete;
+};
+
+#endif // _GCHEAPUTILITIES_H_ \ No newline at end of file
diff --git a/src/vm/gchelpers.cpp b/src/vm/gchelpers.cpp
index bf81847716..9b0a17fbb4 100644
--- a/src/vm/gchelpers.cpp
+++ b/src/vm/gchelpers.cpp
@@ -16,7 +16,7 @@
#include "threads.h"
#include "eetwain.h"
#include "eeconfig.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "corhost.h"
#include "threads.h"
#include "fieldmarshaler.h"
@@ -51,11 +51,11 @@
orObject = (ArrayBase *) OBJECTREFToObject(objref);
-inline alloc_context* GetThreadAllocContext()
+inline gc_alloc_context* GetThreadAllocContext()
{
WRAPPER_NO_CONTRACT;
- assert(GCHeap::UseAllocationContexts());
+ assert(GCHeapUtilities::UseAllocationContexts());
return & GetThread()->m_alloc_context;
}
@@ -102,10 +102,10 @@ inline Object* Alloc(size_t size, BOOL bFinalize, BOOL bContainsPointers )
// We don't want to throw an SO during the GC, so make sure we have plenty
// of stack before calling in.
INTERIOR_STACK_PROBE_FOR(GetThread(), static_cast<unsigned>(DEFAULT_ENTRY_PROBE_AMOUNT * 1.5));
- if (GCHeap::UseAllocationContexts())
- retVal = GCHeap::GetGCHeap()->Alloc(GetThreadAllocContext(), size, flags);
+ if (GCHeapUtilities::UseAllocationContexts())
+ retVal = GCHeapUtilities::GetGCHeap()->Alloc(GetThreadAllocContext(), size, flags);
else
- retVal = GCHeap::GetGCHeap()->Alloc(size, flags);
+ retVal = GCHeapUtilities::GetGCHeap()->Alloc(size, flags);
END_INTERIOR_STACK_PROBE;
return retVal;
}
@@ -130,10 +130,10 @@ inline Object* AllocAlign8(size_t size, BOOL bFinalize, BOOL bContainsPointers,
// We don't want to throw an SO during the GC, so make sure we have plenty
// of stack before calling in.
INTERIOR_STACK_PROBE_FOR(GetThread(), static_cast<unsigned>(DEFAULT_ENTRY_PROBE_AMOUNT * 1.5));
- if (GCHeap::UseAllocationContexts())
- retVal = GCHeap::GetGCHeap()->AllocAlign8(GetThreadAllocContext(), size, flags);
+ if (GCHeapUtilities::UseAllocationContexts())
+ retVal = GCHeapUtilities::GetGCHeap()->AllocAlign8(GetThreadAllocContext(), size, flags);
else
- retVal = GCHeap::GetGCHeap()->AllocAlign8(size, flags);
+ retVal = GCHeapUtilities::GetGCHeap()->AllocAlign8(size, flags);
END_INTERIOR_STACK_PROBE;
return retVal;
@@ -173,7 +173,7 @@ inline Object* AllocLHeap(size_t size, BOOL bFinalize, BOOL bContainsPointers )
// We don't want to throw an SO during the GC, so make sure we have plenty
// of stack before calling in.
INTERIOR_STACK_PROBE_FOR(GetThread(), static_cast<unsigned>(DEFAULT_ENTRY_PROBE_AMOUNT * 1.5));
- retVal = GCHeap::GetGCHeap()->AllocLHeap(size, flags);
+ retVal = GCHeapUtilities::GetGCHeap()->AllocLHeap(size, flags);
END_INTERIOR_STACK_PROBE;
return retVal;
}
@@ -427,7 +427,7 @@ OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, B
if (bAllocateInLargeHeap ||
(totalSize >= LARGE_OBJECT_SIZE))
{
- GCHeap::GetGCHeap()->PublishObject((BYTE*)orArray);
+ GCHeapUtilities::GetGCHeap()->PublishObject((BYTE*)orArray);
}
#ifdef _LOGALLOC
@@ -651,7 +651,7 @@ OBJECTREF FastAllocatePrimitiveArray(MethodTable* pMT, DWORD cElements, BOOL b
if (bPublish)
{
- GCHeap::GetGCHeap()->PublishObject((BYTE*)orObject);
+ GCHeapUtilities::GetGCHeap()->PublishObject((BYTE*)orObject);
}
// Notify the profiler of the allocation
@@ -860,7 +860,7 @@ STRINGREF SlowAllocateString( DWORD cchStringLength )
if (ObjectSize >= LARGE_OBJECT_SIZE)
{
- GCHeap::GetGCHeap()->PublishObject((BYTE*)orObject);
+ GCHeapUtilities::GetGCHeap()->PublishObject((BYTE*)orObject);
}
// Notify the profiler of the allocation
@@ -1000,7 +1000,7 @@ OBJECTREF AllocateObject(MethodTable *pMT
if ((baseSize >= LARGE_OBJECT_SIZE))
{
orObject->SetMethodTableForLargeObject(pMT);
- GCHeap::GetGCHeap()->PublishObject((BYTE*)orObject);
+ GCHeapUtilities::GetGCHeap()->PublishObject((BYTE*)orObject);
}
else
{
@@ -1234,7 +1234,7 @@ extern "C" HCIMPL2_RAW(VOID, JIT_WriteBarrier, Object **dst, Object *ref)
*dst = ref;
// If the store above succeeded, "dst" should be in the heap.
- assert(GCHeap::GetGCHeap()->IsHeapPointer((void*)dst));
+ assert(GCHeapUtilities::GetGCHeap()->IsHeapPointer((void*)dst));
#ifdef WRITE_BARRIER_CHECK
updateGCShadow(dst, ref); // support debugging write barrier
@@ -1280,7 +1280,7 @@ extern "C" HCIMPL2_RAW(VOID, JIT_WriteBarrierEnsureNonHeapTarget, Object **dst,
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_NOTRIGGER;
- assert(!GCHeap::GetGCHeap()->IsHeapPointer((void*)dst));
+ assert(!GCHeapUtilities::GetGCHeap()->IsHeapPointer((void*)dst));
// no HELPER_METHOD_FRAME because we are MODE_COOPERATIVE, GC_NOTRIGGER
diff --git a/src/vm/gchost.cpp b/src/vm/gchost.cpp
index 4f7d52f805..f20b438a0b 100644
--- a/src/vm/gchost.cpp
+++ b/src/vm/gchost.cpp
@@ -22,7 +22,7 @@
#include "corhost.h"
#include "excep.h"
#include "field.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#if !defined(FEATURE_CORECLR)
inline size_t SizeInKBytes(size_t cbSize)
@@ -48,7 +48,7 @@ HRESULT CorGCHost::_SetGCSegmentSize(SIZE_T SegmentSize)
HRESULT hr = S_OK;
// Sanity check the value, it must be a power of two and big enough.
- if (!GCHeap::IsValidSegmentSize(SegmentSize))
+ if (!GCHeapUtilities::IsValidSegmentSize(SegmentSize))
{
hr = E_INVALIDARG;
}
@@ -74,7 +74,7 @@ HRESULT CorGCHost::_SetGCMaxGen0Size(SIZE_T MaxGen0Size)
HRESULT hr = S_OK;
// Sanity check the value is at least large enough.
- if (!GCHeap::IsValidGen0MaxSize(MaxGen0Size))
+ if (!GCHeapUtilities::IsValidGen0MaxSize(MaxGen0Size))
{
hr = E_INVALIDARG;
}
@@ -151,7 +151,7 @@ HRESULT CorGCHost::Collect(
HRESULT hr = E_FAIL;
- if (Generation > (int) GCHeap::GetGCHeap()->GetMaxGeneration())
+ if (Generation > (int) GCHeapUtilities::GetGCHeap()->GetMaxGeneration())
hr = E_INVALIDARG;
else
{
@@ -170,7 +170,7 @@ HRESULT CorGCHost::Collect(
EX_TRY
{
- hr = GCHeap::GetGCHeap()->GarbageCollect(Generation);
+ hr = GCHeapUtilities::GetGCHeap()->GarbageCollect(Generation);
}
EX_CATCH
{
@@ -268,7 +268,7 @@ HRESULT CorGCHost::SetVirtualMemLimit(
}
CONTRACTL_END;
- GCHeap::GetGCHeap()->SetReservedVMLimit (sztMaxVirtualMemMB);
+ GCHeapUtilities::GetGCHeap()->SetReservedVMLimit (sztMaxVirtualMemMB);
return (S_OK);
}
#endif // !defined(FEATURE_CORECLR)
diff --git a/src/vm/gc.h b/src/vm/gcinterface.h
index 825b5da803..cc70becdf1 100644
--- a/src/vm/gc.h
+++ b/src/vm/gcinterface.h
@@ -2,4 +2,4 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-#include "../gc/gc.h"
+#include "../gc/gcinterface.h" \ No newline at end of file
diff --git a/src/vm/gcstress.h b/src/vm/gcstress.h
index 609276e148..04487c611e 100644
--- a/src/vm/gcstress.h
+++ b/src/vm/gcstress.h
@@ -280,17 +280,17 @@ namespace _GCStress
// GC Trigger policy classes define how a garbage collection is triggered
// This is the default GC Trigger policy that simply calls
- // GCHeap::StressHeap
+ // IGCHeap::StressHeap
class StressGcTriggerPolicy
{
public:
FORCEINLINE
static void Trigger()
- { GCHeap::GetGCHeap()->StressHeap(); }
+ { GCHeapUtilities::GetGCHeap()->StressHeap(); }
FORCEINLINE
- static void Trigger(::alloc_context* acontext)
- { GCHeap::GetGCHeap()->StressHeap(acontext); }
+ static void Trigger(::gc_alloc_context* acontext)
+ { GCHeapUtilities::GetGCHeap()->StressHeap(acontext); }
};
// This is an overriding GC Trigger policy that triggers a GC by calling
@@ -403,7 +403,7 @@ namespace _GCStress
// Additionally it switches the GC mode as specified by GcModePolicy, and it
// uses GcTriggerPolicy::Trigger(alloc_context*) to actually trigger the GC
FORCEINLINE
- static void MaybeTrigger(::alloc_context* acontext, DWORD minFastGc = 0)
+ static void MaybeTrigger(::gc_alloc_context* acontext, DWORD minFastGc = 0)
{
if (IsEnabled(minFastGc) && GCStressPolicy::IsEnabled())
{
@@ -455,7 +455,7 @@ namespace _GCStress
public:
FORCEINLINE
- static void MaybeTrigger(::alloc_context* acontext)
+ static void MaybeTrigger(::gc_alloc_context* acontext)
{
GcStressBase::MaybeTrigger(acontext);
diff --git a/src/vm/hash.cpp b/src/vm/hash.cpp
index 205f736b0d..6b6b21391f 100644
--- a/src/vm/hash.cpp
+++ b/src/vm/hash.cpp
@@ -547,8 +547,8 @@ UPTR HashMap::LookupValue(UPTR key, UPTR value)
// BROKEN: This is called for the RCWCache on the GC thread
// Also called by AppDomain::FindCachedAssembly to resolve AssemblyRef -- this is used by stack walking on the GC thread.
- // See comments in GCHeap::RestartEE (above the call to SyncClean::CleanUp) for reason to enter COOP mode.
- // However, if the current thread is the GC thread, we know we're not going to call GCHeap::RestartEE
+ // See comments in GCHeapUtilities::GetGCHeap()->RestartEE (above the call to SyncClean::CleanUp) for reason to enter COOP mode.
+ // However, if the current thread is the GC thread, we know we're not going to call GCHeapUtilities::GetGCHeap()->RestartEE
// while accessing the HashMap, so it's safe to proceed.
// (m_fAsyncMode && !IsGCThread() is the condition for entering COOP mode. I.e., enable COOP GC only if
// the HashMap is in async mode and this is not a GC thread.)
diff --git a/src/vm/i386/excepx86.cpp b/src/vm/i386/excepx86.cpp
index 27c923b749..fb68963ae6 100644
--- a/src/vm/i386/excepx86.cpp
+++ b/src/vm/i386/excepx86.cpp
@@ -19,7 +19,7 @@
#include "comutilnative.h"
#include "sigformat.h"
#include "siginfo.hpp"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "eedbginterfaceimpl.h" //so we can clearexception in COMPlusThrow
#include "perfcounters.h"
#include "eventtrace.h"
diff --git a/src/vm/i386/jitinterfacex86.cpp b/src/vm/i386/jitinterfacex86.cpp
index 949b115ce2..f6d77668b7 100644
--- a/src/vm/i386/jitinterfacex86.cpp
+++ b/src/vm/i386/jitinterfacex86.cpp
@@ -57,10 +57,10 @@ extern "C" void STDCALL WriteBarrierAssert(BYTE* ptr, Object* obj)
if (fVerifyHeap)
{
obj->Validate(FALSE);
- if(GCHeap::GetGCHeap()->IsHeapPointer(ptr))
+ if(GCHeapUtilities::GetGCHeap()->IsHeapPointer(ptr))
{
Object* pObj = *(Object**)ptr;
- _ASSERTE (pObj == NULL || GCHeap::GetGCHeap()->IsHeapPointer(pObj));
+ _ASSERTE (pObj == NULL || GCHeapUtilities::GetGCHeap()->IsHeapPointer(pObj));
}
}
else
@@ -610,7 +610,7 @@ void JIT_TrialAlloc::EmitCore(CPUSTUBLINKER *psl, CodeLabel *noLock, CodeLabel *
if (flags & (ALIGN8 | SIZE_IN_EAX | ALIGN8OBJ))
{
// MOV EBX, [edx]Thread.m_alloc_context.alloc_ptr
- psl->X86EmitOffsetModRM(0x8B, kEBX, kEDX, offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_ptr));
+ psl->X86EmitOffsetModRM(0x8B, kEBX, kEDX, offsetof(Thread, m_alloc_context) + offsetof(gc_alloc_context, alloc_ptr));
// add EAX, EBX
psl->Emit16(0xC303);
if (flags & ALIGN8)
@@ -619,11 +619,11 @@ void JIT_TrialAlloc::EmitCore(CPUSTUBLINKER *psl, CodeLabel *noLock, CodeLabel *
else
{
// add eax, [edx]Thread.m_alloc_context.alloc_ptr
- psl->X86EmitOffsetModRM(0x03, kEAX, kEDX, offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_ptr));
+ psl->X86EmitOffsetModRM(0x03, kEAX, kEDX, offsetof(Thread, m_alloc_context) + offsetof(gc_alloc_context, alloc_ptr));
}
// cmp eax, [edx]Thread.m_alloc_context.alloc_limit
- psl->X86EmitOffsetModRM(0x3b, kEAX, kEDX, offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_limit));
+ psl->X86EmitOffsetModRM(0x3b, kEAX, kEDX, offsetof(Thread, m_alloc_context) + offsetof(gc_alloc_context, alloc_limit));
// ja noAlloc
psl->X86EmitCondJump(noAlloc, X86CondCode::kJA);
@@ -631,7 +631,7 @@ void JIT_TrialAlloc::EmitCore(CPUSTUBLINKER *psl, CodeLabel *noLock, CodeLabel *
// Fill in the allocation and get out.
// mov [edx]Thread.m_alloc_context.alloc_ptr, eax
- psl->X86EmitIndexRegStore(kEDX, offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_ptr), kEAX);
+ psl->X86EmitIndexRegStore(kEDX, offsetof(Thread, m_alloc_context) + offsetof(gc_alloc_context, alloc_ptr), kEAX);
if (flags & (ALIGN8 | SIZE_IN_EAX | ALIGN8OBJ))
{
@@ -1502,7 +1502,7 @@ void InitJITHelpers1()
_ASSERTE(g_SystemInfo.dwNumberOfProcessors != 0);
- JIT_TrialAlloc::Flags flags = GCHeap::UseAllocationContexts() ?
+ JIT_TrialAlloc::Flags flags = GCHeapUtilities::UseAllocationContexts() ?
JIT_TrialAlloc::MP_ALLOCATOR : JIT_TrialAlloc::NORMAL;
// Get CPU features and check for SSE2 support.
diff --git a/src/vm/i386/virtualcallstubcpu.hpp b/src/vm/i386/virtualcallstubcpu.hpp
index 33ce8199b9..8c16854d22 100644
--- a/src/vm/i386/virtualcallstubcpu.hpp
+++ b/src/vm/i386/virtualcallstubcpu.hpp
@@ -695,7 +695,7 @@ BOOL isDelegateCall(BYTE *interiorPtr)
{
LIMITED_METHOD_CONTRACT;
- if (GCHeap::GetGCHeap()->IsHeapPointer((void*)interiorPtr))
+ if (GCHeapUtilities::GetGCHeap()->IsHeapPointer((void*)interiorPtr))
{
Object *delegate = (Object*)(interiorPtr - DelegateObject::GetOffsetOfMethodPtrAux());
VALIDATEOBJECTREF(ObjectToOBJECTREF(delegate));
diff --git a/src/vm/interoputil.cpp b/src/vm/interoputil.cpp
index 33f04b9ab8..6a0fbded12 100644
--- a/src/vm/interoputil.cpp
+++ b/src/vm/interoputil.cpp
@@ -2130,7 +2130,7 @@ void MinorCleanupSyncBlockComData(InteropSyncBlockInfo* pInteropInfo)
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- PRECONDITION( GCHeap::IsGCInProgress() || ( (g_fEEShutDown & ShutDown_SyncBlock) && g_fProcessDetach ) );
+ PRECONDITION( GCHeapUtilities::IsGCInProgress() || ( (g_fEEShutDown & ShutDown_SyncBlock) && g_fProcessDetach ) );
}
CONTRACTL_END;
diff --git a/src/vm/interpreter.cpp b/src/vm/interpreter.cpp
index 198ed2b26a..15e4a29b68 100644
--- a/src/vm/interpreter.cpp
+++ b/src/vm/interpreter.cpp
@@ -14,7 +14,7 @@
#include "openum.h"
#include "fcall.h"
#include "frames.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include <float.h>
#include "jitinterface.h"
#include "safemath.h"
diff --git a/src/vm/jithelpers.cpp b/src/vm/jithelpers.cpp
index 1626810758..a18602e691 100644
--- a/src/vm/jithelpers.cpp
+++ b/src/vm/jithelpers.cpp
@@ -23,7 +23,7 @@
#include "security.h"
#include "securitymeta.h"
#include "dllimport.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "comdelegate.h"
#include "jitperf.h" // to track jit perf
#include "corprof.h"
@@ -2858,7 +2858,7 @@ HCIMPL1(Object*, JIT_NewS_MP_FastPortable, CORINFO_CLASS_HANDLE typeHnd_)
do
{
- _ASSERTE(GCHeap::UseAllocationContexts());
+ _ASSERTE(GCHeapUtilities::UseAllocationContexts());
// This is typically the only call in the fast path. Making the call early seems to be better, as it allows the compiler
// to use volatile registers for intermediate values. This reduces the number of push/pop instructions and eliminates
@@ -2872,7 +2872,7 @@ HCIMPL1(Object*, JIT_NewS_MP_FastPortable, CORINFO_CLASS_HANDLE typeHnd_)
SIZE_T size = methodTable->GetBaseSize();
_ASSERTE(size % DATA_ALIGNMENT == 0);
- alloc_context *allocContext = thread->GetAllocContext();
+ gc_alloc_context *allocContext = thread->GetAllocContext();
BYTE *allocPtr = allocContext->alloc_ptr;
_ASSERTE(allocPtr <= allocContext->alloc_limit);
if (size > static_cast<SIZE_T>(allocContext->alloc_limit - allocPtr))
@@ -2997,7 +2997,7 @@ HCIMPL1(StringObject*, AllocateString_MP_FastPortable, DWORD stringLength)
do
{
- _ASSERTE(GCHeap::UseAllocationContexts());
+ _ASSERTE(GCHeapUtilities::UseAllocationContexts());
// Instead of doing elaborate overflow checks, we just limit the number of elements. This will avoid all overflow
// problems, as well as making sure big string objects are correctly allocated in the big object heap.
@@ -3021,7 +3021,7 @@ HCIMPL1(StringObject*, AllocateString_MP_FastPortable, DWORD stringLength)
_ASSERTE(alignedTotalSize >= totalSize);
totalSize = alignedTotalSize;
- alloc_context *allocContext = thread->GetAllocContext();
+ gc_alloc_context *allocContext = thread->GetAllocContext();
BYTE *allocPtr = allocContext->alloc_ptr;
_ASSERTE(allocPtr <= allocContext->alloc_limit);
if (totalSize > static_cast<SIZE_T>(allocContext->alloc_limit - allocPtr))
@@ -3161,7 +3161,7 @@ HCIMPL2(Object*, JIT_NewArr1VC_MP_FastPortable, CORINFO_CLASS_HANDLE arrayTypeHn
do
{
- _ASSERTE(GCHeap::UseAllocationContexts());
+ _ASSERTE(GCHeapUtilities::UseAllocationContexts());
// Do a conservative check here. This is to avoid overflow while doing the calculations. We don't
// have to worry about "large" objects, since the allocation quantum is never big enough for
@@ -3198,7 +3198,7 @@ HCIMPL2(Object*, JIT_NewArr1VC_MP_FastPortable, CORINFO_CLASS_HANDLE arrayTypeHn
_ASSERTE(alignedTotalSize >= totalSize);
totalSize = alignedTotalSize;
- alloc_context *allocContext = thread->GetAllocContext();
+ gc_alloc_context *allocContext = thread->GetAllocContext();
BYTE *allocPtr = allocContext->alloc_ptr;
_ASSERTE(allocPtr <= allocContext->alloc_limit);
if (totalSize > static_cast<SIZE_T>(allocContext->alloc_limit - allocPtr))
@@ -3238,7 +3238,7 @@ HCIMPL2(Object*, JIT_NewArr1OBJ_MP_FastPortable, CORINFO_CLASS_HANDLE arrayTypeH
do
{
- _ASSERTE(GCHeap::UseAllocationContexts());
+ _ASSERTE(GCHeapUtilities::UseAllocationContexts());
// Make sure that the total size cannot reach LARGE_OBJECT_SIZE, which also allows us to avoid overflow checks. The
// "256" slack is to cover the array header size and round-up, using a constant value here out of laziness.
@@ -3266,7 +3266,7 @@ HCIMPL2(Object*, JIT_NewArr1OBJ_MP_FastPortable, CORINFO_CLASS_HANDLE arrayTypeH
_ASSERTE(ALIGN_UP(totalSize, DATA_ALIGNMENT) == totalSize);
- alloc_context *allocContext = thread->GetAllocContext();
+ gc_alloc_context *allocContext = thread->GetAllocContext();
BYTE *allocPtr = allocContext->alloc_ptr;
_ASSERTE(allocPtr <= allocContext->alloc_limit);
if (totalSize > static_cast<SIZE_T>(allocContext->alloc_limit - allocPtr))
@@ -6431,7 +6431,7 @@ HCIMPL0(VOID, JIT_StressGC)
bool fSkipGC = false;
if (!fSkipGC)
- GCHeap::GetGCHeap()->GarbageCollect();
+ GCHeapUtilities::GetGCHeap()->GarbageCollect();
// <TODO>@TODO: the following ifdef is in error, but if corrected the
// compiler complains about the *__ms->pRetAddr() saying machine state
diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
index 76d4568adb..65a804e320 100644
--- a/src/vm/jitinterface.cpp
+++ b/src/vm/jitinterface.cpp
@@ -27,7 +27,7 @@
#include "security.h"
#include "securitymeta.h"
#include "dllimport.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "comdelegate.h"
#include "jitperf.h" // to track jit perf
#include "corprof.h"
diff --git a/src/vm/jitinterfacegen.cpp b/src/vm/jitinterfacegen.cpp
index 0a90dc347d..f8a95bb759 100644
--- a/src/vm/jitinterfacegen.cpp
+++ b/src/vm/jitinterfacegen.cpp
@@ -221,7 +221,7 @@ void InitJITHelpers1()
))
{
// if (multi-proc || server GC)
- if (GCHeap::UseAllocationContexts())
+ if (GCHeapUtilities::UseAllocationContexts())
{
#ifdef FEATURE_IMPLICIT_TLS
SetJitHelperFunction(CORINFO_HELP_NEWSFAST, JIT_NewS_MP_FastPortable);
diff --git a/src/vm/marshalnative.cpp b/src/vm/marshalnative.cpp
index 48911b7190..5f05fa2daf 100644
--- a/src/vm/marshalnative.cpp
+++ b/src/vm/marshalnative.cpp
@@ -27,7 +27,7 @@
#include "log.h"
#include "fieldmarshaler.h"
#include "cgensys.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "security.h"
#include "dbginterface.h"
#include "objecthandle.h"
diff --git a/src/vm/mdaassistants.cpp b/src/vm/mdaassistants.cpp
index cc598c0a6c..e0e747bbaf 100644
--- a/src/vm/mdaassistants.cpp
+++ b/src/vm/mdaassistants.cpp
@@ -137,7 +137,7 @@ void TriggerGCForMDAInternal()
EX_TRY
{
- GCHeap::GetGCHeap()->GarbageCollect();
+ GCHeapUtilities::GetGCHeap()->GarbageCollect();
#ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
//
@@ -868,7 +868,7 @@ LPVOID MdaInvalidOverlappedToPinvoke::CheckOverlappedPointer(UINT index, LPVOID
{
GCX_COOP();
- GCHeap *pHeap = GCHeap::GetGCHeap();
+ GCHeap *pHeap = GCHeapUtilities::GetGCHeap();
fHeapPointer = pHeap->IsHeapPointer(pOverlapped);
}
diff --git a/src/vm/memberload.cpp b/src/vm/memberload.cpp
index 8b7b2ce69c..1b24300a68 100644
--- a/src/vm/memberload.cpp
+++ b/src/vm/memberload.cpp
@@ -30,7 +30,7 @@
#include "log.h"
#include "fieldmarshaler.h"
#include "cgensys.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "security.h"
#include "dbginterface.h"
#include "comdelegate.h"
diff --git a/src/vm/message.cpp b/src/vm/message.cpp
index fa0370dd33..093f9a2629 100644
--- a/src/vm/message.cpp
+++ b/src/vm/message.cpp
@@ -249,7 +249,7 @@ void CMessage::GetObjectFromStack(OBJECTREF* ppDest, PVOID val, const CorElement
_ASSERTE(ty.GetMethodTable()->IsValueType() || ty.GetMethodTable()->IsEnum());
- _ASSERTE(!GCHeap::GetGCHeap()->IsHeapPointer((BYTE *) ppDest) ||
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE *) ppDest) ||
!"(pDest) can not point to GC Heap");
MethodTable* pMT = ty.GetMethodTable();
diff --git a/src/vm/methodtable.cpp b/src/vm/methodtable.cpp
index bf863826d4..52a2ce4d98 100644
--- a/src/vm/methodtable.cpp
+++ b/src/vm/methodtable.cpp
@@ -33,7 +33,7 @@
#include "log.h"
#include "fieldmarshaler.h"
#include "cgensys.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "security.h"
#include "dbginterface.h"
#include "comdelegate.h"
@@ -9804,11 +9804,11 @@ BOOL MethodTable::Validate()
}
DWORD dwLastVerifiedGCCnt = m_pWriteableData->m_dwLastVerifedGCCnt;
- // Here we used to assert that (dwLastVerifiedGCCnt <= GCHeap::GetGCHeap()->GetGcCount()) but
+ // Here we used to assert that (dwLastVerifiedGCCnt <= GCHeapUtilities::GetGCHeap()->GetGcCount()) but
// this is no longer true because with background gc. Since the purpose of having
// m_dwLastVerifedGCCnt is just to only verify the same method table once for each GC
// I am getting rid of the assert.
- if (g_pConfig->FastGCStressLevel () > 1 && dwLastVerifiedGCCnt == GCHeap::GetGCHeap()->GetGcCount())
+ if (g_pConfig->FastGCStressLevel () > 1 && dwLastVerifiedGCCnt == GCHeapUtilities::GetGCHeap()->GetGcCount())
return TRUE;
#endif //_DEBUG
@@ -9835,7 +9835,7 @@ BOOL MethodTable::Validate()
// It is not a fatal error to fail the update the counter. We will run slower and retry next time,
// but the system will function properly.
if (EnsureWritablePagesNoThrow(m_pWriteableData, sizeof(MethodTableWriteableData)))
- m_pWriteableData->m_dwLastVerifedGCCnt = GCHeap::GetGCHeap()->GetGcCount();
+ m_pWriteableData->m_dwLastVerifedGCCnt = GCHeapUtilities::GetGCHeap()->GetGcCount();
#endif //_DEBUG
return TRUE;
diff --git a/src/vm/nativeoverlapped.h b/src/vm/nativeoverlapped.h
index 854090c35f..9e4e861790 100644
--- a/src/vm/nativeoverlapped.h
+++ b/src/vm/nativeoverlapped.h
@@ -62,7 +62,7 @@ public:
STATIC_CONTRACT_SO_TOLERANT;
_ASSERTE (nativeOverlapped != NULL);
- _ASSERTE (GCHeap::GetGCHeap()->IsHeapPointer((BYTE *) nativeOverlapped));
+ _ASSERTE (GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE *) nativeOverlapped));
return (OverlappedDataObject*)((BYTE*)nativeOverlapped - offsetof(OverlappedDataObject, Internal));
}
diff --git a/src/vm/object.cpp b/src/vm/object.cpp
index 7c47e26627..c4fbc40c33 100644
--- a/src/vm/object.cpp
+++ b/src/vm/object.cpp
@@ -17,7 +17,7 @@
#include "threads.h"
#include "excep.h"
#include "eeconfig.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#ifdef FEATURE_REMOTING
#include "remoting.h"
#endif
@@ -243,7 +243,7 @@ TypeHandle Object::GetGCSafeTypeHandleIfPossible() const
//
// where MyRefType2's module was unloaded by the time the GC occurred. In at least
// one case, the GC was caused by the AD unload itself (AppDomain::Unload ->
- // AppDomain::Exit -> GCInterface::AddMemoryPressure -> WKS::GCHeap::GarbageCollect).
+ // AppDomain::Exit -> GCInterface::AddMemoryPressure -> WKS::GCHeapUtilities::GarbageCollect).
//
// To protect against all scenarios, verify that
//
@@ -1764,9 +1764,9 @@ VOID Object::ValidateInner(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncB
BOOL bSmallObjectHeapPtr = FALSE, bLargeObjectHeapPtr = FALSE;
if (!noRangeChecks)
{
- bSmallObjectHeapPtr = GCHeap::GetGCHeap()->IsHeapPointer(this, TRUE);
+ bSmallObjectHeapPtr = GCHeapUtilities::GetGCHeap()->IsHeapPointer(this, TRUE);
if (!bSmallObjectHeapPtr)
- bLargeObjectHeapPtr = GCHeap::GetGCHeap()->IsHeapPointer(this);
+ bLargeObjectHeapPtr = GCHeapUtilities::GetGCHeap()->IsHeapPointer(this);
CHECK_AND_TEAR_DOWN(bSmallObjectHeapPtr || bLargeObjectHeapPtr);
}
@@ -1781,7 +1781,7 @@ VOID Object::ValidateInner(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncB
lastTest = 4;
if (bDeep && (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)) {
- GCHeap::GetGCHeap()->ValidateObjectMember(this);
+ GCHeapUtilities::GetGCHeap()->ValidateObjectMember(this);
}
lastTest = 5;
@@ -1790,7 +1790,7 @@ VOID Object::ValidateInner(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncB
// we skip checking noRangeChecks since if skipping
// is enabled bSmallObjectHeapPtr will always be false.
if (bSmallObjectHeapPtr) {
- CHECK_AND_TEAR_DOWN(!GCHeap::GetGCHeap()->IsObjectInFixedHeap(this));
+ CHECK_AND_TEAR_DOWN(!GCHeapUtilities::GetGCHeap()->IsObjectInFixedHeap(this));
}
lastTest = 6;
@@ -1815,9 +1815,9 @@ VOID Object::ValidateInner(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncB
&& bVerifyNextHeader
&& GCScan::GetGcRuntimeStructuresValid ()
//NextObj could be very slow if concurrent GC is going on
- && !(GCHeap::IsGCHeapInitialized() && GCHeap::GetGCHeap ()->IsConcurrentGCInProgress ()))
+ && !(GCHeapUtilities::IsGCHeapInitialized() && GCHeapUtilities::GetGCHeap ()->IsConcurrentGCInProgress ()))
{
- Object * nextObj = GCHeap::GetGCHeap ()->NextObj (this);
+ Object * nextObj = GCHeapUtilities::GetGCHeap ()->NextObj (this);
if ((nextObj != NULL) &&
(nextObj->GetGCSafeMethodTable() != g_pFreeObjectMethodTable))
{
@@ -1949,7 +1949,7 @@ STRINGREF StringObject::NewString(const WCHAR *pwsz)
// pinning and then later put into a struct and that struct is
// then marshalled to managed.
//
- _ASSERTE(!GCHeap::GetGCHeap()->IsHeapPointer((BYTE *) pwsz) ||
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE *) pwsz) ||
!"pwsz can not point to GC Heap");
#endif // 0
@@ -1988,7 +1988,7 @@ STRINGREF StringObject::NewString(const WCHAR *pwsz, int length) {
// pinning and then later put into a struct and that struct is
// then marshalled to managed.
//
- _ASSERTE(!GCHeap::GetGCHeap()->IsHeapPointer((BYTE *) pwsz) ||
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE *) pwsz) ||
!"pwsz can not point to GC Heap");
#endif // 0
STRINGREF pString = AllocateString(length);
@@ -2664,7 +2664,7 @@ OBJECTREF::OBJECTREF(const OBJECTREF & objref)
// !!! Either way you need to fix the code.
_ASSERTE(Thread::IsObjRefValid(&objref));
if ((objref.m_asObj != 0) &&
- ((GCHeap*)GCHeap::GetGCHeap())->IsHeapPointer( (BYTE*)this ))
+ ((IGCHeap*)GCHeapUtilities::GetGCHeap())->IsHeapPointer( (BYTE*)this ))
{
_ASSERTE(!"Write Barrier violation. Must use SetObjectReference() to assign OBJECTREF's into the GC heap!");
}
@@ -2718,7 +2718,7 @@ OBJECTREF::OBJECTREF(Object *pObject)
DEBUG_ONLY_FUNCTION;
if ((pObject != 0) &&
- ((GCHeap*)GCHeap::GetGCHeap())->IsHeapPointer( (BYTE*)this ))
+ ((IGCHeap*)GCHeapUtilities::GetGCHeap())->IsHeapPointer( (BYTE*)this ))
{
_ASSERTE(!"Write Barrier violation. Must use SetObjectReference() to assign OBJECTREF's into the GC heap!");
}
@@ -2901,7 +2901,7 @@ OBJECTREF& OBJECTREF::operator=(const OBJECTREF &objref)
_ASSERTE(Thread::IsObjRefValid(&objref));
if ((objref.m_asObj != 0) &&
- ((GCHeap*)GCHeap::GetGCHeap())->IsHeapPointer( (BYTE*)this ))
+ ((IGCHeap*)GCHeapUtilities::GetGCHeap())->IsHeapPointer( (BYTE*)this ))
{
_ASSERTE(!"Write Barrier violation. Must use SetObjectReference() to assign OBJECTREF's into the GC heap!");
}
@@ -2948,14 +2948,14 @@ void* __cdecl GCSafeMemCpy(void * dest, const void * src, size_t len)
{
Thread* pThread = GetThread();
- // GCHeap::IsHeapPointer has race when called in preemptive mode. It walks the list of segments
+ // GCHeapUtilities::IsHeapPointer has race when called in preemptive mode. It walks the list of segments
// that can be modified by GC. Do the check below only if it is safe to do so.
if (pThread != NULL && pThread->PreemptiveGCDisabled())
{
// Note there is memcpyNoGCRefs which will allow you to do a memcpy into the GC
// heap if you really know you don't need to call the write barrier
- _ASSERTE(!GCHeap::GetGCHeap()->IsHeapPointer((BYTE *) dest) ||
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE *) dest) ||
!"using memcpy to copy into the GC heap, use CopyValueClass");
}
}
diff --git a/src/vm/prestub.cpp b/src/vm/prestub.cpp
index bcce5a3002..bfad7a320b 100644
--- a/src/vm/prestub.cpp
+++ b/src/vm/prestub.cpp
@@ -1175,7 +1175,7 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
if (g_pConfig->ShouldPrestubGC(this))
{
GCX_COOP();
- GCHeap::GetGCHeap()->GarbageCollect(-1);
+ GCHeapUtilities::GetGCHeap()->GarbageCollect(-1);
}
#endif // _DEBUG
diff --git a/src/vm/profattach.cpp b/src/vm/profattach.cpp
index f03db361f0..5b10e8f10e 100644
--- a/src/vm/profattach.cpp
+++ b/src/vm/profattach.cpp
@@ -806,7 +806,7 @@ void ProfilingAPIAttachDetach::InitializeAttachThreadingMode()
// Environment variable trumps all, so check it first
DWORD dwAlwaysOn = g_pConfig->GetConfigDWORD_DontUse_(
CLRConfig::EXTERNAL_AttachThreadAlwaysOn,
- GCHeap::IsServerHeap() ? 1 : 0); // Default depends on GC server mode
+ GCHeapUtilities::IsServerHeap() ? 1 : 0); // Default depends on GC server mode
if (dwAlwaysOn == 0)
{
diff --git a/src/vm/profilinghelper.cpp b/src/vm/profilinghelper.cpp
index 139ba89ec0..1dd60b47e1 100644
--- a/src/vm/profilinghelper.cpp
+++ b/src/vm/profilinghelper.cpp
@@ -1413,7 +1413,7 @@ void ProfilingAPIUtility::TerminateProfiling()
{
// We know for sure GC has been fully initialized as we've turned off concurrent GC before
_ASSERTE(IsGarbageCollectorFullyInitialized());
- GCHeap::GetGCHeap()->TemporaryEnableConcurrentGC();
+ GCHeapUtilities::GetGCHeap()->TemporaryEnableConcurrentGC();
g_profControlBlock.fConcurrentGCDisabledForAttach = FALSE;
}
diff --git a/src/vm/proftoeeinterfaceimpl.cpp b/src/vm/proftoeeinterfaceimpl.cpp
index 551b38631a..fee883f0ef 100644
--- a/src/vm/proftoeeinterfaceimpl.cpp
+++ b/src/vm/proftoeeinterfaceimpl.cpp
@@ -754,7 +754,7 @@ struct GenerationTable
//---------------------------------------------------------------------------------------
//
-// This is a callback used by the GC when we call GCHeap::DescrGenerationsToProfiler
+// This is a callback used by the GC when we call GCHeapUtilities::DescrGenerationsToProfiler
// (from UpdateGenerationBounds() below). The GC gives us generation information through
// this callback, which we use to update the GenerationDesc in the corresponding
// GenerationTable
@@ -874,7 +874,7 @@ void __stdcall UpdateGenerationBounds()
#endif
// fill in the values by calling back into the gc, which will report
// the ranges by calling GenWalkFunc for each one
- GCHeap *hp = GCHeap::GetGCHeap();
+ IGCHeap *hp = GCHeapUtilities::GetGCHeap();
hp->DescrGenerationsToProfiler(GenWalkFunc, newGenerationTable);
// remember the old table and plug in the new one
@@ -1018,7 +1018,7 @@ ClassID SafeGetClassIDFromObject(Object * pObj)
//---------------------------------------------------------------------------------------
//
-// Callback of type walk_fn used by GCHeap::WalkObject. Keeps a count of each
+// Callback of type walk_fn used by GCHeapUtilities::WalkObject. Keeps a count of each
// object reference found.
//
// Arguments:
@@ -1040,7 +1040,7 @@ BOOL CountContainedObjectRef(Object * pBO, void * context)
//---------------------------------------------------------------------------------------
//
-// Callback of type walk_fn used by GCHeap::WalkObject. Stores each object reference
+// Callback of type walk_fn used by GCHeapUtilities::WalkObject. Stores each object reference
// encountered into an array.
//
// Arguments:
@@ -1113,7 +1113,7 @@ BOOL HeapWalkHelper(Object * pBO, void * pvContext)
if (pMT->ContainsPointersOrCollectible())
{
// First round through calculates the number of object refs for this class
- GCHeap::GetGCHeap()->WalkObject(pBO, &CountContainedObjectRef, (void *)&cNumRefs);
+ GCHeapUtilities::GetGCHeap()->WalkObject(pBO, &CountContainedObjectRef, (void *)&cNumRefs);
if (cNumRefs > 0)
{
@@ -1138,7 +1138,7 @@ BOOL HeapWalkHelper(Object * pBO, void * pvContext)
// Second round saves off all of the ref values
OBJECTREF * pCurObjRef = arrObjRef;
- GCHeap::GetGCHeap()->WalkObject(pBO, &SaveContainedObjectRef, (void *)&pCurObjRef);
+ GCHeapUtilities::GetGCHeap()->WalkObject(pBO, &SaveContainedObjectRef, (void *)&pCurObjRef);
}
}
@@ -9461,7 +9461,7 @@ FCIMPL2(void, ProfilingFCallHelper::FC_RemotingClientSendingMessage, GUID *pId,
// it is a value class declared on the stack and so GC doesn't
// know about it.
- _ASSERTE (!GCHeap::GetGCHeap()->IsHeapPointer(pId)); // should be on the stack, not in the heap
+ _ASSERTE (!GCHeapUtilities::GetGCHeap()->IsHeapPointer(pId)); // should be on the stack, not in the heap
HELPER_METHOD_FRAME_BEGIN_NOPOLL();
{
diff --git a/src/vm/rcwwalker.cpp b/src/vm/rcwwalker.cpp
index ad718126c1..0b875360fd 100644
--- a/src/vm/rcwwalker.cpp
+++ b/src/vm/rcwwalker.cpp
@@ -129,10 +129,10 @@ STDMETHODIMP CLRServicesImpl::GarbageCollect(DWORD dwFlags)
{
GCX_COOP_THREAD_EXISTS(GET_THREAD());
if (dwFlags & GC_FOR_APPX_SUSPEND) {
- GCHeap::GetGCHeap()->GarbageCollect(2, TRUE, collection_blocking | collection_optimized);
+ GCHeapUtilities::GetGCHeap()->GarbageCollect(2, TRUE, collection_blocking | collection_optimized);
}
else
- GCHeap::GetGCHeap()->GarbageCollect();
+ GCHeapUtilities::GetGCHeap()->GarbageCollect();
}
END_EXTERNAL_ENTRYPOINT;
return hr;
diff --git a/src/vm/runtimecallablewrapper.cpp b/src/vm/runtimecallablewrapper.cpp
index d12d5568f6..8aeaec606a 100644
--- a/src/vm/runtimecallablewrapper.cpp
+++ b/src/vm/runtimecallablewrapper.cpp
@@ -1591,7 +1591,7 @@ public:
if (pRCW->IsValid())
{
- if (!GCHeap::GetGCHeap()->IsPromoted(OBJECTREFToObject(pRCW->GetExposedObject())) &&
+ if (!GCHeapUtilities::GetGCHeap()->IsPromoted(OBJECTREFToObject(pRCW->GetExposedObject())) &&
!pRCW->IsDetached())
{
// No need to use InterlockedOr here since every other place that modifies the flags
@@ -1612,7 +1612,7 @@ void RCWCache::DetachWrappersWorker()
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- PRECONDITION(GCHeap::IsGCInProgress()); // GC is in progress and the runtime is suspended
+ PRECONDITION(GCHeapUtilities::IsGCInProgress()); // GC is in progress and the runtime is suspended
}
CONTRACTL_END;
@@ -2808,7 +2808,7 @@ void RCW::MinorCleanup()
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- PRECONDITION(GCHeap::IsGCInProgress() || ( (g_fEEShutDown & ShutDown_SyncBlock) && g_fProcessDetach ));
+ PRECONDITION(GCHeapUtilities::IsGCInProgress() || ( (g_fEEShutDown & ShutDown_SyncBlock) && g_fProcessDetach ));
}
CONTRACTL_END;
diff --git a/src/vm/safehandle.cpp b/src/vm/safehandle.cpp
index 3336e693b5..828b221025 100644
--- a/src/vm/safehandle.cpp
+++ b/src/vm/safehandle.cpp
@@ -246,7 +246,7 @@ void SafeHandle::Dispose()
// Suppress finalization on this object (we may be racing here but the
// operation below is idempotent and a dispose should never race a
// finalization).
- GCHeap::GetGCHeap()->SetFinalizationRun(OBJECTREFToObject(sh));
+ GCHeapUtilities::GetGCHeap()->SetFinalizationRun(OBJECTREFToObject(sh));
GCPROTECT_END();
}
@@ -394,7 +394,7 @@ FCIMPL1(void, SafeHandle::SetHandleAsInvalid, SafeHandle* refThisUNSAFE)
} while (InterlockedCompareExchange((LONG*)&sh->m_state, newState, oldState) != oldState);
- GCHeap::GetGCHeap()->SetFinalizationRun(OBJECTREFToObject(sh));
+ GCHeapUtilities::GetGCHeap()->SetFinalizationRun(OBJECTREFToObject(sh));
}
FCIMPLEND
diff --git a/src/vm/siginfo.cpp b/src/vm/siginfo.cpp
index decd3c0aab..9adfb4998c 100644
--- a/src/vm/siginfo.cpp
+++ b/src/vm/siginfo.cpp
@@ -14,7 +14,7 @@
#include "clsload.hpp"
#include "vars.hpp"
#include "excep.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "field.h"
#include "eeconfig.h"
#include "runtimehandles.h" // for SignatureNative
diff --git a/src/vm/stubhelpers.cpp b/src/vm/stubhelpers.cpp
index 6e7fb49b96..cbe1d37c94 100644
--- a/src/vm/stubhelpers.cpp
+++ b/src/vm/stubhelpers.cpp
@@ -19,7 +19,7 @@
#include "security.h"
#include "eventtrace.h"
#include "comdatetime.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "interoputil.h"
#include "gcscan.h"
#ifdef FEATURE_REMOTING
@@ -70,7 +70,7 @@ void StubHelpers::ValidateObjectInternal(Object *pObjUNSAFE, BOOL fValidateNextO
// and the next object as required
if (fValidateNextObj)
{
- Object *nextObj = GCHeap::GetGCHeap()->NextObj(pObjUNSAFE);
+ Object *nextObj = GCHeapUtilities::GetGCHeap()->NextObj(pObjUNSAFE);
if (nextObj != NULL)
{
// Note that the MethodTable of the object (i.e. the pointer at offset 0) can change from
@@ -162,7 +162,7 @@ void StubHelpers::ProcessByrefValidationList()
{
entry = s_ByrefValidationEntries[i];
- Object *pObjUNSAFE = GCHeap::GetGCHeap()->GetGCHeap()->GetContainingObject(entry.pByref);
+ Object *pObjUNSAFE = GCHeapUtilities::GetGCHeap()->GetContainingObject(entry.pByref);
ValidateObjectInternal(pObjUNSAFE, TRUE);
}
}
@@ -2004,7 +2004,7 @@ FCIMPL3(void, StubHelpers::ValidateObject, Object *pObjUNSAFE, MethodDesc *pMD,
AVInRuntimeImplOkayHolder AVOkay;
// don't validate the next object if a BGC is in progress. we can race with background
// sweep which could make the next object a Free object underneath us if it's dead.
- ValidateObjectInternal(pObjUNSAFE, !(GCHeap::GetGCHeap()->IsConcurrentGCInProgress()));
+ ValidateObjectInternal(pObjUNSAFE, !(GCHeapUtilities::GetGCHeap()->IsConcurrentGCInProgress()));
}
EX_CATCH
{
@@ -2031,7 +2031,7 @@ FCIMPL3(void, StubHelpers::ValidateByref, void *pByref, MethodDesc *pMD, Object
// perform the validation on next GC (see code:StubHelpers.ProcessByrefValidationList).
// Skip byref if is not pointing inside managed heap
- if (!GCHeap::GetGCHeap()->IsHeapPointer(pByref))
+ if (!GCHeapUtilities::GetGCHeap()->IsHeapPointer(pByref))
{
return;
}
@@ -2066,7 +2066,7 @@ FCIMPL3(void, StubHelpers::ValidateByref, void *pByref, MethodDesc *pMD, Object
if (NumOfEntries > BYREF_VALIDATION_LIST_MAX_SIZE)
{
// if the list is too big, trigger GC now
- GCHeap::GetGCHeap()->GarbageCollect(0);
+ GCHeapUtilities::GetGCHeap()->GarbageCollect(0);
}
HELPER_METHOD_FRAME_END();
diff --git a/src/vm/syncblk.cpp b/src/vm/syncblk.cpp
index 3975542d98..171a8d3bb7 100644
--- a/src/vm/syncblk.cpp
+++ b/src/vm/syncblk.cpp
@@ -1372,7 +1372,7 @@ void SyncBlockCache::GCWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintp
STRESS_LOG0 (LF_GC | LF_SYNC, LL_INFO100, "GCWeakPtrScan starting\n");
#endif
- if (GCHeap::GetGCHeap()->GetCondemnedGeneration() < GCHeap::GetGCHeap()->GetMaxGeneration())
+ if (GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration() < GCHeapUtilities::GetGCHeap()->GetMaxGeneration())
{
#ifdef VERIFY_HEAP
//for VSW 294550: we saw stale obeject reference in SyncBlkCache, so we want to make sure the card
@@ -1416,7 +1416,7 @@ void SyncBlockCache::GCWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintp
Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
if (o && !((size_t)o & 1))
{
- if (GCHeap::GetGCHeap()->IsEphemeral (o))
+ if (GCHeapUtilities::GetGCHeap()->IsEphemeral (o))
{
clear_card = FALSE;
@@ -1615,8 +1615,8 @@ void SyncBlockCache::GCDone(BOOL demoting, int max_gen)
CONTRACTL_END;
if (demoting &&
- (GCHeap::GetGCHeap()->GetCondemnedGeneration() ==
- GCHeap::GetGCHeap()->GetMaxGeneration()))
+ (GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration() ==
+ GCHeapUtilities::GetGCHeap()->GetMaxGeneration()))
{
//scan the bitmap
size_t dw = 0;
@@ -1643,7 +1643,7 @@ void SyncBlockCache::GCDone(BOOL demoting, int max_gen)
Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
if (o && !((size_t)o & 1))
{
- if (GCHeap::GetGCHeap()->WhichGeneration (o) < (unsigned int)max_gen)
+ if (GCHeapUtilities::GetGCHeap()->WhichGeneration (o) < (unsigned int)max_gen)
{
SetCard (card);
break;
@@ -1713,7 +1713,7 @@ void SyncBlockCache::VerifySyncTableEntry()
DWORD idx = o->GetHeader()->GetHeaderSyncBlockIndex();
_ASSERTE(idx == nb || ((0 == idx) && (loop == max_iterations)));
- _ASSERTE(!GCHeap::GetGCHeap()->IsEphemeral(o) || CardSetP(CardOf(nb)));
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsEphemeral(o) || CardSetP(CardOf(nb)));
}
}
}
@@ -2498,10 +2498,10 @@ BOOL ObjHeader::Validate (BOOL bVerifySyncBlkIndex)
//BIT_SBLK_GC_RESERVE (0x20000000) is only set during GC. But for frozen object, we don't clean the bit
if (bits & BIT_SBLK_GC_RESERVE)
{
- if (!GCHeap::GetGCHeap()->IsGCInProgress () && !GCHeap::GetGCHeap()->IsConcurrentGCInProgress ())
+ if (!GCHeapUtilities::IsGCInProgress () && !GCHeapUtilities::GetGCHeap()->IsConcurrentGCInProgress ())
{
#ifdef FEATURE_BASICFREEZE
- ASSERT_AND_CHECK (GCHeap::GetGCHeap()->IsInFrozenSegment(obj));
+ ASSERT_AND_CHECK (GCHeapUtilities::GetGCHeap()->IsInFrozenSegment(obj));
#else //FEATURE_BASICFREEZE
_ASSERTE(!"Reserve bit not cleared");
return FALSE;
diff --git a/src/vm/syncclean.cpp b/src/vm/syncclean.cpp
index e02c2f90c2..133f448e16 100644
--- a/src/vm/syncclean.cpp
+++ b/src/vm/syncclean.cpp
@@ -73,7 +73,7 @@ void SyncClean::CleanUp ()
// Only GC thread can call this.
_ASSERTE (g_fProcessDetach ||
IsGCSpecialThread() ||
- (GCHeap::IsGCInProgress() && GetThread() == ThreadSuspend::GetSuspensionThread()));
+ (GCHeapUtilities::IsGCInProgress() && GetThread() == ThreadSuspend::GetSuspensionThread()));
if (m_HashMap)
{
Bucket * pTempBucket = FastInterlockExchangePointer(m_HashMap.GetPointer(), NULL);
diff --git a/src/vm/testhookmgr.cpp b/src/vm/testhookmgr.cpp
index 9ec53f8e45..48370134d2 100644
--- a/src/vm/testhookmgr.cpp
+++ b/src/vm/testhookmgr.cpp
@@ -655,7 +655,7 @@ HRESULT CLRTestHookManager::GC(int generation)
CONTRACTL_END;
_ASSERTE(GetThread()==NULL || !GetThread()->PreemptiveGCDisabled());
- GCHeap::GetGCHeap()->GarbageCollect(generation);
+ GCHeapUtilities::GetGCHeap()->GarbageCollect(generation);
FinalizerThread::FinalizerThreadWait();
return S_OK;
}
diff --git a/src/vm/threadpoolrequest.cpp b/src/vm/threadpoolrequest.cpp
index 8d47e6b810..a5c1c4263d 100644
--- a/src/vm/threadpoolrequest.cpp
+++ b/src/vm/threadpoolrequest.cpp
@@ -517,11 +517,11 @@ void UnManagedPerAppDomainTPCount::DispatchWorkItem(bool* foundWork, bool* wasNo
firstIteration = false;
*foundWork = true;
- if (GCHeap::IsGCInProgress(TRUE))
+ if (GCHeapUtilities::IsGCInProgress(TRUE))
{
// GC is imminent, so wait until GC is complete before executing next request.
// this reduces in-flight objects allocated right before GC, easing the GC's work
- GCHeap::WaitForGCCompletion(TRUE);
+ GCHeapUtilities::WaitForGCCompletion(TRUE);
}
PREFIX_ASSUME(pWorkRequest != NULL);
diff --git a/src/vm/threads.cpp b/src/vm/threads.cpp
index cc2e4eb5e4..54cd03ed89 100644
--- a/src/vm/threads.cpp
+++ b/src/vm/threads.cpp
@@ -18,7 +18,7 @@
#include "excep.h"
#include "comsynchronizable.h"
#include "log.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "mscoree.h"
#include "dbginterface.h"
#include "corprof.h" // profiling
@@ -3889,14 +3889,14 @@ void Thread::OnThreadTerminate(BOOL holdingLock)
#endif
}
- if (GCHeap::IsGCHeapInitialized())
+ if (GCHeapUtilities::IsGCHeapInitialized())
{
// Guaranteed to NOT be a shutdown case, because we tear down the heap before
// we tear down any threads during shutdown.
if (ThisThreadID == CurrentThreadID)
{
GCX_COOP();
- GCHeap::GetGCHeap()->FixAllocContext(&m_alloc_context, FALSE, NULL, NULL);
+ GCHeapUtilities::GetGCHeap()->FixAllocContext(&m_alloc_context, FALSE, NULL, NULL);
m_alloc_context.init();
}
}
@@ -3957,11 +3957,11 @@ void Thread::OnThreadTerminate(BOOL holdingLock)
#endif
}
- if (GCHeap::IsGCHeapInitialized() && ThisThreadID != CurrentThreadID)
+ if (GCHeapUtilities::IsGCHeapInitialized() && ThisThreadID != CurrentThreadID)
{
// We must be holding the ThreadStore lock in order to clean up alloc context.
// We should never call FixAllocContext during GC.
- GCHeap::GetGCHeap()->FixAllocContext(&m_alloc_context, FALSE, NULL, NULL);
+ GCHeapUtilities::GetGCHeap()->FixAllocContext(&m_alloc_context, FALSE, NULL, NULL);
m_alloc_context.init();
}
@@ -9846,7 +9846,7 @@ void Thread::DoExtraWorkForFinalizer()
Thread::CleanupDetachedThreads();
}
- if(ExecutionManager::IsCacheCleanupRequired() && GCHeap::GetGCHeap()->GetCondemnedGeneration()>=1)
+ if(ExecutionManager::IsCacheCleanupRequired() && GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration()>=1)
{
ExecutionManager::ClearCaches();
}
@@ -11186,7 +11186,7 @@ void Thread::SetHasPromotedBytes ()
m_fPromoted = TRUE;
- _ASSERTE(GCHeap::IsGCInProgress() && IsGCThread ());
+ _ASSERTE(GCHeapUtilities::IsGCInProgress() && IsGCThread ());
if (!m_fPreemptiveGCDisabled)
{
diff --git a/src/vm/threads.h b/src/vm/threads.h
index ec047f2ddd..bb54a85f45 100644
--- a/src/vm/threads.h
+++ b/src/vm/threads.h
@@ -142,7 +142,7 @@
#include "regdisp.h"
#include "mscoree.h"
#include "appdomainstack.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "gcinfotypes.h"
#include <clrhost.h>
@@ -1739,9 +1739,9 @@ public:
// on MP systems, each thread has its own allocation chunk so we can avoid
// lock prefixes and expensive MP cache snooping stuff
- alloc_context m_alloc_context;
+ gc_alloc_context m_alloc_context;
- inline alloc_context *GetAllocContext() { LIMITED_METHOD_CONTRACT; return &m_alloc_context; }
+ inline gc_alloc_context *GetAllocContext() { LIMITED_METHOD_CONTRACT; return &m_alloc_context; }
// This is the type handle of the first object in the alloc context at the time
// we fire the AllocationTick event. It's only for tooling purpose.
@@ -4884,7 +4884,7 @@ private:
private:
// When we create an object, or create an OBJECTREF, or create an Interior Pointer, or enter EE from managed
// code, we will set this flag.
- // Inside GCHeap::StressHeap, we only do GC if this flag is TRUE. Then we reset it to zero.
+ // Inside GCHeapUtilities::StressHeap, we only do GC if this flag is TRUE. Then we reset it to zero.
BOOL m_fStressHeapCount;
public:
void EnableStressHeap()
diff --git a/src/vm/threadsuspend.cpp b/src/vm/threadsuspend.cpp
index c71855f45f..0458b7dd57 100644
--- a/src/vm/threadsuspend.cpp
+++ b/src/vm/threadsuspend.cpp
@@ -3276,7 +3276,7 @@ void Thread::RareDisablePreemptiveGC()
__SwitchToThread(0, CALLER_LIMITS_SPINNING);
}
- if (!GCHeap::IsGCHeapInitialized())
+ if (!GCHeapUtilities::IsGCHeapInitialized())
{
goto Exit;
}
@@ -3284,7 +3284,7 @@ void Thread::RareDisablePreemptiveGC()
// Note IsGCInProgress is also true for say Pause (anywhere SuspendEE happens) and GCThread is the
// thread that did the Pause. While in Pause if another thread attempts Rev/Pinvoke it should get inside the following and
// block until resume
- if (((GCHeap::IsGCInProgress() && (this != ThreadSuspend::GetSuspensionThread())) ||
+ if (((GCHeapUtilities::IsGCInProgress() && (this != ThreadSuspend::GetSuspensionThread())) ||
(m_State & (TS_UserSuspendPending | TS_DebugSuspendPending | TS_StackCrawlNeeded))) &&
(!g_fSuspendOnShutdown || IsFinalizerThread() || IsShutdownSpecialThread()))
{
@@ -3350,7 +3350,7 @@ void Thread::RareDisablePreemptiveGC()
DWORD status = S_OK;
SetThreadStateNC(TSNC_WaitUntilGCFinished);
- status = GCHeap::GetGCHeap()->WaitUntilGCComplete();
+ status = GCHeapUtilities::GetGCHeap()->WaitUntilGCComplete();
ResetThreadStateNC(TSNC_WaitUntilGCFinished);
if (status == (DWORD)COR_E_STACKOVERFLOW)
@@ -3359,7 +3359,7 @@ void Thread::RareDisablePreemptiveGC()
// 1. GC is suspending the process. GC needs to wait.
// 2. GC is proceeding after suspension. The current thread needs to spin.
SetThreadState(TS_BlockGCForSO);
- while (GCHeap::IsGCInProgress() && m_fPreemptiveGCDisabled.Load() == 0)
+ while (GCHeapUtilities::IsGCInProgress() && m_fPreemptiveGCDisabled.Load() == 0)
{
#undef Sleep
// We can not go to a host for blocking operation due ot lack of stack.
@@ -3376,7 +3376,7 @@ void Thread::RareDisablePreemptiveGC()
break;
}
}
- if (!GCHeap::IsGCInProgress())
+ if (!GCHeapUtilities::IsGCInProgress())
{
if (HasThreadState(TS_StackCrawlNeeded))
{
@@ -3411,7 +3411,7 @@ void Thread::RareDisablePreemptiveGC()
// thread while in this loop. This happens if you use the COM+
// debugger to suspend this thread and then release it.
- } while ((GCHeap::IsGCInProgress() && (this != ThreadSuspend::GetSuspensionThread())) ||
+ } while ((GCHeapUtilities::IsGCInProgress() && (this != ThreadSuspend::GetSuspensionThread())) ||
(m_State & (TS_UserSuspendPending | TS_DebugSuspendPending | TS_StackCrawlNeeded)));
}
STRESS_LOG0(LF_SYNC, LL_INFO1000, "RareDisablePreemptiveGC: leaving\n");
@@ -3705,7 +3705,7 @@ void Thread::PerformPreemptiveGC()
if (!GCStressPolicy::IsEnabled() || !GCStress<cfg_transition>::IsEnabled())
return;
- if (!GCHeap::IsGCHeapInitialized())
+ if (!GCHeapUtilities::IsGCHeapInitialized())
return;
if (!m_GCOnTransitionsOK
@@ -3713,8 +3713,8 @@ void Thread::PerformPreemptiveGC()
|| RawGCNoTrigger()
#endif
|| g_fEEShutDown
- || GCHeap::IsGCInProgress(TRUE)
- || GCHeap::GetGCHeap()->GetGcCount() == 0 // Need something that works for isolated heap.
+ || GCHeapUtilities::IsGCInProgress(TRUE)
+ || GCHeapUtilities::GetGCHeap()->GetGcCount() == 0 // Need something that works for isolated heap.
|| ThreadStore::HoldingThreadStore())
return;
@@ -3738,7 +3738,7 @@ void Thread::PerformPreemptiveGC()
{
GCX_COOP();
m_bGCStressing = TRUE;
- GCHeap::GetGCHeap()->StressHeap();
+ GCHeapUtilities::GetGCHeap()->StressHeap();
m_bGCStressing = FALSE;
}
m_GCOnTransitionsOK = TRUE;
@@ -4846,7 +4846,7 @@ HRESULT ThreadSuspend::SuspendRuntime(ThreadSuspend::SUSPEND_REASON reason)
// Caller is expected to be holding the ThreadStore lock. Also, caller must
// have set GcInProgress before coming here, or things will break;
_ASSERTE(ThreadStore::HoldingThreadStore() || IsAtProcessExit());
- _ASSERTE(GCHeap::IsGCInProgress() );
+ _ASSERTE(GCHeapUtilities::IsGCInProgress() );
STRESS_LOG1(LF_SYNC, LL_INFO1000, "Thread::SuspendRuntime(reason=0x%x)\n", reason);
@@ -5547,7 +5547,7 @@ void ThreadSuspend::ResumeRuntime(BOOL bFinishedGC, BOOL SuspendSucceded)
// reset GcInProgress, or threads will continue to suspend themselves and won't
// be resumed until the next GC.
_ASSERTE(IsGCSpecialThread() || ThreadStore::HoldingThreadStore());
- _ASSERTE(!GCHeap::IsGCInProgress() );
+ _ASSERTE(!GCHeapUtilities::IsGCInProgress() );
STRESS_LOG2(LF_SYNC, LL_INFO1000, "Thread::ResumeRuntime(finishedGC=%d, SuspendSucceeded=%d) - Start\n", bFinishedGC, SuspendSucceded);
@@ -5564,7 +5564,7 @@ void ThreadSuspend::ResumeRuntime(BOOL bFinishedGC, BOOL SuspendSucceded)
{
// If we the suspension was for a GC, tell the host what generation GC.
DWORD Generation = (bFinishedGC
- ? GCHeap::GetGCHeap()->GetCondemnedGeneration()
+ ? GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration()
: ~0U);
pGCThreadControl->SuspensionEnding(Generation);
@@ -5574,7 +5574,7 @@ void ThreadSuspend::ResumeRuntime(BOOL bFinishedGC, BOOL SuspendSucceded)
{
// If we the suspension was for a GC, tell the host what generation GC.
DWORD Generation = (bFinishedGC
- ? GCHeap::GetGCHeap()->GetCondemnedGeneration()
+ ? GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration()
: ~0U);
BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
@@ -7898,7 +7898,7 @@ void ThreadSuspend::RestartEE(BOOL bFinishedGC, BOOL SuspendSucceded)
// Revert to being a normal thread
//
ClrFlsClearThreadType (ThreadType_DynamicSuspendEE);
- GCHeap::GetGCHeap()->SetGCInProgress(FALSE);
+ GCHeapUtilities::GetGCHeap()->SetGCInProgress(FALSE);
//
// Allow threads to enter COOP mode (though we still need to wake the ones
@@ -7906,7 +7906,7 @@ void ThreadSuspend::RestartEE(BOOL bFinishedGC, BOOL SuspendSucceded)
//
// Note: this is the last barrier that keeps managed threads
// from entering cooperative mode. If the sequence changes,
- // you may have to change routine GCHeap::SafeToRestartManagedThreads
+ // you may have to change routine GCHeapUtilities::SafeToRestartManagedThreads
// as well.
//
ThreadStore::TrapReturningThreads(FALSE);
@@ -7915,7 +7915,7 @@ void ThreadSuspend::RestartEE(BOOL bFinishedGC, BOOL SuspendSucceded)
//
// Any threads that are waiting in WaitUntilGCComplete will continue now.
//
- GCHeap::GetGCHeap()->GetWaitForGCEvent()->Set();
+ GCHeapUtilities::GetGCHeap()->GetWaitForGCEvent()->Set();
_ASSERTE(IsGCSpecialThread() || ThreadStore::HoldingThreadStore());
ResumeRuntime(bFinishedGC, SuspendSucceded);
@@ -7964,7 +7964,7 @@ void ThreadSuspend::SuspendEE(SUSPEND_REASON reason)
ETW::GCLog::ETW_GC_INFO Info;
Info.SuspendEE.Reason = reason;
Info.SuspendEE.GcCount = (((reason == SUSPEND_FOR_GC) || (reason == SUSPEND_FOR_GC_PREP)) ?
- (ULONG)GCHeap::GetGCHeap()->GetGcCount() : (ULONG)-1);
+ (ULONG)GCHeapUtilities::GetGCHeap()->GetGcCount() : (ULONG)-1);
FireEtwGCSuspendEEBegin_V1(Info.SuspendEE.Reason, Info.SuspendEE.GcCount, GetClrInstanceId());
@@ -8041,7 +8041,7 @@ retry_for_debugger:
//
// First, we reset the event that we're about to tell other threads to wait for.
//
- GCHeap::GetGCHeap()->GetWaitForGCEvent()->Reset();
+ GCHeapUtilities::GetGCHeap()->GetWaitForGCEvent()->Reset();
//
// Remember that we're the one doing the GC. Actually, maybe we're not doing a GC -
@@ -8066,7 +8066,7 @@ retry_for_debugger:
// It seems like much of the above is redundant. We should investigate reducing the number
// of mechanisms we use to indicate that a suspension is in progress.
//
- GCHeap::GetGCHeap()->SetGCInProgress(TRUE);
+ GCHeapUtilities::GetGCHeap()->SetGCInProgress(TRUE);
//
// Gratuitous memory barrier. (may be needed - but I'm not sure why.)
@@ -8357,7 +8357,7 @@ void ThreadSuspend::Initialize()
BOOL Debug_IsLockedViaThreadSuspension()
{
LIMITED_METHOD_CONTRACT;
- return GCHeap::IsGCInProgress() &&
+ return GCHeapUtilities::IsGCInProgress() &&
(dbgOnly_IsSpecialEEThread() ||
IsGCSpecialThread() ||
GetThread() == ThreadSuspend::GetSuspensionThread());
@@ -8485,7 +8485,7 @@ void SuspendStatistics::EndSuspend(BOOL bForGC)
// details on suspends...
if (!bForGC)
cntNonGCSuspends++;
- if (GCHeap::GetGCHeap()->IsConcurrentGCInProgress())
+ if (GCHeapUtilities::GetGCHeap()->IsConcurrentGCInProgress())
{
cntSuspendsInBGC++;
if (!bForGC)
diff --git a/src/vm/vars.hpp b/src/vm/vars.hpp
index d197e0559d..4148725769 100644
--- a/src/vm/vars.hpp
+++ b/src/vm/vars.hpp
@@ -81,7 +81,7 @@ typedef unsigned short wchar_t;
class ClassLoader;
class LoaderHeap;
-class GCHeap;
+class IGCHeap;
class Object;
class StringObject;
class TransparentProxyObject;
diff --git a/src/vm/win32threadpool.cpp b/src/vm/win32threadpool.cpp
index 1121417492..1b4a8690e0 100644
--- a/src/vm/win32threadpool.cpp
+++ b/src/vm/win32threadpool.cpp
@@ -2367,11 +2367,11 @@ Work:
counts = oldCounts;
}
- if (GCHeap::IsGCInProgress(TRUE))
+ if (GCHeapUtilities::IsGCInProgress(TRUE))
{
// GC is imminent, so wait until GC is complete before executing next request.
// this reduces in-flight objects allocated right before GC, easing the GC's work
- GCHeap::WaitForGCCompletion(TRUE);
+ GCHeapUtilities::WaitForGCCompletion(TRUE);
}
{
@@ -3986,7 +3986,7 @@ Top:
if (key != 0)
{
- if (GCHeap::IsGCInProgress(TRUE))
+ if (GCHeapUtilities::IsGCInProgress(TRUE))
{
//Indicate that this thread is free, and waiting on GC, not doing any user work.
//This helps in threads not getting injected when some threads have woken up from the
@@ -4003,7 +4003,7 @@ Top:
// GC is imminent, so wait until GC is complete before executing next request.
// this reduces in-flight objects allocated right before GC, easing the GC's work
- GCHeap::WaitForGCCompletion(TRUE);
+ GCHeapUtilities::WaitForGCCompletion(TRUE);
while (true)
{
@@ -4217,7 +4217,7 @@ BOOL ThreadpoolMgr::ShouldGrowCompletionPortThreadpool(ThreadCounter::Counts cou
if (counts.NumWorking >= counts.NumActive
&& NumCPInfrastructureThreads == 0
- && (counts.NumActive == 0 || !GCHeap::IsGCInProgress(TRUE))
+ && (counts.NumActive == 0 || !GCHeapUtilities::IsGCInProgress(TRUE))
)
{
// adjust limit if neeeded
@@ -4618,7 +4618,7 @@ DWORD __stdcall ThreadpoolMgr::GateThreadStart(LPVOID lpArgs)
EX_END_CATCH(SwallowAllExceptions);
}
- if (!GCHeap::IsGCInProgress(FALSE) )
+ if (!GCHeapUtilities::IsGCInProgress(FALSE) )
{
if (IgnoreNextSample)
{
@@ -4660,7 +4660,7 @@ DWORD __stdcall ThreadpoolMgr::GateThreadStart(LPVOID lpArgs)
oldCounts.NumActive < MaxLimitTotalCPThreads &&
!g_fCompletionPortDrainNeeded &&
NumCPInfrastructureThreads == 0 && // infrastructure threads count as "to be free as needed"
- !GCHeap::IsGCInProgress(TRUE))
+ !GCHeapUtilities::IsGCInProgress(TRUE))
{
BOOL status;