summaryrefslogtreecommitdiff
path: root/src/gc
diff options
context:
space:
mode:
authorDavid Mason <davmason@microsoft.com>2018-08-25 12:24:35 -0700
committerGitHub <noreply@github.com>2018-08-25 12:24:35 -0700
commita1404cafbe1cd15a5c0cfbd4995ad97694b708bd (patch)
tree33728689499c098f0c4eef47cb22ef4efbc38721 /src/gc
parent3fca50a36e62a7433d7601d805d38de6baee7951 (diff)
downloadcoreclr-a1404cafbe1cd15a5c0cfbd4995ad97694b708bd.tar.gz
coreclr-a1404cafbe1cd15a5c0cfbd4995ad97694b708bd.tar.bz2
coreclr-a1404cafbe1cd15a5c0cfbd4995ad97694b708bd.zip
Remove NumaNodeInfo, CPUGroupInfo, AppDomain, SystemDomain, and EEConfig stubs from local gc (#19500)
* Switch NumaNodeInfo and CPUGroupInfo to the interface * Remove AppDomain/SystemDomain stubs * remove EEConfig methods * Port numa code to the coreclr side * add numa back to PAL and standalone builds * enable numa for PAL/Standalone builds, and fix BOOL warnings * remove unused defines, and fix linux build errors * building on windows * about to delete numa work from unix and want a backup * add stubs for unix numa/cpugroup * Code review feedback * Code review feedback
Diffstat (limited to 'src/gc')
-rw-r--r--src/gc/env/gcenv.base.h66
-rw-r--r--src/gc/env/gcenv.ee.h9
-rw-r--r--src/gc/env/gcenv.os.h17
-rw-r--r--src/gc/env/gcenv.structs.h6
-rw-r--r--src/gc/gc.cpp42
-rw-r--r--src/gc/gcconfig.h2
-rw-r--r--src/gc/gcenv.ee.standalone.inl38
-rw-r--r--src/gc/gcimpl.h2
-rw-r--r--src/gc/gcinterface.ee.h20
-rw-r--r--src/gc/gcinterface.h2
-rw-r--r--src/gc/gcload.cpp1
-rw-r--r--src/gc/gcpriv.h4
-rw-r--r--src/gc/handletable.cpp27
-rw-r--r--src/gc/handletablecore.cpp2
-rw-r--r--src/gc/objecthandle.cpp5
-rw-r--r--src/gc/sample/gcenv.ee.cpp32
-rw-r--r--src/gc/sample/gcenv.h24
-rw-r--r--src/gc/unix/CMakeLists.txt3
-rw-r--r--src/gc/unix/config.h.in2
-rw-r--r--src/gc/unix/configure.cmake2
-rw-r--r--src/gc/unix/gcenv.unix.cpp23
-rw-r--r--src/gc/windows/gcenv.windows.cpp265
22 files changed, 433 insertions, 161 deletions
diff --git a/src/gc/env/gcenv.base.h b/src/gc/env/gcenv.base.h
index 8693bbe449..15a81d77ae 100644
--- a/src/gc/env/gcenv.base.h
+++ b/src/gc/env/gcenv.base.h
@@ -489,70 +489,4 @@ struct ADIndex
BOOL operator!=(const ADIndex& ad) const { return m_dwIndex != ad.m_dwIndex; }
};
-class AppDomain
-{
-public:
- ADIndex GetIndex() { return ADIndex(RH_DEFAULT_DOMAIN_ID); }
- BOOL IsRudeUnload() { return FALSE; }
- BOOL NoAccessToHandleTable() { return FALSE; }
- void DecNumSizedRefHandles() {}
-};
-
-class SystemDomain
-{
-public:
- static SystemDomain *System() { return NULL; }
- static AppDomain *GetAppDomainAtIndex(ADIndex /*index*/) { return (AppDomain *)-1; }
- static AppDomain *AppDomainBeingUnloaded() { return NULL; }
- AppDomain *DefaultDomain() { return NULL; }
- DWORD GetTotalNumSizedRefHandles() { return 0; }
-};
-
-class NumaNodeInfo
-{
-public:
- static bool CanEnableGCNumaAware()
- {
- // [LOCALGC TODO] enable NUMA node support
- return false;
- }
-
- static void GetGroupForProcessor(uint16_t processor_number, uint16_t * group_number, uint16_t * group_processor_number)
- {
- // [LOCALGC TODO] enable NUMA node support
- assert(!"should not be called");
- }
-
- static bool GetNumaProcessorNodeEx(PPROCESSOR_NUMBER proc_no, uint16_t * node_no)
- {
- // [LOCALGC TODO] enable NUMA node support
- assert(!"should not be called");
- return false;
- }
-};
-
-class CPUGroupInfo
-{
-public:
- static bool CanEnableGCCPUGroups()
- {
- // [LOCALGC TODO] enable CPU group support
- return false;
- }
-
- static uint32_t GetNumActiveProcessors()
- {
- // [LOCALGC TODO] enable CPU group support
- assert(!"should not be called");
- return 0;
- }
-
- static void GetGroupForProcessor(uint16_t processor_number, uint16_t * group_number, uint16_t * group_processor_number)
- {
- // [LOCALGC TODO] enable CPU group support
- assert(!"should not be called");
- }
-};
-
-
#endif // __GCENV_BASE_INCLUDED__
diff --git a/src/gc/env/gcenv.ee.h b/src/gc/env/gcenv.ee.h
index ec79877867..ebe3046b17 100644
--- a/src/gc/env/gcenv.ee.h
+++ b/src/gc/env/gcenv.ee.h
@@ -71,7 +71,7 @@ public:
static void EnableFinalization(bool foundFinalizers);
static void HandleFatalError(unsigned int exitCode);
- static bool ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj);
+ static bool ShouldFinalizeObjectForUnload(void* pDomain, Object* obj);
static bool ForceFullGCToBeBlocking();
static bool EagerFinalized(Object* obj);
static MethodTable* GetFreeObjectMethodTable();
@@ -85,6 +85,13 @@ public:
static void WalkAsyncPinnedForPromotion(Object* object, ScanContext* sc, promote_func* callback);
static void WalkAsyncPinned(Object* object, void* context, void(*callback)(Object*, Object*, void*));
static IGCToCLREventSink* EventSink();
+
+ static uint32_t GetDefaultDomainIndex();
+ static void *GetAppDomainAtIndex(uint32_t appDomainIndex);
+ static bool AppDomainCanAccessHandleTable(uint32_t appDomainID);
+ static uint32_t GetIndexOfAppDomainBeingUnloaded();
+ static uint32_t GetTotalNumSizedRefHandles();
+    static bool AppDomainIsRudeUnload(void *appDomain);
};
#endif // __GCENV_EE_H__
diff --git a/src/gc/env/gcenv.os.h b/src/gc/env/gcenv.os.h
index 41e46f8f0f..35515de280 100644
--- a/src/gc/env/gcenv.os.h
+++ b/src/gc/env/gcenv.os.h
@@ -18,6 +18,8 @@
#undef Sleep
#endif // Sleep
+#define NUMA_NODE_UNDEFINED UINT32_MAX
+
// Critical section used by the GC
class CLRCriticalSection
{
@@ -194,7 +196,7 @@ public:
// size - size of the virtual memory range
// Return:
// true if it has succeeded, false if it has failed
- static bool VirtualCommit(void *address, size_t size);
+ static bool VirtualCommit(void *address, size_t size, uint32_t node = NUMA_NODE_UNDEFINED);
// Decomit virtual memory range.
// Parameters:
@@ -391,6 +393,19 @@ public:
// Return:
// Number of processors on the machine
static uint32_t GetTotalProcessorCount();
+
+ // Is NUMA support available
+ static bool CanEnableGCNumaAware();
+
+ // Gets the NUMA node for the processor
+ static bool GetNumaProcessorNode(PPROCESSOR_NUMBER proc_no, uint16_t *node_no);
+
+ // Are CPU groups enabled
+ static bool CanEnableGCCPUGroups();
+
+ // Get the CPU group for the specified processor
+ static void GetGroupForProcessor(uint16_t processor_number, uint16_t* group_number, uint16_t* group_processor_number);
+
};
#endif // __GCENV_OS_H__
diff --git a/src/gc/env/gcenv.structs.h b/src/gc/env/gcenv.structs.h
index bb503e36e8..4f51ad0d9e 100644
--- a/src/gc/env/gcenv.structs.h
+++ b/src/gc/env/gcenv.structs.h
@@ -9,9 +9,9 @@
struct GCSystemInfo
{
- uint32_t dwNumberOfProcessors;
- uint32_t dwPageSize;
- uint32_t dwAllocationGranularity;
+ uint32_t dwNumberOfProcessors;
+ uint32_t dwPageSize;
+ uint32_t dwAllocationGranularity;
};
typedef void * HANDLE;
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index 31715cb569..edd7d07de0 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -5066,7 +5066,7 @@ public:
//can not enable gc numa aware, force all heaps to be in
//one numa node by filling the array with all 0s
- if (!NumaNodeInfo::CanEnableGCNumaAware())
+ if (!GCToOSInterface::CanEnableGCNumaAware())
memset(heap_no_to_numa_node, 0, sizeof (heap_no_to_numa_node));
return TRUE;
@@ -5262,7 +5262,7 @@ void set_thread_group_affinity_for_heap(int heap_number, GCThreadAffinity* affin
affinity->Processor = GCThreadAffinity::None;
uint16_t gn, gpn;
- CPUGroupInfo::GetGroupForProcessor((uint16_t)heap_number, &gn, &gpn);
+ GCToOSInterface::GetGroupForProcessor((uint16_t)heap_number, &gn, &gpn);
int bit_number = 0;
for (uintptr_t mask = 1; mask !=0; mask <<=1)
@@ -5274,7 +5274,7 @@ void set_thread_group_affinity_for_heap(int heap_number, GCThreadAffinity* affin
affinity->Group = gn;
heap_select::set_cpu_group_for_heap(heap_number, gn);
heap_select::set_group_proc_for_heap(heap_number, gpn);
- if (NumaNodeInfo::CanEnableGCNumaAware())
+ if (GCToOSInterface::CanEnableGCNumaAware())
{
PROCESSOR_NUMBER proc_no;
proc_no.Group = gn;
@@ -5282,7 +5282,7 @@ void set_thread_group_affinity_for_heap(int heap_number, GCThreadAffinity* affin
proc_no.Reserved = 0;
uint16_t node_no = 0;
- if (NumaNodeInfo::GetNumaProcessorNodeEx(&proc_no, &node_no))
+ if (GCToOSInterface::GetNumaProcessorNode(&proc_no, &node_no))
heap_select::set_numa_node_for_heap(heap_number, node_no);
}
else
@@ -5315,14 +5315,14 @@ void set_thread_affinity_mask_for_heap(int heap_number, GCThreadAffinity* affini
dprintf (3, ("Using processor %d for heap %d", proc_number, heap_number));
affinity->Processor = proc_number;
heap_select::set_proc_no_for_heap(heap_number, proc_number);
- if (NumaNodeInfo::CanEnableGCNumaAware())
+ if (GCToOSInterface::CanEnableGCNumaAware())
{
uint16_t node_no = 0;
PROCESSOR_NUMBER proc_no;
proc_no.Group = 0;
proc_no.Number = (uint8_t)proc_number;
proc_no.Reserved = 0;
- if (NumaNodeInfo::GetNumaProcessorNodeEx(&proc_no, &node_no))
+ if (GCToOSInterface::GetNumaProcessorNode(&proc_no, &node_no))
{
heap_select::set_numa_node_for_heap(heap_number, node_no);
}
@@ -5457,19 +5457,17 @@ void gc_heap::gc_thread_function ()
bool virtual_alloc_commit_for_heap(void* addr, size_t size, int h_number)
{
-#if defined(MULTIPLE_HEAPS) && !defined(FEATURE_REDHAWK) && !defined(FEATURE_PAL) && !defined(BUILD_AS_STANDALONE)
+#if defined(MULTIPLE_HEAPS) && !defined(FEATURE_REDHAWK)
// Currently there is no way for us to specific the numa node to allocate on via hosting interfaces to
// a host. This will need to be added later.
#if !defined(FEATURE_CORECLR)
if (!CLRMemoryHosted())
#endif
{
- if (NumaNodeInfo::CanEnableGCNumaAware())
+ if (GCToOSInterface::CanEnableGCNumaAware())
{
uint32_t numa_node = heap_select::find_numa_node_from_heap_no(h_number);
- void * ret = NumaNodeInfo::VirtualAllocExNuma(GetCurrentProcess(), addr, size,
- MEM_COMMIT, PAGE_READWRITE, numa_node);
- if (ret != NULL)
+ if (GCToOSInterface::VirtualCommit(addr, size, numa_node))
return true;
}
}
@@ -13343,7 +13341,7 @@ try_again:
org_hp->alloc_context_count--;
max_hp->alloc_context_count++;
acontext->set_alloc_heap(GCHeap::GetHeap(max_hp->heap_number));
- if (CPUGroupInfo::CanEnableGCCPUGroups())
+ if (GCToOSInterface::CanEnableGCCPUGroups())
{ //only set ideal processor when max_hp and org_hp are in the same cpu
//group. DO NOT MOVE THREADS ACROSS CPU GROUPS
uint16_t org_gn = heap_select::find_cpu_group_from_heap_no(org_hp->heap_number);
@@ -19548,7 +19546,7 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
{
#endif //MULTIPLE_HEAPS
- num_sizedrefs = SystemDomain::System()->GetTotalNumSizedRefHandles();
+ num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles();
#ifdef MULTIPLE_HEAPS
@@ -24914,7 +24912,7 @@ void gc_heap::gc_thread_stub (void* arg)
// We are about to set affinity for GC threads. It is a good place to set up NUMA and
// CPU groups because the process mask, processor number, and group number are all
// readily available.
- if (CPUGroupInfo::CanEnableGCCPUGroups())
+ if (GCToOSInterface::CanEnableGCCPUGroups())
set_thread_group_affinity_for_heap(heap->heap_number, &affinity);
else
set_thread_affinity_mask_for_heap(heap->heap_number, &affinity);
@@ -25707,7 +25705,7 @@ void gc_heap::background_mark_phase ()
#endif //WRITE_WATCH
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
- num_sizedrefs = SystemDomain::System()->GetTotalNumSizedRefHandles();
+ num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles();
// this c_write is not really necessary because restart_vm
// has an instruction that will flush the cpu cache (interlocked
@@ -33498,10 +33496,8 @@ HRESULT GCHeap::Initialize ()
gc_heap::gc_thread_no_affinitize_p = true;
uint32_t nhp_from_config = static_cast<uint32_t>(GCConfig::GetHeapCount());
- // GetGCProcessCpuCount only returns up to 64 procs.
- uint32_t nhp_from_process = CPUGroupInfo::CanEnableGCCPUGroups() ?
- CPUGroupInfo::GetNumActiveProcessors():
- GCToOSInterface::GetCurrentProcessCpuCount();
+
+ uint32_t nhp_from_process = GCToOSInterface::GetCurrentProcessCpuCount();
uint32_t nhp = ((nhp_from_config == 0) ? nhp_from_process :
(min (nhp_from_config, nhp_from_process)));
@@ -35615,7 +35611,7 @@ size_t GCHeap::GetFinalizablePromotedCount()
#endif //MULTIPLE_HEAPS
}
-bool GCHeap::FinalizeAppDomain(AppDomain *pDomain, bool fRunFinalizers)
+bool GCHeap::FinalizeAppDomain(void *pDomain, bool fRunFinalizers)
{
#ifdef MULTIPLE_HEAPS
bool foundp = false;
@@ -35937,7 +35933,7 @@ CFinalize::GetNumberFinalizableObjects()
}
BOOL
-CFinalize::FinalizeSegForAppDomain (AppDomain *pDomain,
+CFinalize::FinalizeSegForAppDomain (void *pDomain,
BOOL fRunFinalizers,
unsigned int Seg)
{
@@ -35980,7 +35976,7 @@ CFinalize::FinalizeSegForAppDomain (AppDomain *pDomain,
}
else
{
- if (pDomain->IsRudeUnload())
+ if (GCToEEInterface::AppDomainIsRudeUnload(pDomain))
{
MoveItem (i, Seg, FreeList);
}
@@ -35997,7 +35993,7 @@ CFinalize::FinalizeSegForAppDomain (AppDomain *pDomain,
}
bool
-CFinalize::FinalizeAppDomain (AppDomain *pDomain, bool fRunFinalizers)
+CFinalize::FinalizeAppDomain (void *pDomain, bool fRunFinalizers)
{
bool finalizedFound = false;
diff --git a/src/gc/gcconfig.h b/src/gc/gcconfig.h
index 811e6f9e4f..ea44a09a5c 100644
--- a/src/gc/gcconfig.h
+++ b/src/gc/gcconfig.h
@@ -72,6 +72,8 @@ public:
"Specifies if you want to turn on logging in GC") \
BOOL_CONFIG(ConfigLogEnabled, "GCConfigLogEnabled", false, \
"Specifies the name of the GC config log file") \
+ BOOL_CONFIG(GCNumaAware, "GCNumaAware", true, "Enables numa allocations in the GC") \
+ BOOL_CONFIG(GCCpuGroup, "GCCpuGroup", false, "Enables CPU groups in the GC") \
INT_CONFIG(HeapVerifyLevel, "HeapVerify", HEAPVERIFY_NONE, \
"When set verifies the integrity of the managed heap on entry and exit of each GC") \
INT_CONFIG(LOHCompactionMode, "GCLOHCompact", 0, "Specifies the LOH compaction mode") \
diff --git a/src/gc/gcenv.ee.standalone.inl b/src/gc/gcenv.ee.standalone.inl
index 4fc8ca6a02..1aca1dc52c 100644
--- a/src/gc/gcenv.ee.standalone.inl
+++ b/src/gc/gcenv.ee.standalone.inl
@@ -191,7 +191,7 @@ inline void GCToEEInterface::HandleFatalError(unsigned int exitCode)
g_theGCToCLR->HandleFatalError(exitCode);
}
-inline bool GCToEEInterface::ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj)
+inline bool GCToEEInterface::ShouldFinalizeObjectForUnload(void* pDomain, Object* obj)
{
assert(g_theGCToCLR != nullptr);
return g_theGCToCLR->ShouldFinalizeObjectForUnload(pDomain, obj);
@@ -275,4 +275,40 @@ inline IGCToCLREventSink* GCToEEInterface::EventSink()
return g_theGCToCLR->EventSink();
}
+inline uint32_t GCToEEInterface::GetDefaultDomainIndex()
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->GetDefaultDomainIndex();
+}
+
+inline void *GCToEEInterface::GetAppDomainAtIndex(uint32_t appDomainIndex)
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->GetAppDomainAtIndex(appDomainIndex);
+}
+
+inline bool GCToEEInterface::AppDomainCanAccessHandleTable(uint32_t appDomainID)
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->AppDomainCanAccessHandleTable(appDomainID);
+}
+
+inline uint32_t GCToEEInterface::GetIndexOfAppDomainBeingUnloaded()
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->GetIndexOfAppDomainBeingUnloaded();
+}
+
+inline uint32_t GCToEEInterface::GetTotalNumSizedRefHandles()
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->GetTotalNumSizedRefHandles();
+}
+
+inline bool GCToEEInterface::AppDomainIsRudeUnload(void *appDomain)
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->AppDomainIsRudeUnload(appDomain);
+}
+
#endif // __GCTOENV_EE_STANDALONE_INL__
diff --git a/src/gc/gcimpl.h b/src/gc/gcimpl.h
index 67f906a2d6..fe859641ec 100644
--- a/src/gc/gcimpl.h
+++ b/src/gc/gcimpl.h
@@ -209,7 +209,7 @@ public:
PER_HEAP_ISOLATED size_t GetFinalizablePromotedCount();
void SetFinalizeQueueForShutdown(bool fHasLock);
- bool FinalizeAppDomain(AppDomain *pDomain, bool fRunFinalizers);
+ bool FinalizeAppDomain(void *pDomain, bool fRunFinalizers);
bool ShouldRestartFinalizerWatchDog();
void DiagWalkObject (Object* obj, walk_fn fn, void* context);
diff --git a/src/gc/gcinterface.ee.h b/src/gc/gcinterface.ee.h
index 82d89345f5..ae887e6f55 100644
--- a/src/gc/gcinterface.ee.h
+++ b/src/gc/gcinterface.ee.h
@@ -319,7 +319,7 @@ public:
// Asks the EE if it wants a particular object to be finalized when unloading
// an app domain.
virtual
- bool ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj) = 0;
+ bool ShouldFinalizeObjectForUnload(void* pDomain, Object* obj) = 0;
// Offers the EE the option to finalize the given object eagerly, i.e.
// not on the finalizer thread but on the current thread. The
@@ -409,6 +409,24 @@ public:
// Returns an IGCToCLREventSink instance that can be used to fire events.
virtual
IGCToCLREventSink* EventSink() = 0;
+
+ virtual
+ uint32_t GetDefaultDomainIndex() = 0;
+
+ virtual
+ void *GetAppDomainAtIndex(uint32_t appDomainIndex) = 0;
+
+ virtual
+ uint32_t GetIndexOfAppDomainBeingUnloaded() = 0;
+
+ virtual
+ bool AppDomainCanAccessHandleTable(uint32_t appDomainID) = 0;
+
+ virtual
+ uint32_t GetTotalNumSizedRefHandles() = 0;
+
+ virtual
+ bool AppDomainIsRudeUnload(void *appDomain) = 0;
};
#endif // _GCINTERFACE_EE_H_
diff --git a/src/gc/gcinterface.h b/src/gc/gcinterface.h
index 58482b8c89..55c755d0e1 100644
--- a/src/gc/gcinterface.h
+++ b/src/gc/gcinterface.h
@@ -581,7 +581,7 @@ public:
*/
// Finalizes an app domain by finalizing objects within that app domain.
- virtual bool FinalizeAppDomain(AppDomain* pDomain, bool fRunFinalizers) = 0;
+ virtual bool FinalizeAppDomain(void* pDomain, bool fRunFinalizers) = 0;
// Finalizes all registered objects for shutdown, even if they are still reachable.
virtual void SetFinalizeQueueForShutdown(bool fHasLock) = 0;
diff --git a/src/gc/gcload.cpp b/src/gc/gcload.cpp
index 21eedb250f..2d157c843e 100644
--- a/src/gc/gcload.cpp
+++ b/src/gc/gcload.cpp
@@ -72,6 +72,7 @@ GC_Initialize(
// Initialize GCConfig before anything else - initialization of our
// various components may want to query the current configuration.
GCConfig::Initialize();
+
if (!GCToOSInterface::Initialize())
{
return E_FAIL;
diff --git a/src/gc/gcpriv.h b/src/gc/gcpriv.h
index 0cb72ec35c..c2f7356fce 100644
--- a/src/gc/gcpriv.h
+++ b/src/gc/gcpriv.h
@@ -3750,7 +3750,7 @@ private:
}
- BOOL FinalizeSegForAppDomain (AppDomain *pDomain,
+ BOOL FinalizeSegForAppDomain (void *pDomain,
BOOL fRunFinalizers,
unsigned int Seg);
@@ -3774,7 +3774,7 @@ public:
void DiscardNonCriticalObjects();
//Methods used by the app domain unloading call to finalize objects in an app domain
- bool FinalizeAppDomain (AppDomain *pDomain, bool fRunFinalizers);
+ bool FinalizeAppDomain (void *pDomain, bool fRunFinalizers);
void CheckFinalizerObjects();
diff --git a/src/gc/handletable.cpp b/src/gc/handletable.cpp
index 0c05715e53..13fb1964eb 100644
--- a/src/gc/handletable.cpp
+++ b/src/gc/handletable.cpp
@@ -363,11 +363,9 @@ void ValidateFetchObjrefForHandle(OBJECTREF objref, ADIndex appDomainIndex)
BEGIN_DEBUG_ONLY_CODE;
VALIDATEOBJECTREF (objref);
- AppDomain *pDomain = SystemDomain::GetAppDomainAtIndex(appDomainIndex);
-
- // Access to a handle in unloaded domain is not allowed
- _ASSERTE(pDomain != NULL);
- _ASSERTE(!pDomain->NoAccessToHandleTable());
+#ifndef DACCESS_COMPILE
+ _ASSERTE(GCToEEInterface::AppDomainCanAccessHandleTable(appDomainIndex.m_dwIndex));
+#endif // DACCESS_COMPILE
END_DEBUG_ONLY_CODE;
}
@@ -384,12 +382,9 @@ void ValidateAssignObjrefForHandle(OBJECTREF objref, ADIndex appDomainIndex)
VALIDATEOBJECTREF (objref);
- AppDomain *pDomain = SystemDomain::GetAppDomainAtIndex(appDomainIndex);
-
- // Access to a handle in unloaded domain is not allowed
- _ASSERTE(pDomain != NULL);
- _ASSERTE(!pDomain->NoAccessToHandleTable());
-
+#ifndef DACCESS_COMPILE
+ _ASSERTE(GCToEEInterface::AppDomainCanAccessHandleTable(appDomainIndex.m_dwIndex));
+#endif // DACCESS_COMPILE
END_DEBUG_ONLY_CODE;
}
@@ -407,12 +402,12 @@ void ValidateAppDomainForHandle(OBJECTHANDLE handle)
#else
BEGIN_DEBUG_ONLY_CODE;
ADIndex id = HndGetHandleADIndex(handle);
- AppDomain *pUnloadingDomain = SystemDomain::AppDomainBeingUnloaded();
- if (!pUnloadingDomain || pUnloadingDomain->GetIndex() != id)
+ ADIndex unloadingDomain(GCToEEInterface::GetIndexOfAppDomainBeingUnloaded());
+ if (unloadingDomain != id)
{
return;
}
- if (!pUnloadingDomain->NoAccessToHandleTable())
+ if (GCToEEInterface::AppDomainCanAccessHandleTable(unloadingDomain.m_dwIndex))
{
return;
}
@@ -604,7 +599,7 @@ void HndLogSetEvent(OBJECTHANDLE handle, _UNCHECKED_OBJECTREF value)
{
uint32_t hndType = HandleFetchType(handle);
ADIndex appDomainIndex = HndGetHandleADIndex(handle);
- AppDomain* pAppDomain = SystemDomain::GetAppDomainAtIndex(appDomainIndex);
+ void* pAppDomain = GCToEEInterface::GetAppDomainAtIndex(appDomainIndex.m_dwIndex);
uint32_t generation = value != 0 ? g_theGCHeap->WhichGeneration(value) : 0;
FIRE_EVENT(SetGCHandle, (void *)handle, (void *)value, hndType, generation, (uint64_t)pAppDomain);
FIRE_EVENT(PrvSetGCHandle, (void *) handle, (void *)value, hndType, generation, (uint64_t)pAppDomain);
@@ -620,7 +615,7 @@ void HndLogSetEvent(OBJECTHANDLE handle, _UNCHECKED_OBJECTREF value)
// to this structure as our closure's context pointer.
struct ClosureCapture
{
- AppDomain* pAppDomain;
+ void* pAppDomain;
Object* overlapped;
};
diff --git a/src/gc/handletablecore.cpp b/src/gc/handletablecore.cpp
index 8c0be427ed..01894381aa 100644
--- a/src/gc/handletablecore.cpp
+++ b/src/gc/handletablecore.cpp
@@ -1041,7 +1041,7 @@ void TableRelocateAsyncPinHandles(HandleTable *pTable,
}
CONTRACTL_END;
- _ASSERTE (pTargetTable->uADIndex == SystemDomain::System()->DefaultDomain()->GetIndex()); // must be for default domain
+ _ASSERTE (pTargetTable->uADIndex == ADIndex(GCToEEInterface::GetDefaultDomainIndex())); // must be for default domain
BOOL fGotException = FALSE;
TableSegment *pSegment = pTable->pSegmentList;
diff --git a/src/gc/objecthandle.cpp b/src/gc/objecthandle.cpp
index c2af23a916..09460d203f 100644
--- a/src/gc/objecthandle.cpp
+++ b/src/gc/objecthandle.cpp
@@ -541,12 +541,7 @@ int getNumberOfSlots()
if (!IsServerHeap())
return 1;
-#ifdef FEATURE_REDHAWK
return GCToOSInterface::GetCurrentProcessCpuCount();
-#else
- return (CPUGroupInfo::CanEnableGCCPUGroups() ? CPUGroupInfo::GetNumActiveProcessors() :
- GCToOSInterface::GetCurrentProcessCpuCount());
-#endif
}
class HandleTableBucketHolder
diff --git a/src/gc/sample/gcenv.ee.cpp b/src/gc/sample/gcenv.ee.cpp
index 0311c0e31b..a705ae2484 100644
--- a/src/gc/sample/gcenv.ee.cpp
+++ b/src/gc/sample/gcenv.ee.cpp
@@ -278,7 +278,7 @@ void GCToEEInterface::HandleFatalError(unsigned int exitCode)
abort();
}
-bool GCToEEInterface::ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj)
+bool GCToEEInterface::ShouldFinalizeObjectForUnload(void* pDomain, Object* obj)
{
return true;
}
@@ -341,3 +341,33 @@ void GCToEEInterface::WalkAsyncPinnedForPromotion(Object* object, ScanContext* s
void GCToEEInterface::WalkAsyncPinned(Object* object, void* context, void (*callback)(Object*, Object*, void*))
{
}
+
+uint32_t GCToEEInterface::GetDefaultDomainIndex()
+{
+ return -1;
+}
+
+void *GCToEEInterface::GetAppDomainAtIndex(uint32_t appDomainIndex)
+{
+ return nullptr;
+}
+
+bool GCToEEInterface::AppDomainCanAccessHandleTable(uint32_t appDomainID)
+{
+ return false;
+}
+
+uint32_t GCToEEInterface::GetIndexOfAppDomainBeingUnloaded()
+{
+ return -1;
+}
+
+uint32_t GCToEEInterface::GetTotalNumSizedRefHandles()
+{
+ return -1;
+}
+
+bool GCToEEInterface::AppDomainIsRudeUnload(void *appDomain)
+{
+ return false;
+}
diff --git a/src/gc/sample/gcenv.h b/src/gc/sample/gcenv.h
index 012ab44482..4dc2da0a89 100644
--- a/src/gc/sample/gcenv.h
+++ b/src/gc/sample/gcenv.h
@@ -167,32 +167,8 @@ public:
GCSTRESS_INSTR_NGEN = 8, // GC on every allowable NGEN instr
GCSTRESS_UNIQUE = 16, // GC only on a unique stack trace
};
-
- int GetHeapVerifyLevel() { return 0; }
- bool IsHeapVerifyEnabled() { return GetHeapVerifyLevel() != 0; }
-
- GCStressFlags GetGCStressLevel() const { return GCSTRESS_NONE; }
- bool IsGCStressMix() const { return false; }
-
- int GetGCtraceStart() const { return 0; }
- int GetGCtraceEnd() const { return 0; }//1000000000; }
- int GetGCtraceFac() const { return 0; }
- int GetGCprnLvl() const { return 0; }
- bool IsGCBreakOnOOMEnabled() const { return false; }
- int GetGCgen0size() const { return 0; }
- int GetSegmentSize() const { return 0; }
- int GetGCconcurrent() const { return 1; }
- int GetGCLatencyMode() const { return 1; }
- int GetGCForceCompact() const { return 0; }
- int GetGCRetainVM() const { return 0; }
- int GetGCTrimCommit() const { return 0; }
- int GetGCLOHCompactionMode() const { return 0; }
-
- bool GetGCConservative() const { return true; }
};
-extern EEConfig * g_pConfig;
-
#include "etmdummy.h"
#define ETW_EVENT_ENABLED(e,f) false
diff --git a/src/gc/unix/CMakeLists.txt b/src/gc/unix/CMakeLists.txt
index 10258108c6..fbb94fd513 100644
--- a/src/gc/unix/CMakeLists.txt
+++ b/src/gc/unix/CMakeLists.txt
@@ -7,6 +7,7 @@ include(configure.cmake)
set(GC_PAL_SOURCES
gcenv.unix.cpp
events.cpp
- cgroup.cpp)
+ cgroup.cpp
+ cpuinfo.cpp)
add_library(gc_unix STATIC ${GC_PAL_SOURCES} ${VERSION_FILE_PATH})
diff --git a/src/gc/unix/config.h.in b/src/gc/unix/config.h.in
index 3a56be9833..a4a59b663e 100644
--- a/src/gc/unix/config.h.in
+++ b/src/gc/unix/config.h.in
@@ -10,6 +10,8 @@
#cmakedefine01 HAVE_PTHREAD_THREADID_NP
#cmakedefine01 HAVE_PTHREAD_GETTHREADID_NP
#cmakedefine01 HAVE_SCHED_GETCPU
+#cmakedefine01 HAVE_NUMA_H
+#cmakedefine01 HAVE_VM_ALLOCATE
#cmakedefine01 HAVE_PTHREAD_CONDATTR_SETCLOCK
#cmakedefine01 HAVE_MACH_ABSOLUTE_TIME
#cmakedefine01 HAVE_SCHED_GETAFFINITY
diff --git a/src/gc/unix/configure.cmake b/src/gc/unix/configure.cmake
index b118232b35..c2d6afe483 100644
--- a/src/gc/unix/configure.cmake
+++ b/src/gc/unix/configure.cmake
@@ -1,5 +1,7 @@
check_include_files(sys/time.h HAVE_SYS_TIME_H)
check_include_files(sys/mman.h HAVE_SYS_MMAN_H)
+check_include_files(numa.h HAVE_NUMA_H)
+check_function_exists(vm_allocate HAVE_VM_ALLOCATE)
check_cxx_source_compiles("
#include <pthread.h>
#include <stdint.h>
diff --git a/src/gc/unix/gcenv.unix.cpp b/src/gc/unix/gcenv.unix.cpp
index a1e12961ad..23a4935eab 100644
--- a/src/gc/unix/gcenv.unix.cpp
+++ b/src/gc/unix/gcenv.unix.cpp
@@ -319,8 +319,9 @@ bool GCToOSInterface::VirtualRelease(void* address, size_t size)
// size - size of the virtual memory range
// Return:
// true if it has succeeded, false if it has failed
-bool GCToOSInterface::VirtualCommit(void* address, size_t size)
+bool GCToOSInterface::VirtualCommit(void* address, size_t size, uint32_t node)
{
+ assert(node == NUMA_NODE_UNDEFINED && "Numa allocation is not ported to local GC on unix yet");
return mprotect(address, size, PROT_WRITE | PROT_READ) == 0;
}
@@ -697,6 +698,26 @@ uint32_t GCToOSInterface::GetTotalProcessorCount()
return g_logicalCpuCount;
}
+bool GCToOSInterface::CanEnableGCNumaAware()
+{
+ return false;
+}
+
+bool GCToOSInterface::GetNumaProcessorNode(PPROCESSOR_NUMBER proc_no, uint16_t *node_no)
+{
+ assert(!"Numa has not been ported to local GC for unix");
+ return false;
+}
+
+bool GCToOSInterface::CanEnableGCCPUGroups()
+{
+ return false;
+}
+
+void GCToOSInterface::GetGroupForProcessor(uint16_t processor_number, uint16_t* group_number, uint16_t* group_processor_number)
+{
+ assert(!"CpuGroup has not been ported to local GC for unix");
+}
// Initialize the critical section
void CLRCriticalSection::Initialize()
diff --git a/src/gc/windows/gcenv.windows.cpp b/src/gc/windows/gcenv.windows.cpp
index 85bcd851a7..4be5ec5744 100644
--- a/src/gc/windows/gcenv.windows.cpp
+++ b/src/gc/windows/gcenv.windows.cpp
@@ -11,8 +11,10 @@
#include "env/gcenv.structs.h"
#include "env/gcenv.base.h"
#include "env/gcenv.os.h"
+#include "env/gcenv.ee.h"
#include "env/gcenv.windows.inl"
#include "env/volatile.h"
+#include "gcconfig.h"
GCSystemInfo g_SystemInfo;
@@ -30,6 +32,187 @@ typedef BOOL (WINAPI *PQUERY_INFORMATION_JOB_OBJECT)(HANDLE jobHandle, JOBOBJECT
namespace {
+static bool g_fEnableGCNumaAware;
+
+struct CPU_Group_Info
+{
+ WORD nr_active; // at most 64
+ WORD reserved[1];
+ WORD begin;
+ WORD end;
+ DWORD_PTR active_mask;
+ DWORD groupWeight;
+ DWORD activeThreadWeight;
+};
+
+static bool g_fEnableGCCPUGroups;
+static bool g_fHadSingleProcessorAtStartup;
+static DWORD g_nGroups;
+static DWORD g_nProcessors;
+static CPU_Group_Info *g_CPUGroupInfoArray;
+
+void InitNumaNodeInfo()
+{
+ ULONG highest = 0;
+
+ g_fEnableGCNumaAware = false;
+
+ if (!GCConfig::GetGCNumaAware())
+ return;
+
+ // fail to get the highest numa node number
+ if (!GetNumaHighestNodeNumber(&highest) || (highest == 0))
+ return;
+
+ g_fEnableGCNumaAware = true;
+ return;
+}
+
+#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_))
+// Calculate greatest common divisor
+DWORD GCD(DWORD u, DWORD v)
+{
+ while (v != 0)
+ {
+ DWORD dwTemp = v;
+ v = u % v;
+ u = dwTemp;
+ }
+
+ return u;
+}
+
+// Calculate least common multiple
+DWORD LCM(DWORD u, DWORD v)
+{
+ return u / GCD(u, v) * v;
+}
+#endif
+
+bool InitCPUGroupInfoArray()
+{
+#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_))
+ BYTE *bBuffer = NULL;
+ SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pSLPIEx = NULL;
+ SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pRecord = NULL;
+ DWORD cbSLPIEx = 0;
+ DWORD byteOffset = 0;
+ DWORD dwNumElements = 0;
+ DWORD dwWeight = 1;
+
+ if (GetLogicalProcessorInformationEx(RelationGroup, pSLPIEx, &cbSLPIEx) &&
+ GetLastError() != ERROR_INSUFFICIENT_BUFFER)
+ return false;
+
+ assert(cbSLPIEx);
+
+ // Fail to allocate buffer
+ bBuffer = new (std::nothrow) BYTE[ cbSLPIEx ];
+ if (bBuffer == NULL)
+ return false;
+
+ pSLPIEx = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *)bBuffer;
+ if (!GetLogicalProcessorInformationEx(RelationGroup, pSLPIEx, &cbSLPIEx))
+ {
+ delete[] bBuffer;
+ return false;
+ }
+
+ pRecord = pSLPIEx;
+ while (byteOffset < cbSLPIEx)
+ {
+ if (pRecord->Relationship == RelationGroup)
+ {
+ g_nGroups = pRecord->Group.ActiveGroupCount;
+ break;
+ }
+ byteOffset += pRecord->Size;
+ pRecord = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *)(bBuffer + byteOffset);
+ }
+
+ g_CPUGroupInfoArray = new (std::nothrow) CPU_Group_Info[g_nGroups];
+ if (g_CPUGroupInfoArray == NULL)
+ {
+ delete[] bBuffer;
+ return false;
+ }
+
+ for (DWORD i = 0; i < g_nGroups; i++)
+ {
+ g_CPUGroupInfoArray[i].nr_active = (WORD)pRecord->Group.GroupInfo[i].ActiveProcessorCount;
+ g_CPUGroupInfoArray[i].active_mask = pRecord->Group.GroupInfo[i].ActiveProcessorMask;
+ g_nProcessors += g_CPUGroupInfoArray[i].nr_active;
+ dwWeight = LCM(dwWeight, (DWORD)g_CPUGroupInfoArray[i].nr_active);
+ }
+
+ // The number of threads per group that can be supported will depend on the number of CPU groups
+ // and the number of LPs within each processor group. For example, when the number of LPs in
+ // CPU groups is the same and is 64, the number of threads per group before weight overflow
+ // would be 2^32/2^6 = 2^26 (64M threads)
+ for (DWORD i = 0; i < g_nGroups; i++)
+ {
+ g_CPUGroupInfoArray[i].groupWeight = dwWeight / (DWORD)g_CPUGroupInfoArray[i].nr_active;
+ g_CPUGroupInfoArray[i].activeThreadWeight = 0;
+ }
+
+ delete[] bBuffer; // done with it; free it
+ return true;
+#else
+ return false;
+#endif
+}
+
+bool InitCPUGroupInfoRange()
+{
+#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_))
+ WORD begin = 0;
+ WORD nr_proc = 0;
+
+ for (WORD i = 0; i < g_nGroups; i++)
+ {
+ nr_proc += g_CPUGroupInfoArray[i].nr_active;
+ g_CPUGroupInfoArray[i].begin = begin;
+ g_CPUGroupInfoArray[i].end = nr_proc - 1;
+ begin = nr_proc;
+ }
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+void InitCPUGroupInfo()
+{
+ g_fEnableGCCPUGroups = false;
+
+#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_))
+ if (!GCConfig::GetGCCpuGroup())
+ return;
+
+ if (!InitCPUGroupInfoArray())
+ return;
+
+ if (!InitCPUGroupInfoRange())
+ return;
+
+ // only enable CPU groups if more than one group exists
+ g_fEnableGCCPUGroups = g_nGroups > 1;
+#endif // _TARGET_AMD64_ || _TARGET_ARM64_
+
+ // Determine if the process is affinitized to a single processor (or if the system has a single processor)
+ DWORD_PTR processAffinityMask, systemAffinityMask;
+ if (::GetProcessAffinityMask(::GetCurrentProcess(), &processAffinityMask, &systemAffinityMask))
+ {
+ processAffinityMask &= systemAffinityMask;
+ if (processAffinityMask != 0 && // only one CPU group is involved
+ (processAffinityMask & (processAffinityMask - 1)) == 0) // only one bit is set
+ {
+ g_fHadSingleProcessorAtStartup = true;
+ }
+ }
+}
+
void GetProcessMemoryLoad(LPMEMORYSTATUSEX pMSEX)
{
pMSEX->dwLength = sizeof(MEMORYSTATUSEX);
@@ -177,6 +360,9 @@ bool GCToOSInterface::Initialize()
assert(systemInfo.dwPageSize == 0x1000);
+ InitNumaNodeInfo();
+ InitCPUGroupInfo();
+
return true;
}
@@ -320,9 +506,17 @@ bool GCToOSInterface::VirtualRelease(void* address, size_t size)
// size - size of the virtual memory range
// Return:
// true if it has succeeded, false if it has failed
-bool GCToOSInterface::VirtualCommit(void* address, size_t size)
+bool GCToOSInterface::VirtualCommit(void* address, size_t size, uint32_t node)
{
- return ::VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE) != nullptr;
+ if (node == NUMA_NODE_UNDEFINED)
+ {
+ return ::VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE) != nullptr;
+ }
+ else
+ {
+ assert(g_fEnableGCNumaAware);
+ return ::VirtualAllocExNuma(::GetCurrentProcess(), address, size, MEM_COMMIT, PAGE_READWRITE, node) != nullptr;
+ }
}
// Decomit virtual memory range.
@@ -623,6 +817,63 @@ uint32_t GCToOSInterface::GetLowPrecisionTimeStamp()
return ::GetTickCount();
}
+// Gets the total number of processors on the machine, not taking
+// into account current process affinity.
+// Return:
+// Number of processors on the machine
+uint32_t GCToOSInterface::GetTotalProcessorCount()
+{
+ if (CanEnableGCCPUGroups())
+ {
+ return g_nProcessors;
+ }
+ else
+ {
+ return g_SystemInfo.dwNumberOfProcessors;
+ }
+}
+
+bool GCToOSInterface::CanEnableGCNumaAware()
+{
+ return g_fEnableGCNumaAware;
+}
+
+bool GCToOSInterface::GetNumaProcessorNode(PPROCESSOR_NUMBER proc_no, uint16_t *node_no)
+{
+ assert(g_fEnableGCNumaAware);
+ return ::GetNumaProcessorNodeEx(proc_no, node_no) != FALSE;
+}
+
+bool GCToOSInterface::CanEnableGCCPUGroups()
+{
+ return g_fEnableGCCPUGroups;
+}
+
+void GCToOSInterface::GetGroupForProcessor(uint16_t processor_number, uint16_t* group_number, uint16_t* group_processor_number)
+{
+ assert(g_fEnableGCCPUGroups);
+
+#if !defined(FEATURE_REDHAWK) && (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_))
+ WORD bTemp = 0;
+ WORD bDiff = processor_number - bTemp;
+
+ for (WORD i=0; i < g_nGroups; i++)
+ {
+ bTemp += g_CPUGroupInfoArray[i].nr_active;
+ if (bTemp > processor_number)
+ {
+ *group_number = i;
+ *group_processor_number = bDiff;
+ break;
+ }
+ bDiff = processor_number - bTemp;
+ }
+#else
+ *group_number = 0;
+ *group_processor_number = 0;
+#endif
+}
+
// Parameters of the GC thread stub
struct GCThreadStubParam
{
@@ -644,15 +895,6 @@ static DWORD GCThreadStub(void* param)
return 0;
}
-// Gets the total number of processors on the machine, not taking
-// into account current process affinity.
-// Return:
-// Number of processors on the machine
-uint32_t GCToOSInterface::GetTotalProcessorCount()
-{
- return g_SystemInfo.dwNumberOfProcessors;
-}
-
// Initialize the critical section
void CLRCriticalSection::Initialize()
{
@@ -817,4 +1059,3 @@ bool GCEvent::CreateOSManualEventNoThrow(bool initialState)
m_impl = event.release();
return true;
}
-