Diffstat (limited to 'src/gc/gc.cpp'):
 src/gc/gc.cpp | 42 +++++++++++++++++++-----------------------
 1 file changed, 19 insertions(+), 23 deletions(-)
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index 31715cb569..edd7d07de0 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -5066,7 +5066,7 @@ public:
//can not enable gc numa aware, force all heaps to be in
//one numa node by filling the array with all 0s
- if (!NumaNodeInfo::CanEnableGCNumaAware())
+ if (!GCToOSInterface::CanEnableGCNumaAware())
memset(heap_no_to_numa_node, 0, sizeof (heap_no_to_numa_node));
return TRUE;
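
Note on the hunk above: when NUMA awareness is unavailable, the memset collapses the per-heap NUMA table so every heap resolves to node 0. A minimal standalone sketch of that fallback (the table name follows the surrounding gc.cpp code; the array size and lookup helper are illustrative):

    #include <cstdint>
    #include <cstring>

    // Illustrative size; gc.cpp sizes this table from its own heap limit.
    static uint16_t heap_no_to_numa_node[64];

    // With NUMA disabled the table is zeroed, so every heap maps to node 0.
    static void force_single_numa_node()
    {
        std::memset(heap_no_to_numa_node, 0, sizeof(heap_no_to_numa_node));
    }

    static uint16_t numa_node_for_heap(int heap_number)
    {
        return heap_no_to_numa_node[heap_number];
    }
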
@@ -5262,7 +5262,7 @@ void set_thread_group_affinity_for_heap(int heap_number, GCThreadAffinity* affin
affinity->Processor = GCThreadAffinity::None;
uint16_t gn, gpn;
- CPUGroupInfo::GetGroupForProcessor((uint16_t)heap_number, &gn, &gpn);
+ GCToOSInterface::GetGroupForProcessor((uint16_t)heap_number, &gn, &gpn);
int bit_number = 0;
for (uintptr_t mask = 1; mask !=0; mask <<=1)
@@ -5274,7 +5274,7 @@ void set_thread_group_affinity_for_heap(int heap_number, GCThreadAffinity* affin
affinity->Group = gn;
heap_select::set_cpu_group_for_heap(heap_number, gn);
heap_select::set_group_proc_for_heap(heap_number, gpn);
- if (NumaNodeInfo::CanEnableGCNumaAware())
+ if (GCToOSInterface::CanEnableGCNumaAware())
{
PROCESSOR_NUMBER proc_no;
proc_no.Group = gn;
@@ -5282,7 +5282,7 @@ void set_thread_group_affinity_for_heap(int heap_number, GCThreadAffinity* affin
proc_no.Reserved = 0;
uint16_t node_no = 0;
- if (NumaNodeInfo::GetNumaProcessorNodeEx(&proc_no, &node_no))
+ if (GCToOSInterface::GetNumaProcessorNode(&proc_no, &node_no))
heap_select::set_numa_node_for_heap(heap_number, node_no);
}
else
@@ -5315,14 +5315,14 @@ void set_thread_affinity_mask_for_heap(int heap_number, GCThreadAffinity* affini
dprintf (3, ("Using processor %d for heap %d", proc_number, heap_number));
affinity->Processor = proc_number;
heap_select::set_proc_no_for_heap(heap_number, proc_number);
- if (NumaNodeInfo::CanEnableGCNumaAware())
+ if (GCToOSInterface::CanEnableGCNumaAware())
{
uint16_t node_no = 0;
PROCESSOR_NUMBER proc_no;
proc_no.Group = 0;
proc_no.Number = (uint8_t)proc_number;
proc_no.Reserved = 0;
- if (NumaNodeInfo::GetNumaProcessorNodeEx(&proc_no, &node_no))
+ if (GCToOSInterface::GetNumaProcessorNode(&proc_no, &node_no))
{
heap_select::set_numa_node_for_heap(heap_number, node_no);
}
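
Both affinity paths above now resolve a processor to its NUMA node through GCToOSInterface::GetNumaProcessorNode instead of calling NumaNodeInfo::GetNumaProcessorNodeEx directly. A hedged sketch of what the Windows side of that wrapper plausibly looks like (the function name and bool-return convention mirror the call sites; the actual gcenv implementation may differ):

    #include <windows.h>
    #include <cstdint>

    // Maps a (group, number) processor to its NUMA node, mirroring the
    // GetNumaProcessorNodeEx call the old NumaNodeInfo helper exposed.
    bool GetNumaProcessorNodeSketch(PROCESSOR_NUMBER* proc_no, uint16_t* node_no)
    {
        USHORT node;
        if (!GetNumaProcessorNodeEx(proc_no, &node))
            return false;
        *node_no = (uint16_t)node;
        return true;
    }
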
@@ -5457,19 +5457,17 @@ void gc_heap::gc_thread_function ()
bool virtual_alloc_commit_for_heap(void* addr, size_t size, int h_number)
{
-#if defined(MULTIPLE_HEAPS) && !defined(FEATURE_REDHAWK) && !defined(FEATURE_PAL) && !defined(BUILD_AS_STANDALONE)
+#if defined(MULTIPLE_HEAPS) && !defined(FEATURE_REDHAWK)
// Currently there is no way for us to specific the numa node to allocate on via hosting interfaces to
// a host. This will need to be added later.
#if !defined(FEATURE_CORECLR)
if (!CLRMemoryHosted())
#endif
{
- if (NumaNodeInfo::CanEnableGCNumaAware())
+ if (GCToOSInterface::CanEnableGCNumaAware())
{
uint32_t numa_node = heap_select::find_numa_node_from_heap_no(h_number);
- void * ret = NumaNodeInfo::VirtualAllocExNuma(GetCurrentProcess(), addr, size,
- MEM_COMMIT, PAGE_READWRITE, numa_node);
- if (ret != NULL)
+ if (GCToOSInterface::VirtualCommit(addr, size, numa_node))
return true;
}
}
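
The hunk above replaces the direct VirtualAllocExNuma call with GCToOSInterface::VirtualCommit taking a preferred NUMA node. A sketch of how a Windows implementation of that interface could honor the node hint, reconstructed from the removed call; the NUMA_NODE_UNDEFINED sentinel and the exact signature are assumptions, not the actual gcenv code:

    #include <windows.h>
    #include <cstdint>

    const uint16_t NUMA_NODE_UNDEFINED = 0xffff; // assumed "no preference" sentinel

    bool VirtualCommitSketch(void* address, size_t size, uint16_t node)
    {
        void* ret;
        if (node == NUMA_NODE_UNDEFINED)
            ret = VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE);
        else
            ret = VirtualAllocExNuma(GetCurrentProcess(), address, size,
                                     MEM_COMMIT, PAGE_READWRITE, node);
        return ret != NULL;
    }
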
@@ -13343,7 +13341,7 @@ try_again:
org_hp->alloc_context_count--;
max_hp->alloc_context_count++;
acontext->set_alloc_heap(GCHeap::GetHeap(max_hp->heap_number));
- if (CPUGroupInfo::CanEnableGCCPUGroups())
+ if (GCToOSInterface::CanEnableGCCPUGroups())
{ //only set ideal processor when max_hp and org_hp are in the same cpu
//group. DO NOT MOVE THREADS ACROSS CPU GROUPS
uint16_t org_gn = heap_select::find_cpu_group_from_heap_no(org_hp->heap_number);
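
The CPU-group check above guards the ideal-processor update in heap balancing: the thread's ideal processor is only adjusted when the source and destination heaps live in the same CPU group. A hedged illustration of that guard, using the Windows SetThreadIdealProcessorEx API directly for clarity; the real code in gc.cpp may route through a GC/OS interface helper instead:

    #include <windows.h>
    #include <cstdint>

    void maybe_set_ideal_processor(HANDLE thread, uint16_t org_gn, uint16_t max_gn,
                                   uint8_t proc_in_group)
    {
        if (org_gn != max_gn)
            return; // never move a thread across CPU groups

        PROCESSOR_NUMBER proc_no = {};
        proc_no.Group  = org_gn;
        proc_no.Number = proc_in_group;
        SetThreadIdealProcessorEx(thread, &proc_no, NULL);
    }
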
@@ -19548,7 +19546,7 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
{
#endif //MULTIPLE_HEAPS
- num_sizedrefs = SystemDomain::System()->GetTotalNumSizedRefHandles();
+ num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles();
#ifdef MULTIPLE_HEAPS
@@ -24914,7 +24912,7 @@ void gc_heap::gc_thread_stub (void* arg)
// We are about to set affinity for GC threads. It is a good place to set up NUMA and
// CPU groups because the process mask, processor number, and group number are all
// readily available.
- if (CPUGroupInfo::CanEnableGCCPUGroups())
+ if (GCToOSInterface::CanEnableGCCPUGroups())
set_thread_group_affinity_for_heap(heap->heap_number, &affinity);
else
set_thread_affinity_mask_for_heap(heap->heap_number, &affinity);
@@ -25707,7 +25705,7 @@ void gc_heap::background_mark_phase ()
#endif //WRITE_WATCH
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
- num_sizedrefs = SystemDomain::System()->GetTotalNumSizedRefHandles();
+ num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles();
// this c_write is not really necessary because restart_vm
// has an instruction that will flush the cpu cache (interlocked
@@ -33498,10 +33496,8 @@ HRESULT GCHeap::Initialize ()
gc_heap::gc_thread_no_affinitize_p = true;
uint32_t nhp_from_config = static_cast<uint32_t>(GCConfig::GetHeapCount());
- // GetGCProcessCpuCount only returns up to 64 procs.
- uint32_t nhp_from_process = CPUGroupInfo::CanEnableGCCPUGroups() ?
- CPUGroupInfo::GetNumActiveProcessors():
- GCToOSInterface::GetCurrentProcessCpuCount();
+
+ uint32_t nhp_from_process = GCToOSInterface::GetCurrentProcessCpuCount();
uint32_t nhp = ((nhp_from_config == 0) ? nhp_from_process :
(min (nhp_from_config, nhp_from_process)));
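
With the CPUGroupInfo special case removed above, the heap count comes from GCToOSInterface::GetCurrentProcessCpuCount alone and is then clamped by the config value. The resulting selection logic, spelled out as a small standalone helper for clarity:

    #include <algorithm>
    #include <cstdint>

    // A config value of 0 means "use the process CPU count"; otherwise the
    // smaller of the config value and the process CPU count wins.
    uint32_t select_heap_count(uint32_t nhp_from_config, uint32_t nhp_from_process)
    {
        return (nhp_from_config == 0) ? nhp_from_process
                                      : std::min(nhp_from_config, nhp_from_process);
    }
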
@@ -35615,7 +35611,7 @@ size_t GCHeap::GetFinalizablePromotedCount()
#endif //MULTIPLE_HEAPS
}
-bool GCHeap::FinalizeAppDomain(AppDomain *pDomain, bool fRunFinalizers)
+bool GCHeap::FinalizeAppDomain(void *pDomain, bool fRunFinalizers)
{
#ifdef MULTIPLE_HEAPS
bool foundp = false;
@@ -35937,7 +35933,7 @@ CFinalize::GetNumberFinalizableObjects()
}
BOOL
-CFinalize::FinalizeSegForAppDomain (AppDomain *pDomain,
+CFinalize::FinalizeSegForAppDomain (void *pDomain,
BOOL fRunFinalizers,
unsigned int Seg)
{
@@ -35980,7 +35976,7 @@ CFinalize::FinalizeSegForAppDomain (AppDomain *pDomain,
}
else
{
- if (pDomain->IsRudeUnload())
+ if (GCToEEInterface::AppDomainIsRudeUnload(pDomain))
{
MoveItem (i, Seg, FreeList);
}
@@ -35997,7 +35993,7 @@ CFinalize::FinalizeSegForAppDomain (AppDomain *pDomain,
}
bool
-CFinalize::FinalizeAppDomain (AppDomain *pDomain, bool fRunFinalizers)
+CFinalize::FinalizeAppDomain (void *pDomain, bool fRunFinalizers)
{
bool finalizedFound = false;
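
The AppDomain* parameters above become opaque void* handles, and the rude-unload query moves behind GCToEEInterface, so gc.cpp no longer depends on the AppDomain type. A sketch of the execution-engine side implied by that call, with a stand-in AppDomain type; the real shim lives in the EE, not in the GC:

    // Stand-in for the EE's AppDomain type, for illustration only.
    class AppDomain
    {
    public:
        bool IsRudeUnload() const { return m_rude; }
        bool m_rude = false;
    };

    // The GC passes the domain through as void*; the EE-side shim casts it back.
    bool AppDomainIsRudeUnload(void* pDomain)
    {
        return static_cast<AppDomain*>(pDomain)->IsRudeUnload();
    }
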