path: root/src/gc
author    David Wrighton <davidwr@microsoft.com>    2019-05-15 18:28:42 -0700
committer GitHub <noreply@github.com>               2019-05-15 18:28:42 -0700
commit    bdb995987178231ba541f22143cb3cab56309daa (patch)
tree      655d0d0bfaee70a56c5a2b33e52fc27965314049 /src/gc
parent    c2533b6c3f715b6964de5ffe0b2a2feaac42b079 (diff)
Remove concept of AppDomains from the GC (#24536)
* Remove concept of AppDomains from the GC
- Leave constructs allowing for multiple handle tables, as scenarios for that have been proposed
- Remove FEATURE_APPDOMAIN_RESOURCE_MONITORING
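A minimal caller-side sketch (not part of this commit) of what the handle-store API change means for the EE. "handleManager" stands in for the IGCHandleManager* the VM already holds, and the AppDomain context pointer in the "before" comment is hypothetical; only CreateHandleStore/DestroyHandleStore from the diff below are used.

// Before this commit, a handle store was created against an AppDomain context:
//     IGCHandleStore* store = handleManager->CreateHandleStore(pAppDomainContext);
//
// After this commit, handle stores carry no AppDomain context, although the
// interface still allows more than one store to exist.
IGCHandleStore* store = handleManager->CreateHandleStore();
if (store != nullptr)
{
    // ... allocate handles from the store ...
    handleManager->DestroyHandleStore(store);
}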
Diffstat (limited to 'src/gc')
-rw-r--r--  src/gc/CMakeLists.txt           |    3
-rw-r--r--  src/gc/env/etmdummy.h           |    4
-rw-r--r--  src/gc/env/gcenv.base.h         |   18
-rw-r--r--  src/gc/env/gcenv.ee.h           |    6
-rw-r--r--  src/gc/gc.cpp                   |  169
-rw-r--r--  src/gc/gcenv.ee.standalone.inl  |   36
-rw-r--r--  src/gc/gchandletable.cpp        |   21
-rw-r--r--  src/gc/gchandletableimpl.h      |    8
-rw-r--r--  src/gc/gcimpl.h                 |    1
-rw-r--r--  src/gc/gcinterface.dac.h        |    4
-rw-r--r--  src/gc/gcinterface.ee.h         |   24
-rw-r--r--  src/gc/gcinterface.h            |   28
-rw-r--r--  src/gc/gcpriv.h                 |    7
-rw-r--r--  src/gc/handletable.cpp          |  151
-rw-r--r--  src/gc/handletable.h            |   14
-rw-r--r--  src/gc/handletablecore.cpp      |  544
-rw-r--r--  src/gc/handletablepriv.h        |   53
-rw-r--r--  src/gc/objecthandle.cpp         |   82
-rw-r--r--  src/gc/objecthandle.h           |    4
-rw-r--r--  src/gc/sample/gcenv.ee.cpp      |   30
20 files changed, 27 insertions, 1180 deletions
diff --git a/src/gc/CMakeLists.txt b/src/gc/CMakeLists.txt
index e3e171ebf1..ab9f3a0bad 100644
--- a/src/gc/CMakeLists.txt
+++ b/src/gc/CMakeLists.txt
@@ -2,9 +2,6 @@ set(CMAKE_INCLUDE_CURRENT_DIR ON)
# Local GC meta-issue: https://github.com/dotnet/coreclr/issues/11518
-# https://github.com/dotnet/coreclr/issues/11517
-remove_definitions(-DFEATURE_APPDOMAIN_RESOURCE_MONITORING)
-
# https://github.com/dotnet/coreclr/issues/11516
remove_definitions(-DSTRESS_HEAP)
diff --git a/src/gc/env/etmdummy.h b/src/gc/env/etmdummy.h
index 2b47a46e4e..f2c76d4b8d 100644
--- a/src/gc/env/etmdummy.h
+++ b/src/gc/env/etmdummy.h
@@ -47,7 +47,7 @@
#define FireEtwGCMarkHandles(HeapNum, ClrInstanceID) 0
#define FireEtwGCMarkOlderGenerationRoots(HeapNum, ClrInstanceID) 0
#define FireEtwFinalizeObject(TypeID, ObjectID, ClrInstanceID) 0
-#define FireEtwSetGCHandle(HandleID, ObjectID, Kind, Generation, AppDomainID, ClrInstanceID) 0
+#define FireEtwSetGCHandle(HandleID, ObjectID, Kind, Generation, ClrInstanceID) 0
#define FireEtwDestroyGCHandle(HandleID, ClrInstanceID) 0
#define FireEtwGCSampledObjectAllocationLow(Address, TypeID, ObjectCountForTypeSample, TotalSizeForTypeSample, ClrInstanceID) 0
#define FireEtwPinObjectAtGCTime(HandleID, ObjectID, ObjectSize, TypeName, ClrInstanceID) 0
@@ -376,7 +376,7 @@
#define FireEtwFailFast(FailFastUserMessage, FailedEIP, OSExitCode, ClrExitCode, ClrInstanceID) 0
#define FireEtwPrvFinalizeObject(TypeID, ObjectID, ClrInstanceID, TypeName) 0
#define FireEtwCCWRefCountChange(HandleID, ObjectID, COMInterfacePointer, NewRefCount, AppDomainID, ClassName, NameSpace, Operation, ClrInstanceID) 0
-#define FireEtwPrvSetGCHandle(HandleID, ObjectID, Kind, Generation, AppDomainID, ClrInstanceID) 0
+#define FireEtwPrvSetGCHandle(HandleID, ObjectID, Kind, Generation, ClrInstanceID) 0
#define FireEtwPrvDestroyGCHandle(HandleID, ClrInstanceID) 0
#define FireEtwFusionMessageEvent(ClrInstanceID, Prepend, Message) 0
#define FireEtwFusionErrorCodeEvent(ClrInstanceID, Category, ErrorCode) 0
diff --git a/src/gc/env/gcenv.base.h b/src/gc/env/gcenv.base.h
index 614b84ab23..46ad366e62 100644
--- a/src/gc/env/gcenv.base.h
+++ b/src/gc/env/gcenv.base.h
@@ -538,22 +538,4 @@ inline bool FitsInU1(uint64_t val)
return val == (uint64_t)(uint8_t)val;
}
-// -----------------------------------------------------------------------------------------------------------
-//
-// AppDomain emulation. The we don't have these in Redhawk so instead we emulate the bare minimum of the API
-// touched by the GC/HandleTable and pretend we have precisely one (default) appdomain.
-//
-
-#define RH_DEFAULT_DOMAIN_ID 1
-
-struct ADIndex
-{
- DWORD m_dwIndex;
-
- ADIndex () : m_dwIndex(RH_DEFAULT_DOMAIN_ID) {}
- explicit ADIndex (DWORD id) : m_dwIndex(id) {}
- BOOL operator==(const ADIndex& ad) const { return m_dwIndex == ad.m_dwIndex; }
- BOOL operator!=(const ADIndex& ad) const { return m_dwIndex != ad.m_dwIndex; }
-};
-
#endif // __GCENV_BASE_INCLUDED__
diff --git a/src/gc/env/gcenv.ee.h b/src/gc/env/gcenv.ee.h
index b9918ec5b7..819f64882a 100644
--- a/src/gc/env/gcenv.ee.h
+++ b/src/gc/env/gcenv.ee.h
@@ -71,7 +71,6 @@ public:
static void EnableFinalization(bool foundFinalizers);
static void HandleFatalError(unsigned int exitCode);
- static bool ShouldFinalizeObjectForUnload(void* pDomain, Object* obj);
static bool EagerFinalized(Object* obj);
static MethodTable* GetFreeObjectMethodTable();
static bool GetBooleanConfigValue(const char* key, bool* value);
@@ -85,12 +84,7 @@ public:
static void WalkAsyncPinned(Object* object, void* context, void(*callback)(Object*, Object*, void*));
static IGCToCLREventSink* EventSink();
- static uint32_t GetDefaultDomainIndex();
- static void *GetAppDomainAtIndex(uint32_t appDomainIndex);
- static bool AppDomainCanAccessHandleTable(uint32_t appDomainID);
- static uint32_t GetIndexOfAppDomainBeingUnloaded();
static uint32_t GetTotalNumSizedRefHandles();
- static bool AppDomainIsRudeUnload(void *appDomain);
static bool AnalyzeSurvivorsRequested(int condemnedGeneration);
static void AnalyzeSurvivorsFinished(int condemnedGeneration);
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index c6a858c8fa..3c8345922f 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -11528,14 +11528,6 @@ void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size, size_t size,
acontext->alloc_bytes += added_bytes;
total_alloc_bytes += added_bytes;
-#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
- if (g_fEnableAppDomainMonitoring)
- {
- GCToEEInterface::RecordAllocatedBytesForHeap(limit_size, heap_number);
- }
-#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
-
-
uint8_t* saved_used = 0;
if (seg)
@@ -12085,13 +12077,6 @@ void gc_heap::bgc_loh_alloc_clr (uint8_t* alloc_start,
{
make_unused_array (alloc_start, size);
-#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
- if (g_fEnableAppDomainMonitoring)
- {
- GCToEEInterface::RecordAllocatedBytesForHeap(size, heap_number);
- }
-#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
-
size_t size_of_array_base = sizeof(ArrayBase);
bgc_alloc_lock->loh_alloc_done_with_index (lock_index);
@@ -19084,10 +19069,6 @@ gc_heap::scan_background_roots (promote_func* fn, int hn, ScanContext *pSC)
pSC->thread_number = hn;
-#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
- pSC->pCurrentDomain = 0;
-#endif
-
BOOL relocate_p = (fn == &GCHeap::Relocate);
dprintf (3, ("Scanning background mark list"));
@@ -20295,22 +20276,6 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
// scan for deleted entries in the syncblk cache
GCScan::GcWeakPtrScanBySingleThread (condemned_gen_number, max_generation, &sc);
-#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
- if (g_fEnableAppDomainMonitoring)
- {
- size_t promoted_all_heaps = 0;
-#ifdef MULTIPLE_HEAPS
- for (int i = 0; i < n_heaps; i++)
- {
- promoted_all_heaps += promoted_bytes (i);
- }
-#else
- promoted_all_heaps = promoted_bytes (heap_number);
-#endif //MULTIPLE_HEAPS
- GCToEEInterface::RecordTotalSurvivedBytes(promoted_all_heaps);
- }
-#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
-
#ifdef MULTIPLE_HEAPS
#ifdef MARK_LIST
@@ -34589,26 +34554,11 @@ void GCHeap::Promote(Object** ppObject, ScanContext* sc, uint32_t flags)
hp->pin_object (o, (uint8_t**) ppObject, hp->gc_low, hp->gc_high);
#endif //STRESS_PINNING
-#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
- size_t promoted_size_begin = hp->promoted_bytes (thread);
-#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
-
if ((o >= hp->gc_low) && (o < hp->gc_high))
{
hpt->mark_object_simple (&o THREAD_NUMBER_ARG);
}
-#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
- size_t promoted_size_end = hp->promoted_bytes (thread);
- if (g_fEnableAppDomainMonitoring)
- {
- if (sc->pCurrentDomain)
- {
- GCToEEInterface::RecordSurvivedBytesForHeap((promoted_size_end - promoted_size_begin), thread, sc->pCurrentDomain);
- }
- }
-#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
-
STRESS_LOG_ROOT_PROMOTE(ppObject, o, o ? header(o)->GetMethodTable() : NULL);
}
@@ -35544,13 +35494,6 @@ void gc_heap::do_pre_gc()
#endif //BACKGROUND_GC
}
}
-
-#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
- if (g_fEnableAppDomainMonitoring)
- {
- GCToEEInterface::ResetTotalSurvivedBytes();
- }
-#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
}
#ifdef GC_CONFIG_DRIVEN
@@ -35798,13 +35741,6 @@ void gc_heap::do_post_gc()
}
GCHeap::UpdatePostGCCounters();
-#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
- //if (g_fEnableARM)
- //{
- // SystemDomain::GetADSurvivedBytes();
- //}
-#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
-
#ifdef STRESS_LOG
STRESS_LOG_GC_END(VolatileLoad(&settings.gc_index),
(uint32_t)settings.condemned_generation,
@@ -36551,23 +36487,6 @@ size_t GCHeap::GetFinalizablePromotedCount()
#endif //MULTIPLE_HEAPS
}
-bool GCHeap::FinalizeAppDomain(void *pDomain, bool fRunFinalizers)
-{
-#ifdef MULTIPLE_HEAPS
- bool foundp = false;
- for (int hn = 0; hn < gc_heap::n_heaps; hn++)
- {
- gc_heap* hp = gc_heap::g_heaps [hn];
- if (hp->finalize_queue->FinalizeAppDomain (pDomain, fRunFinalizers))
- foundp = true;
- }
- return foundp;
-
-#else //MULTIPLE_HEAPS
- return pGenGCHeap->finalize_queue->FinalizeAppDomain (pDomain, fRunFinalizers);
-#endif //MULTIPLE_HEAPS
-}
-
bool GCHeap::ShouldRestartFinalizerWatchDog()
{
// This condition was historically used as part of the condition to detect finalizer thread timeouts
@@ -36872,88 +36791,6 @@ CFinalize::GetNumberFinalizableObjects()
(g_fFinalizerRunOnShutDown ? m_Array : SegQueue(FinalizerListSeg));
}
-BOOL
-CFinalize::FinalizeSegForAppDomain (void *pDomain,
- BOOL fRunFinalizers,
- unsigned int Seg)
-{
- BOOL finalizedFound = FALSE;
- Object** endIndex = SegQueue (Seg);
- for (Object** i = SegQueueLimit (Seg)-1; i >= endIndex ;i--)
- {
- CObjectHeader* obj = (CObjectHeader*)*i;
-
- // Objects are put into the finalization queue before they are complete (ie their methodtable
- // may be null) so we must check that the object we found has a method table before checking
- // if it has the index we are looking for. If the methodtable is null, it can't be from the
- // unloading domain, so skip it.
- if (method_table(obj) == NULL)
- {
- continue;
- }
-
- // does the EE actually want us to finalize this object?
- if (!GCToEEInterface::ShouldFinalizeObjectForUnload(pDomain, obj))
- {
- continue;
- }
-
- if (!fRunFinalizers || (obj->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN)
- {
- //remove the object because we don't want to
- //run the finalizer
- MoveItem (i, Seg, FreeList);
- //Reset the bit so it will be put back on the queue
- //if resurrected and re-registered.
- obj->GetHeader()->ClrBit (BIT_SBLK_FINALIZER_RUN);
- }
- else
- {
- if (method_table(obj)->HasCriticalFinalizer())
- {
- finalizedFound = TRUE;
- MoveItem (i, Seg, CriticalFinalizerListSeg);
- }
- else
- {
- if (GCToEEInterface::AppDomainIsRudeUnload(pDomain))
- {
- MoveItem (i, Seg, FreeList);
- }
- else
- {
- finalizedFound = TRUE;
- MoveItem (i, Seg, FinalizerListSeg);
- }
- }
- }
- }
-
- return finalizedFound;
-}
-
-bool
-CFinalize::FinalizeAppDomain (void *pDomain, bool fRunFinalizers)
-{
- bool finalizedFound = false;
-
- unsigned int startSeg = gen_segment (max_generation);
-
- EnterFinalizeLock();
-
- for (unsigned int Seg = startSeg; Seg <= gen_segment (0); Seg++)
- {
- if (FinalizeSegForAppDomain (pDomain, fRunFinalizers, Seg))
- {
- finalizedFound = true;
- }
- }
-
- LeaveFinalizeLock();
-
- return finalizedFound;
-}
-
void
CFinalize::MoveItem (Object** fromIndex,
unsigned int fromSeg,
@@ -37001,12 +36838,6 @@ CFinalize::GcScanRoots (promote_func* fn, int hn, ScanContext *pSC)
Object* o = *po;
//dprintf (3, ("scan freacheable %Ix", (size_t)o));
dprintf (3, ("scan f %Ix", (size_t)o));
-#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
- if (g_fEnableAppDomainMonitoring)
- {
- pSC->pCurrentDomain = GCToEEInterface::GetAppDomainForObject(o);
- }
-#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
(*fn)(po, pSC, 0);
}
diff --git a/src/gc/gcenv.ee.standalone.inl b/src/gc/gcenv.ee.standalone.inl
index 7a9b8b1014..be318dc8f0 100644
--- a/src/gc/gcenv.ee.standalone.inl
+++ b/src/gc/gcenv.ee.standalone.inl
@@ -191,12 +191,6 @@ inline void GCToEEInterface::HandleFatalError(unsigned int exitCode)
g_theGCToCLR->HandleFatalError(exitCode);
}
-inline bool GCToEEInterface::ShouldFinalizeObjectForUnload(void* pDomain, Object* obj)
-{
- assert(g_theGCToCLR != nullptr);
- return g_theGCToCLR->ShouldFinalizeObjectForUnload(pDomain, obj);
-}
-
inline bool GCToEEInterface::EagerFinalized(Object* obj)
{
assert(g_theGCToCLR != nullptr);
@@ -269,42 +263,12 @@ inline IGCToCLREventSink* GCToEEInterface::EventSink()
return g_theGCToCLR->EventSink();
}
-inline uint32_t GCToEEInterface::GetDefaultDomainIndex()
-{
- assert(g_theGCToCLR != nullptr);
- return g_theGCToCLR->GetDefaultDomainIndex();
-}
-
-inline void *GCToEEInterface::GetAppDomainAtIndex(uint32_t appDomainIndex)
-{
- assert(g_theGCToCLR != nullptr);
- return g_theGCToCLR->GetAppDomainAtIndex(appDomainIndex);
-}
-
-inline bool GCToEEInterface::AppDomainCanAccessHandleTable(uint32_t appDomainID)
-{
- assert(g_theGCToCLR != nullptr);
- return g_theGCToCLR->AppDomainCanAccessHandleTable(appDomainID);
-}
-
-inline uint32_t GCToEEInterface::GetIndexOfAppDomainBeingUnloaded()
-{
- assert(g_theGCToCLR != nullptr);
- return g_theGCToCLR->GetIndexOfAppDomainBeingUnloaded();
-}
-
inline uint32_t GCToEEInterface::GetTotalNumSizedRefHandles()
{
assert(g_theGCToCLR != nullptr);
return g_theGCToCLR->GetTotalNumSizedRefHandles();
}
-inline bool GCToEEInterface::AppDomainIsRudeUnload(void *appDomain)
-{
- assert(g_theGCToCLR != nullptr);
- return g_theGCToCLR->AppDomainIsRudeUnload(appDomain);
-}
-
inline bool GCToEEInterface::AnalyzeSurvivorsRequested(int condemnedGeneration)
{
assert(g_theGCToCLR != nullptr);
diff --git a/src/gc/gchandletable.cpp b/src/gc/gchandletable.cpp
index ad3c6e23c8..ed24c3e53b 100644
--- a/src/gc/gchandletable.cpp
+++ b/src/gc/gchandletable.cpp
@@ -57,18 +57,6 @@ OBJECTHANDLE GCHandleStore::CreateDependentHandle(Object* primary, Object* secon
return handle;
}
-void GCHandleStore::RelocateAsyncPinnedHandles(IGCHandleStore* pTarget, void (*clearIfComplete)(Object*), void (*setHandle)(Object*, OBJECTHANDLE))
-{
- // assumption - the IGCHandleStore is an instance of GCHandleStore
- GCHandleStore* other = static_cast<GCHandleStore*>(pTarget);
- ::Ref_RelocateAsyncPinHandles(&_underlyingBucket, &other->_underlyingBucket, clearIfComplete, setHandle);
-}
-
-bool GCHandleStore::EnumerateAsyncPinnedHandles(async_pin_enum_fn callback, void* context)
-{
- return !!::Ref_HandleAsyncPinHandles(callback, context);
-}
-
GCHandleStore::~GCHandleStore()
{
::Ref_DestroyHandleTableBucket(&_underlyingBucket);
@@ -94,7 +82,7 @@ IGCHandleStore* GCHandleManager::GetGlobalHandleStore()
return g_gcGlobalHandleStore;
}
-IGCHandleStore* GCHandleManager::CreateHandleStore(void* context)
+IGCHandleStore* GCHandleManager::CreateHandleStore()
{
#ifndef FEATURE_REDHAWK
GCHandleStore* store = new (nothrow) GCHandleStore();
@@ -103,7 +91,7 @@ IGCHandleStore* GCHandleManager::CreateHandleStore(void* context)
return nullptr;
}
- bool success = ::Ref_InitializeHandleTableBucket(&store->_underlyingBucket, context);
+ bool success = ::Ref_InitializeHandleTableBucket(&store->_underlyingBucket);
if (!success)
{
delete store;
@@ -122,11 +110,6 @@ void GCHandleManager::DestroyHandleStore(IGCHandleStore* store)
delete store;
}
-void* GCHandleManager::GetHandleContext(OBJECTHANDLE handle)
-{
- return (void*)((uintptr_t)::HndGetHandleTableADIndex(::HndGetHandleTable(handle)).m_dwIndex);
-}
-
OBJECTHANDLE GCHandleManager::CreateGlobalHandleOfType(Object* object, HandleType type)
{
return ::HndCreateHandle(g_HandleTableMap.pBuckets[0]->pTable[GetCurrentThreadHomeHeapNumber()], type, ObjectToOBJECTREF(object));
diff --git a/src/gc/gchandletableimpl.h b/src/gc/gchandletableimpl.h
index 77af352d4e..48eb2ab17d 100644
--- a/src/gc/gchandletableimpl.h
+++ b/src/gc/gchandletableimpl.h
@@ -23,10 +23,6 @@ public:
virtual OBJECTHANDLE CreateDependentHandle(Object* primary, Object* secondary);
- virtual void RelocateAsyncPinnedHandles(IGCHandleStore* pTarget, void (*clearIfCompleteCallback)(Object* object), void (*setHandle)(Object* object, OBJECTHANDLE handle));
-
- virtual bool EnumerateAsyncPinnedHandles(async_pin_enum_fn callback, void* context);
-
virtual ~GCHandleStore();
HandleTableBucket _underlyingBucket;
@@ -41,11 +37,9 @@ public:
virtual void Shutdown();
- virtual void* GetHandleContext(OBJECTHANDLE handle);
-
virtual IGCHandleStore* GetGlobalHandleStore();
- virtual IGCHandleStore* CreateHandleStore(void* context);
+ virtual IGCHandleStore* CreateHandleStore();
virtual void DestroyHandleStore(IGCHandleStore* store);
diff --git a/src/gc/gcimpl.h b/src/gc/gcimpl.h
index 711ecb5fdc..b3aeb36516 100644
--- a/src/gc/gcimpl.h
+++ b/src/gc/gcimpl.h
@@ -208,7 +208,6 @@ public:
PER_HEAP_ISOLATED size_t GetFinalizablePromotedCount();
void SetFinalizeQueueForShutdown(bool fHasLock);
- bool FinalizeAppDomain(void *pDomain, bool fRunFinalizers);
bool ShouldRestartFinalizerWatchDog();
void DiagWalkObject (Object* obj, walk_fn fn, void* context);
diff --git a/src/gc/gcinterface.dac.h b/src/gc/gcinterface.dac.h
index 93698c05a9..44bf00d316 100644
--- a/src/gc/gcinterface.dac.h
+++ b/src/gc/gcinterface.dac.h
@@ -53,15 +53,11 @@ public:
class dac_handle_table {
public:
- // On the handle table side, this is an ADIndex. They should still have
- // the same layout.
- //
// We do try to keep everything that the DAC knows about as close to the
// start of the struct as possible to avoid having padding members. However,
// HandleTable has rgTypeFlags at offset 0 for performance reasons and
// we don't want to disrupt that.
uint32_t padding[HANDLE_MAX_INTERNAL_TYPES];
- DWORD uADIndex;
};
class dac_handle_table_bucket {
diff --git a/src/gc/gcinterface.ee.h b/src/gc/gcinterface.ee.h
index 4d12ae9df9..ea9387e2a3 100644
--- a/src/gc/gcinterface.ee.h
+++ b/src/gc/gcinterface.ee.h
@@ -154,9 +154,9 @@ public:
virtual
void FireGCFullNotify_V1(uint32_t genNumber, uint32_t isAlloc) = 0;
virtual
- void FireSetGCHandle(void *handleID, void *objectID, uint32_t kind, uint32_t generation, uint64_t appDomainID) = 0;
+ void FireSetGCHandle(void *handleID, void *objectID, uint32_t kind, uint32_t generation) = 0;
virtual
- void FirePrvSetGCHandle(void *handleID, void *objectID, uint32_t kind, uint32_t generation, uint64_t appDomainID) = 0;
+ void FirePrvSetGCHandle(void *handleID, void *objectID, uint32_t kind, uint32_t generation) = 0;
virtual
void FireDestroyGCHandle(void *handleID) = 0;
virtual
@@ -321,11 +321,6 @@ public:
virtual
void HandleFatalError(unsigned int exitCode) = 0;
- // Asks the EE if it wants a particular object to be finalized when unloading
- // an app domain.
- virtual
- bool ShouldFinalizeObjectForUnload(void* pDomain, Object* obj) = 0;
-
// Offers the EE the option to finalize the given object eagerly, i.e.
// not on the finalizer thread but on the current thread. The
// EE returns true if it finalized the object eagerly and the GC does not
@@ -410,24 +405,9 @@ public:
IGCToCLREventSink* EventSink() = 0;
virtual
- uint32_t GetDefaultDomainIndex() = 0;
-
- virtual
- void *GetAppDomainAtIndex(uint32_t appDomainIndex) = 0;
-
- virtual
- uint32_t GetIndexOfAppDomainBeingUnloaded() = 0;
-
- virtual
- bool AppDomainCanAccessHandleTable(uint32_t appDomainID) = 0;
-
- virtual
uint32_t GetTotalNumSizedRefHandles() = 0;
virtual
- bool AppDomainIsRudeUnload(void *appDomain) = 0;
-
- virtual
bool AnalyzeSurvivorsRequested(int condemnedGeneration) = 0;
virtual
diff --git a/src/gc/gcinterface.h b/src/gc/gcinterface.h
index 92e7987cbf..083e84fdc6 100644
--- a/src/gc/gcinterface.h
+++ b/src/gc/gcinterface.h
@@ -469,17 +469,6 @@ public:
virtual OBJECTHANDLE CreateDependentHandle(Object* primary, Object* secondary) = 0;
- // Relocates async pinned handles from a condemned handle store to the default domain's handle store.
- //
- // The two callbacks are called when:
- // 1. clearIfComplete is called whenever the handle table observes an async pin that is still live.
- // The callback gives a chance for the EE to unpin the referents if the overlapped operation is complete.
- // 2. setHandle is called whenever the GC has relocated the async pin to a new handle table. The passed-in
- // handle is the newly-allocated handle in the default domain that should be assigned to the overlapped object.
- virtual void RelocateAsyncPinnedHandles(IGCHandleStore* pTarget, void (*clearIfComplete)(Object*), void (*setHandle)(Object*, OBJECTHANDLE)) = 0;
-
- virtual bool EnumerateAsyncPinnedHandles(async_pin_enum_fn callback, void* context) = 0;
-
virtual ~IGCHandleStore() {};
};
@@ -490,11 +479,9 @@ public:
virtual void Shutdown() = 0;
- virtual void* GetHandleContext(OBJECTHANDLE handle) = 0;
-
virtual IGCHandleStore* GetGlobalHandleStore() = 0;
- virtual IGCHandleStore* CreateHandleStore(void* context) = 0;
+ virtual IGCHandleStore* CreateHandleStore() = 0;
virtual void DestroyHandleStore(IGCHandleStore* store) = 0;
@@ -555,8 +542,7 @@ public:
to synchronize with the GC, when the VM wants to update something that
the GC is potentially using, if it's doing a background GC.
- Concrete examples of this are moving async pinned handles across appdomains
- and profiling/ETW scenarios.
+ Concrete examples of this are profiling/ETW scenarios.
===========================================================================
*/
@@ -586,9 +572,6 @@ public:
===========================================================================
*/
- // Finalizes an app domain by finalizing objects within that app domain.
- virtual bool FinalizeAppDomain(void* pDomain, bool fRunFinalizers) = 0;
-
// Finalizes all registered objects for shutdown, even if they are still reachable.
virtual void SetFinalizeQueueForShutdown(bool fHasLock) = 0;
@@ -663,7 +646,7 @@ public:
// Returns the number of GCs that have transpired in the given generation
// since the beginning of the life of the process. Also used by the VM
- // for debug code and app domains.
+ // for debug code.
virtual int CollectionCount(int generation, int get_bgc_fgc_coutn = 0) = 0;
// Begins a no-GC region, returning a code indicating whether entering the no-GC
@@ -909,7 +892,6 @@ void updateGCShadow(Object** ptr, Object* val);
#define GC_CALL_INTERIOR 0x1
#define GC_CALL_PINNED 0x2
-#define GC_CALL_CHECK_APP_DOMAIN 0x4
//flags for IGCHeapAlloc(...)
enum GC_ALLOC_FLAGS
@@ -952,11 +934,7 @@ struct ScanContext
uintptr_t stack_limit; // Lowest point on the thread stack that the scanning logic is permitted to read
bool promotion; //TRUE: Promotion, FALSE: Relocation.
bool concurrent; //TRUE: concurrent scanning
-#if defined (FEATURE_APPDOMAIN_RESOURCE_MONITORING) || defined (DACCESS_COMPILE)
- AppDomain *pCurrentDomain;
-#else
void* _unused1;
-#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING || DACCESS_COMPILE
void* pMD;
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
EtwGCRootKind dwEtwRootKind;
diff --git a/src/gc/gcpriv.h b/src/gc/gcpriv.h
index a4dd54f066..60dcd4236f 100644
--- a/src/gc/gcpriv.h
+++ b/src/gc/gcpriv.h
@@ -3997,10 +3997,6 @@ private:
}
- BOOL FinalizeSegForAppDomain (void *pDomain,
- BOOL fRunFinalizers,
- unsigned int Seg);
-
public:
~CFinalize();
bool Initialize();
@@ -4020,9 +4016,6 @@ public:
size_t GetNumberFinalizableObjects();
void DiscardNonCriticalObjects();
- //Methods used by the app domain unloading call to finalize objects in an app domain
- bool FinalizeAppDomain (void *pDomain, bool fRunFinalizers);
-
void CheckFinalizerObjects();
};
diff --git a/src/gc/handletable.cpp b/src/gc/handletable.cpp
index 274c6ff0df..221911500c 100644
--- a/src/gc/handletable.cpp
+++ b/src/gc/handletable.cpp
@@ -83,7 +83,7 @@ __inline PTR_HandleTable Table(HHANDLETABLE hTable)
* Allocates and initializes a handle table.
*
*/
-HHANDLETABLE HndCreateHandleTable(const uint32_t *pTypeFlags, uint32_t uTypeCount, ADIndex uADIndex)
+HHANDLETABLE HndCreateHandleTable(const uint32_t *pTypeFlags, uint32_t uTypeCount)
{
CONTRACTL
{
@@ -141,7 +141,6 @@ HHANDLETABLE HndCreateHandleTable(const uint32_t *pTypeFlags, uint32_t uTypeCoun
// Store user data
pTable->uTableIndex = (uint32_t) -1;
- pTable->uADIndex = uADIndex;
// loop over various arrays an initialize them
uint32_t u;
@@ -246,38 +245,6 @@ uint32_t HndGetHandleTableIndex(HHANDLETABLE hTable)
return pTable->uTableIndex;
}
-/*
- * HndGetHandleTableIndex
- *
- * Retrieves the AppDomain index associated with a handle table at creation
- */
-ADIndex HndGetHandleTableADIndex(HHANDLETABLE hTable)
-{
- WRAPPER_NO_CONTRACT;
-
- // fetch the handle table pointer
- HandleTable *pTable = Table(hTable);
-
- return pTable->uADIndex;
-}
-
-/*
- * HndGetHandleTableIndex
- *
- * Retrieves the AppDomain index associated with a handle table at creation
- */
-GC_DAC_VISIBLE
-ADIndex HndGetHandleADIndex(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
- SUPPORTS_DAC;
-
- // fetch the handle table pointer
- HandleTable *pTable = Table(HndGetHandleTable(handle));
-
- return pTable->uADIndex;
-}
-
#ifndef DACCESS_COMPILE
/*
* HndCreateHandle
@@ -353,7 +320,7 @@ OBJECTHANDLE HndCreateHandle(HHANDLETABLE hTable, uint32_t uType, OBJECTREF obje
#endif // !DACCESS_COMPILE
#ifdef _DEBUG
-void ValidateFetchObjrefForHandle(OBJECTREF objref, ADIndex appDomainIndex)
+void ValidateFetchObjrefForHandle(OBJECTREF objref)
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
@@ -362,15 +329,10 @@ void ValidateFetchObjrefForHandle(OBJECTREF objref, ADIndex appDomainIndex)
BEGIN_DEBUG_ONLY_CODE;
VALIDATEOBJECTREF (objref);
-
-#ifndef DACCESS_COMPILE
- _ASSERTE(GCToEEInterface::AppDomainCanAccessHandleTable(appDomainIndex.m_dwIndex));
-#endif // DACCESS_COMPILE
-
END_DEBUG_ONLY_CODE;
}
-void ValidateAssignObjrefForHandle(OBJECTREF objref, ADIndex appDomainIndex)
+void ValidateAssignObjrefForHandle(OBJECTREF objref)
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
@@ -378,41 +340,8 @@ void ValidateAssignObjrefForHandle(OBJECTREF objref, ADIndex appDomainIndex)
STATIC_CONTRACT_DEBUG_ONLY;
BEGIN_DEBUG_ONLY_CODE;
-
VALIDATEOBJECTREF (objref);
-
-#ifndef DACCESS_COMPILE
- _ASSERTE(GCToEEInterface::AppDomainCanAccessHandleTable(appDomainIndex.m_dwIndex));
-#endif // DACCESS_COMPILE
- END_DEBUG_ONLY_CODE;
-}
-
-void ValidateAppDomainForHandle(OBJECTHANDLE handle)
-{
- STATIC_CONTRACT_DEBUG_ONLY;
- STATIC_CONTRACT_NOTHROW;
-
-#ifdef DEBUG_DestroyedHandleValue
- // Verify that we are not trying to access freed handle.
- _ASSERTE("Attempt to access destroyed handle." && *(_UNCHECKED_OBJECTREF *)handle != DEBUG_DestroyedHandleValue);
-#endif
-#ifdef DACCESS_COMPILE
- UNREFERENCED_PARAMETER(handle);
-#else
- BEGIN_DEBUG_ONLY_CODE;
- ADIndex id = HndGetHandleADIndex(handle);
- ADIndex unloadingDomain(GCToEEInterface::GetIndexOfAppDomainBeingUnloaded());
- if (unloadingDomain != id)
- {
- return;
- }
- if (GCToEEInterface::AppDomainCanAccessHandleTable(unloadingDomain.m_dwIndex))
- {
- return;
- }
- _ASSERTE (!"Access to a handle in unloaded domain is not allowed");
END_DEBUG_ONLY_CODE;
-#endif // !DACCESS_COMPILE
}
#endif
@@ -443,10 +372,6 @@ void HndDestroyHandle(HHANDLETABLE hTable, uint32_t uType, OBJECTHANDLE handle)
// sanity check handle we are being asked to free
_ASSERTE(handle);
-#ifdef _DEBUG
- ValidateAppDomainForHandle(handle);
-#endif
-
// fetch the handle table pointer
HandleTable *pTable = Table(hTable);
@@ -593,35 +518,18 @@ void HndLogSetEvent(OBJECTHANDLE handle, _UNCHECKED_OBJECTREF value)
if (EVENT_ENABLED(SetGCHandle) || EVENT_ENABLED(PrvSetGCHandle))
{
uint32_t hndType = HandleFetchType(handle);
- ADIndex appDomainIndex = HndGetHandleADIndex(handle);
- void* pAppDomain = GCToEEInterface::GetAppDomainAtIndex(appDomainIndex.m_dwIndex);
uint32_t generation = value != 0 ? g_theGCHeap->WhichGeneration(value) : 0;
- FIRE_EVENT(SetGCHandle, (void *)handle, (void *)value, hndType, generation, (uint64_t)pAppDomain);
- FIRE_EVENT(PrvSetGCHandle, (void *) handle, (void *)value, hndType, generation, (uint64_t)pAppDomain);
+ FIRE_EVENT(SetGCHandle, (void *)handle, (void *)value, hndType, generation);
+ FIRE_EVENT(PrvSetGCHandle, (void *) handle, (void *)value, hndType, generation);
// Also fire the things pinned by Async pinned handles
if (hndType == HNDTYPE_ASYNCPINNED)
{
- // the closure passed to "WalkOverlappedObject" is not permitted to implicitly
- // capture any variables in this scope, since WalkForOverlappedObject takes a bare
- // function pointer and context pointer as arguments. We can still /explicitly/
- // close over values in this scope by doing what the compiler would do and introduce
- // a structure that contains all of the things we closed over, while passing a pointer
- // to this structure as our closure's context pointer.
- struct ClosureCapture
+ GCToEEInterface::WalkAsyncPinned(value, value, [](Object*, Object* to, void* ctx)
{
- void* pAppDomain;
- Object* overlapped;
- };
-
- ClosureCapture captured;
- captured.pAppDomain = pAppDomain;
- captured.overlapped = value;
- GCToEEInterface::WalkAsyncPinned(value, &captured, [](Object*, Object* to, void* ctx)
- {
- ClosureCapture* captured = reinterpret_cast<ClosureCapture*>(ctx);
+ Object* overlapped = reinterpret_cast<Object*>(ctx);
uint32_t generation = to != nullptr ? g_theGCHeap->WhichGeneration(to) : 0;
- FIRE_EVENT(SetGCHandle, (void *)captured->overlapped, (void *)to, HNDTYPE_PINNED, generation, (uint64_t)captured->pAppDomain);
+ FIRE_EVENT(SetGCHandle, (void *)overlapped, (void *)to, HNDTYPE_PINNED, generation);
});
}
}
@@ -1130,49 +1038,6 @@ uint32_t HndCountAllHandles(BOOL fUseLocks)
return uCount;
}
-BOOL Ref_HandleAsyncPinHandles(async_pin_enum_fn asyncPinCallback, void* context)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- }
- CONTRACTL_END;
-
- AsyncPinCallbackContext callbackCtx(asyncPinCallback, context);
- HandleTableBucket *pBucket = g_HandleTableMap.pBuckets[0];
- BOOL result = FALSE;
- int limit = getNumberOfSlots();
- for (int n = 0; n < limit; n ++ )
- {
- if (TableHandleAsyncPinHandles(Table(pBucket->pTable[n]), callbackCtx))
- {
- result = TRUE;
- }
- }
-
- return result;
-}
-
-void Ref_RelocateAsyncPinHandles(HandleTableBucket *pSource,
- HandleTableBucket *pTarget,
- void (*clearIfComplete)(Object* object),
- void (*setHandle)(Object* object, OBJECTHANDLE handle))
-{
- CONTRACTL
- {
- NOTHROW;
- GC_TRIGGERS;
- }
- CONTRACTL_END;
-
- int limit = getNumberOfSlots();
- for (int n = 0; n < limit; n ++ )
- {
- TableRelocateAsyncPinHandles(Table(pSource->pTable[n]), Table(pTarget->pTable[n]), clearIfComplete, setHandle);
- }
-}
-
/*--------------------------------------------------------------------------*/
diff --git a/src/gc/handletable.h b/src/gc/handletable.h
index 70959edf3b..7cadcf92c4 100644
--- a/src/gc/handletable.h
+++ b/src/gc/handletable.h
@@ -53,7 +53,7 @@ typedef PTR_PTR_HandleTable PTR_HHANDLETABLE;
/*
* handle manager init and shutdown routines
*/
-HHANDLETABLE HndCreateHandleTable(const uint32_t *pTypeFlags, uint32_t uTypeCount, ADIndex uADIndex);
+HHANDLETABLE HndCreateHandleTable(const uint32_t *pTypeFlags, uint32_t uTypeCount);
void HndDestroyHandleTable(HHANDLETABLE hTable);
#endif // !DACCESS_COMPILE
@@ -62,10 +62,6 @@ void HndDestroyHandleTable(HHANDLETABLE hTable);
*/
void HndSetHandleTableIndex(HHANDLETABLE hTable, uint32_t uTableIndex);
uint32_t HndGetHandleTableIndex(HHANDLETABLE hTable);
-ADIndex HndGetHandleTableADIndex(HHANDLETABLE hTable);
-
-GC_DAC_VISIBLE
-ADIndex HndGetHandleADIndex(OBJECTHANDLE handle);
#ifndef DACCESS_COMPILE
/*
@@ -143,9 +139,8 @@ uint32_t HndCountAllHandles(BOOL fUseLocks);
#ifdef _DEBUG_IMPL
-void ValidateAssignObjrefForHandle(OBJECTREF, ADIndex appDomainIndex);
-void ValidateFetchObjrefForHandle(OBJECTREF, ADIndex appDomainIndex);
-void ValidateAppDomainForHandle(OBJECTHANDLE handle);
+void ValidateAssignObjrefForHandle(OBJECTREF);
+void ValidateFetchObjrefForHandle(OBJECTREF);
#endif
/*
@@ -185,8 +180,7 @@ OBJECTREF HndFetchHandle(OBJECTHANDLE handle)
_ASSERTE("Attempt to access destroyed handle." && *(_UNCHECKED_OBJECTREF *)handle != DEBUG_DestroyedHandleValue);
// Make sure the objref for handle is valid
- ValidateFetchObjrefForHandle(ObjectToOBJECTREF(*(Object **)handle),
- HndGetHandleTableADIndex(HndGetHandleTable(handle)));
+ ValidateFetchObjrefForHandle(ObjectToOBJECTREF(*(Object **)handle));
#endif // _DEBUG_IMPL
// wrap the raw objectref and return it
diff --git a/src/gc/handletablecore.cpp b/src/gc/handletablecore.cpp
index 5fc661c000..ee2d8873a8 100644
--- a/src/gc/handletablecore.cpp
+++ b/src/gc/handletablecore.cpp
@@ -616,550 +616,6 @@ TableSegment *SegmentAlloc(HandleTable *pTable)
return pSegment;
}
-// Mark a handle being free.
-__inline void SegmentMarkFreeMask(TableSegment *pSegment, _UNCHECKED_OBJECTREF* h)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_COOPERATIVE;
- }
- CONTRACTL_END;
-
- uint32_t uMask = (uint32_t)(h - pSegment->rgValue);
- uint32_t uBit = uMask % HANDLE_HANDLES_PER_MASK;
- uMask = uMask / HANDLE_HANDLES_PER_MASK;
- pSegment->rgFreeMask[uMask] |= (1<<uBit);
-}
-
-// Mark a handle being used.
-__inline void SegmentUnMarkFreeMask(TableSegment *pSegment, _UNCHECKED_OBJECTREF* h)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_COOPERATIVE;
- }
- CONTRACTL_END;
-
- uint32_t uMask = (uint32_t)(h - pSegment->rgValue);
- uint32_t uBit = uMask % HANDLE_HANDLES_PER_MASK;
- uMask = uMask / HANDLE_HANDLES_PER_MASK;
- pSegment->rgFreeMask[uMask] &= ~(1<<uBit);
-}
-
-// Prepare a segment to be moved to default domain.
-// Remove all non-async pin handles.
-void SegmentPreCompactAsyncPinHandles(TableSegment *pSegment, void (*clearIfComplete)(Object*))
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_COOPERATIVE;
- }
- CONTRACTL_END;
-
- pSegment->fResortChains = true;
- pSegment->fNeedsScavenging = true;
-
- // Zero out all non-async pin handles
- uint32_t uBlock;
- for (uBlock = 0; uBlock < pSegment->bEmptyLine; uBlock ++)
- {
- if (pSegment->rgBlockType[uBlock] == TYPE_INVALID)
- {
- continue;
- }
- else if (pSegment->rgBlockType[uBlock] != HNDTYPE_ASYNCPINNED)
- {
- _UNCHECKED_OBJECTREF *pValue = pSegment->rgValue + (uBlock * HANDLE_HANDLES_PER_BLOCK);
- _UNCHECKED_OBJECTREF *pLast = pValue + HANDLE_HANDLES_PER_BLOCK;
- do
- {
- *pValue = NULL;
- pValue ++;
- } while (pValue < pLast);
-
- ((uint32_t*)pSegment->rgGeneration)[uBlock] = (uint32_t)-1;
-
- uint32_t *pdwMask = pSegment->rgFreeMask + (uBlock * HANDLE_MASKS_PER_BLOCK);
- uint32_t *pdwMaskLast = pdwMask + HANDLE_MASKS_PER_BLOCK;
- do
- {
- *pdwMask = MASK_EMPTY;
- pdwMask ++;
- } while (pdwMask < pdwMaskLast);
-
- pSegment->rgBlockType[uBlock] = TYPE_INVALID;
- pSegment->rgUserData[uBlock] = BLOCK_INVALID;
- pSegment->rgLocks[uBlock] = 0;
- }
- }
-
- // Return all non-async pin handles to free list
- uint32_t uType;
- for (uType = 0; uType < HANDLE_MAX_INTERNAL_TYPES; uType ++)
- {
- if (uType == HNDTYPE_ASYNCPINNED)
- {
- continue;
- }
- pSegment->rgFreeCount[uType] = 0;
- if (pSegment->rgHint[uType] != BLOCK_INVALID)
- {
- uint32_t uLast = pSegment->rgHint[uType];
- uint8_t uFirst = pSegment->rgAllocation[uLast];
- pSegment->rgAllocation[uLast] = pSegment->bFreeList;
- pSegment->bFreeList = uFirst;
- pSegment->rgHint[uType] = BLOCK_INVALID;
- pSegment->rgTail[uType] = BLOCK_INVALID;
- }
- }
-
- // make sure the remaining async handle has MethodTable that exists in default domain
- uBlock = pSegment->rgHint[HNDTYPE_ASYNCPINNED];
- if (uBlock == BLOCK_INVALID)
- {
- return;
- }
- uint32_t freeCount = 0;
- for (uBlock = 0; uBlock < pSegment->bEmptyLine; uBlock ++)
- {
- if (pSegment->rgBlockType[uBlock] != HNDTYPE_ASYNCPINNED)
- {
- continue;
- }
- if (pSegment->rgFreeMask[uBlock*2] == (uint32_t)-1 && pSegment->rgFreeMask[uBlock*2+1] == (uint32_t)-1)
- {
- continue;
- }
- _UNCHECKED_OBJECTREF *pValue = pSegment->rgValue + (uBlock * HANDLE_HANDLES_PER_BLOCK);
- _UNCHECKED_OBJECTREF *pLast = pValue + HANDLE_HANDLES_PER_BLOCK;
-
- do
- {
- _UNCHECKED_OBJECTREF value = *pValue;
- if (!HndIsNullOrDestroyedHandle(value))
- {
- clearIfComplete((Object*)value);
- }
- else
- {
- // reset free mask
- SegmentMarkFreeMask(pSegment, pValue);
- freeCount ++;
- }
- pValue ++;
- } while (pValue != pLast);
- }
-
- pSegment->rgFreeCount[HNDTYPE_ASYNCPINNED] = freeCount;
-}
-
-// Copy a handle to a different segment in the same HandleTable
-BOOL SegmentCopyAsyncPinHandle(TableSegment *pSegment, _UNCHECKED_OBJECTREF *h)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_COOPERATIVE;
- }
- CONTRACTL_END;
-
- _ASSERTE (HandleFetchSegmentPointer((OBJECTHANDLE)h) != pSegment);
-
- if (pSegment->rgFreeCount[HNDTYPE_ASYNCPINNED] == 0)
- {
- uint8_t uBlock = pSegment->bFreeList;
- if (uBlock == BLOCK_INVALID)
- {
- // All slots are used up.
- return FALSE;
- }
- pSegment->bFreeList = pSegment->rgAllocation[uBlock];
- pSegment->rgBlockType[uBlock] = HNDTYPE_ASYNCPINNED;
- pSegment->rgAllocation[uBlock] = pSegment->rgHint[HNDTYPE_ASYNCPINNED];
- pSegment->rgHint[HNDTYPE_ASYNCPINNED] = uBlock;
- pSegment->rgFreeCount[HNDTYPE_ASYNCPINNED] += HANDLE_HANDLES_PER_BLOCK;
- }
- uint8_t uBlock = pSegment->rgHint[HNDTYPE_ASYNCPINNED];
- uint8_t uLast = uBlock;
- do
- {
- uint32_t n = uBlock * (HANDLE_HANDLES_PER_BLOCK/HANDLE_HANDLES_PER_MASK);
- uint32_t* pMask = pSegment->rgFreeMask + n;
- if (pMask[0] != 0 || pMask[1] != 0)
- {
- break;
- }
- uBlock = pSegment->rgAllocation[uBlock];
- } while (uBlock != uLast);
- _ASSERTE (uBlock != uLast);
- pSegment->rgHint[HNDTYPE_ASYNCPINNED] = uBlock;
- _UNCHECKED_OBJECTREF *pValue = pSegment->rgValue + (uBlock * HANDLE_HANDLES_PER_BLOCK);
- _UNCHECKED_OBJECTREF *pLast = pValue + HANDLE_HANDLES_PER_BLOCK;
- do
- {
- if (*pValue == NULL)
- {
- SegmentUnMarkFreeMask(pSegment,pValue);
- *pValue = *h;
- *h = NULL;
- break;
- }
- pValue ++;
- } while (pValue != pLast);
- _ASSERTE (pValue != pLast);
- pSegment->rgFreeCount[HNDTYPE_ASYNCPINNED] --;
- return TRUE;
-}
-
-void SegmentCompactAsyncPinHandles(TableSegment *pSegment, TableSegment **ppWorkerSegment, void (*clearIfComplete)(Object*))
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_COOPERATIVE;
- }
- CONTRACTL_END;
-
- uint32_t uBlock = pSegment->rgHint[HNDTYPE_ASYNCPINNED];
- if (uBlock == BLOCK_INVALID)
- {
- return;
- }
- for (uBlock = 0; uBlock < pSegment->bEmptyLine; uBlock ++)
- {
- if (pSegment->rgBlockType[uBlock] != HNDTYPE_ASYNCPINNED)
- {
- continue;
- }
- if (pSegment->rgFreeMask[uBlock*2] == (uint32_t)-1 && pSegment->rgFreeMask[uBlock*2+1] == (uint32_t)-1)
- {
- continue;
- }
- _UNCHECKED_OBJECTREF *pValue = pSegment->rgValue + (uBlock * HANDLE_HANDLES_PER_BLOCK);
- _UNCHECKED_OBJECTREF *pLast = pValue + HANDLE_HANDLES_PER_BLOCK;
-
- do
- {
- BOOL fNeedNewSegment = FALSE;
- _UNCHECKED_OBJECTREF value = *pValue;
- if (!HndIsNullOrDestroyedHandle(value))
- {
- clearIfComplete((Object*)value);
- fNeedNewSegment = !SegmentCopyAsyncPinHandle(*ppWorkerSegment,pValue);
- }
- if (fNeedNewSegment)
- {
- _ASSERTE ((*ppWorkerSegment)->rgFreeCount[HNDTYPE_ASYNCPINNED] == 0 &&
- (*ppWorkerSegment)->bFreeList == BLOCK_INVALID);
- TableSegment *pNextSegment = (*ppWorkerSegment)->pNextSegment;
- SegmentPreCompactAsyncPinHandles(pNextSegment, clearIfComplete);
- *ppWorkerSegment = pNextSegment;
- if (pNextSegment == pSegment)
- {
- // The current segment will be moved to default domain.
- return;
- }
- }
- else
- {
- pValue ++;
- }
- } while (pValue != pLast);
- }
-}
-
-
-// Mark AsyncPinHandles ready to be cleaned when the marker job is processed
-BOOL SegmentHandleAsyncPinHandles (TableSegment *pSegment, const AsyncPinCallbackContext &callbackCtx)
-{
- CONTRACTL
- {
- GC_NOTRIGGER;
- NOTHROW;
- MODE_COOPERATIVE;
- }
- CONTRACTL_END;
-
- uint32_t uBlock = pSegment->rgHint[HNDTYPE_ASYNCPINNED];
- if (uBlock == BLOCK_INVALID)
- {
- // There is no pinning handles.
- return FALSE;
- }
-
- BOOL result = FALSE;
-
- for (uBlock = 0; uBlock < pSegment->bEmptyLine; uBlock ++)
- {
- if (pSegment->rgBlockType[uBlock] != HNDTYPE_ASYNCPINNED)
- {
- continue;
- }
- if (pSegment->rgFreeMask[uBlock*2] == (uint32_t)-1 && pSegment->rgFreeMask[uBlock*2+1] == (uint32_t)-1)
- {
- continue;
- }
- _UNCHECKED_OBJECTREF *pValue = pSegment->rgValue + (uBlock * HANDLE_HANDLES_PER_BLOCK);
- _UNCHECKED_OBJECTREF *pLast = pValue + HANDLE_HANDLES_PER_BLOCK;
-
- do
- {
- _UNCHECKED_OBJECTREF value = *pValue;
- if (!HndIsNullOrDestroyedHandle(value))
- {
- // calls back into the VM using the callback given to
- // Ref_HandleAsyncPinHandles
- if (callbackCtx.Invoke((Object*)value))
- {
- result = TRUE;
- }
- }
- pValue ++;
- } while (pValue != pLast);
- }
-
- return result;
-}
-
-// Replace an async pin handle with one from default domain
-bool SegmentRelocateAsyncPinHandles (TableSegment *pSegment,
- HandleTable *pTargetTable,
- void (*clearIfComplete)(Object*),
- void (*setHandle)(Object*, OBJECTHANDLE))
-{
- CONTRACTL
- {
- GC_NOTRIGGER;
- NOTHROW;
- MODE_COOPERATIVE;
- }
- CONTRACTL_END;
-
- uint32_t uBlock = pSegment->rgHint[HNDTYPE_ASYNCPINNED];
- if (uBlock == BLOCK_INVALID)
- {
- // There is no pinning handles.
- return true;
- }
- for (uBlock = 0; uBlock < pSegment->bEmptyLine; uBlock ++)
- {
- if (pSegment->rgBlockType[uBlock] != HNDTYPE_ASYNCPINNED)
- {
- continue;
- }
- if (pSegment->rgFreeMask[uBlock*2] == (uint32_t)-1 && pSegment->rgFreeMask[uBlock*2+1] == (uint32_t)-1)
- {
- continue;
- }
- _UNCHECKED_OBJECTREF *pValue = pSegment->rgValue + (uBlock * HANDLE_HANDLES_PER_BLOCK);
- _UNCHECKED_OBJECTREF *pLast = pValue + HANDLE_HANDLES_PER_BLOCK;
-
- do
- {
- _UNCHECKED_OBJECTREF value = *pValue;
- if (!HndIsNullOrDestroyedHandle(value))
- {
- clearIfComplete((Object*)value);
- OBJECTHANDLE selfHandle = HndCreateHandle((HHANDLETABLE)pTargetTable, HNDTYPE_ASYNCPINNED, ObjectToOBJECTREF(value));
- if (!selfHandle)
- {
- // failed to allocate a new handle - callers have to handle this.
- return false;
- }
-
- setHandle((Object*)value, selfHandle);
- *pValue = NULL;
- }
- pValue ++;
- } while (pValue != pLast);
- }
-
- return true;
-}
-
-// Mark all non-pending AsyncPinHandle ready for cleanup.
-// We will queue a marker Overlapped to io completion port. We use the marker
-// to make sure that all iocompletion jobs before this marker have been processed.
-// After that we can free the async pinned handles.
-BOOL TableHandleAsyncPinHandles(HandleTable *pTable, const AsyncPinCallbackContext &callbackCtx)
-{
- CONTRACTL
- {
- GC_NOTRIGGER;
- NOTHROW;
- MODE_COOPERATIVE;
- }
- CONTRACTL_END;
-
- BOOL result = FALSE;
- TableSegment *pSegment = pTable->pSegmentList;
-
- CrstHolder ch(&pTable->Lock);
-
- while (pSegment)
- {
- if (SegmentHandleAsyncPinHandles (pSegment, callbackCtx))
- {
- result = TRUE;
- }
- pSegment = pSegment->pNextSegment;
- }
-
- return result;
-}
-
-// Keep needed async Pin Handle by moving them to default domain.
-// Strategy:
-// 1. Try to create pin handles in default domain to replace it.
-// 2. If 1 failed due to OOM, we will relocate segments from this HandleTable to default domain.
-// a. Clean the segment so that only saved pin handles exist. This segment becomes the worker segment.
-// b. Copy pin handles from remaining segments to the worker segment. If worker segment is full, start
-// from a again.
-// c. After copying all handles to worker segments, move the segments to default domain.
-// It is very important that in step 2, we should not fail for OOM, which means no memory allocation.
-void TableRelocateAsyncPinHandles(HandleTable *pTable,
- HandleTable *pTargetTable,
- void (*clearIfComplete)(Object*),
- void (*setHandle)(Object*, OBJECTHANDLE))
-{
- CONTRACTL
- {
- GC_TRIGGERS;
- NOTHROW;
- MODE_COOPERATIVE;
- }
- CONTRACTL_END;
-
- _ASSERTE (pTargetTable->uADIndex == ADIndex(GCToEEInterface::GetDefaultDomainIndex())); // must be for default domain
-
- BOOL fGotException = FALSE;
- TableSegment *pSegment = pTable->pSegmentList;
- bool wasSuccessful = true;
-
-#ifdef _DEBUG
- // on debug builds, execute the OOM path 10% of the time.
- if (GetRandomInt(100) < 10)
- goto SLOW_PATH;
-#endif
-
- // Step 1: replace pinning handles with ones from default domain
- while (pSegment)
- {
- wasSuccessful = wasSuccessful && SegmentRelocateAsyncPinHandles (pSegment, pTargetTable, clearIfComplete, setHandle);
- if (!wasSuccessful)
- {
- break;
- }
-
- pSegment = pSegment->pNextSegment;
- }
-
- if (wasSuccessful)
- {
- return;
- }
-
-#ifdef _DEBUG
-SLOW_PATH:
-#endif
-
- // step 2: default domain runs out of space
- // compact all remaining pinning handles and move the segments to default domain
-
- while (true)
- {
- CrstHolderWithState ch(&pTable->Lock);
-
- // We cannot move segments to a different table if we're asynchronously scanning the current table as
- // part of a concurrent GC. That's because the async table scanning code does most of its work without
- // the table lock held. So we'll take the table lock and then look to see if we're in a concurrent GC.
- // If we are we'll back out and try again. This doesn't prevent a concurrent GC from initiating while
- // we have the lock held but the part we care about (the async table scan) takes the table lock during
- // a preparation step so we'll be able to complete our segment moves before the async scan has a
- // chance to interfere with us (or vice versa).
- if (g_theGCHeap->IsConcurrentGCInProgress())
- {
- // A concurrent GC is in progress so someone might be scanning our segments asynchronously.
- // Release the lock, wait for the GC to complete and try again. The order is important; if we wait
- // before releasing the table lock we can deadlock with an async table scan.
- ch.Release();
- g_theGCHeap->WaitUntilConcurrentGCComplete();
- continue;
- }
-
- // If we get here then we managed to acquire the table lock and observe that no concurrent GC was in
- // progress. A concurrent GC could start at any time so that state may have changed, but since we took
- // the table lock first we know that the GC could only have gotten as far as attempting to initiate an
- // async handle table scan (which attempts to acquire the table lock). So as long as we complete our
- // segment compaction and moves without releasing the table lock we're guaranteed to complete before
- // the async scan can get in and observe any of the segments.
-
- // Compact async pinning handles into the smallest number of leading segments we can (the worker
- // segments).
- TableSegment *pWorkerSegment = pTable->pSegmentList;
- SegmentPreCompactAsyncPinHandles (pWorkerSegment, clearIfComplete);
-
- pSegment = pWorkerSegment->pNextSegment;
- while (pSegment)
- {
- SegmentCompactAsyncPinHandles (pSegment, &pWorkerSegment, clearIfComplete);
- pSegment= pSegment->pNextSegment;
- }
-
- // Empty the remaining segments.
- pSegment = pWorkerSegment->pNextSegment;
- while (pSegment)
- {
- memset(pSegment->rgValue, 0, (uint32_t)pSegment->bCommitLine * HANDLE_BYTES_PER_BLOCK);
- pSegment = pSegment->pNextSegment;
- }
-
- // Move the worker segments over to the tail end of the default domain's segment list.
- {
- CrstHolder ch1(&pTargetTable->Lock);
-
- // Locate the segment currently at the tail of the default domain's segment list.
- TableSegment *pTargetSegment = pTargetTable->pSegmentList;
- while (pTargetSegment->pNextSegment)
- {
- pTargetSegment = pTargetSegment->pNextSegment;
- }
-
- // Take the worker segments and point them to their new handle table and recalculate their
- // sequence numbers to be consistent with the queue they're moving to.
- uint8_t bLastSequence = pTargetSegment->bSequence;
- pSegment = pTable->pSegmentList;
- while (pSegment != pWorkerSegment->pNextSegment)
- {
- pSegment->pHandleTable = pTargetTable;
- pSegment->bSequence = (uint8_t)(((uint32_t)bLastSequence + 1) % 0x100);
- bLastSequence = pSegment->bSequence;
- pSegment = pSegment->pNextSegment;
- }
-
- // Join the worker segments to the tail of the default domain segment list.
- pTargetSegment->pNextSegment = pTable->pSegmentList;
-
- // Reset the current handle table segment list to omit the removed worker segments and start at
- // the first non-worker.
- pTable->pSegmentList = pWorkerSegment->pNextSegment;
-
- // The last worker segment is now the end of the default domain's segment list.
- pWorkerSegment->pNextSegment = NULL;
- }
-
- break;
- }
-}
-
/*
* Check if a handle is part of a HandleTable
*/
diff --git a/src/gc/handletablepriv.h b/src/gc/handletablepriv.h
index e0ed4b80e3..c696431f77 100644
--- a/src/gc/handletablepriv.h
+++ b/src/gc/handletablepriv.h
@@ -338,38 +338,6 @@ struct HandleTypeCache
int32_t lFreeIndex;
};
-/*
- * Async pin EE callback context, used to call back tot he EE when enumerating
- * over async pinned handles.
- */
-class AsyncPinCallbackContext
-{
-private:
- async_pin_enum_fn m_callback;
- void* m_context;
-
-public:
- /*
- * Constructs a new AsyncPinCallbackContext from a callback and a context,
- * which will be passed to the callback as its second parameter every time
- * it is invoked.
- */
- AsyncPinCallbackContext(async_pin_enum_fn callback, void* context)
- : m_callback(callback), m_context(context)
- {}
-
- /*
- * Invokes the callback with the given argument, returning the callback's
- * result.'
- */
- bool Invoke(Object* argument) const
- {
- assert(m_callback != nullptr);
- return m_callback(argument, m_context);
- }
-};
-
-
/*---------------------------------------------------------------------------*/
@@ -510,11 +478,6 @@ struct HandleTable
uint32_t rgTypeFlags[HANDLE_MAX_INTERNAL_TYPES];
/*
- * per-table AppDomain info
- */
- ADIndex uADIndex;
-
- /*
* lock for this table
*/
CrstStatic Lock;
@@ -782,22 +745,6 @@ TableSegment *SegmentAlloc(HandleTable *pTable);
void SegmentFree(TableSegment *pSegment);
/*
- * TableHandleAsyncPinHandles
- *
- * Mark ready for all non-pending OverlappedData that get moved to default domain.
- *
- */
-BOOL TableHandleAsyncPinHandles(HandleTable *pTable, const AsyncPinCallbackContext& callbackCtx);
-
-/*
- * TableRelocateAsyncPinHandles
- *
- * Replaces async pin handles with ones in default domain.
- *
- */
-void TableRelocateAsyncPinHandles(HandleTable *pTable, HandleTable *pTargetTable, void (*clearIfComplete)(Object*), void (*setHandle)(Object*, OBJECTHANDLE));
-
-/*
* Check if a handle is part of a HandleTable
*/
BOOL TableContainHandle(HandleTable *pTable, OBJECTHANDLE handle);
diff --git a/src/gc/objecthandle.cpp b/src/gc/objecthandle.cpp
index aa1ab57a51..aad2f0607c 100644
--- a/src/gc/objecthandle.cpp
+++ b/src/gc/objecthandle.cpp
@@ -623,7 +623,7 @@ bool Ref_Initialize()
n_slots * sizeof(HHANDLETABLE));
for (int uCPUindex = 0; uCPUindex < n_slots; uCPUindex++)
{
- pBucket->pTable[uCPUindex] = HndCreateHandleTable(s_rgTypeFlags, _countof(s_rgTypeFlags), ADIndex(1));
+ pBucket->pTable[uCPUindex] = HndCreateHandleTable(s_rgTypeFlags, _countof(s_rgTypeFlags));
if (pBucket->pTable[uCPUindex] == NULL)
goto CleanupAndFail;
@@ -688,7 +688,7 @@ void Ref_Shutdown()
}
#ifndef FEATURE_REDHAWK
-bool Ref_InitializeHandleTableBucket(HandleTableBucket* bucket, void* context)
+bool Ref_InitializeHandleTableBucket(HandleTableBucket* bucket)
{
CONTRACTL
{
@@ -720,7 +720,7 @@ bool Ref_InitializeHandleTableBucket(HandleTableBucket* bucket, void* context)
ZeroMemory(result->pTable, n_slots * sizeof(HHANDLETABLE));
for (int uCPUindex=0; uCPUindex < n_slots; uCPUindex++) {
- result->pTable[uCPUindex] = HndCreateHandleTable(s_rgTypeFlags, _countof(s_rgTypeFlags), ADIndex((DWORD)(uintptr_t)context));
+ result->pTable[uCPUindex] = HndCreateHandleTable(s_rgTypeFlags, _countof(s_rgTypeFlags));
if (!result->pTable[uCPUindex])
return false;
}
@@ -863,11 +863,8 @@ void SetDependentHandleSecondary(OBJECTHANDLE handle, OBJECTREF objref)
_ASSERTE(handle);
#ifdef _DEBUG
- // handle should not be in unloaded domain
- ValidateAppDomainForHandle(handle);
-
// Make sure the objref is valid before it is assigned to a handle
- ValidateAssignObjrefForHandle(objref, HndGetHandleTableADIndex(HndGetHandleTable(handle)));
+ ValidateAssignObjrefForHandle(objref);
#endif
// unwrap the objectref we were given
_UNCHECKED_OBJECTREF value = OBJECTREF_TO_UNCHECKED_OBJECTREF(objref);
@@ -967,13 +964,6 @@ void TraceVariableHandles(HANDLESCANPROC pfnTrace, uintptr_t lp1, uintptr_t lp2,
HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber((ScanContext*) lp1)];
if (hTable)
{
-#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
- if (g_fEnableARM)
- {
- ScanContext* sc = (ScanContext *)lp1;
- sc->pCurrentDomain = SystemDomain::GetAppDomainAtIndex(HndGetHandleTableADIndex(hTable));
- }
-#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
HndScanHandlesForGC(hTable, VariableTraceDispatcher,
lp1, (uintptr_t)&info, &type, 1, condemned, maxgen, HNDGCF_EXTRAINFO | flags);
}
@@ -1032,13 +1022,6 @@ void Ref_TracePinningRoots(uint32_t condemned, uint32_t maxgen, ScanContext* sc,
HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber((ScanContext*) sc)];
if (hTable)
{
-#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
- if (g_fEnableARM)
- {
- sc->pCurrentDomain = SystemDomain::GetAppDomainAtIndex(HndGetHandleTableADIndex(hTable));
- }
-#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
-
// Pinned handles and async pinned handles are scanned in separate passes, since async pinned
// handles may require a callback into the EE in order to fully trace an async pinned
// object's object graph.
@@ -1074,13 +1057,6 @@ void Ref_TraceNormalRoots(uint32_t condemned, uint32_t maxgen, ScanContext* sc,
HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber(sc)];
if (hTable)
{
-#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
- if (g_fEnableARM)
- {
- sc->pCurrentDomain = SystemDomain::GetAppDomainAtIndex(HndGetHandleTableADIndex(hTable));
- }
-#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
-
HndScanHandlesForGC(hTable, PromoteObject, uintptr_t(sc), uintptr_t(fn), types, uTypeCount, condemned, maxgen, flags);
}
}
@@ -1360,48 +1336,6 @@ void TraceDependentHandlesBySingleThread(HANDLESCANPROC pfnTrace, uintptr_t lp1,
}
}
-// We scan handle tables by their buckets (ie, AD index). We could get into the situation where
-// the AD indices are not very compacted (for example if we have just unloaded ADs and their
-// indices haven't been reused yet) and we could be scanning them in an unbalanced fashion.
-// Consider using an array to represent the compacted form of all AD indices exist for the
-// sized ref handles.
-void ScanSizedRefByAD(uint32_t maxgen, HANDLESCANPROC scanProc, ScanContext* sc, Ref_promote_func* fn, uint32_t flags)
-{
- HandleTableMap *walk = &g_HandleTableMap;
- uint32_t type = HNDTYPE_SIZEDREF;
- int uCPUindex = getSlotNumber(sc);
- int n_slots = g_theGCHeap->GetNumberOfHeaps();
-
- while (walk)
- {
- for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++)
- {
- if (walk->pBuckets[i] != NULL)
- {
- ADIndex adIndex = HndGetHandleTableADIndex(walk->pBuckets[i]->pTable[0]);
- if ((adIndex.m_dwIndex % n_slots) == (uint32_t)uCPUindex)
- {
- for (int index = 0; index < n_slots; index++)
- {
- HHANDLETABLE hTable = walk->pBuckets[i]->pTable[index];
- if (hTable)
- {
-#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
- if (g_fEnableARM)
- {
- sc->pCurrentDomain = SystemDomain::GetAppDomainAtIndex(adIndex);
- }
-#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
- HndScanHandlesForGC(hTable, scanProc, uintptr_t(sc), uintptr_t(fn), &type, 1, maxgen, maxgen, flags);
- }
- }
- }
- }
- }
- walk = walk->pNext;
- }
-}
-
void ScanSizedRefByCPU(uint32_t maxgen, HANDLESCANPROC scanProc, ScanContext* sc, Ref_promote_func* fn, uint32_t flags)
{
HandleTableMap *walk = &g_HandleTableMap;
@@ -1417,13 +1351,6 @@ void ScanSizedRefByCPU(uint32_t maxgen, HANDLESCANPROC scanProc, ScanContext* sc
HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex];
if (hTable)
{
-#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
- if (g_fEnableARM)
- {
- sc->pCurrentDomain = SystemDomain::GetAppDomainAtIndex(HndGetHandleTableADIndex(hTable));
- }
-#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
-
HndScanHandlesForGC(hTable, scanProc, uintptr_t(sc), uintptr_t(fn), &type, 1, maxgen, maxgen, flags);
}
}
@@ -1858,7 +1785,6 @@ void PopulateHandleTableDacVars(GcDacVars* gcDacVars)
static_assert(offsetof(HandleTableMap, dwMaxIndex) == offsetof(dac_handle_table_map, dwMaxIndex), "handle table map DAC layout mismatch");
static_assert(offsetof(HandleTableBucket, pTable) == offsetof(dac_handle_table_bucket, pTable), "handle table bucket DAC layout mismatch");
static_assert(offsetof(HandleTableBucket, HandleTableIndex) == offsetof(dac_handle_table_bucket, HandleTableIndex), "handle table bucket DAC layout mismatch");
- static_assert(offsetof(HandleTable, uADIndex) == offsetof(dac_handle_table, uADIndex), "handle table DAC layout mismatch");
#ifndef DACCESS_COMPILE
gcDacVars->handle_table_map = reinterpret_cast<dac_handle_table_map*>(&g_HandleTableMap);
diff --git a/src/gc/objecthandle.h b/src/gc/objecthandle.h
index 7c44b34ffd..f1b5656265 100644
--- a/src/gc/objecthandle.h
+++ b/src/gc/objecthandle.h
@@ -78,9 +78,7 @@ int GetCurrentThreadHomeHeapNumber();
*/
bool Ref_Initialize();
void Ref_Shutdown();
-bool Ref_InitializeHandleTableBucket(HandleTableBucket* bucket, void* context);
-BOOL Ref_HandleAsyncPinHandles(async_pin_enum_fn callback, void* context);
-void Ref_RelocateAsyncPinHandles(HandleTableBucket *pSource, HandleTableBucket *pTarget, void (*clearIfComplete)(Object*), void (*setHandle)(Object*, OBJECTHANDLE));
+bool Ref_InitializeHandleTableBucket(HandleTableBucket* bucket);
void Ref_RemoveHandleTableBucket(HandleTableBucket *pBucket);
void Ref_DestroyHandleTableBucket(HandleTableBucket *pBucket);
diff --git a/src/gc/sample/gcenv.ee.cpp b/src/gc/sample/gcenv.ee.cpp
index 973b7f4491..730eb9dd9a 100644
--- a/src/gc/sample/gcenv.ee.cpp
+++ b/src/gc/sample/gcenv.ee.cpp
@@ -280,11 +280,6 @@ void GCToEEInterface::HandleFatalError(unsigned int exitCode)
abort();
}
-bool GCToEEInterface::ShouldFinalizeObjectForUnload(void* pDomain, Object* obj)
-{
- return true;
-}
-
bool GCToEEInterface::EagerFinalized(Object* obj)
{
// The sample does not finalize anything eagerly.
@@ -346,36 +341,11 @@ void GCToEEInterface::WalkAsyncPinned(Object* object, void* context, void (*call
{
}
-uint32_t GCToEEInterface::GetDefaultDomainIndex()
-{
- return -1;
-}
-
-void *GCToEEInterface::GetAppDomainAtIndex(uint32_t appDomainIndex)
-{
- return nullptr;
-}
-
-bool GCToEEInterface::AppDomainCanAccessHandleTable(uint32_t appDomainID)
-{
- return true;
-}
-
-uint32_t GCToEEInterface::GetIndexOfAppDomainBeingUnloaded()
-{
- return -1;
-}
-
uint32_t GCToEEInterface::GetTotalNumSizedRefHandles()
{
return -1;
}
-bool GCToEEInterface::AppDomainIsRudeUnload(void *appDomain)
-{
- return false;
-}
-
inline bool GCToEEInterface::AnalyzeSurvivorsRequested(int condemnedGeneration)
{
return false;