path: root/src/vm/gchelpers.cpp
author    Sean Gillespie <sean@swgillespie.me>    2017-03-16 17:18:28 -0700
committer GitHub <noreply@github.com>             2017-03-16 17:18:28 -0700
commit    1b827b5a82f8c6f8a9ed760ee127938dea5a7ea4 (patch)
tree      6187e2b2fb4e7858b39298cf60afcc2f838dcdd1 /src/vm/gchelpers.cpp
parent    54368dad5842089ed7b6a444fa50116d2c0a0b09 (diff)
[Local GC] Break EE dependency on GC's generation table and alloc lock in single-proc scenarios (#10065)
* Remove usage of the generation table from the EE by introducing an EE-owned GC alloc context used for allocations on single-proc machines.
* Move the GC alloc lock to the EE side of the interface
* Repair the Windows ARM build
* Move the decision to use per-thread alloc contexts to the EE
* Rename the lock used by StartNoGCRegion and EndNoGCRegion to be more indicative of what it is protecting
* Address code review feedback 2 (enumerate the global alloc context as a part of GCToEEInterface)
* Code review feedback (3)
* Address code review feedback (move some GC-internal globals to gcimpl.h and gc.cpp)
* g_global_alloc_lock is a dword, not a qword - fixes a deadlock
* Move GlobalAllocLock to gchelpers.cpp and switch to preemptive mode when spinning
* Repair the Windows x86 build
Diffstat (limited to 'src/vm/gchelpers.cpp')
-rw-r--r--    src/vm/gchelpers.cpp    124
1 file changed, 117 insertions, 7 deletions
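The core of the change is an EE-side decision between per-thread allocation contexts and a single, lock-protected global allocation context. The sketch below is illustrative only: the processor-count heuristic shown is an assumption made for clarity, not the actual GCHeapUtilities::UseThreadAllocationContexts implementation.

    #include <windows.h>

    // Illustrative sketch, not the runtime's actual implementation. The commit
    // moves this decision to the EE; the heuristic here is an assumption.
    bool UseThreadAllocationContexts_Sketch()
    {
        SYSTEM_INFO info;
        GetSystemInfo(&info);
        // Multi-proc: give every thread its own gc_alloc_context.
        // Single-proc: all threads share one EE-owned context, serialized by
        // the g_global_alloc_lock introduced by this patch in gchelpers.cpp.
        return info.dwNumberOfProcessors > 1;
    }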
diff --git a/src/vm/gchelpers.cpp b/src/vm/gchelpers.cpp
index 6b3c2f6650..258a86239c 100644
--- a/src/vm/gchelpers.cpp
+++ b/src/vm/gchelpers.cpp
@@ -54,11 +54,110 @@ inline gc_alloc_context* GetThreadAllocContext()
{
WRAPPER_NO_CONTRACT;
- assert(GCHeapUtilities::UseAllocationContexts());
+ assert(GCHeapUtilities::UseThreadAllocationContexts());
return & GetThread()->m_alloc_context;
}
+// When not using per-thread allocation contexts, we (the EE) need to take care that
+// no two threads are concurrently modifying the global allocation context. This lock
+// must be acquired before any sort of operations involving the global allocation context
+// can occur.
+//
+// This lock is acquired by all allocations when not using per-thread allocation contexts.
+// It is acquired in two kinds of places:
+// 1) JIT_TrialAllocFastSP (and related assembly alloc helpers), which attempt to
+// acquire it but move into an alloc slow path if acquiring fails
+// (but does not decrement the lock variable when doing so)
+// 2) Alloc and AllocAlign8 in gchelpers.cpp, which acquire the lock using
+// the Acquire and Release methods below.
+class GlobalAllocLock {
+ friend struct AsmOffsets;
+private:
+ // The lock variable. This field must always be first.
+ LONG m_lock;
+
+public:
+ // Creates a new GlobalAllocLock in the unlocked state.
+ GlobalAllocLock() : m_lock(-1) {}
+
+ // Copy and copy-assignment operators should never be invoked
+ // for this type
+ GlobalAllocLock(const GlobalAllocLock&) = delete;
+ GlobalAllocLock& operator=(const GlobalAllocLock&) = delete;
+
+ // Acquires the lock, spinning if necessary to do so. When this method
+ // returns, m_lock will be zero and the lock will be acquired.
+ void Acquire()
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS; // switch to preemptive mode
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ DWORD spinCount = 0;
+ while(FastInterlockExchange(&m_lock, 0) != -1)
+ {
+ GCX_PREEMP();
+ __SwitchToThread(0, spinCount++);
+ }
+
+ assert(m_lock == 0);
+ }
+
+ // Releases the lock.
+ void Release()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // the lock may not be exactly 0. This is because the
+ // assembly alloc routines increment the lock variable and
+ // jump if not zero to the slow alloc path, which eventually
+ // will try to acquire the lock again. At that point, it will
+ // spin in Acquire (since m_lock is some number that's not zero).
+ // When the thread that /does/ hold the lock releases it, the spinning
+ // thread will continue.
+ MemoryBarrier();
+ assert(m_lock >= 0);
+ m_lock = -1;
+ }
+
+ // Static helper to acquire a lock, for use with the Holder template.
+ static void AcquireLock(GlobalAllocLock *lock)
+ {
+ WRAPPER_NO_CONTRACT;
+ lock->Acquire();
+ }
+
+ // Static helper to release a lock, for use with the Holder template
+ static void ReleaseLock(GlobalAllocLock *lock)
+ {
+ WRAPPER_NO_CONTRACT;
+ lock->Release();
+ }
+
+ typedef Holder<GlobalAllocLock *, GlobalAllocLock::AcquireLock, GlobalAllocLock::ReleaseLock> Holder;
+};
+
+typedef GlobalAllocLock::Holder GlobalAllocLockHolder;
+
+struct AsmOffsets {
+ static_assert(offsetof(GlobalAllocLock, m_lock) == 0, "ASM code relies on this property");
+};
+
+// For single-proc machines, the global allocation context is protected
+// from concurrent modification by this lock.
+//
+// When not using per-thread allocation contexts, certain methods on IGCHeap
+// require that this lock be held before calling. These methods are documented
+// on the IGCHeap interface.
+extern "C"
+{
+ GlobalAllocLock g_global_alloc_lock;
+}
+
+
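For readers tracing the Acquire/Release comments above: the sketch below models the lock protocol with std::atomic. It is a simplified stand-in for the FastInterlockExchange-based code in this hunk and for the assembly fast path it describes, not the runtime's actual implementation.

    #include <atomic>
    #include <thread>

    // Simplified model of the GlobalAllocLock protocol: -1 means unlocked,
    // any non-negative value means held or contended.
    struct GlobalAllocLockModel
    {
        std::atomic<long> m_lock{-1};

        // Fast path, as the assembly alloc helpers use it: one interlocked
        // increment. -1 -> 0 means the lock was taken; on failure the helper
        // falls into the slow allocation path *without* decrementing, so the
        // value can drift above zero.
        bool TryAcquireFastPath()
        {
            return m_lock.fetch_add(1) == -1;
        }

        // Slow path (Acquire above): exchange to 0 and spin until the previous
        // value was -1, i.e. until the current owner has released. The real
        // code switches to preemptive mode and calls __SwitchToThread while
        // spinning.
        void Acquire()
        {
            while (m_lock.exchange(0) != -1)
                std::this_thread::yield();
        }

        // Release: failed fast-path attempts may have pushed the value above
        // zero, so release does not decrement -- it resets the lock to -1.
        void Release()
        {
            m_lock.store(-1, std::memory_order_release);
        }
    };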
// Checks to see if the given allocation size exceeds the
// largest object size allowed - if it does, it throws
// an OutOfMemoryException with a message indicating that
@@ -102,12 +201,12 @@ inline void CheckObjectSize(size_t alloc_size)
// * Call code:Alloc - When the jit helpers fall back, or we do allocations within the runtime code
// itself, we ultimately call here.
// * Call code:AllocLHeap - Used very rarely to force allocation to be on the large object heap.
-//
+//
// While this is a choke point into allocating an object, it is primitive (it does not want to know about
// MethodTable and thus does not initialize that pointer. It also does not know if the object is finalizable
// or contains pointers. Thus we quickly wrap this function in more user-friendly ones that know about
// MethodTables etc. (see code:FastAllocatePrimitiveArray code:AllocateArrayEx code:AllocateObject)
-//
+//
// You can get an exhaustive list of code sites that allocate GC objects by finding all calls to
// code:ProfilerObjectAllocatedCallback (since the profiler has to hook them all).
inline Object* Alloc(size_t size, BOOL bFinalize, BOOL bContainsPointers )
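The comment above describes Alloc as a primitive choke point wrapped by friendlier entry points. The sketch below shows the rough shape of such a wrapper; it is a simplification, and the runtime's real AllocateObject and related functions do considerably more.

    // Simplified sketch of a "user-friendly" wrapper over Alloc; illustration only.
    Object* AllocateObject_Sketch(MethodTable* pMT)
    {
        // The wrapper knows about MethodTable, finalizability, and pointer
        // layout; Alloc itself does not.
        Object* obj = Alloc(pMT->GetBaseSize(),
                            pMT->HasFinalizer(),
                            pMT->ContainsPointers());

        // Alloc leaves the MethodTable pointer uninitialized; the wrapper sets it.
        obj->SetMethodTable(pMT);
        return obj;
    }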
@@ -137,10 +236,16 @@ inline Object* Alloc(size_t size, BOOL bFinalize, BOOL bContainsPointers )
// We don't want to throw an SO during the GC, so make sure we have plenty
// of stack before calling in.
INTERIOR_STACK_PROBE_FOR(GetThread(), static_cast<unsigned>(DEFAULT_ENTRY_PROBE_AMOUNT * 1.5));
- if (GCHeapUtilities::UseAllocationContexts())
+ if (GCHeapUtilities::UseThreadAllocationContexts())
+ {
retVal = GCHeapUtilities::GetGCHeap()->Alloc(GetThreadAllocContext(), size, flags);
+ }
else
- retVal = GCHeapUtilities::GetGCHeap()->Alloc(size, flags);
+ {
+ GlobalAllocLockHolder holder(&g_global_alloc_lock);
+ retVal = GCHeapUtilities::GetGCHeap()->Alloc(&g_global_alloc_context, size, flags);
+ }
+
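The GlobalAllocLockHolder in the else-branch above relies on the CLR Holder template's acquire-on-construction / release-on-destruction behavior. A rough equivalent, shown only to make the scoping explicit:

    // Rough RAII equivalent of GlobalAllocLockHolder (the Holder typedef binds
    // GlobalAllocLock::AcquireLock / ReleaseLock); illustration only.
    class GlobalAllocLockHolderSketch
    {
        GlobalAllocLock* m_lock;
    public:
        explicit GlobalAllocLockHolderSketch(GlobalAllocLock* lock)
            : m_lock(lock)
        {
            GlobalAllocLock::AcquireLock(m_lock);   // spins until the global alloc context is safe to use
        }

        ~GlobalAllocLockHolderSketch()
        {
            GlobalAllocLock::ReleaseLock(m_lock);   // released when the holder goes out of scope
        }
    };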
if (!retVal)
{
@@ -172,10 +277,15 @@ inline Object* AllocAlign8(size_t size, BOOL bFinalize, BOOL bContainsPointers,
// We don't want to throw an SO during the GC, so make sure we have plenty
// of stack before calling in.
INTERIOR_STACK_PROBE_FOR(GetThread(), static_cast<unsigned>(DEFAULT_ENTRY_PROBE_AMOUNT * 1.5));
- if (GCHeapUtilities::UseAllocationContexts())
+ if (GCHeapUtilities::UseThreadAllocationContexts())
+ {
retVal = GCHeapUtilities::GetGCHeap()->AllocAlign8(GetThreadAllocContext(), size, flags);
+ }
else
- retVal = GCHeapUtilities::GetGCHeap()->AllocAlign8(size, flags);
+ {
+ GlobalAllocLockHolder holder(&g_global_alloc_lock);
+ retVal = GCHeapUtilities::GetGCHeap()->AllocAlign8(&g_global_alloc_context, size, flags);
+ }
if (!retVal)
{