author     Sean Gillespie <sean@swgillespie.me>     2017-03-16 17:18:28 -0700
committer  GitHub <noreply@github.com>              2017-03-16 17:18:28 -0700
commit     1b827b5a82f8c6f8a9ed760ee127938dea5a7ea4 (patch)
tree       6187e2b2fb4e7858b39298cf60afcc2f838dcdd1 /src/vm
parent     54368dad5842089ed7b6a444fa50116d2c0a0b09 (diff)
[Local GC] Break EE dependency on GC's generation table and alloc lock in single-proc scenarios (#10065)
* Remove usage of the generation table from the EE by introducing an EE-owned GC alloc context used for allocations on single-proc machines.
* Move the GC alloc lock to the EE side of the interface
* Repair the Windows ARM build
* Move the decision to use per-thread alloc contexts to the EE
* Rename the lock used by StartNoGCRegion and EndNoGCRegion to be more indicative of what it is protecting
* Address code review feedback 2 (enumerate the global alloc context as a part of GCToEEInterface)
* Code review feedback (3)
* Address code review feedback (move some GC-internal globals to gcimpl.h and gc.cpp)
* g_global_alloc_lock is a dword, not a qword - fixes a deadlock
* Move GlobalAllocLock to gchelpers.cpp and switch to preemptive mode when spinning
* Repair the Windows x86 build
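For orientation before the diff: the sketch below illustrates, in plain C++, the allocation scheme this commit sets up. It is not code from the commit; only the names g_global_alloc_context and g_global_alloc_lock mirror symbols introduced below, gc_alloc_context is simplified, and AllocFromContext / AllocSingleProc are hypothetical helpers. Per-thread allocation contexts remain the default; on single-proc machines all threads share one EE-owned context guarded by a spin lock, where -1 means unlocked and 0 means held.

#include <atomic>
#include <cstddef>
#include <cstdint>

// Simplified stand-in for the GC allocation context (the real structure
// carries additional bookkeeping fields).
struct gc_alloc_context
{
    uint8_t* alloc_ptr   = nullptr;   // bump pointer into the current allocation quantum
    uint8_t* alloc_limit = nullptr;   // end of the current allocation quantum
};

// EE-owned context shared by all threads on single-proc machines, plus the
// lock guarding it: -1 = unlocked, >= 0 = held (sketch of the real protocol).
static gc_alloc_context  g_global_alloc_context;
static std::atomic<long> g_global_alloc_lock{-1};

// Hypothetical helper: bump-allocate from a context, returning nullptr when
// the quantum is exhausted (the runtime would then ask the GC for a refill).
static void* AllocFromContext(gc_alloc_context* ctx, size_t size)
{
    if (ctx->alloc_ptr == nullptr || ctx->alloc_ptr + size > ctx->alloc_limit)
        return nullptr;
    void* result = ctx->alloc_ptr;
    ctx->alloc_ptr += size;
    return result;
}

// Hypothetical helper: the global-context path. Swap 0 into the lock and spin
// while another thread holds it, then store -1 to release. The assembly fast
// paths use the same lock states but 'inc' the lock and bail to a slow path
// instead of spinning.
static void* AllocSingleProc(size_t size)
{
    while (g_global_alloc_lock.exchange(0) != -1)
    {
        // spin; the real code yields and switches to preemptive GC mode here
    }
    void* result = AllocFromContext(&g_global_alloc_context, size);
    g_global_alloc_lock.store(-1);
    return result;
}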
Diffstat (limited to 'src/vm')
-rw-r--r--   src/vm/amd64/JitHelpers_Slow.asm    68
-rw-r--r--   src/vm/amd64/asmconstants.h          6
-rw-r--r--   src/vm/arm/stubs.cpp                 2
-rw-r--r--   src/vm/ceeload.cpp                   3
-rw-r--r--   src/vm/gcenv.ee.cpp                 14
-rw-r--r--   src/vm/gcenv.ee.h                    2
-rw-r--r--   src/vm/gcheaputilities.cpp           3
-rw-r--r--   src/vm/gcheaputilities.h            35
-rw-r--r--   src/vm/gchelpers.cpp               124
-rw-r--r--   src/vm/i386/jitinterfacex86.cpp     29
-rw-r--r--   src/vm/jithelpers.cpp                8
-rw-r--r--   src/vm/jitinterfacegen.cpp           2
12 files changed, 209 insertions, 87 deletions
diff --git a/src/vm/amd64/JitHelpers_Slow.asm b/src/vm/amd64/JitHelpers_Slow.asm
index 7deed49d98..293e447540 100644
--- a/src/vm/amd64/JitHelpers_Slow.asm
+++ b/src/vm/amd64/JitHelpers_Slow.asm
@@ -467,13 +467,9 @@ NESTED_END JIT_NewArr1OBJ_MP, _TEXT
-; <TODO> this m_GCLock should be a size_t so we don't have a store-forwarding penalty in the code below.
-; Unfortunately, the compiler intrinsic for InterlockedExchangePointer seems to be broken and we
-; get bad code gen in gc.cpp on IA64. </TODO>
-M_GCLOCK equ ?m_GCLock@@3HC
-extern M_GCLOCK:dword
-extern generation_table:qword
+extern g_global_alloc_lock:dword
+extern g_global_alloc_context:qword
LEAF_ENTRY JIT_TrialAllocSFastSP, _TEXT
@@ -481,20 +477,20 @@ LEAF_ENTRY JIT_TrialAllocSFastSP, _TEXT
; m_BaseSize is guaranteed to be a multiple of 8.
- inc [M_GCLOCK]
+ inc [g_global_alloc_lock]
jnz JIT_NEW
- mov rax, [generation_table + 0] ; alloc_ptr
- mov r10, [generation_table + 8] ; limit_ptr
+ mov rax, [g_global_alloc_context + OFFSETOF__gc_alloc_context__alloc_ptr] ; alloc_ptr
+ mov r10, [g_global_alloc_context + OFFSETOF__gc_alloc_context__alloc_limit] ; limit_ptr
add r8, rax
cmp r8, r10
ja AllocFailed
- mov qword ptr [generation_table + 0], r8 ; update the alloc ptr
+ mov qword ptr [g_global_alloc_context + OFFSETOF__gc_alloc_context__alloc_ptr], r8 ; update the alloc ptr
mov [rax], rcx
- mov [M_GCLOCK], -1
+ mov [g_global_alloc_lock], -1
ifdef _DEBUG
call DEBUG_TrialAllocSetAppDomain_NoScratchArea
@@ -503,7 +499,7 @@ endif ; _DEBUG
ret
AllocFailed:
- mov [M_GCLOCK], -1
+ mov [g_global_alloc_lock], -1
jmp JIT_NEW
LEAF_END JIT_TrialAllocSFastSP, _TEXT
@@ -520,11 +516,11 @@ NESTED_ENTRY JIT_BoxFastUP, _TEXT
; m_BaseSize is guaranteed to be a multiple of 8.
- inc [M_GCLOCK]
+ inc [g_global_alloc_lock]
jnz JIT_Box
- mov rax, [generation_table + 0] ; alloc_ptr
- mov r10, [generation_table + 8] ; limit_ptr
+ mov rax, [g_global_alloc_context + OFFSETOF__gc_alloc_context__alloc_ptr] ; alloc_ptr
+ mov r10, [g_global_alloc_context + OFFSETOF__gc_alloc_context__alloc_limit] ; limit_ptr
add r8, rax
@@ -532,9 +528,9 @@ NESTED_ENTRY JIT_BoxFastUP, _TEXT
ja NoAlloc
- mov qword ptr [generation_table + 0], r8 ; update the alloc ptr
+ mov qword ptr [g_global_alloc_context + OFFSETOF__gc_alloc_context__alloc_ptr], r8 ; update the alloc ptr
mov [rax], rcx
- mov [M_GCLOCK], -1
+ mov [g_global_alloc_lock], -1
ifdef _DEBUG
call DEBUG_TrialAllocSetAppDomain_NoScratchArea
@@ -574,7 +570,7 @@ endif ; _DEBUG
ret
NoAlloc:
- mov [M_GCLOCK], -1
+ mov [g_global_alloc_lock], -1
jmp JIT_Box
NESTED_END JIT_BoxFastUP, _TEXT
@@ -602,20 +598,20 @@ LEAF_ENTRY AllocateStringFastUP, _TEXT
lea r8d, [r8d + ecx*2 + 7]
and r8d, -8
- inc [M_GCLOCK]
+ inc [g_global_alloc_lock]
jnz FramedAllocateString
- mov rax, [generation_table + 0] ; alloc_ptr
- mov r10, [generation_table + 8] ; limit_ptr
+ mov rax, [g_global_alloc_context + OFFSETOF__gc_alloc_context__alloc_ptr] ; alloc_ptr
+ mov r10, [g_global_alloc_context + OFFSETOF__gc_alloc_context__alloc_limit] ; limit_ptr
add r8, rax
cmp r8, r10
ja AllocFailed
- mov qword ptr [generation_table + 0], r8 ; update the alloc ptr
+ mov qword ptr [g_global_alloc_context + OFFSETOF__gc_alloc_context__alloc_ptr], r8 ; update the alloc ptr
mov [rax], r11
- mov [M_GCLOCK], -1
+ mov [g_global_alloc_lock], -1
mov [rax + OFFSETOF__StringObject__m_StringLength], ecx
@@ -626,7 +622,7 @@ endif ; _DEBUG
ret
AllocFailed:
- mov [M_GCLOCK], -1
+ mov [g_global_alloc_lock], -1
jmp FramedAllocateString
LEAF_END AllocateStringFastUP, _TEXT
@@ -668,11 +664,11 @@ LEAF_ENTRY JIT_NewArr1VC_UP, _TEXT
add r8d, 7
and r8d, -8
- inc [M_GCLOCK]
+ inc [g_global_alloc_lock]
jnz JIT_NewArr1
- mov rax, [generation_table + 0] ; alloc_ptr
- mov r10, [generation_table + 8] ; limit_ptr
+ mov rax, [g_global_alloc_context + OFFSETOF__gc_alloc_context__alloc_ptr] ; alloc_ptr
+ mov r10, [g_global_alloc_context + OFFSETOF__gc_alloc_context__alloc_limit] ; limit_ptr
add r8, rax
jc AllocFailed
@@ -680,9 +676,9 @@ LEAF_ENTRY JIT_NewArr1VC_UP, _TEXT
cmp r8, r10
ja AllocFailed
- mov qword ptr [generation_table + 0], r8 ; update the alloc ptr
+ mov qword ptr [g_global_alloc_context + OFFSETOF__gc_alloc_context__alloc_ptr], r8 ; update the alloc ptr
mov [rax], r9
- mov [M_GCLOCK], -1
+ mov [g_global_alloc_lock], -1
mov dword ptr [rax + OFFSETOF__ArrayBase__m_NumComponents], edx
@@ -693,7 +689,7 @@ endif ; _DEBUG
ret
AllocFailed:
- mov [M_GCLOCK], -1
+ mov [g_global_alloc_lock], -1
jmp JIT_NewArr1
LEAF_END JIT_NewArr1VC_UP, _TEXT
@@ -731,20 +727,20 @@ LEAF_ENTRY JIT_NewArr1OBJ_UP, _TEXT
; No need for rounding in this case - element size is 8, and m_BaseSize is guaranteed
; to be a multiple of 8.
- inc [M_GCLOCK]
+ inc [g_global_alloc_lock]
jnz JIT_NewArr1
- mov rax, [generation_table + 0] ; alloc_ptr
- mov r10, [generation_table + 8] ; limit_ptr
+ mov rax, [g_global_alloc_context + OFFSETOF__gc_alloc_context__alloc_ptr] ; alloc_ptr
+ mov r10, [g_global_alloc_context + OFFSETOF__gc_alloc_context__alloc_limit] ; limit_ptr
add r8, rax
cmp r8, r10
ja AllocFailed
- mov qword ptr [generation_table + 0], r8 ; update the alloc ptr
+ mov qword ptr [g_global_alloc_context + OFFSETOF__gc_alloc_context__alloc_ptr], r8 ; update the alloc ptr
mov [rax], r9
- mov [M_GCLOCK], -1
+ mov [g_global_alloc_lock], -1
mov dword ptr [rax + OFFSETOF__ArrayBase__m_NumComponents], edx
@@ -755,7 +751,7 @@ endif ; _DEBUG
ret
AllocFailed:
- mov [M_GCLOCK], -1
+ mov [g_global_alloc_lock], -1
OversizedArray:
jmp JIT_NewArr1
diff --git a/src/vm/amd64/asmconstants.h b/src/vm/amd64/asmconstants.h
index 9c3b22d8cc..e4f77deb42 100644
--- a/src/vm/amd64/asmconstants.h
+++ b/src/vm/amd64/asmconstants.h
@@ -165,6 +165,12 @@ ASMCONSTANTS_C_ASSERT(OFFSET__Thread__m_alloc_context__alloc_ptr == offsetof(Thr
#define OFFSET__Thread__m_alloc_context__alloc_limit 0x68
ASMCONSTANTS_C_ASSERT(OFFSET__Thread__m_alloc_context__alloc_limit == offsetof(Thread, m_alloc_context) + offsetof(gc_alloc_context, alloc_limit));
+#define OFFSETOF__gc_alloc_context__alloc_ptr 0x0
+ASMCONSTANT_OFFSETOF_ASSERT(gc_alloc_context, alloc_ptr);
+
+#define OFFSETOF__gc_alloc_context__alloc_limit 0x8
+ASMCONSTANT_OFFSETOF_ASSERT(gc_alloc_context, alloc_limit);
+
#define OFFSETOF__ThreadExceptionState__m_pCurrentTracker 0x000
ASMCONSTANTS_C_ASSERT(OFFSETOF__ThreadExceptionState__m_pCurrentTracker
== offsetof(ThreadExceptionState, m_pCurrentTracker));
diff --git a/src/vm/arm/stubs.cpp b/src/vm/arm/stubs.cpp
index c276d21877..f1ba278ada 100644
--- a/src/vm/arm/stubs.cpp
+++ b/src/vm/arm/stubs.cpp
@@ -2641,7 +2641,7 @@ void InitJITHelpers1()
))
{
- _ASSERTE(GCHeapUtilities::UseAllocationContexts());
+ _ASSERTE(GCHeapUtilities::UseThreadAllocationContexts());
// If the TLS for Thread is low enough use the super-fast helpers
if (gThreadTLSIndex < TLS_MINIMUM_AVAILABLE)
{
diff --git a/src/vm/ceeload.cpp b/src/vm/ceeload.cpp
index 1979d694cb..710195d809 100644
--- a/src/vm/ceeload.cpp
+++ b/src/vm/ceeload.cpp
@@ -14519,8 +14519,7 @@ void Module::ExpandAll()
#include "clrvarargs.h" /* for VARARG C_ASSERTs in asmconstants.h */
class CheckAsmOffsets
{
-#define ASMCONSTANTS_C_ASSERT(cond) \
- typedef char UNIQUE_LABEL(__C_ASSERT__)[(cond) ? 1 : -1];
+#define ASMCONSTANTS_C_ASSERT(cond) static_assert(cond, #cond);
#include "asmconstants.h"
};
diff --git a/src/vm/gcenv.ee.cpp b/src/vm/gcenv.ee.cpp
index baea98f4b9..2833c99aa6 100644
--- a/src/vm/gcenv.ee.cpp
+++ b/src/vm/gcenv.ee.cpp
@@ -725,10 +725,17 @@ void GCToEEInterface::GcEnumAllocContexts(enum_alloc_context_func* fn, void* par
}
CONTRACTL_END;
- Thread * pThread = NULL;
- while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
+ if (GCHeapUtilities::UseThreadAllocationContexts())
{
- fn(pThread->GetAllocContext(), param);
+ Thread * pThread = NULL;
+ while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
+ {
+ fn(pThread->GetAllocContext(), param);
+ }
+ }
+ else
+ {
+ fn(&g_global_alloc_context, param);
}
}
@@ -1330,3 +1337,4 @@ void GCToEEInterface::EnableFinalization(bool foundFinalizers)
FinalizerThread::EnableFinalization();
}
}
+
diff --git a/src/vm/gcenv.ee.h b/src/vm/gcenv.ee.h
index 9aa3e59e36..a7ab0b5dda 100644
--- a/src/vm/gcenv.ee.h
+++ b/src/vm/gcenv.ee.h
@@ -48,4 +48,4 @@ public:
#endif // FEATURE_STANDALONE_GC
-#endif // _GCENV_EE_H_
\ No newline at end of file
+#endif // _GCENV_EE_H_
diff --git a/src/vm/gcheaputilities.cpp b/src/vm/gcheaputilities.cpp
index c34d07b30a..b260c3d8f4 100644
--- a/src/vm/gcheaputilities.cpp
+++ b/src/vm/gcheaputilities.cpp
@@ -31,3 +31,6 @@ uint8_t* g_sw_ww_table = nullptr;
bool g_sw_ww_enabled_for_gc_heap = false;
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
+
+gc_alloc_context g_global_alloc_context = {};
+
diff --git a/src/vm/gcheaputilities.h b/src/vm/gcheaputilities.h
index 48481146b4..d5ce46c60d 100644
--- a/src/vm/gcheaputilities.h
+++ b/src/vm/gcheaputilities.h
@@ -20,6 +20,12 @@ GPTR_DECL(uint32_t,g_card_table);
}
#endif // !DACCESS_COMPILE
+// For single-proc machines, the EE will use a single, shared alloc context
+// for all allocations. In order to avoid extra indirections in assembly
+// allocation helpers, the EE owns the global allocation context and the
+// GC will update it when it needs to.
+extern "C" gc_alloc_context g_global_alloc_context;
+
extern "C" uint32_t* g_card_bundle_table;
extern "C" uint8_t* g_ephemeral_low;
extern "C" uint8_t* g_ephemeral_high;
@@ -100,22 +106,6 @@ public:
GetGCHeap()->WaitUntilGCComplete(bConsiderGCStart);
}
- // Returns true if we should be using allocation contexts, false otherwise.
- inline static bool UseAllocationContexts()
- {
- WRAPPER_NO_CONTRACT;
-#ifdef FEATURE_REDHAWK
- // SIMPLIFY: only use allocation contexts
- return true;
-#else
-#if defined(_TARGET_ARM_) || defined(FEATURE_PAL)
- return true;
-#else
- return ((IsServerHeap() ? true : (g_SystemInfo.dwNumberOfProcessors >= 2)));
-#endif
-#endif
- }
-
// Returns true if the held GC heap is a Server GC heap, false otherwise.
inline static bool IsServerHeap()
{
@@ -128,6 +118,18 @@ public:
#endif // FEATURE_SVR_GC
}
+ static bool UseThreadAllocationContexts()
+ {
+ // When running on a single-proc system, it's more efficient to use a single global
+ // allocation context for SOH allocations than to use one for every thread.
+#if defined(_TARGET_ARM_) || defined(FEATURE_PAL) || defined(FEATURE_REDHAWK)
+ return true;
+#else
+ return IsServerHeap() || ::GetCurrentProcessCpuCount() != 1;
+#endif
+
+ }
+
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
// Returns True if software write watch is currently enabled for the GC Heap,
@@ -192,7 +194,6 @@ public:
}
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
-
private:
// This class should never be instantiated.
GCHeapUtilities() = delete;
diff --git a/src/vm/gchelpers.cpp b/src/vm/gchelpers.cpp
index 6b3c2f6650..258a86239c 100644
--- a/src/vm/gchelpers.cpp
+++ b/src/vm/gchelpers.cpp
@@ -54,11 +54,110 @@ inline gc_alloc_context* GetThreadAllocContext()
{
WRAPPER_NO_CONTRACT;
- assert(GCHeapUtilities::UseAllocationContexts());
+ assert(GCHeapUtilities::UseThreadAllocationContexts());
return & GetThread()->m_alloc_context;
}
+// When not using per-thread allocation contexts, we (the EE) need to take care that
+// no two threads are concurrently modifying the global allocation context. This lock
+// must be acquired before any sort of operations involving the global allocation context
+// can occur.
+//
+// This lock is acquired by all allocations when not using per-thread allocation contexts.
+// It is acquired in two kinds of places:
+// 1) JIT_TrialAllocFastSP (and related assembly alloc helpers), which attempt to
+// acquire it but move into an alloc slow path if acquiring fails
+// (but does not decrement the lock variable when doing so)
+// 2) Alloc and AllocAlign8 in gchelpers.cpp, which acquire the lock using
+// the Acquire and Release methods below.
+class GlobalAllocLock {
+ friend struct AsmOffsets;
+private:
+ // The lock variable. This field must always be first.
+ LONG m_lock;
+
+public:
+ // Creates a new GlobalAllocLock in the unlocked state.
+ GlobalAllocLock() : m_lock(-1) {}
+
+ // Copy and copy-assignment operators should never be invoked
+ // for this type
+ GlobalAllocLock(const GlobalAllocLock&) = delete;
+ GlobalAllocLock& operator=(const GlobalAllocLock&) = delete;
+
+ // Acquires the lock, spinning if necessary to do so. When this method
+ // returns, m_lock will be zero and the lock will be acquired.
+ void Acquire()
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS; // switch to preemptive mode
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ DWORD spinCount = 0;
+ while(FastInterlockExchange(&m_lock, 0) != -1)
+ {
+ GCX_PREEMP();
+ __SwitchToThread(0, spinCount++);
+ }
+
+ assert(m_lock == 0);
+ }
+
+ // Releases the lock.
+ void Release()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // the lock may not be exactly 0. This is because the
+ // assembly alloc routines increment the lock variable and
+ // jump if not zero to the slow alloc path, which eventually
+ // will try to acquire the lock again. At that point, it will
+ // spin in Acquire (since m_lock is some number that's not zero).
+ // When the thread that /does/ hold the lock releases it, the spinning
+ // thread will continue.
+ MemoryBarrier();
+ assert(m_lock >= 0);
+ m_lock = -1;
+ }
+
+ // Static helper to acquire a lock, for use with the Holder template.
+ static void AcquireLock(GlobalAllocLock *lock)
+ {
+ WRAPPER_NO_CONTRACT;
+ lock->Acquire();
+ }
+
+ // Static helper to release a lock, for use with the Holder template
+ static void ReleaseLock(GlobalAllocLock *lock)
+ {
+ WRAPPER_NO_CONTRACT;
+ lock->Release();
+ }
+
+ typedef Holder<GlobalAllocLock *, GlobalAllocLock::AcquireLock, GlobalAllocLock::ReleaseLock> Holder;
+};
+
+typedef GlobalAllocLock::Holder GlobalAllocLockHolder;
+
+struct AsmOffsets {
+ static_assert(offsetof(GlobalAllocLock, m_lock) == 0, "ASM code relies on this property");
+};
+
+// For single-proc machines, the global allocation context is protected
+// from concurrent modification by this lock.
+//
+// When not using per-thread allocation contexts, certain methods on IGCHeap
+// require that this lock be held before calling. These methods are documented
+// on the IGCHeap interface.
+extern "C"
+{
+ GlobalAllocLock g_global_alloc_lock;
+}
+
+
// Checks to see if the given allocation size exceeds the
// largest object size allowed - if it does, it throws
// an OutOfMemoryException with a message indicating that
@@ -102,12 +201,12 @@ inline void CheckObjectSize(size_t alloc_size)
// * Call code:Alloc - When the jit helpers fall back, or we do allocations within the runtime code
// itself, we ultimately call here.
// * Call code:AllocLHeap - Used very rarely to force allocation to be on the large object heap.
-//
+//
// While this is a choke point into allocating an object, it is primitive (it does not want to know about
// MethodTable and thus does not initialize that poitner. It also does not know if the object is finalizable
// or contains pointers. Thus we quickly wrap this function in more user-friendly ones that know about
// MethodTables etc. (see code:FastAllocatePrimitiveArray code:AllocateArrayEx code:AllocateObject)
-//
+//
// You can get an exhaustive list of code sites that allocate GC objects by finding all calls to
// code:ProfilerObjectAllocatedCallback (since the profiler has to hook them all).
inline Object* Alloc(size_t size, BOOL bFinalize, BOOL bContainsPointers )
@@ -137,10 +236,16 @@ inline Object* Alloc(size_t size, BOOL bFinalize, BOOL bContainsPointers )
// We don't want to throw an SO during the GC, so make sure we have plenty
// of stack before calling in.
INTERIOR_STACK_PROBE_FOR(GetThread(), static_cast<unsigned>(DEFAULT_ENTRY_PROBE_AMOUNT * 1.5));
- if (GCHeapUtilities::UseAllocationContexts())
+ if (GCHeapUtilities::UseThreadAllocationContexts())
+ {
retVal = GCHeapUtilities::GetGCHeap()->Alloc(GetThreadAllocContext(), size, flags);
+ }
else
- retVal = GCHeapUtilities::GetGCHeap()->Alloc(size, flags);
+ {
+ GlobalAllocLockHolder holder(&g_global_alloc_lock);
+ retVal = GCHeapUtilities::GetGCHeap()->Alloc(&g_global_alloc_context, size, flags);
+ }
+
if (!retVal)
{
@@ -172,10 +277,15 @@ inline Object* AllocAlign8(size_t size, BOOL bFinalize, BOOL bContainsPointers,
// We don't want to throw an SO during the GC, so make sure we have plenty
// of stack before calling in.
INTERIOR_STACK_PROBE_FOR(GetThread(), static_cast<unsigned>(DEFAULT_ENTRY_PROBE_AMOUNT * 1.5));
- if (GCHeapUtilities::UseAllocationContexts())
+ if (GCHeapUtilities::UseThreadAllocationContexts())
+ {
retVal = GCHeapUtilities::GetGCHeap()->AllocAlign8(GetThreadAllocContext(), size, flags);
+ }
else
- retVal = GCHeapUtilities::GetGCHeap()->AllocAlign8(size, flags);
+ {
+ GlobalAllocLockHolder holder(&g_global_alloc_lock);
+ retVal = GCHeapUtilities::GetGCHeap()->AllocAlign8(&g_global_alloc_context, size, flags);
+ }
if (!retVal)
{
diff --git a/src/vm/i386/jitinterfacex86.cpp b/src/vm/i386/jitinterfacex86.cpp
index c1769ebb57..1d8a6ba9b6 100644
--- a/src/vm/i386/jitinterfacex86.cpp
+++ b/src/vm/i386/jitinterfacex86.cpp
@@ -34,8 +34,7 @@
#define MON_DEBUG 1
#endif
-class generation;
-extern "C" generation generation_table[];
+extern "C" LONG g_global_alloc_lock;
extern "C" void STDCALL JIT_WriteBarrierReg_PreGrow();// JIThelp.asm/JIThelp.s
extern "C" void STDCALL JIT_WriteBarrierReg_PostGrow();// JIThelp.asm/JIThelp.s
@@ -562,9 +561,9 @@ void JIT_TrialAlloc::EmitCore(CPUSTUBLINKER *psl, CodeLabel *noLock, CodeLabel *
else
{
// Take the GC lock (there is no lock prefix required - we will use JIT_TrialAllocSFastMP on an MP System).
- // inc dword ptr [m_GCLock]
+ // inc dword ptr [g_global_alloc_lock]
psl->Emit16(0x05ff);
- psl->Emit32((int)(size_t)&m_GCLock);
+ psl->Emit32((int)(size_t)&g_global_alloc_lock);
// jnz NoLock
psl->X86EmitCondJump(noLock, X86CondCode::kJNZ);
@@ -580,9 +579,9 @@ void JIT_TrialAlloc::EmitCore(CPUSTUBLINKER *psl, CodeLabel *noLock, CodeLabel *
psl->X86EmitIndexRegLoad(kEDX, kECX, offsetof(MethodTable, m_BaseSize));
}
- // mov eax, dword ptr [generation_table]
+ // mov eax, dword ptr [g_global_alloc_context]
psl->Emit8(0xA1);
- psl->Emit32((int)(size_t)&generation_table);
+ psl->Emit32((int)(size_t)&g_global_alloc_context);
// Try the allocation.
// add edx, eax
@@ -591,17 +590,17 @@ void JIT_TrialAlloc::EmitCore(CPUSTUBLINKER *psl, CodeLabel *noLock, CodeLabel *
if (flags & (ALIGN8 | ALIGN8OBJ))
EmitAlignmentRoundup(psl, kEAX, kEDX, flags); // bump up EDX size by 12 if EAX unaligned (so that we are aligned)
- // cmp edx, dword ptr [generation_table+4]
+ // cmp edx, dword ptr [g_global_alloc_context+4]
psl->Emit16(0x153b);
- psl->Emit32((int)(size_t)&generation_table + 4);
+ psl->Emit32((int)(size_t)&g_global_alloc_context + 4);
// ja noAlloc
psl->X86EmitCondJump(noAlloc, X86CondCode::kJA);
// Fill in the allocation and get out.
- // mov dword ptr [generation_table], edx
+ // mov dword ptr [g_global_alloc_context], edx
psl->Emit16(0x1589);
- psl->Emit32((int)(size_t)&generation_table);
+ psl->Emit32((int)(size_t)&g_global_alloc_context);
if (flags & (ALIGN8 | ALIGN8OBJ))
EmitDummyObject(psl, kEAX, flags);
@@ -609,9 +608,9 @@ void JIT_TrialAlloc::EmitCore(CPUSTUBLINKER *psl, CodeLabel *noLock, CodeLabel *
// mov dword ptr [eax], ecx
psl->X86EmitIndexRegStore(kEAX, 0, kECX);
- // mov dword ptr [m_GCLock], 0FFFFFFFFh
+ // mov dword ptr [g_global_alloc_lock], 0FFFFFFFFh
psl->Emit16(0x05C7);
- psl->Emit32((int)(size_t)&m_GCLock);
+ psl->Emit32((int)(size_t)&g_global_alloc_lock);
psl->Emit32(0xFFFFFFFF);
}
@@ -667,9 +666,9 @@ void JIT_TrialAlloc::EmitNoAllocCode(CPUSTUBLINKER *psl, Flags flags)
}
else
{
- // mov dword ptr [m_GCLock], 0FFFFFFFFh
+ // mov dword ptr [g_global_alloc_lock], 0FFFFFFFFh
psl->Emit16(0x05c7);
- psl->Emit32((int)(size_t)&m_GCLock);
+ psl->Emit32((int)(size_t)&g_global_alloc_lock);
psl->Emit32(0xFFFFFFFF);
}
}
@@ -1427,7 +1426,7 @@ void InitJITHelpers1()
_ASSERTE(g_SystemInfo.dwNumberOfProcessors != 0);
- JIT_TrialAlloc::Flags flags = GCHeapUtilities::UseAllocationContexts() ?
+ JIT_TrialAlloc::Flags flags = GCHeapUtilities::UseThreadAllocationContexts() ?
JIT_TrialAlloc::MP_ALLOCATOR : JIT_TrialAlloc::NORMAL;
// Get CPU features and check for SSE2 support.
diff --git a/src/vm/jithelpers.cpp b/src/vm/jithelpers.cpp
index b46ac98ba5..aaab58963b 100644
--- a/src/vm/jithelpers.cpp
+++ b/src/vm/jithelpers.cpp
@@ -2752,7 +2752,7 @@ HCIMPL1(Object*, JIT_NewS_MP_FastPortable, CORINFO_CLASS_HANDLE typeHnd_)
do
{
- _ASSERTE(GCHeapUtilities::UseAllocationContexts());
+ _ASSERTE(GCHeapUtilities::UseThreadAllocationContexts());
// This is typically the only call in the fast path. Making the call early seems to be better, as it allows the compiler
// to use volatile registers for intermediate values. This reduces the number of push/pop instructions and eliminates
@@ -2844,7 +2844,7 @@ HCIMPL1(StringObject*, AllocateString_MP_FastPortable, DWORD stringLength)
do
{
- _ASSERTE(GCHeapUtilities::UseAllocationContexts());
+ _ASSERTE(GCHeapUtilities::UseThreadAllocationContexts());
// Instead of doing elaborate overflow checks, we just limit the number of elements. This will avoid all overflow
// problems, as well as making sure big string objects are correctly allocated in the big object heap.
@@ -3008,7 +3008,7 @@ HCIMPL2(Object*, JIT_NewArr1VC_MP_FastPortable, CORINFO_CLASS_HANDLE arrayTypeHn
do
{
- _ASSERTE(GCHeapUtilities::UseAllocationContexts());
+ _ASSERTE(GCHeapUtilities::UseThreadAllocationContexts());
// Do a conservative check here. This is to avoid overflow while doing the calculations. We don't
// have to worry about "large" objects, since the allocation quantum is never big enough for
@@ -3085,7 +3085,7 @@ HCIMPL2(Object*, JIT_NewArr1OBJ_MP_FastPortable, CORINFO_CLASS_HANDLE arrayTypeH
do
{
- _ASSERTE(GCHeapUtilities::UseAllocationContexts());
+ _ASSERTE(GCHeapUtilities::UseThreadAllocationContexts());
// Make sure that the total size cannot reach LARGE_OBJECT_SIZE, which also allows us to avoid overflow checks. The
// "256" slack is to cover the array header size and round-up, using a constant value here out of laziness.
diff --git a/src/vm/jitinterfacegen.cpp b/src/vm/jitinterfacegen.cpp
index 99e03f4b6b..ce4c1e90e3 100644
--- a/src/vm/jitinterfacegen.cpp
+++ b/src/vm/jitinterfacegen.cpp
@@ -218,7 +218,7 @@ void InitJITHelpers1()
))
{
// if (multi-proc || server GC)
- if (GCHeapUtilities::UseAllocationContexts())
+ if (GCHeapUtilities::UseThreadAllocationContexts())
{
#ifdef FEATURE_IMPLICIT_TLS
SetJitHelperFunction(CORINFO_HELP_NEWSFAST, JIT_NewS_MP_FastPortable);