-rw-r--r--   src/inc/jithelpers.h                        |    7
-rw-r--r--   src/pal/inc/pal.h                           |   38
-rw-r--r--   src/vm/amd64/JitHelpers_InlineGetThread.asm | 1005
-rw-r--r--   src/vm/amd64/JitHelpers_Slow.asm            |  888
-rw-r--r--   src/vm/amd64/asmconstants.h                 |   33
-rw-r--r--   src/vm/amd64/cgencpu.h                      |   10
-rw-r--r--   src/vm/i386/asmconstants.h                  |   76
-rw-r--r--   src/vm/i386/cgencpu.h                       |   18
-rw-r--r--   src/vm/i386/jithelp.asm                     | 1052
-rw-r--r--   src/vm/jithelpers.cpp                       |  319
-rw-r--r--   src/vm/jitinterfacegen.cpp                  |   42
-rw-r--r--   src/vm/syncblk.cpp                          |   84
-rw-r--r--   src/vm/syncblk.h                            |   10
-rw-r--r--   src/vm/syncblk.inl                          |  227
14 files changed, 212 insertions(+), 3597 deletions(-)
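The pal.h hunk below drops the hand-written acquire/release variants of the interlocked compare-exchange and aliases them to the full-barrier version. A full barrier satisfies both acquire and release ordering, so the aliases are conservative but correct; the TODO comments in the deleted bodies admitted the dedicated versions were never implemented anyway. A minimal standalone sketch of the mapping, assuming a GCC/Clang toolchain (the wrapper name here is invented for illustration):

    // Full-barrier compare-and-swap via the same GCC/Clang builtin the PAL
    // uses; returns the value observed at *destination before the swap.
    inline long CompareExchangeFull(long volatile *destination,
                                    long exchange,
                                    long comperand)
    {
        return __sync_val_compare_and_swap(destination, comperand, exchange);
    }

    // Acquire/release callers get the full-barrier form: stronger ordering
    // than requested is always safe, merely (potentially) slower on weakly
    // ordered hardware.
    #define CompareExchangeAcquire CompareExchangeFull
    #define CompareExchangeRelease CompareExchangeFull

The same reasoning covers the new InterlockedDecrementAcquire/InterlockedDecrementRelease aliases.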
diff --git a/src/inc/jithelpers.h b/src/inc/jithelpers.h index b45948ac59..43b75293ae 100644 --- a/src/inc/jithelpers.h +++ b/src/inc/jithelpers.h @@ -134,17 +134,10 @@ JITHELPER(CORINFO_HELP_ENDCATCH, JIT_EndCatch, CORINFO_HELP_SIG_CANNOT_USE_ALIGN_STUB) #endif -#ifdef _TARGET_AMD64_ - DYNAMICJITHELPER(CORINFO_HELP_MON_ENTER, JIT_MonEnterWorker, CORINFO_HELP_SIG_REG_ONLY) - DYNAMICJITHELPER(CORINFO_HELP_MON_EXIT, JIT_MonExitWorker, CORINFO_HELP_SIG_REG_ONLY) - DYNAMICJITHELPER(CORINFO_HELP_MON_ENTER_STATIC, JIT_MonEnterStatic,CORINFO_HELP_SIG_REG_ONLY) - DYNAMICJITHELPER(CORINFO_HELP_MON_EXIT_STATIC, JIT_MonExitStatic,CORINFO_HELP_SIG_REG_ONLY) -#else JITHELPER(CORINFO_HELP_MON_ENTER, JIT_MonEnterWorker, CORINFO_HELP_SIG_REG_ONLY) JITHELPER(CORINFO_HELP_MON_EXIT, JIT_MonExitWorker, CORINFO_HELP_SIG_REG_ONLY) JITHELPER(CORINFO_HELP_MON_ENTER_STATIC, JIT_MonEnterStatic,CORINFO_HELP_SIG_REG_ONLY) JITHELPER(CORINFO_HELP_MON_EXIT_STATIC, JIT_MonExitStatic,CORINFO_HELP_SIG_REG_ONLY) -#endif JITHELPER(CORINFO_HELP_GETCLASSFROMMETHODPARAM, JIT_GetClassFromMethodParam, CORINFO_HELP_SIG_REG_ONLY) JITHELPER(CORINFO_HELP_GETSYNCFROMCLASSHANDLE, JIT_GetSyncFromClassHandle, CORINFO_HELP_SIG_REG_ONLY) diff --git a/src/pal/inc/pal.h b/src/pal/inc/pal.h index 4ae2187b69..1f611d0922 100644 --- a/src/pal/inc/pal.h +++ b/src/pal/inc/pal.h @@ -4202,6 +4202,9 @@ InterlockedDecrement( return __sync_sub_and_fetch(lpAddend, (LONG)1); } +#define InterlockedDecrementAcquire InterlockedDecrement +#define InterlockedDecrementRelease InterlockedDecrement + EXTERN_C PALIMPORT inline @@ -4297,39 +4300,8 @@ InterlockedCompareExchange( Exchange /* The value to be stored */); } -EXTERN_C -PALIMPORT -inline -LONG -PALAPI -InterlockedCompareExchangeAcquire( - IN OUT LONG volatile *Destination, - IN LONG Exchange, - IN LONG Comperand) -{ - // TODO: implement the version with only the acquire semantics - return __sync_val_compare_and_swap( - Destination, /* The pointer to a variable whose value is to be compared with. */ - Comperand, /* The value to be compared */ - Exchange /* The value to be stored */); -} - -EXTERN_C -PALIMPORT -inline -LONG -PALAPI -InterlockedCompareExchangeRelease( - IN OUT LONG volatile *Destination, - IN LONG Exchange, - IN LONG Comperand) -{ - // TODO: implement the version with only the release semantics - return __sync_val_compare_and_swap( - Destination, /* The pointer to a variable whose value is to be compared with. 
*/ - Comperand, /* The value to be compared */ - Exchange /* The value to be stored */); -} +#define InterlockedCompareExchangeAcquire InterlockedCompareExchange +#define InterlockedCompareExchangeRelease InterlockedCompareExchange // See the 32-bit variant in interlock2.s EXTERN_C diff --git a/src/vm/amd64/JitHelpers_InlineGetThread.asm b/src/vm/amd64/JitHelpers_InlineGetThread.asm index 022ec675df..40d63bf729 100644 --- a/src/vm/amd64/JitHelpers_InlineGetThread.asm +++ b/src/vm/amd64/JitHelpers_InlineGetThread.asm @@ -305,1010 +305,5 @@ endif ; _DEBUG LEAF_END JIT_NewArr1OBJ_MP_InlineGetThread, _TEXT -MON_ENTER_STACK_SIZE equ 00000020h -MON_EXIT_STACK_SIZE equ 00000068h - -ifdef MON_DEBUG -ifdef TRACK_SYNC -MON_ENTER_STACK_SIZE_INLINEGETTHREAD equ 00000020h -MON_EXIT_STACK_SIZE_INLINEGETTHREAD equ 00000068h -endif -endif - -BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX equ 08000000h ; syncblk.h -BIT_SBLK_IS_HASHCODE equ 04000000h ; syncblk.h -BIT_SBLK_SPIN_LOCK equ 10000000h ; syncblk.h - -SBLK_MASK_LOCK_THREADID equ 000003FFh ; syncblk.h -SBLK_LOCK_RECLEVEL_INC equ 00000400h ; syncblk.h -SBLK_MASK_LOCK_RECLEVEL equ 0000FC00h ; syncblk.h - -MASK_SYNCBLOCKINDEX equ 03FFFFFFh ; syncblk.h -STATE_CHECK equ 0FFFFFFFEh - -MT_CTX_PROXY_FLAG equ 10000000h - -g_pSyncTable equ ?g_pSyncTable@@3PEAVSyncTableEntry@@EA -g_SystemInfo equ ?g_SystemInfo@@3U_SYSTEM_INFO@@A -g_SpinConstants equ ?g_SpinConstants@@3USpinConstants@@A - -extern g_pSyncTable:QWORD -extern g_SystemInfo:QWORD -extern g_SpinConstants:QWORD - -; JITutil_MonEnterWorker(Object* obj, BYTE* pbLockTaken) -extern JITutil_MonEnterWorker:proc -; JITutil_MonTryEnter(Object* obj, INT32 timeout, BYTE* pbLockTaken) -extern JITutil_MonTryEnter:proc -; JITutil_MonExitWorker(Object* obj, BYTE* pbLockTaken) -extern JITutil_MonExitWorker:proc -; JITutil_MonSignal(AwareLock* lock, BYTE* pbLockTaken) -extern JITutil_MonSignal:proc -; JITutil_MonContention(AwareLock* lock, BYTE* pbLockTaken) -extern JITutil_MonContention:proc - -ifdef _DEBUG -MON_DEBUG equ 1 -endif - -ifdef MON_DEBUG -ifdef TRACK_SYNC -extern EnterSyncHelper:proc -extern LeaveSyncHelper:proc -endif -endif - - -MON_ENTER_EPILOG_ADJUST_STACK macro -ifdef MON_DEBUG -ifdef TRACK_SYNC - add rsp, MON_ENTER_STACK_SIZE_INLINEGETTHREAD -endif -endif - endm - - -MON_ENTER_RETURN_SUCCESS macro - ; This is sensitive to the potential that pbLockTaken is NULL - test rsi, rsi - jz @F - mov byte ptr [rsi], 1 - @@: - MON_ENTER_EPILOG_ADJUST_STACK - pop rsi - ret - - endm - - -; The worker versions of these functions are smart about the potential for pbLockTaken -; to be NULL, and if it is then they treat it as if they don't have a state variable. -; This is because when locking is not inserted by the JIT (instead by explicit calls to -; Monitor.Enter() and Monitor.Exit()) we will call these guys. -; -; This is a frameless helper for entering a monitor on a object. -; The object is in ARGUMENT_REG1. This tries the normal case (no -; blocking or object allocation) in line and calls a framed helper -; for the other cases. 
-; -; EXTERN_C void JIT_MonEnterWorker_InlineGetThread(Object* obj, /*OUT*/ BYTE* pbLockTaken) -JIT_HELPER_MONITOR_THUNK JIT_MonEnter, _TEXT -NESTED_ENTRY JIT_MonEnterWorker_InlineGetThread, _TEXT - push_nonvol_reg rsi -ifdef MON_DEBUG -ifdef TRACK_SYNC - alloc_stack MON_ENTER_STACK_SIZE_INLINEGETTHREAD - - save_reg_postrsp rcx, MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 10h + 0h - save_reg_postrsp rdx, MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 10h + 8h - save_reg_postrsp r8, MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 10h + 10h - save_reg_postrsp r9, MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 10h + 18h -endif -endif - END_PROLOGUE - - ; Put pbLockTaken in rsi, this can be null - mov rsi, rdx - - ; Check if the instance is NULL - test rcx, rcx - jz FramedLockHelper - - PATCHABLE_INLINE_GETTHREAD r11, JIT_MonEnterWorker_InlineGetThread_GetThread_PatchLabel - - ; Initialize delay value for retry with exponential backoff - mov r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwInitialDuration] - - ; Check if we can abort here - mov eax, dword ptr [r11 + OFFSETOF__Thread__m_State] - and eax, THREAD_CATCHATSAFEPOINT_BITS - ; Go through the slow code path to initiate ThreadAbort - jnz FramedLockHelper - - ; r8 will hold the syncblockindex address - lea r8, [rcx - OFFSETOF__ObjHeader__SyncBlkIndex] - - RetryThinLock: - ; Fetch the syncblock dword - mov eax, dword ptr [r8] - - ; Check whether we have the "thin lock" layout, the lock is free and the spin lock bit is not set - test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK + SBLK_MASK_LOCK_THREADID + SBLK_MASK_LOCK_RECLEVEL - jnz NeedMoreTests - - ; Everything is fine - get the thread id to store in the lock - mov edx, dword ptr [r11 + OFFSETOF__Thread__m_ThreadId] - - ; If the thread id is too large, we need a syncblock for sure - cmp edx, SBLK_MASK_LOCK_THREADID - ja FramedLockHelper - - ; We want to store a new value with the current thread id set in the low 10 bits - or edx, eax - lock cmpxchg dword ptr [r8], edx - jnz PrepareToWaitThinLock - - ; Everything went fine and we're done - add dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1 - - ; Done, leave and set pbLockTaken if we have it - MON_ENTER_RETURN_SUCCESS - - NeedMoreTests: - ; OK, not the simple case, find out which case it is - test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX - jnz HaveHashOrSyncBlockIndex - - ; The header is transitioning or the lock, treat this as if the lock was taken - test eax, BIT_SBLK_SPIN_LOCK - jnz PrepareToWaitThinLock - - ; Here we know we have the "thin lock" layout, but the lock is not free. - ; It could still be the recursion case, compare the thread id to check - mov edx, eax - and edx, SBLK_MASK_LOCK_THREADID - cmp edx, dword ptr [r11 + OFFSETOF__Thread__m_ThreadId] - jne PrepareToWaitThinLock - - ; Ok, the thread id matches, it's the recursion case. - ; Bump up the recursion level and check for overflow - lea edx, [eax + SBLK_LOCK_RECLEVEL_INC] - test edx, SBLK_MASK_LOCK_RECLEVEL - jz FramedLockHelper - - ; Try to put the new recursion level back. 
If the header was changed in the meantime - ; we need a full retry, because the layout could have changed - lock cmpxchg dword ptr [r8], edx - jnz RetryHelperThinLock - - ; Done, leave and set pbLockTaken if we have it - MON_ENTER_RETURN_SUCCESS - - PrepareToWaitThinLock: - ; If we are on an MP system, we try spinning for a certain number of iterations - cmp dword ptr [g_SystemInfo + OFFSETOF__g_SystemInfo__dwNumberOfProcessors], 1 - jle FramedLockHelper - - ; Exponential backoff; delay by approximately 2*r10 clock cycles - mov eax, r10d - delayLoopThinLock: - pause ; indicate to the CPU that we are spin waiting - sub eax, 1 - jnz delayLoopThinLock - - ; Next time, wait a factor longer - imul r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwBackoffFactor] - - cmp r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwMaximumDuration] - jle RetryHelperThinLock - - jmp FramedLockHelper - - RetryHelperThinLock: - jmp RetryThinLock - - HaveHashOrSyncBlockIndex: - ; If we have a hash code already, we need to create a sync block - test eax, BIT_SBLK_IS_HASHCODE - jnz FramedLockHelper - - ; OK, we have a sync block index, just and out the top bits and grab the synblock index - and eax, MASK_SYNCBLOCKINDEX - - ; Get the sync block pointer - mov rdx, qword ptr [g_pSyncTable] - shl eax, 4h - mov rdx, [rdx + rax + OFFSETOF__SyncTableEntry__m_SyncBlock] - - ; Check if the sync block has been allocated - test rdx, rdx - jz FramedLockHelper - - ; Get a pointer to the lock object - lea rdx, [rdx + OFFSETOF__SyncBlock__m_Monitor] - - ; Attempt to acquire the lock - RetrySyncBlock: - mov eax, dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld] - test eax, eax - jne HaveWaiters - - ; Common case, lock isn't held and there are no waiters. Attempt to - ; gain ownership ourselves - xor ecx, ecx - inc ecx - - lock cmpxchg dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld], ecx - jnz RetryHelperSyncBlock - - ; Success. Save the thread object in the lock and increment the use count - mov qword ptr [rdx + OFFSETOF__AwareLock__m_HoldingThread], r11 - add dword ptr [rdx + OFFSETOF__AwareLock__m_Recursion], 1 - add dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1 - -ifdef MON_DEBUG -ifdef TRACK_SYNC - mov rcx, [rsp + MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 8h] ; return address - ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock) - call EnterSyncHelper -endif -endif - - ; Done, leave and set pbLockTaken if we have it - MON_ENTER_RETURN_SUCCESS - - ; It's possible to get here with waiters by no lock held, but in this - ; case a signal is about to be fired which will wake up the waiter. So - ; for fairness sake we should wait too. - ; Check first for recur11ve lock attempts on the same thread. - HaveWaiters: - ; Is mutex already owned by current thread? - cmp [rdx + OFFSETOF__AwareLock__m_HoldingThread], r11 - jne PrepareToWait - - ; Yes, bump our use count. 
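The thin-lock fast path deleted above is easier to follow with the header-word encoding spelled out: the high bits say whether the word holds a lock or a hash/sync-block index, the low 10 bits hold the owning thread id, and six bits above them count recursive acquisitions. A hedged C++ restatement of the acquire attempt the asm performs with lock cmpxchg, using the constants from the diff (type and function names are illustrative, not CoreCLR's):

    #include <atomic>
    #include <cstdint>

    // Header-word layout, copied from the deleted asm (mirrors syncblk.h).
    const uint32_t BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX = 0x08000000;
    const uint32_t BIT_SBLK_SPIN_LOCK               = 0x10000000;
    const uint32_t SBLK_MASK_LOCK_THREADID          = 0x000003FF; // ids 1..1023
    const uint32_t SBLK_MASK_LOCK_RECLEVEL          = 0x0000FC00; // recursion count

    // One CAS attempt on a free thin lock, as in the RetryThinLock path.
    // Returns true when the calling thread now owns the lock.
    inline bool TryAcquireThinLock(std::atomic<uint32_t>& syncWord, uint32_t threadId)
    {
        uint32_t old = syncWord.load(std::memory_order_relaxed);
        // Must be a thin lock with the spin bit clear, no owner, no recursion...
        if (old & (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_SPIN_LOCK |
                   SBLK_MASK_LOCK_THREADID | SBLK_MASK_LOCK_RECLEVEL))
            return false;
        // ...and the id must fit in the 10 thread-id bits, otherwise a sync
        // block is required (the FramedLockHelper case).
        if (threadId > SBLK_MASK_LOCK_THREADID)
            return false;
        return syncWord.compare_exchange_strong(old, old | threadId,
                                                std::memory_order_acquire);
    }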
- add dword ptr [rdx + OFFSETOF__AwareLock__m_Recursion], 1 - -ifdef MON_DEBUG -ifdef TRACK_SYNC - mov rcx, [rsp + MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 8h] ; return address - ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock) - call EnterSyncHelper -endif -endif - ; Done, leave and set pbLockTaken if we have it - MON_ENTER_RETURN_SUCCESS - - PrepareToWait: - ; If we are on a MP system we try spinning for a certain number of iterations - cmp dword ptr [g_SystemInfo + OFFSETOF__g_SystemInfo__dwNumberOfProcessors], 1 - jle HaveWaiters1 - - ; Exponential backoff: delay by approximately 2*r10 clock cycles - mov eax, r10d - delayLoop: - pause ; indicate to the CPU that we are spin waiting - sub eax, 1 - jnz delayLoop - - ; Next time, wait a factor longer - imul r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwBackoffFactor] - - cmp r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwMaximumDuration] - jle RetrySyncBlock - - HaveWaiters1: - mov rcx, rdx - mov rdx, rsi - MON_ENTER_EPILOG_ADJUST_STACK - pop rsi - ; void JITutil_MonContention(AwareLock* lock, BYTE* pbLockTaken) - jmp JITutil_MonContention - - RetryHelperSyncBlock: - jmp RetrySyncBlock - - FramedLockHelper: - mov rdx, rsi - MON_ENTER_EPILOG_ADJUST_STACK - pop rsi - ; void JITutil_MonEnterWorker(Object* obj, BYTE* pbLockTaken) - jmp JITutil_MonEnterWorker - -NESTED_END JIT_MonEnterWorker_InlineGetThread, _TEXT - - -MON_EXIT_EPILOG_ADJUST_STACK macro -ifdef MON_DEBUG -ifdef TRACK_SYNC - add rsp, MON_EXIT_STACK_SIZE_INLINEGETTHREAD -endif -endif - endm - -MON_EXIT_RETURN_SUCCESS macro - ; This is sensitive to the potential that pbLockTaken is null - test r10, r10 - jz @F - mov byte ptr [r10], 0 - @@: - MON_EXIT_EPILOG_ADJUST_STACK - ret - - endm - - -; The worker versions of these functions are smart about the potential for pbLockTaken -; to be NULL, and if it is then they treat it as if they don't have a state variable. -; This is because when locking is not inserted by the JIT (instead by explicit calls to -; Monitor.Enter() and Monitor.Exit()) we will call these guys. -; -; This is a frameless helper for exiting a monitor on a object. -; The object is in ARGUMENT_REG1. This tries the normal case (no -; blocking or object allocation) in line and calls a framed helper -; for the other cases. 
-; -; void JIT_MonExitWorker_InlineGetThread(Object* obj, BYTE* pbLockTaken) -JIT_HELPER_MONITOR_THUNK JIT_MonExit, _TEXT -NESTED_ENTRY JIT_MonExitWorker_InlineGetThread, _TEXT - .savereg rcx, 0 -ifdef MON_DEBUG -ifdef TRACK_SYNC - alloc_stack MON_EXIT_STACK_SIZE_INLINEGETTHREAD - - save_reg_postrsp rcx, MON_EXIT_STACK_SIZE_INLINEGETTHREAD + 8h + 0h - save_reg_postrsp rdx, MON_EXIT_STACK_SIZE_INLINEGETTHREAD + 8h + 8h - save_reg_postrsp r8, MON_EXIT_STACK_SIZE_INLINEGETTHREAD + 8h + 10h - save_reg_postrsp r9, MON_EXIT_STACK_SIZE_INLINEGETTHREAD + 8h + 18h -endif -endif - END_PROLOGUE - - ; pbLockTaken is stored in r10, this can be null - mov r10, rdx - - ; if pbLockTaken is NULL then we got here without a state variable, avoid the - ; next comparison in that case as it will AV - test rdx, rdx - jz Null_pbLockTaken - - ; If the lock wasn't taken then we bail quickly without doing anything - cmp byte ptr [rdx], 0 - je LockNotTaken - - Null_pbLockTaken: - ; Check is the instance is null - test rcx, rcx - jz FramedLockHelper - - PATCHABLE_INLINE_GETTHREAD r11, JIT_MonExitWorker_InlineGetThread_GetThread_PatchLabel - - ; r8 will hold the syncblockindex address - lea r8, [rcx - OFFSETOF__ObjHeader__SyncBlkIndex] - - RetryThinLock: - ; Fetch the syncblock dword - mov eax, dword ptr [r8] - test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK - jnz NeedMoreTests - - ; Ok, we have a "thin lock" layout - check whether the thread id matches - mov edx, eax - and edx, SBLK_MASK_LOCK_THREADID - cmp edx, dword ptr [r11 + OFFSETOF__Thread__m_ThreadId] - jne FramedLockHelper - - ; check the recursion level - test eax, SBLK_MASK_LOCK_RECLEVEL - jne DecRecursionLevel - - ; It's zero -- we're leaving the lock. - ; So try to put back a zero thread id. - ; edx and eax match in the thread id bits, and edx is zero else where, so the xor is sufficient - xor edx, eax - lock cmpxchg dword ptr [r8], edx - jnz RetryThinLockHelper1 ; forward jump to avoid mispredict on success - - ; Dec the dwLockCount on the thread - sub dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1 - - ; Done, leave and set pbLockTaken if we have it - MON_EXIT_RETURN_SUCCESS - - RetryThinLockHelper1: - jmp RetryThinLock - - DecRecursionLevel: - lea edx, [eax - SBLK_LOCK_RECLEVEL_INC] - lock cmpxchg dword ptr [r8], edx - jnz RetryThinLockHelper2 ; forward jump to avoid mispredict on success - - ; We're done, leave and set pbLockTaken if we have it - MON_EXIT_RETURN_SUCCESS - - RetryThinLockHelper2: - jmp RetryThinLock - - NeedMoreTests: - ; Forward all special cases to the slow helper - test eax, BIT_SBLK_IS_HASHCODE + BIT_SBLK_SPIN_LOCK - jnz FramedLockHelper - - ; Get the sync block index and use it to compute the sync block pointer - mov rdx, qword ptr [g_pSyncTable] - and eax, MASK_SYNCBLOCKINDEX - shl eax, 4 - mov rdx, [rdx + rax + OFFSETOF__SyncTableEntry__m_SyncBlock] - - ; Was there a sync block? - test rdx, rdx - jz FramedLockHelper - - ; Get a pointer to the lock object. - lea rdx, [rdx + OFFSETOF__SyncBlock__m_Monitor] - - ; Check if the lock is held. 
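The exit path above undoes the same encoding: while recursion bits are set it subtracts one SBLK_LOCK_RECLEVEL_INC, and on the last release it clears the owner id; the asm's xor edx, eax works because edx holds exactly the thread-id bits of eax at that point. A hedged sketch of that release step (names illustrative; it assumes the hash/sync-block and spin-lock bits were already checked, as the code above does first):

    #include <atomic>
    #include <cstdint>

    const uint32_t SBLK_MASK_LOCK_THREADID = 0x000003FF;
    const uint32_t SBLK_LOCK_RECLEVEL_INC  = 0x00000400;
    const uint32_t SBLK_MASK_LOCK_RECLEVEL = 0x0000FC00;

    // One CAS attempt to release a thin lock; mirrors the deleted
    // DecRecursionLevel / "put back a zero thread id" paths.
    inline bool TryReleaseThinLock(std::atomic<uint32_t>& syncWord, uint32_t threadId)
    {
        uint32_t old = syncWord.load(std::memory_order_relaxed);
        if ((old & SBLK_MASK_LOCK_THREADID) != threadId)
            return false;                        // not the owner: framed helper decides
        uint32_t desired = (old & SBLK_MASK_LOCK_RECLEVEL)
                             ? old - SBLK_LOCK_RECLEVEL_INC // drop one recursion level
                             : old ^ threadId;              // clear the owner bits
        return syncWord.compare_exchange_strong(old, desired,
                                                std::memory_order_release);
    }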
- cmp qword ptr [rdx + OFFSETOF__AwareLock__m_HoldingThread], r11 - jne FramedLockHelper - -ifdef MON_DEBUG -ifdef TRACK_SYNC - mov [rsp + 28h], rcx - mov [rsp + 30h], rdx - mov [rsp + 38h], r10 - mov [rsp + 40h], r11 - - mov rcx, [rsp + MON_EXIT_STACK_SIZE_INLINEGETTHREAD ] ; return address - ; void LeaveSyncHelper(UINT_PTR caller, AwareLock* lock) - call LeaveSyncHelper - - mov rcx, [rsp + 28h] - mov rdx, [rsp + 30h] - mov r10, [rsp + 38h] - mov r11, [rsp + 40h] -endif -endif - - ; Reduce our recursion count - sub dword ptr [rdx + OFFSETOF__AwareLock__m_Recursion], 1 - jz LastRecursion - - ; Done, leave and set pbLockTaken if we have it - MON_EXIT_RETURN_SUCCESS - - RetryHelperThinLock: - jmp RetryThinLock - - FramedLockHelper: - mov rdx, r10 - MON_EXIT_EPILOG_ADJUST_STACK - ; void JITutil_MonExitWorker(Object* obj, BYTE* pbLockTaken) - jmp JITutil_MonExitWorker - - LastRecursion: -ifdef MON_DEBUG -ifdef TRACK_SYNC - mov rax, [rdx + OFFSETOF__AwareLock__m_HoldingThread] -endif -endif - - sub dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1 - mov qword ptr [rdx + OFFSETOF__AwareLock__m_HoldingThread], 0 - - Retry: - mov eax, dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld] - lea r9d, [eax - 1] - lock cmpxchg dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld], r9d - jne RetryHelper - - test eax, STATE_CHECK - jne MustSignal - - ; Done, leave and set pbLockTaken if we have it - MON_EXIT_RETURN_SUCCESS - - MustSignal: - mov rcx, rdx - mov rdx, r10 - MON_EXIT_EPILOG_ADJUST_STACK - ; void JITutil_MonSignal(AwareLock* lock, BYTE* pbLockTaken) - jmp JITutil_MonSignal - - RetryHelper: - jmp Retry - - LockNotTaken: - MON_EXIT_EPILOG_ADJUST_STACK - REPRET -NESTED_END JIT_MonExitWorker_InlineGetThread, _TEXT - - -; This is a frameless helper for trying to enter a monitor on a object. -; The object is in ARGUMENT_REG1 and a timeout in ARGUMENT_REG2. This tries the -; normal case (no object allocation) in line and calls a framed helper for the -; other cases. 
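Every variant in these two files shares one contention strategy before falling back to the framed, blocking helper: on a multiprocessor machine, spin in a pause loop and grow the delay geometrically via g_SpinConstants until the cap is hit. A sketch of that shape, with field names taken from the OFFSETOF constants above (the struct and helper themselves are stand-ins, and _mm_pause is x86-specific):

    #include <immintrin.h> // _mm_pause

    struct SpinConstantsSketch
    {
        unsigned dwInitialDuration;
        unsigned dwBackoffFactor;
        unsigned dwMaximumDuration;
    };

    // Try, then delay ~2*duration cycles, then try again with a longer delay;
    // mirrors the shape of the deleted delayLoopThinLock/DelayLoop sequences.
    // Returns false when the caller should take the framed (blocking) path.
    template <typename TryAcquire>
    bool SpinWithBackoff(const SpinConstantsSketch& sc, TryAcquire tryAcquire)
    {
        for (unsigned duration = sc.dwInitialDuration;
             duration <= sc.dwMaximumDuration;
             duration *= sc.dwBackoffFactor)
        {
            if (tryAcquire())
                return true;
            for (unsigned i = duration; i != 0; --i)
                _mm_pause(); // tell the CPU this is a spin-wait
        }
        return false;
    }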
-; -; void JIT_MonTryEnter_InlineGetThread(Object* obj, INT32 timeOut, BYTE* pbLockTaken) -NESTED_ENTRY JIT_MonTryEnter_InlineGetThread, _TEXT - ; save rcx, rdx (timeout) in the shadow space - .savereg rcx, 8h - mov [rsp + 8h], rcx - .savereg rdx, 10h - mov [rsp + 10h], rdx -ifdef MON_DEBUG -ifdef TRACK_SYNC - alloc_stack MON_ENTER_STACK_SIZE_INLINEGETTHREAD - -; rcx has already been saved -; save_reg_postrsp rcx, MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 8h + 0h -; rdx has already been saved -; save_reg_postrsp rdx, MON_ENTER_STACK_SIZE + 8h + 8h - save_reg_postrsp r8, MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 8h + 10h - save_reg_postrsp r9, MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 8h + 18h -endif -endif - END_PROLOGUE - - ; Check if the instance is NULL - test rcx, rcx - jz FramedLockHelper - - ; Check if the timeout looks valid - cmp edx, -1 - jl FramedLockHelper - - PATCHABLE_INLINE_GETTHREAD r11, JIT_MonTryEnter_GetThread_PatchLabel - - ; Initialize delay value for retry with exponential backoff - mov r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwInitialDuration] - - ; Check if we can abort here - mov eax, dword ptr [r11 + OFFSETOF__Thread__m_State] - and eax, THREAD_CATCHATSAFEPOINT_BITS - ; Go through the slow code path to initiate THreadAbort - jnz FramedLockHelper - - ; r9 will hold the syncblockindex address - lea r9, [rcx - OFFSETOF__ObjHeader__SyncBlkIndex] - - RetryThinLock: - ; Fetch the syncblock dword - mov eax, dword ptr [r9] - - ; Check whether we have the "thin lock" layout, the lock is free and the spin lock bit is not set - test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK + SBLK_MASK_LOCK_THREADID + SBLK_MASK_LOCK_RECLEVEL - jne NeedMoreTests - - ; Everything is fine - get the thread id to store in the lock - mov edx, dword ptr [r11 + OFFSETOF__Thread__m_ThreadId] - - ; If the thread id is too large, we need a syncblock for sure - cmp edx, SBLK_MASK_LOCK_THREADID - ja FramedLockHelper - - ; We want to store a new value with the current thread id set in the low 10 bits - or edx, eax - lock cmpxchg dword ptr [r9], edx - jnz RetryHelperThinLock - - ; Got the lock, everything is fine - add dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1 - ; Return TRUE - mov byte ptr [r8], 1 -ifdef MON_DEBUG -ifdef TRACK_SYNC - add rsp, MON_ENTER_STACK_SIZE_INLINEGETTHREAD -endif -endif - ret - - NeedMoreTests: - ; OK, not the simple case, find out which case it is - test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX - jnz HaveHashOrSyncBlockIndex - - ; The header is transitioning or the lock - test eax, BIT_SBLK_SPIN_LOCK - jnz RetryHelperThinLock - - ; Here we know we have the "thin lock" layout, but the lock is not free. - ; It could still be the recursion case, compare the thread id to check - mov edx, eax - and edx, SBLK_MASK_LOCK_THREADID - cmp edx, dword ptr [r11 + OFFSETOF__Thread__m_ThreadId] - jne PrepareToWaitThinLock - - ; Ok, the thread id matches, it's the recursion case. - ; Dump up the recursion level and check for overflow - lea edx, [eax + SBLK_LOCK_RECLEVEL_INC] - test edx, SBLK_MASK_LOCK_RECLEVEL - jz FramedLockHelper - - ; Try to put the new recursion level back. 
If the header was changed in the meantime - ; we need a full retry, because the layout could have changed - lock cmpxchg dword ptr [r9], edx - jnz RetryHelperThinLock - - ; Everything went fine and we're done, return TRUE - mov byte ptr [r8], 1 -ifdef MON_DEBUG -ifdef TRACK_SYNC - add rsp, MON_ENTER_STACK_SIZE_INLINEGETTHREAD -endif -endif - ret - - PrepareToWaitThinLock: - ; Return failure if timeout is zero - cmp dword ptr [rsp + 10h], 0 - je TimeoutZero - - ; If we are on an MP system, we try spinning for a certain number of iterations - cmp dword ptr [g_SystemInfo + OFFSETOF__g_SystemInfo__dwNumberOfProcessors], 1 - jle FramedLockHelper - - ; Exponential backoff; delay by approximately 2*r10d clock cycles - mov eax, r10d - DelayLoopThinLock: - pause ; indicate to the CPU that we are spin waiting - sub eax, 1 - jnz DelayLoopThinLock - - ; Next time, wait a factor longer - imul r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwBackoffFactor] - - cmp r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwMaximumDuration] - jle RetryHelperThinLock - - jmp FramedLockHelper - - RetryHelperThinLock: - jmp RetryThinLock - - TimeoutZero: - ; Did not acquire, return FALSE - mov byte ptr [r8], 0 -ifdef MON_DEBUG -ifdef TRACK_SYNC - add rsp, MON_ENTER_STACK_SIZE_INLINEGETTHREAD -endif -endif - ret - - HaveHashOrSyncBlockIndex: - ; If we have a hash code already, we need to create a sync block - test eax, BIT_SBLK_IS_HASHCODE - jnz FramedLockHelper - - ; OK, we have a sync block index, just and out the top bits and grab the synblock index - and eax, MASK_SYNCBLOCKINDEX - - ; Get the sync block pointer - mov rdx, qword ptr [g_pSyncTable] - shl eax, 4 - mov rdx, [rdx + rax + OFFSETOF__SyncTableEntry__m_SyncBlock] - - ; Check if the sync block has been allocated - test rdx, rdx - jz FramedLockHelper - - ; Get a pointer to the lock object - lea rdx, [rdx + OFFSETOF__SyncBlock__m_Monitor] - - RetrySyncBlock: - ; Attempt to acuire the lock - mov eax, dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld] - test eax, eax - jne HaveWaiters - - ; Common case, lock isn't held and there are no waiters. Attempt to - ; gain ownership ourselves - xor ecx, ecx - inc ecx - lock cmpxchg dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld], ecx - jnz RetryHelperSyncBlock - - ; Success. Save the thread object in the lock and increment the use count - mov qword ptr [rdx + OFFSETOF__AwareLock__m_HoldingThread], r11 - add dword ptr [rdx + OFFSETOF__AwareLock__m_Recursion], 1 - add dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1 - -ifdef MON_DEBUG -ifdef TRACK_SYNC - mov rcx, [rsp + MON_ENTER_STACK_SIZE_INLINEGETTHREAD] ; return address - ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock) - call EnterSyncHelper -endif -endif - - ; Return TRUE - mov byte ptr [r8], 1 -ifdef MON_DEBUG -ifdef TRACK_SYNC - add rsp, MON_ENTER_STACK_SIZE_INLINEGETTHREAD -endif -endif - ret - - ; It's possible to get here with waiters by no lock held, but in this - ; case a signal is about to be fired which will wake up the waiter. So - ; for fairness sake we should wait too. - ; Check first for recur11ve lock attempts on the same thread. - HaveWaiters: - ; Is mutex already owned by current thread? - cmp [rdx + OFFSETOF__AwareLock__m_HoldingThread], r11 - jne PrepareToWait - - ; Yes, bump our use count. 
- add dword ptr [rdx + OFFSETOF__AwareLock__m_Recursion], 1 - -ifdef MON_DEBUG -ifdef TRACK_SYNC - mov rcx, [rsp + MON_ENTER_STACK_SIZE_INLINEGETTHREAD] ; return address - ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock) - call EnterSyncHelper -endif -endif - - ; Return TRUE - mov byte ptr [r8], 1 -ifdef MON_DEBUG -ifdef TRACK_SYNC - add rsp, MON_ENTER_STACK_SIZE_INLINEGETTHREAD -endif -endif - ret - - PrepareToWait: - ; Return failure if timeout is zero - cmp dword ptr [rsp + 10h], 0 -ifdef MON_DEBUG -ifdef TRACK_SYNC - ; if we are using the _DEBUG stuff then rsp has been adjusted - ; so compare the value at the adjusted position - ; there's really little harm in the extra stack read - cmp dword ptr [rsp + MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 10h] -endif -endif - je TimeoutZero - - ; If we are on an MP system, we try spinning for a certain number of iterations - cmp dword ptr [g_SystemInfo + OFFSETOF__g_SystemInfo__dwNumberOfProcessors], 1 - jle Block - - ; Exponential backoff; delay by approximately 2*r10d clock cycles - mov eax, r10d - DelayLoop: - pause ; indicate to the CPU that we are spin waiting - sub eax, 1 - jnz DelayLoop - - ; Next time, wait a factor longer - imul r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwBackoffFactor] - - cmp r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwMaximumDuration] - jle RetrySyncBlock - - jmp Block - - RetryHelperSyncBlock: - jmp RetrySyncBlock - - Block: - ; In the Block case we've trashed RCX, restore it - mov rcx, [rsp + 8h] -ifdef MON_DEBUG -ifdef TRACK_SYNC - ; if we're tracking this stuff then rcx is at a different offset to RSP, we just - ; overwrite the wrong value which we just got... this is for debug purposes only - ; so there's really no performance issue here - mov rcx, [rsp + MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 8h] -endif -endif - FramedLockHelper: -ifdef MON_DEBUG -ifdef TRACK_SYNC - add rsp, MON_ENTER_STACK_SIZE_INLINEGETTHREAD -endif -endif - mov rdx, [rsp + 10h] - ; void JITutil_MonTryEnter(Object* obj, INT32 timeout) - jmp JITutil_MonTryEnter - -NESTED_END JIT_MonTryEnter_InlineGetThread, _TEXT - - -MON_ENTER_STATIC_RETURN_SUCCESS macro - ; pbLockTaken is never null for static helpers - test rdx, rdx - mov byte ptr [rdx], 1 - REPRET - - endm - -MON_EXIT_STATIC_RETURN_SUCCESS macro - ; pbLockTaken is never null for static helpers - mov byte ptr [rdx], 0 - REPRET - - endm - - -; This is a frameless helper for entering a static monitor on a class. -; The methoddesc is in ARGUMENT_REG1. This tries the normal case (no -; blocking or object allocation) in line and calls a framed helper -; for the other cases. -; -; void JIT_MonEnterStatic_InlineGetThread(AwareLock *lock, BYTE *pbLockTaken) -NESTED_ENTRY JIT_MonEnterStatic_InlineGetThread, _TEXT - .savereg rcx, 0 -ifdef MON_DEBUG -ifdef TRACK_SYNC - alloc_stack MIN_SIZE - save_reg_postrsp rcx, MIN_SIZE + 8h + 0h -endif -endif - END_PROLOGUE - - ; Attempt to acquire the lock - Retry: - mov eax, dword ptr [rcx + OFFSETOF__AwareLock__m_MonitorHeld] - test eax, eax - jne HaveWaiters - - ; Common case; lock isn't held and there are no waiters. Attempt to - ; gain ownership by ourselves. 
- mov r10d, 1 - - lock cmpxchg dword ptr [rcx + OFFSETOF__AwareLock__m_MonitorHeld], r10d - jnz RetryHelper - - PATCHABLE_INLINE_GETTHREAD rax, JIT_MonEnterStaticWorker_InlineGetThread_GetThread_PatchLabel_1 - - mov qword ptr [rcx + OFFSETOF__AwareLock__m_HoldingThread], rax - add dword ptr [rcx + OFFSETOF__AwareLock__m_Recursion], 1 - add dword ptr [rax + OFFSETOF__Thread__m_dwLockCount], 1 - -ifdef MON_DEBUG -ifdef TRACK_SYNC - mov rdx, rcx - mov rcx, [rsp] - add rsp, MIN_SIZE - ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock) - jmp EnterSyncHelper -endif -endif - MON_ENTER_STATIC_RETURN_SUCCESS - - ; It's possible to get here with waiters by with no lock held, in this - ; case a signal is about to be fired which will wake up a waiter. So - ; for fairness sake we should wait too. - ; Check first for recursive lock attempts on the same thread. - HaveWaiters: - PATCHABLE_INLINE_GETTHREAD rax, JIT_MonEnterStaticWorker_InlineGetThread_GetThread_PatchLabel_2 - - ; Is mutex alread owned by current thread? - cmp [rcx + OFFSETOF__AwareLock__m_HoldingThread], rax - jne PrepareToWait - - ; Yes, bump our use count. - add dword ptr [rcx + OFFSETOF__AwareLock__m_Recursion], 1 -ifdef MON_DEBUG -ifdef TRACK_SYNC - mov rdx, rcx - mov rcx, [rsp + MIN_SIZE] - add rsp, MIN_SIZE - ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock) - jmp EnterSyncHelper -endif -endif - ret - - PrepareToWait: -ifdef MON_DEBUG -ifdef TRACK_SYNC - add rsp, MIN_SIZE -endif -endif - ; void JITutil_MonContention(AwareLock* obj, BYTE* pbLockTaken) - jmp JITutil_MonContention - - RetryHelper: - jmp Retry -NESTED_END JIT_MonEnterStatic_InlineGetThread, _TEXT - -; A frameless helper for exiting a static monitor on a class. -; The methoddesc is in ARGUMENT_REG1. This tries the normal case (no -; blocking or object allocation) in line and calls a framed helper -; for the other cases. 
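The static helpers above and below skip the object header entirely and operate on the AwareLock itself. The asm implies the m_MonitorHeld encoding: bit 0 is the "held" bit (so an uncontended enter is a CAS from 0 to 1), and the bits STATE_CHECK masks are waiter state, which is why exit decrements and then tests the old value against STATE_CHECK to decide whether JITutil_MonSignal must wake a waiter. A hedged sketch of that word protocol (layout inferred from the asm, names illustrative):

    #include <atomic>
    #include <cstdint>

    const uint32_t STATE_CHECK = 0xFFFFFFFE; // any bits above the "held" bit

    // Uncontended enter: lock is free and has no waiters, CAS 0 -> 1.
    inline bool TryEnterAwareLock(std::atomic<uint32_t>& monitorHeld)
    {
        uint32_t expected = 0;
        return monitorHeld.compare_exchange_strong(expected, 1,
                                                   std::memory_order_acquire);
    }

    // Exit: drop the held bit; report whether a waiter must be signaled
    // (the MustSignal/JITutil_MonSignal case in the deleted code).
    inline bool ExitAwareLockNeedsSignal(std::atomic<uint32_t>& monitorHeld)
    {
        uint32_t old = monitorHeld.load(std::memory_order_relaxed);
        while (!monitorHeld.compare_exchange_weak(old, old - 1,
                                                  std::memory_order_release))
        {
            // compare_exchange_weak reloads 'old' on failure; just retry.
        }
        return (old & STATE_CHECK) != 0;
    }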
-; -; void JIT_MonExitStatic_InlineGetThread(AwareLock *lock, BYTE *pbLockTaken) -NESTED_ENTRY JIT_MonExitStatic_InlineGetThread, _TEXT - .savereg rcx, 0 -ifdef MON_DEBUG -ifdef TRACK_SYNC - alloc_stack MIN_SIZE - save_reg_postrsp rcx, MIN_SIZE + 8h + 0h -endif -endif - END_PROLOGUE - -ifdef MON_DEBUG -ifdef TRACK_SYNC - push rsi - push rdi - mov rsi, rcx - mov rdi, rdx - mov rdx, [rsp + 8] - call LeaveSyncHelper - mov rcx, rsi - mov rdx, rdi - pop rdi - pop rsi -endif -endif - PATCHABLE_INLINE_GETTHREAD rax, JIT_MonExitStaticWorker_InlineGetThread_GetThread_PatchLabel - - ; Check if lock is held - cmp [rcx + OFFSETOF__AwareLock__m_HoldingThread], rax - jne LockError - - ; Reduce our recursion count - sub dword ptr [rcx + OFFSETOF__AwareLock__m_Recursion], 1 - jz LastRecursion - -ifdef MON_DEBUG -ifdef TRACK_SYNC - add rsp, MIN_SIZE - ret -endif -endif - REPRET - - ; This is the last count we held on this lock, so release the lock - LastRecursion: - ; Thead* is in rax - sub dword ptr [rax + OFFSETOF__Thread__m_dwLockCount], 1 - mov qword ptr [rcx + OFFSETOF__AwareLock__m_HoldingThread], 0 - - Retry: - mov eax, dword ptr [rcx + OFFSETOF__AwareLock__m_MonitorHeld] - lea r10d, [eax - 1] - lock cmpxchg dword ptr [rcx + OFFSETOF__AwareLock__m_MonitorHeld], r10d - jne RetryHelper - test eax, STATE_CHECK - jne MustSignal - -ifdef MON_DEBUG -ifdef TRACK_SYNC - add rsp, MIN_SIZE - ret -endif -endif - MON_EXIT_STATIC_RETURN_SUCCESS - - MustSignal: -ifdef MON_DEBUG -ifdef TRACK_SYNC - add rsp, MIN_SIZE -endif -endif - ; void JITutil_MonSignal(AwareLock* lock, BYTE* pbLockTaken) - jmp JITutil_MonSignal - - RetryHelper: - jmp Retry - - LockError: - mov rcx, CORINFO_SynchronizationLockException_ASM -ifdef MON_DEBUG -ifdef TRACK_SYNC - add rsp, MIN_SIZE -endif -endif - ; void JIT_InternalThrow(unsigned exceptNum) - jmp JIT_InternalThrow -NESTED_END JIT_MonExitStatic_InlineGetThread, _TEXT - end diff --git a/src/vm/amd64/JitHelpers_Slow.asm b/src/vm/amd64/JitHelpers_Slow.asm index 448bcb2279..f86d429e33 100644 --- a/src/vm/amd64/JitHelpers_Slow.asm +++ b/src/vm/amd64/JitHelpers_Slow.asm @@ -836,894 +836,6 @@ NESTED_ENTRY JIT_GetSharedGCStaticBaseNoCtor_Slow, _TEXT NESTED_END JIT_GetSharedGCStaticBaseNoCtor_Slow, _TEXT -MON_ENTER_STACK_SIZE equ 00000020h -MON_EXIT_STACK_SIZE equ 00000068h - -ifdef MON_DEBUG -ifdef TRACK_SYNC -MON_ENTER_STACK_SIZE_INLINEGETTHREAD equ 00000020h -MON_EXIT_STACK_SIZE_INLINEGETTHREAD equ 00000068h -endif -endif - -BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX equ 08000000h ; syncblk.h -BIT_SBLK_IS_HASHCODE equ 04000000h ; syncblk.h -BIT_SBLK_SPIN_LOCK equ 10000000h ; syncblk.h - -SBLK_MASK_LOCK_THREADID equ 000003FFh ; syncblk.h -SBLK_LOCK_RECLEVEL_INC equ 00000400h ; syncblk.h -SBLK_MASK_LOCK_RECLEVEL equ 0000FC00h ; syncblk.h - -MASK_SYNCBLOCKINDEX equ 03FFFFFFh ; syncblk.h -STATE_CHECK equ 0FFFFFFFEh - -MT_CTX_PROXY_FLAG equ 10000000h - -g_pSyncTable equ ?g_pSyncTable@@3PEAVSyncTableEntry@@EA -g_SystemInfo equ ?g_SystemInfo@@3U_SYSTEM_INFO@@A -g_SpinConstants equ ?g_SpinConstants@@3USpinConstants@@A - -extern g_pSyncTable:QWORD -extern g_SystemInfo:QWORD -extern g_SpinConstants:QWORD - -; JITutil_MonEnterWorker(Object* obj, BYTE* pbLockTaken) -extern JITutil_MonEnterWorker:proc -; JITutil_MonTryEnter(Object* obj, INT32 timeout, BYTE* pbLockTaken) -extern JITutil_MonTryEnter:proc -; JITutil_MonExitWorker(Object* obj, BYTE* pbLockTaken) -extern JITutil_MonExitWorker:proc -; JITutil_MonSignal(AwareLock* lock, BYTE* pbLockTaken) -extern JITutil_MonSignal:proc -; 
JITutil_MonContention(AwareLock* lock, BYTE* pbLockTaken) -extern JITutil_MonContention:proc - -ifdef _DEBUG -MON_DEBUG equ 1 -endif - -ifdef MON_DEBUG -ifdef TRACK_SYNC -extern EnterSyncHelper:proc -extern LeaveSyncHelper:proc -endif -endif - - -; This is a frameless helper for entering a monitor on a object. -; The object is in ARGUMENT_REG1. This tries the normal case (no -; blocking or object allocation) in line and calls a framed helper -; for the other cases. -; -; EXTERN_C void JIT_MonEnterWorker_Slow(Object* obj, /*OUT*/ BYTE* pbLockTaken) -NESTED_ENTRY JIT_MonEnterWorker_Slow, _TEXT - push_nonvol_reg rsi - - alloc_stack MON_ENTER_STACK_SIZE - - save_reg_postrsp rcx, MON_ENTER_STACK_SIZE + 10h + 0h - save_reg_postrsp rdx, MON_ENTER_STACK_SIZE + 10h + 8h - save_reg_postrsp r8, MON_ENTER_STACK_SIZE + 10h + 10h - save_reg_postrsp r9, MON_ENTER_STACK_SIZE + 10h + 18h - - END_PROLOGUE - - ; Check if the instance is NULL - test rcx, rcx - jz FramedLockHelper - - ; Put pbLockTaken in rsi, this can be null - mov rsi, rdx - - ; We store the thread object in r11 - CALL_GETTHREAD - mov r11, rax - - ; Initialize delay value for retry with exponential backoff - mov r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwInitialDuration] - - ; Check if we can abort here - mov eax, dword ptr [r11 + OFFSETOF__Thread__m_State] - and eax, THREAD_CATCHATSAFEPOINT_BITS - ; Go through the slow code path to initiate ThreadAbort - jnz FramedLockHelper - - ; r8 will hold the syncblockindex address - lea r8, [rcx - OFFSETOF__ObjHeader__SyncBlkIndex] - - RetryThinLock: - ; Fetch the syncblock dword - mov eax, dword ptr [r8] - - ; Check whether we have the "thin lock" layout, the lock is free and the spin lock bit is not set - test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK + SBLK_MASK_LOCK_THREADID + SBLK_MASK_LOCK_RECLEVEL - jnz NeedMoreTests - - ; Everything is fine - get the thread id to store in the lock - mov edx, dword ptr [r11 + OFFSETOF__Thread__m_ThreadId] - - ; If the thread id is too large, we need a syncblock for sure - cmp edx, SBLK_MASK_LOCK_THREADID - ja FramedLockHelper - - ; We want to store a new value with the current thread id set in the low 10 bits - or edx, eax - lock cmpxchg dword ptr [r8], edx - jnz PrepareToWaitThinLock - - ; Everything went fine and we're done - add dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1 - - ; Done, leave and set pbLockTaken if we have it - jmp LockTaken - - NeedMoreTests: - ; OK, not the simple case, find out which case it is - test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX - jnz HaveHashOrSyncBlockIndex - - ; The header is transitioning or the lock, treat this as if the lock was taken - test eax, BIT_SBLK_SPIN_LOCK - jnz PrepareToWaitThinLock - - ; Here we know we have the "thin lock" layout, but the lock is not free. - ; It could still be the recursion case, compare the thread id to check - mov edx, eax - and edx, SBLK_MASK_LOCK_THREADID - cmp edx, dword ptr [r11 + OFFSETOF__Thread__m_ThreadId] - jne PrepareToWaitThinLock - - ; Ok, the thread id matches, it's the recursion case. - ; Bump up the recursion level and check for overflow - lea edx, [eax + SBLK_LOCK_RECLEVEL_INC] - test edx, SBLK_MASK_LOCK_RECLEVEL - jz FramedLockHelper - - ; Try to put the new recursion level back. 
If the header was changed in the meantime - ; we need a full retry, because the layout could have changed - lock cmpxchg dword ptr [r8], edx - jnz RetryHelperThinLock - - ; Done, leave and set pbLockTaken if we have it - jmp LockTaken - - PrepareToWaitThinLock: - ; If we are on an MP system, we try spinning for a certain number of iterations - cmp dword ptr [g_SystemInfo + OFFSETOF__g_SystemInfo__dwNumberOfProcessors], 1 - jle FramedLockHelper - - ; Exponential backoff; delay by approximately 2*r10 clock cycles - mov eax, r10d - delayLoopThinLock: - pause ; indicate to the CPU that we are spin waiting - sub eax, 1 - jnz delayLoopThinLock - - ; Next time, wait a factor longer - imul r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwBackoffFactor] - - cmp r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwMaximumDuration] - jle RetryHelperThinLock - - jmp FramedLockHelper - - RetryHelperThinLock: - jmp RetryThinLock - - HaveHashOrSyncBlockIndex: - ; If we have a hash code already, we need to create a sync block - test eax, BIT_SBLK_IS_HASHCODE - jnz FramedLockHelper - - ; OK, we have a sync block index, just and out the top bits and grab the synblock index - and eax, MASK_SYNCBLOCKINDEX - - ; Get the sync block pointer - mov rdx, qword ptr [g_pSyncTable] - shl eax, 4h - mov rdx, [rdx + rax + OFFSETOF__SyncTableEntry__m_SyncBlock] - - ; Check if the sync block has been allocated - test rdx, rdx - jz FramedLockHelper - - ; Get a pointer to the lock object - lea rdx, [rdx + OFFSETOF__SyncBlock__m_Monitor] - - ; Attempt to acquire the lock - RetrySyncBlock: - mov eax, dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld] - test eax, eax - jne HaveWaiters - - ; Common case, lock isn't held and there are no waiters. Attempt to - ; gain ownership ourselves - xor ecx, ecx - inc ecx - lock cmpxchg dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld], ecx - jnz RetryHelperSyncBlock - - ; Success. Save the thread object in the lock and increment the use count - mov qword ptr [rdx + OFFSETOF__AwareLock__m_HoldingThread], r11 - add dword ptr [rdx + OFFSETOF__AwareLock__m_Recursion], 1 - add dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1 - -ifdef MON_DEBUG -ifdef TRACK_SYNC - mov rcx, [rsp + MON_ENTER_STACK_SIZE + 8h] ; return address - ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock) - call EnterSyncHelper -endif -endif - - ; Done, leave and set pbLockTaken if we have it - jmp LockTaken - - ; It's possible to get here with waiters by no lock held, but in this - ; case a signal is about to be fired which will wake up the waiter. So - ; for fairness sake we should wait too. - ; Check first for recur11ve lock attempts on the same thread. - HaveWaiters: - ; Is mutex already owned by current thread? - cmp [rdx + OFFSETOF__AwareLock__m_HoldingThread], r11 - jne PrepareToWait - - ; Yes, bump our use count. 
- add dword ptr [rdx + OFFSETOF__AwareLock__m_Recursion], 1 - -ifdef MON_DEBUG -ifdef TRACK_SYNC - mov rcx, [rsp + MON_ENTER_STACK_SIZE + 8h] ; return address - ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock) - call EnterSyncHelper -endif -endif - ; Done, leave and set pbLockTaken if we have it - jmp LockTaken - - PrepareToWait: - ; If we are on a MP system we try spinning for a certain number of iterations - cmp dword ptr [g_SystemInfo + OFFSETOF__g_SystemInfo__dwNumberOfProcessors], 1 - jle HaveWaiters1 - - ; Exponential backoff: delay by approximately 2*r10 clock cycles - mov eax, r10d - delayLoop: - pause ; indicate to the CPU that we are spin waiting - sub eax, 1 - jnz delayLoop - - ; Next time, wait a factor longer - imul r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwBackoffFactor] - - cmp r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwMaximumDuration] - jle RetrySyncBlock - - HaveWaiters1: - mov rcx, rdx - mov rdx, rsi - add rsp, MON_ENTER_STACK_SIZE - pop rsi - ; void JITutil_MonContention(AwareLock* lock, BYTE* pbLockTaken) - jmp JITutil_MonContention - - RetryHelperSyncBlock: - jmp RetrySyncBlock - - FramedLockHelper: - mov rdx, rsi - add rsp, MON_ENTER_STACK_SIZE - pop rsi - ; void JITutil_MonEnterWorker(Object* obj, BYTE* pbLockTaken) - jmp JITutil_MonEnterWorker - - align 16 - ; This is sensitive to the potential that pbLockTaken is NULL - LockTaken: - test rsi, rsi - jz LockTaken_Exit - mov byte ptr [rsi], 1 - LockTaken_Exit: - add rsp, MON_ENTER_STACK_SIZE - pop rsi - ret -NESTED_END JIT_MonEnterWorker_Slow, _TEXT - -; This is a frameless helper for exiting a monitor on a object. -; The object is in ARGUMENT_REG1. This tries the normal case (no -; blocking or object allocation) in line and calls a framed helper -; for the other cases. -; -; void JIT_MonExitWorker_Slow(Object* obj, BYTE* pbLockTaken) -NESTED_ENTRY JIT_MonExitWorker_Slow, _TEXT - alloc_stack MON_EXIT_STACK_SIZE - - save_reg_postrsp rcx, MON_EXIT_STACK_SIZE + 8h + 0h - save_reg_postrsp rdx, MON_EXIT_STACK_SIZE + 8h + 8h - save_reg_postrsp r8, MON_EXIT_STACK_SIZE + 8h + 10h - save_reg_postrsp r9, MON_EXIT_STACK_SIZE + 8h + 18h - - END_PROLOGUE - - ; pbLockTaken is stored in r10 - mov r10, rdx - - ; if pbLockTaken is NULL then we got here without a state variable, avoid the - ; next comparison in that case as it will AV - test rdx, rdx - jz Null_pbLockTaken - - ; If the lock wasn't taken then we bail quickly without doing anything - cmp byte ptr [rdx], 0 - je LockNotTaken - - Null_pbLockTaken: - ; Check is the instance is null - test rcx, rcx - jz FramedLockHelper - - ; The Thread obj address is stored in r11 - CALL_GETTHREAD - mov r11, rax - - ; r8 will hold the syncblockindex address - lea r8, [rcx - OFFSETOF__ObjHeader__SyncBlkIndex] - - RetryThinLock: - ; Fetch the syncblock dword - mov eax, dword ptr [r8] - test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK - jnz NeedMoreTests - - ; Ok, we have a "thin lock" layout - check whether the thread id matches - mov edx, eax - and edx, SBLK_MASK_LOCK_THREADID - cmp edx, dword ptr [r11 + OFFSETOF__Thread__m_ThreadId] - jne FramedLockHelper - - ; check the recursion level - test eax, SBLK_MASK_LOCK_RECLEVEL - jne DecRecursionLevel - - ; It's zero -- we're leaving the lock. - ; So try to put back a zero thread id. 
- ; edx and eax match in the thread id bits, and edx is zero else where, so the xor is sufficient - xor edx, eax - lock cmpxchg dword ptr [r8], edx - jnz RetryHelperThinLock - - ; Dec the dwLockCount on the thread - sub dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1 - - ; Done, leave and set pbLockTaken if we have it - jmp LockReleased - - DecRecursionLevel: - lea edx, [eax - SBLK_LOCK_RECLEVEL_INC] - lock cmpxchg dword ptr [r8], edx - jnz RetryHelperThinLock - - ; We're done, leave and set pbLockTaken if we have it - jmp LockReleased - - NeedMoreTests: - ; Forward all special cases to the slow helper - test eax, BIT_SBLK_IS_HASHCODE + BIT_SBLK_SPIN_LOCK - jnz FramedLockHelper - - ; Get the sync block index and use it to compute the sync block pointer - mov rdx, qword ptr [g_pSyncTable] - and eax, MASK_SYNCBLOCKINDEX - shl eax, 4 - mov rdx, [rdx + rax + OFFSETOF__SyncTableEntry__m_SyncBlock] - - ; Was there a sync block? - test rdx, rdx - jz FramedLockHelper - - ; Get a pointer to the lock object. - lea rdx, [rdx + OFFSETOF__SyncBlock__m_Monitor] - - ; Check if the lock is held. - cmp qword ptr [rdx + OFFSETOF__AwareLock__m_HoldingThread], r11 - jne FramedLockHelper - -ifdef MON_DEBUG -ifdef TRACK_SYNC - mov [rsp + 28h], rcx - mov [rsp + 30h], rdx - mov [rsp + 38h], r10 - mov [rsp + 40h], r11 - - mov rcx, [rsp + MON_EXIT_STACK_SIZE ] ; return address - ; void LeaveSyncHelper(UINT_PTR caller, AwareLock* lock) - call LeaveSyncHelper - - mov rcx, [rsp + 28h] - mov rdx, [rsp + 30h] - mov r10, [rsp + 38h] - mov r11, [rsp + 40h] -endif -endif - - ; Reduce our recursion count - sub dword ptr [rdx + OFFSETOF__AwareLock__m_Recursion], 1 - jz LastRecursion - - ; Done, leave and set pbLockTaken if we have it - jmp LockReleased - - RetryHelperThinLock: - jmp RetryThinLock - - FramedLockHelper: - mov rdx, r10 - add rsp, MON_EXIT_STACK_SIZE - ; void JITutil_MonExitWorker(Object* obj, BYTE* pbLockTaken) - jmp JITutil_MonExitWorker - - LastRecursion: -ifdef MON_DEBUG -ifdef TRACK_SYNC - mov rax, [rdx + OFFSETOF__AwareLock__m_HoldingThread] -endif -endif - - sub dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1 - mov qword ptr [rdx + OFFSETOF__AwareLock__m_HoldingThread], 0 - - Retry: - mov eax, dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld] - lea r9d, [eax - 1] - lock cmpxchg dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld], r9d - jne RetryHelper - - test eax, STATE_CHECK - jne MustSignal - - ; Done, leave and set pbLockTaken if we have it - jmp LockReleased - - MustSignal: - mov rcx, rdx - mov rdx, r10 - add rsp, MON_EXIT_STACK_SIZE - ; void JITutil_MonSignal(AwareLock* lock, BYTE* pbLockTaken) - jmp JITutil_MonSignal - - RetryHelper: - jmp Retry - - LockNotTaken: - add rsp, MON_EXIT_STACK_SIZE - ret - - align 16 - ; This is sensitive to the potential that pbLockTaken is null - LockReleased: - test r10, r10 - jz LockReleased_Exit - mov byte ptr [r10], 0 - LockReleased_Exit: - add rsp, MON_EXIT_STACK_SIZE - ret -NESTED_END JIT_MonExitWorker_Slow, _TEXT - -; This is a frameless helper for trying to enter a monitor on a object. -; The object is in ARGUMENT_REG1 and a timeout in ARGUMENT_REG2. This tries the -; normal case (no object allocation) in line and calls a framed helper for the -; other cases. 
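JIT_MonTryEnter_Slow below (like its InlineGetThread twin above) layers timeout semantics over the same fast paths: anything below -1 is invalid and is routed to the framed helper, -1 means wait forever, and 0 means a single poll that fails fast instead of blocking. A compact, hypothetical restatement of that routing:

    #include <cstdint>

    enum class TryEnterRoute { Invalid, PollOnce, SpinThenBlock, WaitForever };

    // The decision the deleted helpers encode with 'cmp edx, -1 / jl' and
    // the TimeoutZero checks.
    inline TryEnterRoute ClassifyTimeout(int32_t timeoutMs)
    {
        if (timeoutMs < -1)  return TryEnterRoute::Invalid;     // framed helper
        if (timeoutMs == -1) return TryEnterRoute::WaitForever; // INFINITE
        if (timeoutMs == 0)  return TryEnterRoute::PollOnce;    // fail fast
        return TryEnterRoute::SpinThenBlock;
    }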
-; -; void JIT_MonTryEnter_Slow(Object* obj, INT32 timeOut, BYTE* pbLockTaken) -NESTED_ENTRY JIT_MonTryEnter_Slow, _TEXT - push_nonvol_reg rsi - - alloc_stack MON_ENTER_STACK_SIZE - - save_reg_postrsp rcx, MON_ENTER_STACK_SIZE + 10h + 0h - save_reg_postrsp rdx, MON_ENTER_STACK_SIZE + 10h + 8h - save_reg_postrsp r8, MON_ENTER_STACK_SIZE + 10h + 10h - save_reg_postrsp r9, MON_ENTER_STACK_SIZE + 10h + 18h - - END_PROLOGUE - - mov rsi, rdx - - ; Check if the instance is NULL - test rcx, rcx - jz FramedLockHelper - - ; Check if the timeout looks valid - cmp rdx, -1 - jl FramedLockHelper - - ; We store the thread object in r11 - CALL_GETTHREAD - mov r11, rax - - ; Initialize delay value for retry with exponential backoff - mov r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwInitialDuration] - - ; Check if we can abort here - mov eax, dword ptr [r11 + OFFSETOF__Thread__m_State] - and eax, THREAD_CATCHATSAFEPOINT_BITS - ; Go through the slow code path to initiate THreadAbort - jnz FramedLockHelper - - ; r9 will hold the syncblockindex address - lea r9, [rcx - OFFSETOF__ObjHeader__SyncBlkIndex] - - RetryThinLock: - ; Fetch the syncblock dword - mov eax, dword ptr [r9] - - ; Check whether we have the "thin lock" layout, the lock is free and the spin lock bit is not set - test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK + SBLK_MASK_LOCK_THREADID + SBLK_MASK_LOCK_RECLEVEL - jne NeedMoreTests - - ; Everything is fine - get the thread id to store in the lock - mov edx, dword ptr [r11 + OFFSETOF__Thread__m_ThreadId] - - ; If the thread id is too large, we need a syncblock for sure - cmp edx, SBLK_MASK_LOCK_THREADID - ja FramedLockHelper - - ; We want to store a new value with the current thread id set in the low 10 bits - or edx, eax - lock cmpxchg dword ptr [r9], edx - jnz RetryHelperThinLock - - ; Got the lock, everything is fine - add dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1 - ; Return TRUE - mov byte ptr [r8], 1 - add rsp, MON_ENTER_STACK_SIZE - pop rsi - ret - - NeedMoreTests: - ; OK, not the simple case, find out which case it is - test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX - jnz HaveHashOrSyncBlockIndex - - ; The header is transitioning or the lock - test eax, BIT_SBLK_SPIN_LOCK - jnz RetryHelperThinLock - - ; Here we know we have the "thin lock" layout, but the lock is not free. - ; It could still be the recursion case, compare the thread id to check - mov edx, eax - and edx, SBLK_MASK_LOCK_THREADID - cmp edx, dword ptr [r11 + OFFSETOF__Thread__m_ThreadId] - jne PrepareToWaitThinLock - - ; Ok, the thread id matches, it's the recursion case. - ; Dump up the recursion level and check for overflow - lea edx, [eax + SBLK_LOCK_RECLEVEL_INC] - test edx, SBLK_MASK_LOCK_RECLEVEL - jz FramedLockHelper - - ; Try to put the new recursion level back. 
If the header was changed in the meantime - ; we need a full retry, because the layout could have changed - lock cmpxchg dword ptr [r9], edx - jnz RetryHelperThinLock - - ; Everything went fine and we're done, return TRUE - mov byte ptr [r8], 1 - add rsp, MON_ENTER_STACK_SIZE - pop rsi - ret - - PrepareToWaitThinLock: - ; Return failure if timeout is zero - test rsi, rsi - jz TimeoutZero - - ; If we are on an MP system, we try spinning for a certain number of iterations - cmp dword ptr [g_SystemInfo + OFFSETOF__g_SystemInfo__dwNumberOfProcessors], 1 - jle FramedLockHelper - - ; Exponential backoff; delay by approximately 2*r10d clock cycles - mov eax, r10d - DelayLoopThinLock: - pause ; indicate to the CPU that we are spin waiting - sub eax, 1 - jnz DelayLoopThinLock - - ; Next time, wait a factor longer - imul r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwBackoffFactor] - - cmp r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwMaximumDuration] - jle RetryHelperThinLock - - jmp FramedLockHelper - - RetryHelperThinLock: - jmp RetryThinLock - - HaveHashOrSyncBlockIndex: - ; If we have a hash code already, we need to create a sync block - test eax, BIT_SBLK_IS_HASHCODE - jnz FramedLockHelper - - ; OK, we have a sync block index, just and out the top bits and grab the synblock index - and eax, MASK_SYNCBLOCKINDEX - - ; Get the sync block pointer - mov rdx, qword ptr [g_pSyncTable] - shl eax, 4 - mov rdx, [rdx + rax + OFFSETOF__SyncTableEntry__m_SyncBlock] - - ; Check if the sync block has been allocated - test rdx, rdx - jz FramedLockHelper - - ; Get a pointer to the lock object - lea rdx, [rdx + OFFSETOF__SyncBlock__m_Monitor] - - RetrySyncBlock: - ; Attempt to acuire the lock - mov eax, dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld] - test eax, eax - jne HaveWaiters - - ; Common case, lock isn't held and there are no waiters. Attempt to - ; gain ownership ourselves - xor ecx, ecx - inc ecx - lock cmpxchg dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld], ecx - jnz RetryHelperSyncBlock - - ; Success. Save the thread object in the lock and increment the use count - mov qword ptr [rdx + OFFSETOF__AwareLock__m_HoldingThread], r11 - add dword ptr [rdx + OFFSETOF__AwareLock__m_Recursion], 1 - add dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1 - -ifdef MON_DEBUG -ifdef TRACK_SYNC - mov rcx, [rsp + MON_ENTER_STACK_SIZE + 8h] ; return address - ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock) - call EnterSyncHelper -endif -endif - - ; Return TRUE - mov byte ptr [r8], 1 - add rsp, MON_ENTER_STACK_SIZE - pop rsi - ret - - ; It's possible to get here with waiters by no lock held, but in this - ; case a signal is about to be fired which will wake up the waiter. So - ; for fairness sake we should wait too. - ; Check first for recur11ve lock attempts on the same thread. - HaveWaiters: - ; Is mutex already owned by current thread? - cmp [rdx + OFFSETOF__AwareLock__m_HoldingThread], r11 - jne PrepareToWait - - ; Yes, bump our use count. 
- add dword ptr [rdx + OFFSETOF__AwareLock__m_Recursion], 1 - -ifdef MON_DEBUG -ifdef TRACK_SYNC - mov rcx, [rsp + MON_ENTER_STACK_SIZE + 8h] ; return address - ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock) - call EnterSyncHelper -endif -endif - - ; Return TRUE - mov byte ptr [r8], 1 - add rsp, MON_ENTER_STACK_SIZE - pop rsi - ret - - PrepareToWait: - ; Return failure if timeout is zero - test rsi, rsi - jz TimeoutZero - - ; If we are on an MP system, we try spinning for a certain number of iterations - cmp dword ptr [g_SystemInfo + OFFSETOF__g_SystemInfo__dwNumberOfProcessors], 1 - jle Block - - ; Exponential backoff; delay by approximately 2*r10d clock cycles - mov eax, r10d - DelayLoop: - pause ; indicate to the CPU that we are spin waiting - sub eax, 1 - jnz DelayLoop - - ; Next time, wait a factor longer - imul r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwBackoffFactor] - - cmp r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwMaximumDuration] - jle RetrySyncBlock - - jmp Block - - TimeoutZero: - ; Return FALSE - mov byte ptr [r8], 0 - add rsp, MON_ENTER_STACK_SIZE - pop rsi - ret - - RetryHelperSyncBlock: - jmp RetrySyncBlock - - Block: - ; In the Block case we've trashed RCX, restore it - mov rcx, [rsp + MON_ENTER_STACK_SIZE + 10h] - FramedLockHelper: - mov rdx, rsi - add rsp, MON_ENTER_STACK_SIZE - pop rsi - ; void JITutil_MonTryEnter(Object* obj, UINT32 timeout, BYTE* pbLockTaken) - jmp JITutil_MonTryEnter - -NESTED_END JIT_MonTryEnter_Slow, _TEXT - -MON_ENTER_STATIC_RETURN_SUCCESS macro - ; pbLockTaken is never null for static helpers - mov byte ptr [rdx], 1 - add rsp, MIN_SIZE - ret - - endm - -MON_EXIT_STATIC_RETURN_SUCCESS macro - ; pbLockTaken is never null for static helpers - mov byte ptr [rdx], 0 - add rsp, MIN_SIZE - ret - - endm - - -; This is a frameless helper for entering a static monitor on a class. -; The methoddesc is in ARGUMENT_REG1. This tries the normal case (no -; blocking or object allocation) in line and calls a framed helper -; for the other cases. -; -; void JIT_MonEnterStatic_Slow(AwareLock *lock, BYTE *pbLockTaken) -NESTED_ENTRY JIT_MonEnterStatic_Slow, _TEXT - alloc_stack MIN_SIZE - END_PROLOGUE - - ; Attempt to acquire the lock - Retry: - mov eax, dword ptr [rcx + OFFSETOF__AwareLock__m_MonitorHeld] - test eax, eax - jne HaveWaiters - - ; Common case; lock isn't held and there are no waiters. Attempt to - ; gain ownership by ourselves. - mov r10d, 1 - lock cmpxchg dword ptr [rcx + OFFSETOF__AwareLock__m_MonitorHeld], r10d - jnz RetryHelper - - ; Success. Save the thread object in the lock and increment the use count. - CALL_GETTHREAD - - mov qword ptr [rcx + OFFSETOF__AwareLock__m_HoldingThread], rax - add dword ptr [rcx + OFFSETOF__AwareLock__m_Recursion], 1 - add dword ptr [rax + OFFSETOF__Thread__m_dwLockCount], 1 - -ifdef MON_DEBUG -ifdef TRACK_SYNC - add rsp, MIN_SIZE - mov rdx, rcx - mov rcx, [rsp] - ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock) - jmp EnterSyncHelper -endif -endif - MON_ENTER_STATIC_RETURN_SUCCESS - - ; It's possible to get here with waiters by with no lock held, in this - ; case a signal is about to be fired which will wake up a waiter. So - ; for fairness sake we should wait too. - ; Check first for recursive lock attempts on the same thread. - HaveWaiters: - CALL_GETTHREAD - - ; Is mutex alread owned by current thread? - cmp [rcx + OFFSETOF__AwareLock__m_HoldingThread], rax - jne PrepareToWait - - ; Yes, bump our use count. 
- add dword ptr [rcx + OFFSETOF__AwareLock__m_Recursion], 1 -ifdef MON_DEBUG -ifdef TRACK_SYNC - mov rdx, rcx - mov rcx, [rsp] - ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock) - add rsp, MIN_SIZE - jmp EnterSyncHelper -endif -endif - MON_ENTER_STATIC_RETURN_SUCCESS - - PrepareToWait: - add rsp, MIN_SIZE - ; void JITutil_MonContention(AwareLock* obj, BYTE* pbLockTaken) - jmp JITutil_MonContention - - RetryHelper: - jmp Retry -NESTED_END JIT_MonEnterStatic_Slow, _TEXT - -; A frameless helper for exiting a static monitor on a class. -; The methoddesc is in ARGUMENT_REG1. This tries the normal case (no -; blocking or object allocation) in line and calls a framed helper -; for the other cases. -; -; void JIT_MonExitStatic_Slow(AwareLock *lock, BYTE *pbLockTaken) -NESTED_ENTRY JIT_MonExitStatic_Slow, _TEXT - alloc_stack MIN_SIZE - END_PROLOGUE - -ifdef MON_DEBUG -ifdef TRACK_SYNC - push rsi - push rdi - mov rsi, rcx - mov rdi, rdx - mov rdx, [rsp + 8] - call LeaveSyncHelper - mov rcx, rsi - mov rdx, rdi - pop rdi - pop rsi -endif -endif - - ; Check if lock is held - CALL_GETTHREAD - - cmp [rcx + OFFSETOF__AwareLock__m_HoldingThread], rax - jne LockError - - ; Reduce our recursion count - sub dword ptr [rcx + OFFSETOF__AwareLock__m_Recursion], 1 - jz LastRecursion - - MON_EXIT_STATIC_RETURN_SUCCESS - - ; This is the last count we held on this lock, so release the lock - LastRecursion: - ; Thead* is in rax - sub dword ptr [rax + OFFSETOF__Thread__m_dwLockCount], 1 - mov qword ptr [rcx + OFFSETOF__AwareLock__m_HoldingThread], 0 - - Retry: - mov eax, dword ptr [rcx + OFFSETOF__AwareLock__m_MonitorHeld] - lea r10d, [eax - 1] - lock cmpxchg dword ptr [rcx + OFFSETOF__AwareLock__m_MonitorHeld], r10d - jne RetryHelper - test eax, STATE_CHECK - jne MustSignal - - MON_EXIT_STATIC_RETURN_SUCCESS - - MustSignal: - add rsp, MIN_SIZE - ; void JITutil_MonSignal(AwareLock* lock, BYTE* pbLockTaken) - jmp JITutil_MonSignal - - RetryHelper: - jmp Retry - - LockError: - mov rcx, CORINFO_SynchronizationLockException_ASM - add rsp, MIN_SIZE - ; void JIT_InternalThrow(unsigned exceptNum) - jmp JIT_InternalThrow -NESTED_END JIT_MonExitStatic_Slow, _TEXT - - ifdef _DEBUG extern Object__DEBUG_SetAppDomain:proc diff --git a/src/vm/amd64/asmconstants.h b/src/vm/amd64/asmconstants.h index 4a100c1823..1fef80f66d 100644 --- a/src/vm/amd64/asmconstants.h +++ b/src/vm/amd64/asmconstants.h @@ -184,37 +184,12 @@ ASMCONSTANTS_C_ASSERT(THREAD_CATCHATSAFEPOINT_BITS == Thread::TS_CatchAtSafePoin #define OFFSETOF__NDirectMethodDesc__m_pWriteableData DBG_FRE(0x48, 0x20) ASMCONSTANTS_C_ASSERT(OFFSETOF__NDirectMethodDesc__m_pWriteableData == offsetof(NDirectMethodDesc, ndirect.m_pWriteableData)); -#define OFFSETOF__ObjHeader__SyncBlkIndex 0x4 -ASMCONSTANTS_C_ASSERT(OFFSETOF__ObjHeader__SyncBlkIndex - == (sizeof(ObjHeader) - offsetof(ObjHeader, m_SyncBlockValue))); - -#define SIZEOF__SyncTableEntry 0x10 -ASMCONSTANT_SIZEOF_ASSERT(SyncTableEntry); - -#define OFFSETOF__SyncTableEntry__m_SyncBlock 0x0 -ASMCONSTANT_OFFSETOF_ASSERT(SyncTableEntry, m_SyncBlock); - -#define OFFSETOF__SyncBlock__m_Monitor 0x0 -ASMCONSTANT_OFFSETOF_ASSERT(SyncBlock, m_Monitor); - #define OFFSETOF__DelegateObject___methodPtr 0x18 ASMCONSTANT_OFFSETOF_ASSERT(DelegateObject, _methodPtr); #define OFFSETOF__DelegateObject___target 0x08 ASMCONSTANT_OFFSETOF_ASSERT(DelegateObject, _target); -#define OFFSETOF__AwareLock__m_MonitorHeld 0x0 -ASMCONSTANTS_C_ASSERT(OFFSETOF__AwareLock__m_MonitorHeld - == offsetof(AwareLock, m_MonitorHeld)); - -#define 
OFFSETOF__AwareLock__m_Recursion 0x4 -ASMCONSTANTS_C_ASSERT(OFFSETOF__AwareLock__m_Recursion - == offsetof(AwareLock, m_Recursion)); - -#define OFFSETOF__AwareLock__m_HoldingThread 0x8 -ASMCONSTANTS_C_ASSERT(OFFSETOF__AwareLock__m_HoldingThread - == offsetof(AwareLock, m_HoldingThread)); - #define OFFSETOF__g_SystemInfo__dwNumberOfProcessors 0x20 ASMCONSTANTS_C_ASSERT(OFFSETOF__g_SystemInfo__dwNumberOfProcessors == offsetof(SYSTEM_INFO, dwNumberOfProcessors)); @@ -346,10 +321,6 @@ ASMCONSTANTS_C_ASSERT( CORINFO_InvalidCastException_ASM ASMCONSTANTS_C_ASSERT( CORINFO_IndexOutOfRangeException_ASM == CORINFO_IndexOutOfRangeException); -#define CORINFO_SynchronizationLockException_ASM 5 -ASMCONSTANTS_C_ASSERT( CORINFO_SynchronizationLockException_ASM - == CORINFO_SynchronizationLockException); - #define CORINFO_ArrayTypeMismatchException_ASM 6 ASMCONSTANTS_C_ASSERT( CORINFO_ArrayTypeMismatchException_ASM == CORINFO_ArrayTypeMismatchException); @@ -613,10 +584,6 @@ ASMCONSTANTS_C_ASSERT(OFFSETOF__StringObject__m_StringLength ASMCONSTANTS_C_ASSERT(OFFSETOF__ArrayTypeDesc__m_Arg == offsetof(ArrayTypeDesc, m_Arg)); -#define SYNCBLOCKINDEX_OFFSET 0x4 -ASMCONSTANTS_C_ASSERT(SYNCBLOCKINDEX_OFFSET - == (sizeof(ObjHeader) - offsetof(ObjHeader, m_SyncBlockValue))); - #define CallDescrData__pSrc 0x00 #define CallDescrData__numStackSlots 0x08 #ifdef UNIX_AMD64_ABI diff --git a/src/vm/amd64/cgencpu.h b/src/vm/amd64/cgencpu.h index b74e3ca7d3..98e9770858 100644 --- a/src/vm/amd64/cgencpu.h +++ b/src/vm/amd64/cgencpu.h @@ -544,20 +544,10 @@ inline BOOL ClrFlushInstructionCache(LPCVOID pCodeAddr, size_t sizeOfCode) // // Create alias for optimized implementations of helpers provided on this platform // -#define JIT_MonEnter JIT_MonEnter -#define JIT_MonEnterWorker JIT_MonEnterWorker_InlineGetThread -#define JIT_MonReliableEnter JIT_MonEnterWorker -#define JIT_MonTryEnter JIT_MonTryEnter_InlineGetThread -#define JIT_MonExit JIT_MonExit -#define JIT_MonExitWorker JIT_MonExitWorker_InlineGetThread -#define JIT_MonEnterStatic JIT_MonEnterStatic_InlineGetThread -#define JIT_MonExitStatic JIT_MonExitStatic_InlineGetThread - #define JIT_GetSharedGCStaticBase JIT_GetSharedGCStaticBase_InlineGetAppDomain #define JIT_GetSharedNonGCStaticBase JIT_GetSharedNonGCStaticBase_InlineGetAppDomain #define JIT_GetSharedGCStaticBaseNoCtor JIT_GetSharedGCStaticBaseNoCtor_InlineGetAppDomain #define JIT_GetSharedNonGCStaticBaseNoCtor JIT_GetSharedNonGCStaticBaseNoCtor_InlineGetAppDomain - #endif // FEATURE_IMPLICIT_TLS #ifndef FEATURE_PAL diff --git a/src/vm/i386/asmconstants.h b/src/vm/i386/asmconstants.h index 0a581cffe0..f7d5f709dc 100644 --- a/src/vm/i386/asmconstants.h +++ b/src/vm/i386/asmconstants.h @@ -176,9 +176,6 @@ ASMCONSTANTS_C_ASSERT(CORINFO_IndexOutOfRangeException_ASM == CORINFO_IndexOutOf #define CORINFO_OverflowException_ASM 4 ASMCONSTANTS_C_ASSERT(CORINFO_OverflowException_ASM == CORINFO_OverflowException) -#define CORINFO_SynchronizationLockException_ASM 5 -ASMCONSTANTS_C_ASSERT(CORINFO_SynchronizationLockException_ASM == CORINFO_SynchronizationLockException) - #define CORINFO_ArrayTypeMismatchException_ASM 6 ASMCONSTANTS_C_ASSERT(CORINFO_ArrayTypeMismatchException_ASM == CORINFO_ArrayTypeMismatchException) @@ -232,79 +229,6 @@ ASMCONSTANTS_C_ASSERT(Thread::TS_Hijacked == TS_Hijacked_ASM) #define AppDomain__m_dwId 0x4 ASMCONSTANTS_C_ASSERT(AppDomain__m_dwId == offsetof(AppDomain, m_dwId)); -// from clr/src/vm/ceeload.cpp - -// from clr/src/vm/syncblk.h -#define SizeOfSyncTableEntry_ASM 8 
-ASMCONSTANTS_C_ASSERT(sizeof(SyncTableEntry) == SizeOfSyncTableEntry_ASM) - -#define SyncBlockIndexOffset_ASM 4 -ASMCONSTANTS_C_ASSERT(sizeof(ObjHeader) - offsetof(ObjHeader, m_SyncBlockValue) == SyncBlockIndexOffset_ASM) - -#ifndef __GNUC__ -#define SyncTableEntry_m_SyncBlock 0 -ASMCONSTANTS_C_ASSERT(offsetof(SyncTableEntry, m_SyncBlock) == SyncTableEntry_m_SyncBlock) - -#define SyncBlock_m_Monitor 0 -ASMCONSTANTS_C_ASSERT(offsetof(SyncBlock, m_Monitor) == SyncBlock_m_Monitor) - -#define AwareLock_m_MonitorHeld 0 -ASMCONSTANTS_C_ASSERT(offsetof(AwareLock, m_MonitorHeld) == AwareLock_m_MonitorHeld) -#else -// The following 3 offsets have value of 0, and must be -// defined to be an empty string. Otherwise, gas may generate assembly -// code with 0 displacement if 0 is left in the displacement field -// of an instruction. -#define SyncTableEntry_m_SyncBlock // 0 -ASMCONSTANTS_C_ASSERT(offsetof(SyncTableEntry, m_SyncBlock) == 0) - -#define SyncBlock_m_Monitor // 0 -ASMCONSTANTS_C_ASSERT(offsetof(SyncBlock, m_Monitor) == 0) - -#define AwareLock_m_MonitorHeld // 0 -ASMCONSTANTS_C_ASSERT(offsetof(AwareLock, m_MonitorHeld) == 0) -#endif // !__GNUC__ - -#define AwareLock_m_HoldingThread 8 -ASMCONSTANTS_C_ASSERT(offsetof(AwareLock, m_HoldingThread) == AwareLock_m_HoldingThread) - -#define AwareLock_m_Recursion 4 -ASMCONSTANTS_C_ASSERT(offsetof(AwareLock, m_Recursion) == AwareLock_m_Recursion) - -#define BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX_ASM 0x08000000 -ASMCONSTANTS_C_ASSERT(BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX_ASM == BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) - -#define BIT_SBLK_SPIN_LOCK_ASM 0x10000000 -ASMCONSTANTS_C_ASSERT(BIT_SBLK_SPIN_LOCK_ASM == BIT_SBLK_SPIN_LOCK) - -#define SBLK_MASK_LOCK_THREADID_ASM 0x000003FF // special value of 0 + 1023 thread ids -ASMCONSTANTS_C_ASSERT(SBLK_MASK_LOCK_THREADID_ASM == SBLK_MASK_LOCK_THREADID) - -#define SBLK_MASK_LOCK_RECLEVEL_ASM 0x0000FC00 // 64 recursion levels -ASMCONSTANTS_C_ASSERT(SBLK_MASK_LOCK_RECLEVEL_ASM == SBLK_MASK_LOCK_RECLEVEL) - -#define SBLK_LOCK_RECLEVEL_INC_ASM 0x00000400 // each level is this much higher than the previous one -ASMCONSTANTS_C_ASSERT(SBLK_LOCK_RECLEVEL_INC_ASM == SBLK_LOCK_RECLEVEL_INC) - -#define BIT_SBLK_IS_HASHCODE_ASM 0x04000000 -ASMCONSTANTS_C_ASSERT(BIT_SBLK_IS_HASHCODE_ASM == BIT_SBLK_IS_HASHCODE) - -#define MASK_SYNCBLOCKINDEX_ASM 0x03ffffff // ((1<<SYNCBLOCKINDEX_BITS)-1) -ASMCONSTANTS_C_ASSERT(MASK_SYNCBLOCKINDEX_ASM == MASK_SYNCBLOCKINDEX) - -// BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX_ASM + BIT_SBLK_SPIN_LOCK_ASM + -// SBLK_MASK_LOCK_THREADID_ASM + SBLK_MASK_LOCK_RECLEVEL_ASM -#define SBLK_COMBINED_MASK_ASM 0x1800ffff -ASMCONSTANTS_C_ASSERT(SBLK_COMBINED_MASK_ASM == (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK + SBLK_MASK_LOCK_THREADID + SBLK_MASK_LOCK_RECLEVEL)) - -// BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX_ASM + BIT_SBLK_SPIN_LOCK_ASM -#define BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX_SPIN_LOCK_ASM 0x18000000 -ASMCONSTANTS_C_ASSERT(BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX_SPIN_LOCK_ASM == (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK)) - -// BIT_SBLK_IS_HASHCODE + BIT_SBLK_SPIN_LOCK -#define BIT_SBLK_IS_HASHCODE_OR_SPIN_LOCK_ASM 0x14000000 -ASMCONSTANTS_C_ASSERT(BIT_SBLK_IS_HASHCODE_OR_SPIN_LOCK_ASM == (BIT_SBLK_IS_HASHCODE + BIT_SBLK_SPIN_LOCK)) - // This is the offset from EBP at which the original CONTEXT is stored in one of the // RedirectedHandledJITCase*_Stub functions. 
#define REDIRECTSTUB_EBP_OFFSET_CONTEXT (-4) diff --git a/src/vm/i386/cgencpu.h b/src/vm/i386/cgencpu.h index e4a623b715..5360b3eb0e 100644 --- a/src/vm/i386/cgencpu.h +++ b/src/vm/i386/cgencpu.h @@ -558,24 +558,6 @@ inline BOOL ClrFlushInstructionCache(LPCVOID pCodeAddr, size_t sizeOfCode) return TRUE; } -#ifndef FEATURE_IMPLICIT_TLS -// -// JIT HELPER ALIASING FOR PORTABILITY. -// -// Create alias for optimized implementations of helpers provided on this platform -// - -#define JIT_MonEnter JIT_MonEnterWorker -#define JIT_MonEnterWorker JIT_MonEnterWorker -#define JIT_MonReliableEnter JIT_MonReliableEnter -#define JIT_MonTryEnter JIT_MonTryEnter -#define JIT_MonExit JIT_MonExitWorker -#define JIT_MonExitWorker JIT_MonExitWorker -#define JIT_MonEnterStatic JIT_MonEnterStatic -#define JIT_MonExitStatic JIT_MonExitStatic - -#endif - // optimized static helpers generated dynamically at runtime // #define JIT_GetSharedGCStaticBase // #define JIT_GetSharedNonGCStaticBase diff --git a/src/vm/i386/jithelp.asm b/src/vm/i386/jithelp.asm index e8d2f121e0..85e824040a 100644 --- a/src/vm/i386/jithelp.asm +++ b/src/vm/i386/jithelp.asm @@ -1240,1058 +1240,6 @@ fremloopd: ;------------------------------------------------------------------------------ -g_SystemInfo TEXTEQU <?g_SystemInfo@@3U_SYSTEM_INFO@@A> -g_SpinConstants TEXTEQU <?g_SpinConstants@@3USpinConstants@@A> -g_pSyncTable TEXTEQU <?g_pSyncTable@@3PAVSyncTableEntry@@A> -JITutil_MonEnterWorker TEXTEQU <@JITutil_MonEnterWorker@4> -JITutil_MonReliableEnter TEXTEQU <@JITutil_MonReliableEnter@8> -JITutil_MonTryEnter TEXTEQU <@JITutil_MonTryEnter@12> -JITutil_MonExitWorker TEXTEQU <@JITutil_MonExitWorker@4> -JITutil_MonContention TEXTEQU <@JITutil_MonContention@4> -JITutil_MonReliableContention TEXTEQU <@JITutil_MonReliableContention@8> -JITutil_MonSignal TEXTEQU <@JITutil_MonSignal@4> -JIT_InternalThrow TEXTEQU <@JIT_InternalThrow@4> -EXTRN g_SystemInfo:BYTE -EXTRN g_SpinConstants:BYTE -EXTRN g_pSyncTable:DWORD -EXTRN JITutil_MonEnterWorker:PROC -EXTRN JITutil_MonReliableEnter:PROC -EXTRN JITutil_MonTryEnter:PROC -EXTRN JITutil_MonExitWorker:PROC -EXTRN JITutil_MonContention:PROC -EXTRN JITutil_MonReliableContention:PROC -EXTRN JITutil_MonSignal:PROC -EXTRN JIT_InternalThrow:PROC - -ifdef MON_DEBUG -ifdef TRACK_SYNC -EnterSyncHelper TEXTEQU <_EnterSyncHelper@8> -LeaveSyncHelper TEXTEQU <_LeaveSyncHelper@8> -EXTRN EnterSyncHelper:PROC -EXTRN LeaveSyncHelper:PROC -endif ;TRACK_SYNC -endif ;MON_DEBUG - -; The following macro is needed because MASM returns -; "instruction prefix not allowed" error message for -; rep nop mnemonic -$repnop MACRO - db 0F3h - db 090h -ENDM - -; Safe ThreadAbort does not abort a thread if it is running finally or has lock counts. -; At the time we call Monitor.Enter, we initiate the abort if we can. -; We do not need to do the same for Monitor.Leave, since most of time, Monitor.Leave is called -; during finally. - -;********************************************************************** -; This is a frameless helper for entering a monitor on a object. -; The object is in ARGUMENT_REG1. This tries the normal case (no -; blocking or object allocation) in line and calls a framed helper -; for the other cases. -; ***** NOTE: if you make any changes to this routine, build with MON_DEBUG undefined -; to make sure you don't break the non-debug build. This is very fragile code. -; Also, propagate the changes to jithelp.s which contains the same helper and assembly code -; (in AT&T syntax) for gnu assembler. 
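For context on the fast path this helper implements: the uncontended case is a single interlocked compare-exchange on the object header. If none of the hash, sync-block-index, spin-lock, owner-id, or recursion bits are set, the acquiring thread publishes its thread id (which must fit in the low ten bits) directly into the header word. A minimal C++ sketch of that idea, using std::atomic and the constant values quoted in this diff rather than the runtime's actual types:

    #include <atomic>
    #include <cstdint>

    // Header-bit constants as defined in syncblk.h (values match this diff).
    constexpr uint32_t kIsHashOrSyncBlkIndex = 0x08000000;
    constexpr uint32_t kSpinLock             = 0x10000000;
    constexpr uint32_t kThreadIdMask         = 0x000003FF;
    constexpr uint32_t kRecLevelMask         = 0x0000FC00;
    constexpr uint32_t kCombinedMask =
        kIsHashOrSyncBlkIndex | kSpinLock | kThreadIdMask | kRecLevelMask;

    // Hypothetical stand-in for the header word of a managed object.
    bool TryEnterThinLock(std::atomic<uint32_t>& header, uint32_t threadId)
    {
        if (threadId > kThreadIdMask)
            return false;              // id too large: a sync block is required
        uint32_t observed = header.load(std::memory_order_relaxed);
        if (observed & kCombinedMask)
            return false;              // hash, sync block, spin lock, or owner present
        // Publish our id in the low ten bits; acquire ordering on success pairs
        // with the release performed when the owner later clears its id on exit.
        return header.compare_exchange_strong(observed, observed | threadId,
                                              std::memory_order_acquire);
    }

Everything beyond this one exchange (recursion, spinning, inflation to a sync block, waiters) is handled by the slower paths that follow.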
-@JIT_MonEnterWorker@4 proc public - ; Initialize delay value for retry with exponential backoff - push ebx - mov ebx, dword ptr g_SpinConstants+SpinConstants_dwInitialDuration - - ; We need yet another register to avoid refetching the thread object - push esi - - ; Check if the instance is NULL. - test ARGUMENT_REG1, ARGUMENT_REG1 - jz MonEnterFramedLockHelper - - call _GetThread@0 - mov esi,eax - - ; Check if we can abort here - mov eax, [esi+Thread_m_State] - and eax, TS_CatchAtSafePoint_ASM - jz MonEnterRetryThinLock - ; go through the slow code path to initiate ThreadAbort. - jmp MonEnterFramedLockHelper - -MonEnterRetryThinLock: - ; Fetch the object header dword - mov eax, [ARGUMENT_REG1-SyncBlockIndexOffset_ASM] - - ; Check whether we have the "thin lock" layout, the lock is free and the spin lock bit not set - ; SBLK_COMBINED_MASK_ASM = BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK + SBLK_MASK_LOCK_THREADID + SBLK_MASK_LOCK_RECLEVEL - test eax, SBLK_COMBINED_MASK_ASM - jnz MonEnterNeedMoreTests - - ; Everything is fine - get the thread id to store in the lock - mov edx, [esi+Thread_m_ThreadId] - - ; If the thread id is too large, we need a syncblock for sure - cmp edx, SBLK_MASK_LOCK_THREADID_ASM - ja MonEnterFramedLockHelper - - ; We want to store a new value with the current thread id set in the low 10 bits - or edx,eax - lock cmpxchg dword ptr [ARGUMENT_REG1-SyncBlockIndexOffset_ASM], edx - jnz MonEnterPrepareToWaitThinLock - - ; Everything went fine and we're done - add [esi+Thread_m_dwLockCount],1 - pop esi - pop ebx - ret - -MonEnterNeedMoreTests: - ; Ok, it's not the simple case - find out which case it is - test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX_ASM - jnz MonEnterHaveHashOrSyncBlockIndex - - ; The header is transitioning or the lock - treat this as if the lock was taken - test eax, BIT_SBLK_SPIN_LOCK_ASM - jnz MonEnterPrepareToWaitThinLock - - ; Here we know we have the "thin lock" layout, but the lock is not free. - ; It could still be the recursion case - compare the thread id to check - mov edx,eax - and edx, SBLK_MASK_LOCK_THREADID_ASM - cmp edx, [esi+Thread_m_ThreadId] - jne MonEnterPrepareToWaitThinLock - - ; Ok, the thread id matches, it's the recursion case. - ; Bump up the recursion level and check for overflow - lea edx, [eax+SBLK_LOCK_RECLEVEL_INC_ASM] - test edx, SBLK_MASK_LOCK_RECLEVEL_ASM - jz MonEnterFramedLockHelper - - ; Try to put the new recursion level back. If the header was changed in the meantime, - ; we need a full retry, because the layout could have changed. 
- lock cmpxchg [ARGUMENT_REG1-SyncBlockIndexOffset_ASM], edx - jnz MonEnterRetryHelperThinLock - - ; Everything went fine and we're done - pop esi - pop ebx - ret - -MonEnterPrepareToWaitThinLock: - ; If we are on an MP system, we try spinning for a certain number of iterations - cmp dword ptr g_SystemInfo+SYSTEM_INFO_dwNumberOfProcessors,1 - jle MonEnterFramedLockHelper - - ; exponential backoff: delay by approximately 2*ebx clock cycles (on a PIII) - mov eax, ebx -MonEnterdelayLoopThinLock: - $repnop ; indicate to the CPU that we are spin waiting (useful for some Intel P4 multiprocs) - dec eax - jnz MonEnterdelayLoopThinLock - - ; next time, wait a factor longer - imul ebx, dword ptr g_SpinConstants+SpinConstants_dwBackoffFactor - - cmp ebx, dword ptr g_SpinConstants+SpinConstants_dwMaximumDuration - jle MonEnterRetryHelperThinLock - - jmp MonEnterFramedLockHelper - -MonEnterRetryHelperThinLock: - jmp MonEnterRetryThinLock - -MonEnterHaveHashOrSyncBlockIndex: - ; If we have a hash code already, we need to create a sync block - test eax, BIT_SBLK_IS_HASHCODE_ASM - jnz MonEnterFramedLockHelper - - ; Ok, we have a sync block index - just and out the top bits and grab the syncblock index - and eax, MASK_SYNCBLOCKINDEX_ASM - - ; Get the sync block pointer. - mov ARGUMENT_REG2, dword ptr g_pSyncTable - mov ARGUMENT_REG2, [ARGUMENT_REG2+eax*SizeOfSyncTableEntry_ASM+SyncTableEntry_m_SyncBlock] - - ; Check if the sync block has been allocated. - test ARGUMENT_REG2, ARGUMENT_REG2 - jz MonEnterFramedLockHelper - - ; Get a pointer to the lock object. - lea ARGUMENT_REG2, [ARGUMENT_REG2+SyncBlock_m_Monitor] - - ; Attempt to acquire the lock. -MonEnterRetrySyncBlock: - mov eax, [ARGUMENT_REG2+AwareLock_m_MonitorHeld] - test eax,eax - jne MonEnterHaveWaiters - - ; Common case, lock isn't held and there are no waiters. Attempt to - ; gain ownership ourselves. - mov ARGUMENT_REG1,1 - lock cmpxchg [ARGUMENT_REG2+AwareLock_m_MonitorHeld], ARGUMENT_REG1 - jnz MonEnterRetryHelperSyncBlock - - ; Success. Save the thread object in the lock and increment the use count. - mov dword ptr [ARGUMENT_REG2+AwareLock_m_HoldingThread],esi - inc dword ptr [esi+Thread_m_dwLockCount] - inc dword ptr [ARGUMENT_REG2+AwareLock_m_Recursion] - -ifdef MON_DEBUG -ifdef TRACK_SYNC - push ARGUMENT_REG2 ; AwareLock - push [esp+4] ; return address - call EnterSyncHelper -endif ;TRACK_SYNC -endif ;MON_DEBUG - pop esi - pop ebx - ret - - ; It's possible to get here with waiters but no lock held, but in this - ; case a signal is about to be fired which will wake up a waiter. So - ; for fairness sake we should wait too. - ; Check first for recursive lock attempts on the same thread. -MonEnterHaveWaiters: - ; Is mutex already owned by current thread? - cmp [ARGUMENT_REG2+AwareLock_m_HoldingThread],esi - jne MonEnterPrepareToWait - - ; Yes, bump our use count. 
- inc dword ptr [ARGUMENT_REG2+AwareLock_m_Recursion] -ifdef MON_DEBUG -ifdef TRACK_SYNC - push ARGUMENT_REG2 ; AwareLock - push [esp+4] ; return address - call EnterSyncHelper -endif ;TRACK_SYNC -endif ;MON_DEBUG - pop esi - pop ebx - ret - -MonEnterPrepareToWait: - ; If we are on an MP system, we try spinning for a certain number of iterations - cmp dword ptr g_SystemInfo+SYSTEM_INFO_dwNumberOfProcessors,1 - jle MonEnterHaveWaiters1 - - ; exponential backoff: delay by approximately 2*ebx clock cycles (on a PIII) - mov eax,ebx -MonEnterdelayLoop: - $repnop ; indicate to the CPU that we are spin waiting (useful for some Intel P4 multiprocs) - dec eax - jnz MonEnterdelayLoop - - ; next time, wait a factor longer - imul ebx, dword ptr g_SpinConstants+SpinConstants_dwBackoffFactor - - cmp ebx, dword ptr g_SpinConstants+SpinConstants_dwMaximumDuration - jle MonEnterRetrySyncBlock - -MonEnterHaveWaiters1: - - pop esi - pop ebx - - ; Place AwareLock in arg1 then call contention helper. - mov ARGUMENT_REG1, ARGUMENT_REG2 - jmp JITutil_MonContention - -MonEnterRetryHelperSyncBlock: - jmp MonEnterRetrySyncBlock - - ; ECX has the object to synchronize on -MonEnterFramedLockHelper: - pop esi - pop ebx - jmp JITutil_MonEnterWorker - -@JIT_MonEnterWorker@4 endp - -;********************************************************************** -; This is a frameless helper for entering a monitor on a object, and -; setting a flag to indicate that the lock was taken. -; The object is in ARGUMENT_REG1. The flag is in ARGUMENT_REG2. -; This tries the normal case (no blocking or object allocation) in line -; and calls a framed helper for the other cases. -; ***** NOTE: if you make any changes to this routine, build with MON_DEBUG undefined -; to make sure you don't break the non-debug build. This is very fragile code. -; Also, propagate the changes to jithelp.s which contains the same helper and assembly code -; (in AT&T syntax) for gnu assembler. -@JIT_MonReliableEnter@8 proc public - ; Initialize delay value for retry with exponential backoff - push ebx - mov ebx, dword ptr g_SpinConstants+SpinConstants_dwInitialDuration - - ; Put pbLockTaken in edi - push edi - mov edi, ARGUMENT_REG2 - - ; We need yet another register to avoid refetching the thread object - push esi - - ; Check if the instance is NULL. - test ARGUMENT_REG1, ARGUMENT_REG1 - jz MonReliableEnterFramedLockHelper - - call _GetThread@0 - mov esi,eax - - ; Check if we can abort here - mov eax, [esi+Thread_m_State] - and eax, TS_CatchAtSafePoint_ASM - jz MonReliableEnterRetryThinLock - ; go through the slow code path to initiate ThreadAbort. 
- jmp MonReliableEnterFramedLockHelper - -MonReliableEnterRetryThinLock: - ; Fetch the object header dword - mov eax, [ARGUMENT_REG1-SyncBlockIndexOffset_ASM] - - ; Check whether we have the "thin lock" layout, the lock is free and the spin lock bit not set - ; SBLK_COMBINED_MASK_ASM = BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK + SBLK_MASK_LOCK_THREADID + SBLK_MASK_LOCK_RECLEVEL - test eax, SBLK_COMBINED_MASK_ASM - jnz MonReliableEnterNeedMoreTests - - ; Everything is fine - get the thread id to store in the lock - mov edx, [esi+Thread_m_ThreadId] - - ; If the thread id is too large, we need a syncblock for sure - cmp edx, SBLK_MASK_LOCK_THREADID_ASM - ja MonReliableEnterFramedLockHelper - - ; We want to store a new value with the current thread id set in the low 10 bits - or edx,eax - lock cmpxchg dword ptr [ARGUMENT_REG1-SyncBlockIndexOffset_ASM], edx - jnz MonReliableEnterPrepareToWaitThinLock - - ; Everything went fine and we're done - add [esi+Thread_m_dwLockCount],1 - ; Set *pbLockTaken=true - mov byte ptr [edi],1 - pop esi - pop edi - pop ebx - ret - -MonReliableEnterNeedMoreTests: - ; Ok, it's not the simple case - find out which case it is - test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX_ASM - jnz MonReliableEnterHaveHashOrSyncBlockIndex - - ; The header is transitioning or the lock - treat this as if the lock was taken - test eax, BIT_SBLK_SPIN_LOCK_ASM - jnz MonReliableEnterPrepareToWaitThinLock - - ; Here we know we have the "thin lock" layout, but the lock is not free. - ; It could still be the recursion case - compare the thread id to check - mov edx,eax - and edx, SBLK_MASK_LOCK_THREADID_ASM - cmp edx, [esi+Thread_m_ThreadId] - jne MonReliableEnterPrepareToWaitThinLock - - ; Ok, the thread id matches, it's the recursion case. - ; Bump up the recursion level and check for overflow - lea edx, [eax+SBLK_LOCK_RECLEVEL_INC_ASM] - test edx, SBLK_MASK_LOCK_RECLEVEL_ASM - jz MonReliableEnterFramedLockHelper - - ; Try to put the new recursion level back. If the header was changed in the meantime, - ; we need a full retry, because the layout could have changed. - lock cmpxchg [ARGUMENT_REG1-SyncBlockIndexOffset_ASM], edx - jnz MonReliableEnterRetryHelperThinLock - - ; Everything went fine and we're done - ; Set *pbLockTaken=true - mov byte ptr [edi],1 - pop esi - pop edi - pop ebx - ret - -MonReliableEnterPrepareToWaitThinLock: - ; If we are on an MP system, we try spinning for a certain number of iterations - cmp dword ptr g_SystemInfo+SYSTEM_INFO_dwNumberOfProcessors,1 - jle MonReliableEnterFramedLockHelper - - ; exponential backoff: delay by approximately 2*ebx clock cycles (on a PIII) - mov eax, ebx -MonReliableEnterdelayLoopThinLock: - $repnop ; indicate to the CPU that we are spin waiting (useful for some Intel P4 multiprocs) - dec eax - jnz MonReliableEnterdelayLoopThinLock - - ; next time, wait a factor longer - imul ebx, dword ptr g_SpinConstants+SpinConstants_dwBackoffFactor - - cmp ebx, dword ptr g_SpinConstants+SpinConstants_dwMaximumDuration - jle MonReliableEnterRetryHelperThinLock - - jmp MonReliableEnterFramedLockHelper - -MonReliableEnterRetryHelperThinLock: - jmp MonReliableEnterRetryThinLock - -MonReliableEnterHaveHashOrSyncBlockIndex: - ; If we have a hash code already, we need to create a sync block - test eax, BIT_SBLK_IS_HASHCODE_ASM - jnz MonReliableEnterFramedLockHelper - - ; Ok, we have a sync block index - just and out the top bits and grab the syncblock index - and eax, MASK_SYNCBLOCKINDEX_ASM - - ; Get the sync block pointer. 
- mov ARGUMENT_REG2, dword ptr g_pSyncTable - mov ARGUMENT_REG2, [ARGUMENT_REG2+eax*SizeOfSyncTableEntry_ASM+SyncTableEntry_m_SyncBlock] - - ; Check if the sync block has been allocated. - test ARGUMENT_REG2, ARGUMENT_REG2 - jz MonReliableEnterFramedLockHelper - - ; Get a pointer to the lock object. - lea ARGUMENT_REG2, [ARGUMENT_REG2+SyncBlock_m_Monitor] - - ; Attempt to acquire the lock. -MonReliableEnterRetrySyncBlock: - mov eax, [ARGUMENT_REG2+AwareLock_m_MonitorHeld] - test eax,eax - jne MonReliableEnterHaveWaiters - - ; Common case, lock isn't held and there are no waiters. Attempt to - ; gain ownership ourselves. - mov ARGUMENT_REG1,1 - lock cmpxchg [ARGUMENT_REG2+AwareLock_m_MonitorHeld], ARGUMENT_REG1 - jnz MonReliableEnterRetryHelperSyncBlock - - ; Success. Save the thread object in the lock and increment the use count. - mov dword ptr [ARGUMENT_REG2+AwareLock_m_HoldingThread],esi - inc dword ptr [esi+Thread_m_dwLockCount] - inc dword ptr [ARGUMENT_REG2+AwareLock_m_Recursion] - ; Set *pbLockTaken=true - mov byte ptr [edi],1 - -ifdef MON_DEBUG -ifdef TRACK_SYNC - push ARGUMENT_REG2 ; AwareLock - push [esp+4] ; return address - call EnterSyncHelper -endif ;TRACK_SYNC -endif ;MON_DEBUG - pop esi - pop edi - pop ebx - ret - - ; It's possible to get here with waiters but no lock held, but in this - ; case a signal is about to be fired which will wake up a waiter. So - ; for fairness sake we should wait too. - ; Check first for recursive lock attempts on the same thread. -MonReliableEnterHaveWaiters: - ; Is mutex already owned by current thread? - cmp [ARGUMENT_REG2+AwareLock_m_HoldingThread],esi - jne MonReliableEnterPrepareToWait - - ; Yes, bump our use count. - inc dword ptr [ARGUMENT_REG2+AwareLock_m_Recursion] - ; Set *pbLockTaken=true - mov byte ptr [edi],1 -ifdef MON_DEBUG -ifdef TRACK_SYNC - push ARGUMENT_REG2 ; AwareLock - push [esp+4] ; return address - call EnterSyncHelper -endif ;TRACK_SYNC -endif ;MON_DEBUG - pop esi - pop edi - pop ebx - ret - -MonReliableEnterPrepareToWait: - ; If we are on an MP system, we try spinning for a certain number of iterations - cmp dword ptr g_SystemInfo+SYSTEM_INFO_dwNumberOfProcessors,1 - jle MonReliableEnterHaveWaiters1 - - ; exponential backoff: delay by approximately 2*ebx clock cycles (on a PIII) - mov eax,ebx -MonReliableEnterdelayLoop: - $repnop ; indicate to the CPU that we are spin waiting (useful for some Intel P4 multiprocs) - dec eax - jnz MonReliableEnterdelayLoop - - ; next time, wait a factor longer - imul ebx, dword ptr g_SpinConstants+SpinConstants_dwBackoffFactor - - cmp ebx, dword ptr g_SpinConstants+SpinConstants_dwMaximumDuration - jle MonReliableEnterRetrySyncBlock - -MonReliableEnterHaveWaiters1: - - ; Place AwareLock in arg1, pbLockTaken in arg2, then call contention helper. - mov ARGUMENT_REG1, ARGUMENT_REG2 - mov ARGUMENT_REG2, edi - - pop esi - pop edi - pop ebx - - jmp JITutil_MonReliableContention - -MonReliableEnterRetryHelperSyncBlock: - jmp MonReliableEnterRetrySyncBlock - - ; ECX has the object to synchronize on -MonReliableEnterFramedLockHelper: - mov ARGUMENT_REG2, edi - pop esi - pop edi - pop ebx - jmp JITutil_MonReliableEnter - -@JIT_MonReliableEnter@8 endp - -;************************************************************************ -; This is a frameless helper for trying to enter a monitor on a object. -; The object is in ARGUMENT_REG1 and a timeout in ARGUMENT_REG2. This tries the -; normal case (no object allocation) in line and calls a framed helper for the -; other cases. 
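Several of the helpers in this file share the thin-lock recursion bump that appears again below: when the owning thread re-enters, the six-bit recursion field in the header is incremented in place, and an increment that wraps the field to zero forces inflation through the framed helper. A hedged sketch of that arithmetic, with illustrative names around the constants this diff defines:

    #include <atomic>
    #include <cstdint>

    constexpr uint32_t kRecLevelInc  = 0x00000400; // SBLK_LOCK_RECLEVEL_INC
    constexpr uint32_t kRecLevelMask = 0x0000FC00; // SBLK_MASK_LOCK_RECLEVEL, 6 bits

    enum class BumpResult { Bumped, Overflow, HeaderChanged };

    // 'observed' is a header value already read and known to carry this
    // thread's id in its low bits.
    BumpResult TryBumpThinLockRecursion(std::atomic<uint32_t>& header,
                                        uint32_t observed)
    {
        uint32_t bumped = observed + kRecLevelInc;
        if ((bumped & kRecLevelMask) == 0)
            return BumpResult::Overflow;     // field wrapped: inflate the lock
        return header.compare_exchange_strong(observed, bumped)
            ? BumpResult::Bumped
            : BumpResult::HeaderChanged;     // header moved: restart from a fresh read
    }

A failed exchange means the header changed underneath the thread (it may no longer even be a thin lock), which is why the assembly retries from the initial header read rather than retrying the bump blindly.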
-; ***** NOTE: if you make any changes to this routine, build with MON_DEBUG undefined -; to make sure you don't break the non-debug build. This is very fragile code. -; Also, propagate the changes to jithelp.s which contains the same helper and assembly code -; (in AT&T syntax) for gnu assembler. -@JIT_MonTryEnter@12 proc public - ; Save the timeout parameter. - push ARGUMENT_REG2 - - ; Initialize delay value for retry with exponential backoff - push ebx - mov ebx, dword ptr g_SpinConstants+SpinConstants_dwInitialDuration - - ; The thin lock logic needs another register to store the thread - push esi - - ; Check if the instance is NULL. - test ARGUMENT_REG1, ARGUMENT_REG1 - jz MonTryEnterFramedLockHelper - - ; Check if the timeout looks valid - cmp ARGUMENT_REG2,-1 - jl MonTryEnterFramedLockHelper - - ; Get the thread right away, we'll need it in any case - call _GetThread@0 - mov esi,eax - - ; Check if we can abort here - mov eax, [esi+Thread_m_State] - and eax, TS_CatchAtSafePoint_ASM - jz MonTryEnterRetryThinLock - ; go through the slow code path to initiate ThreadAbort. - jmp MonTryEnterFramedLockHelper - -MonTryEnterRetryThinLock: - ; Get the header dword and check its layout - mov eax, [ARGUMENT_REG1-SyncBlockIndexOffset_ASM] - - ; Check whether we have the "thin lock" layout, the lock is free and the spin lock bit not set - ; SBLK_COMBINED_MASK_ASM = BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK + SBLK_MASK_LOCK_THREADID + SBLK_MASK_LOCK_RECLEVEL - test eax, SBLK_COMBINED_MASK_ASM - jnz MonTryEnterNeedMoreTests - - ; Ok, everything is fine. Fetch the thread id and make sure it's small enough for thin locks - mov edx, [esi+Thread_m_ThreadId] - cmp edx, SBLK_MASK_LOCK_THREADID_ASM - ja MonTryEnterFramedLockHelper - - ; Try to put our thread id in there - or edx,eax - lock cmpxchg [ARGUMENT_REG1-SyncBlockIndexOffset_ASM],edx - jnz MonTryEnterRetryHelperThinLock - - ; Got the lock - everything is fine" - add [esi+Thread_m_dwLockCount],1 - pop esi - - ; Delay value no longer needed - pop ebx - - ; Timeout parameter not needed, ditch it from the stack. - add esp,4 - - mov eax, [esp+4] - mov byte ptr [eax], 1 - ret 4 - -MonTryEnterNeedMoreTests: - ; Ok, it's not the simple case - find out which case it is - test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX_ASM - jnz MonTryEnterHaveSyncBlockIndexOrHash - - ; The header is transitioning or the lock is taken - test eax, BIT_SBLK_SPIN_LOCK_ASM - jnz MonTryEnterRetryHelperThinLock - - mov edx, eax - and edx, SBLK_MASK_LOCK_THREADID_ASM - cmp edx, [esi+Thread_m_ThreadId] - jne MonTryEnterPrepareToWaitThinLock - - ; Ok, the thread id matches, it's the recursion case. - ; Bump up the recursion level and check for overflow - lea edx, [eax+SBLK_LOCK_RECLEVEL_INC_ASM] - test edx, SBLK_MASK_LOCK_RECLEVEL_ASM - jz MonTryEnterFramedLockHelper - - ; Try to put the new recursion level back. If the header was changed in the meantime, - ; we need a full retry, because the layout could have changed. - lock cmpxchg [ARGUMENT_REG1-SyncBlockIndexOffset_ASM],edx - jnz MonTryEnterRetryHelperThinLock - - ; Everything went fine and we're done - pop esi - pop ebx - - ; Timeout parameter not needed, ditch it from the stack. 
- add esp, 4 - mov eax, [esp+4] - mov byte ptr [eax], 1 - ret 4 - -MonTryEnterPrepareToWaitThinLock: - ; If we are on an MP system, we try spinning for a certain number of iterations - cmp dword ptr g_SystemInfo+SYSTEM_INFO_dwNumberOfProcessors,1 - jle MonTryEnterFramedLockHelper - - ; exponential backoff: delay by approximately 2*ebx clock cycles (on a PIII) - mov eax, ebx -MonTryEnterdelayLoopThinLock: - $repnop ; indicate to the CPU that we are spin waiting (useful for some Intel P4 multiprocs) - dec eax - jnz MonTryEnterdelayLoopThinLock - - ; next time, wait a factor longer - imul ebx, dword ptr g_SpinConstants+SpinConstants_dwBackoffFactor - - cmp ebx, dword ptr g_SpinConstants+SpinConstants_dwMaximumDuration - jle MonTryEnterRetryHelperThinLock - - jmp MonTryEnterWouldBlock - -MonTryEnterRetryHelperThinLock: - jmp MonTryEnterRetryThinLock - - -MonTryEnterHaveSyncBlockIndexOrHash: - ; If we have a hash code already, we need to create a sync block - test eax, BIT_SBLK_IS_HASHCODE_ASM - jnz MonTryEnterFramedLockHelper - - ; Just and out the top bits and grab the syncblock index - and eax, MASK_SYNCBLOCKINDEX_ASM - - ; Get the sync block pointer. - mov ARGUMENT_REG2, dword ptr g_pSyncTable - mov ARGUMENT_REG2, [ARGUMENT_REG2+eax*SizeOfSyncTableEntry_ASM+SyncTableEntry_m_SyncBlock] - - ; Check if the sync block has been allocated. - test ARGUMENT_REG2, ARGUMENT_REG2 - jz MonTryEnterFramedLockHelper - - ; Get a pointer to the lock object. - lea ARGUMENT_REG2, [ARGUMENT_REG2+SyncBlock_m_Monitor] - -MonTryEnterRetrySyncBlock: - ; Attempt to acquire the lock. - mov eax, [ARGUMENT_REG2+AwareLock_m_MonitorHeld] - test eax,eax - jne MonTryEnterHaveWaiters - - ; We need another scratch register for what follows, so save EBX now so" - ; we can use it for that purpose." - push ebx - - ; Common case, lock isn't held and there are no waiters. Attempt to - ; gain ownership ourselves. - mov ebx,1 - lock cmpxchg [ARGUMENT_REG2+AwareLock_m_MonitorHeld],ebx - - pop ebx - - jnz MonTryEnterRetryHelperSyncBlock - - ; Success. Save the thread object in the lock and increment the use count. - mov dword ptr [ARGUMENT_REG2+AwareLock_m_HoldingThread],esi - inc dword ptr [ARGUMENT_REG2+AwareLock_m_Recursion] - inc dword ptr [esi+Thread_m_dwLockCount] - -ifdef MON_DEBUG -ifdef TRACK_SYNC - push ARGUMENT_REG2 ; AwareLock - push [esp+4] ; return address - call EnterSyncHelper -endif ;TRACK_SYNC -endif ;MON_DEBUG - - pop esi - pop ebx - - ; Timeout parameter not needed, ditch it from the stack." - add esp,4 - - mov eax, [esp+4] - mov byte ptr [eax], 1 - ret 4 - - ; It's possible to get here with waiters but no lock held, but in this - ; case a signal is about to be fired which will wake up a waiter. So - ; for fairness sake we should wait too. - ; Check first for recursive lock attempts on the same thread. -MonTryEnterHaveWaiters: - ; Is mutex already owned by current thread? - cmp [ARGUMENT_REG2+AwareLock_m_HoldingThread],esi - jne MonTryEnterPrepareToWait - - ; Yes, bump our use count. - inc dword ptr [ARGUMENT_REG2+AwareLock_m_Recursion] -ifdef MON_DEBUG -ifdef TRACK_SYNC - push ARGUMENT_REG2 ; AwareLock - push [esp+4] ; return address - call EnterSyncHelper -endif ;TRACK_SYNC -endif ;MON_DEBUG - pop esi - pop ebx - - ; Timeout parameter not needed, ditch it from the stack. 
- add esp,4 - - mov eax, [esp+4] - mov byte ptr [eax], 1 - ret 4 - -MonTryEnterPrepareToWait: - ; If we are on an MP system, we try spinning for a certain number of iterations - cmp dword ptr g_SystemInfo+SYSTEM_INFO_dwNumberOfProcessors,1 - jle MonTryEnterWouldBlock - - ; exponential backoff: delay by approximately 2*ebx clock cycles (on a PIII) - mov eax, ebx -MonTryEnterdelayLoop: - $repnop ; indicate to the CPU that we are spin waiting (useful for some Intel P4 multiprocs) - dec eax - jnz MonTryEnterdelayLoop - - ; next time, wait a factor longer - imul ebx, dword ptr g_SpinConstants+SpinConstants_dwBackoffFactor - - cmp ebx, dword ptr g_SpinConstants+SpinConstants_dwMaximumDuration - jle MonTryEnterRetrySyncBlock - - ; We would need to block to enter the section. Return failure if - ; timeout is zero, else call the framed helper to do the blocking - ; form of TryEnter." -MonTryEnterWouldBlock: - pop esi - pop ebx - pop ARGUMENT_REG2 - test ARGUMENT_REG2, ARGUMENT_REG2 - jnz MonTryEnterBlock - mov eax, [esp+4] - mov byte ptr [eax], 0 - ret 4 - -MonTryEnterRetryHelperSyncBlock: - jmp MonTryEnterRetrySyncBlock - -MonTryEnterFramedLockHelper: - ; ARGUMENT_REG1 has the object to synchronize on, must retrieve the - ; timeout parameter from the stack. - pop esi - pop ebx - pop ARGUMENT_REG2 -MonTryEnterBlock: - jmp JITutil_MonTryEnter - -@JIT_MonTryEnter@12 endp - -;********************************************************************** -; This is a frameless helper for exiting a monitor on a object. -; The object is in ARGUMENT_REG1. This tries the normal case (no -; blocking or object allocation) in line and calls a framed helper -; for the other cases. -; ***** NOTE: if you make any changes to this routine, build with MON_DEBUG undefined -; to make sure you don't break the non-debug build. This is very fragile code. -; Also, propagate the changes to jithelp.s which contains the same helper and assembly code -; (in AT&T syntax) for gnu assembler. -@JIT_MonExitWorker@4 proc public - ; The thin lock logic needs an additional register to hold the thread, unfortunately - push esi - - ; Check if the instance is NULL. - test ARGUMENT_REG1, ARGUMENT_REG1 - jz MonExitFramedLockHelper - - call _GetThread@0 - mov esi,eax - -MonExitRetryThinLock: - ; Fetch the header dword and check its layout and the spin lock bit - mov eax, [ARGUMENT_REG1-SyncBlockIndexOffset_ASM] - ;BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX_SPIN_LOCK_ASM = BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK - test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX_SPIN_LOCK_ASM - jnz MonExitNeedMoreTests - - ; Ok, we have a "thin lock" layout - check whether the thread id matches - mov edx,eax - and edx, SBLK_MASK_LOCK_THREADID_ASM - cmp edx, [esi+Thread_m_ThreadId] - jne MonExitFramedLockHelper - - ; Check the recursion level - test eax, SBLK_MASK_LOCK_RECLEVEL_ASM - jne MonExitDecRecursionLevel - - ; It's zero - we're leaving the lock. - ; So try to put back a zero thread id. 
- ; edx and eax match in the thread id bits, and edx is zero elsewhere, so the xor is sufficient - xor edx,eax - lock cmpxchg [ARGUMENT_REG1-SyncBlockIndexOffset_ASM],edx - jnz MonExitRetryHelperThinLock - - ; We're done - sub [esi+Thread_m_dwLockCount],1 - pop esi - ret - -MonExitDecRecursionLevel: - lea edx, [eax-SBLK_LOCK_RECLEVEL_INC_ASM] - lock cmpxchg [ARGUMENT_REG1-SyncBlockIndexOffset_ASM],edx - jnz MonExitRetryHelperThinLock - - ; We're done - pop esi - ret - -MonExitNeedMoreTests: - ;Forward all special cases to the slow helper - ;BIT_SBLK_IS_HASHCODE_OR_SPIN_LOCK_ASM = BIT_SBLK_IS_HASHCODE + BIT_SBLK_SPIN_LOCK - test eax, BIT_SBLK_IS_HASHCODE_OR_SPIN_LOCK_ASM - jnz MonExitFramedLockHelper - - ; Get the sync block index and use it to compute the sync block pointer - mov ARGUMENT_REG2, dword ptr g_pSyncTable - and eax, MASK_SYNCBLOCKINDEX_ASM - mov ARGUMENT_REG2, [ARGUMENT_REG2+eax*SizeOfSyncTableEntry_ASM+SyncTableEntry_m_SyncBlock] - - ; was there a sync block? - test ARGUMENT_REG2, ARGUMENT_REG2 - jz MonExitFramedLockHelper - - ; Get a pointer to the lock object. - lea ARGUMENT_REG2, [ARGUMENT_REG2+SyncBlock_m_Monitor] - - ; Check if lock is held. - cmp [ARGUMENT_REG2+AwareLock_m_HoldingThread],esi - jne MonExitFramedLockHelper - -ifdef MON_DEBUG -ifdef TRACK_SYNC - push ARGUMENT_REG1 ; preserve regs - push ARGUMENT_REG2 - - push ARGUMENT_REG2 ; AwareLock - push [esp+8] ; return address - call LeaveSyncHelper - - pop ARGUMENT_REG2 ; restore regs - pop ARGUMENT_REG1 -endif ;TRACK_SYNC -endif ;MON_DEBUG - ; Reduce our recursion count. - dec dword ptr [ARGUMENT_REG2+AwareLock_m_Recursion] - jz MonExitLastRecursion - - pop esi - ret - -MonExitRetryHelperThinLock: - jmp MonExitRetryThinLock - -MonExitFramedLockHelper: - pop esi - jmp JITutil_MonExitWorker - - ; This is the last count we held on this lock, so release the lock. -MonExitLastRecursion: - dec dword ptr [esi+Thread_m_dwLockCount] - mov dword ptr [ARGUMENT_REG2+AwareLock_m_HoldingThread],0 - -MonExitRetry: - mov eax, [ARGUMENT_REG2+AwareLock_m_MonitorHeld] - lea esi, [eax-1] - lock cmpxchg [ARGUMENT_REG2+AwareLock_m_MonitorHeld], esi - jne MonExitRetryHelper - pop esi - test eax,0FFFFFFFEh - jne MonExitMustSignal - - ret - -MonExitMustSignal: - mov ARGUMENT_REG1, ARGUMENT_REG2 - jmp JITutil_MonSignal - -MonExitRetryHelper: - jmp MonExitRetry - -@JIT_MonExitWorker@4 endp - -;********************************************************************** -; This is a frameless helper for entering a static monitor on a class. -; The methoddesc is in ARGUMENT_REG1. This tries the normal case (no -; blocking or object allocation) in line and calls a framed helper -; for the other cases. -; Note we are changing the methoddesc parameter to a pointer to the -; AwareLock. -; ***** NOTE: if you make any changes to this routine, build with MON_DEBUG undefined -; to make sure you don't break the non-debug build. This is very fragile code. -; Also, propagate the changes to jithelp.s which contains the same helper and assembly code -; (in AT&T syntax) for gnu assembler. -@JIT_MonEnterStatic@4 proc public - ; We need another scratch register for what follows, so save EBX now so - ; we can use it for that purpose. - push ebx - - ; Attempt to acquire the lock -MonEnterStaticRetry: - mov eax, [ARGUMENT_REG1+AwareLock_m_MonitorHeld] - test eax,eax - jne MonEnterStaticHaveWaiters - - ; Common case, lock isn't held and there are no waiters. Attempt to - ; gain ownership ourselves. 
- mov ebx,1 - lock cmpxchg [ARGUMENT_REG1+AwareLock_m_MonitorHeld],ebx - jnz MonEnterStaticRetryHelper - - pop ebx - - ; Success. Save the thread object in the lock and increment the use count. - call _GetThread@0 - mov [ARGUMENT_REG1+AwareLock_m_HoldingThread], eax - inc dword ptr [ARGUMENT_REG1+AwareLock_m_Recursion] - inc dword ptr [eax+Thread_m_dwLockCount] - -ifdef MON_DEBUG -ifdef TRACK_SYNC - push ARGUMENT_REG1 ; AwareLock - push [esp+4] ; return address - call EnterSyncHelper -endif ;TRACK_SYNC -endif ;MON_DEBUG - ret - - ; It's possible to get here with waiters but no lock held, but in this - ; case a signal is about to be fired which will wake up a waiter. So - ; for fairness sake we should wait too. - ; Check first for recursive lock attempts on the same thread. -MonEnterStaticHaveWaiters: - ; Get thread but preserve EAX (contains cached contents of m_MonitorHeld). - push eax - call _GetThread@0 - mov ebx,eax - pop eax - - ; Is mutex already owned by current thread? - cmp [ARGUMENT_REG1+AwareLock_m_HoldingThread],ebx - jne MonEnterStaticPrepareToWait - - ; Yes, bump our use count. - inc dword ptr [ARGUMENT_REG1+AwareLock_m_Recursion] -ifdef MON_DEBUG -ifdef TRACK_SYNC - push ARGUMENT_REG1 ; AwareLock - push [esp+4] ; return address - call EnterSyncHelper -endif ;TRACK_SYNC -endif ;MON_DEBUG - pop ebx - ret - -MonEnterStaticPrepareToWait: - pop ebx - - ; ARGUMENT_REG1 should have AwareLock. Call contention helper. - jmp JITutil_MonContention - -MonEnterStaticRetryHelper: - jmp MonEnterStaticRetry -@JIT_MonEnterStatic@4 endp - -;********************************************************************** -; A frameless helper for exiting a static monitor on a class. -; The methoddesc is in ARGUMENT_REG1. This tries the normal case (no -; blocking or object allocation) in line and calls a framed helper -; for the other cases. -; Note we are changing the methoddesc parameter to a pointer to the -; AwareLock. -; ***** NOTE: if you make any changes to this routine, build with MON_DEBUG undefined -; to make sure you don't break the non-debug build. This is very fragile code. -; Also, propagate the changes to jithelp.s which contains the same helper and assembly code -; (in AT&T syntax) for gnu assembler. -@JIT_MonExitStatic@4 proc public - -ifdef MON_DEBUG -ifdef TRACK_SYNC - push ARGUMENT_REG1 ; preserve regs - - push ARGUMENT_REG1 ; AwareLock - push [esp+8] ; return address - call LeaveSyncHelper - - pop [ARGUMENT_REG1] ; restore regs -endif ;TRACK_SYNC -endif ;MON_DEBUG - - ; Check if lock is held. - call _GetThread@0 - cmp [ARGUMENT_REG1+AwareLock_m_HoldingThread],eax - jne MonExitStaticLockError - - ; Reduce our recursion count. - dec dword ptr [ARGUMENT_REG1+AwareLock_m_Recursion] - jz MonExitStaticLastRecursion - - ret - - ; This is the last count we held on this lock, so release the lock. -MonExitStaticLastRecursion: - ; eax must have the thread object - dec dword ptr [eax+Thread_m_dwLockCount] - mov dword ptr [ARGUMENT_REG1+AwareLock_m_HoldingThread],0 - push ebx - -MonExitStaticRetry: - mov eax, [ARGUMENT_REG1+AwareLock_m_MonitorHeld] - lea ebx, [eax-1] - lock cmpxchg [ARGUMENT_REG1+AwareLock_m_MonitorHeld],ebx - jne MonExitStaticRetryHelper - pop ebx - test eax,0FFFFFFFEh - jne MonExitStaticMustSignal - - ret - -MonExitStaticMustSignal: - jmp JITutil_MonSignal - -MonExitStaticRetryHelper: - jmp MonExitStaticRetry - ; Throw a synchronization lock exception. 
-MonExitStaticLockError: - mov ARGUMENT_REG1, CORINFO_SynchronizationLockException_ASM - jmp JIT_InternalThrow - -@JIT_MonExitStatic@4 endp - ; PatchedCodeStart and PatchedCodeEnd are used to determine bounds of patched code. ; diff --git a/src/vm/jithelpers.cpp b/src/vm/jithelpers.cpp index 32be77823c..8cff03ae60 100644 --- a/src/vm/jithelpers.cpp +++ b/src/vm/jithelpers.cpp @@ -4472,19 +4472,18 @@ HCIMPL_MONHELPER(JIT_MonEnterWorker_Portable, Object* obj) result = obj->EnterObjMonitorHelper(pCurThread); if (result == AwareLock::EnterHelperResult_Entered) { - MONHELPER_STATE(*pbLockTaken = 1;) + MONHELPER_STATE(*pbLockTaken = 1); return; } - else if (result == AwareLock::EnterHelperResult_Contention) { - AwareLock::EnterHelperResult resultSpin = obj->EnterObjMonitorHelperSpin(pCurThread); - if (resultSpin == AwareLock::EnterHelperResult_Entered) + result = obj->EnterObjMonitorHelperSpin(pCurThread); + if (result == AwareLock::EnterHelperResult_Entered) { - MONHELPER_STATE(*pbLockTaken = 1;) + MONHELPER_STATE(*pbLockTaken = 1); return; } - if (resultSpin == AwareLock::EnterHelperResult_Contention) + if (result == AwareLock::EnterHelperResult_Contention) { FC_INNER_RETURN_VOID(JIT_MonContention_Helper(obj, MONHELPER_ARG, GetEEFuncEntryPointMacro(JIT_MonEnter))); } @@ -4519,15 +4518,14 @@ HCIMPL1(void, JIT_MonEnter_Portable, Object* obj) { return; } - else if (result == AwareLock::EnterHelperResult_Contention) { - AwareLock::EnterHelperResult resultSpin = obj->EnterObjMonitorHelperSpin(pCurThread); - if (resultSpin == AwareLock::EnterHelperResult_Entered) + result = obj->EnterObjMonitorHelperSpin(pCurThread); + if (result == AwareLock::EnterHelperResult_Entered) { return; } - if (resultSpin == AwareLock::EnterHelperResult_Contention) + if (result == AwareLock::EnterHelperResult_Contention) { FC_INNER_RETURN_VOID(JIT_MonContention_Helper(obj, NULL, GetEEFuncEntryPointMacro(JIT_MonEnter))); } @@ -4563,16 +4561,15 @@ HCIMPL2(void, JIT_MonReliableEnter_Portable, Object* obj, BYTE* pbLockTaken) *pbLockTaken = 1; return; } - else if (result == AwareLock::EnterHelperResult_Contention) { - AwareLock::EnterHelperResult resultSpin = obj->EnterObjMonitorHelperSpin(pCurThread); - if (resultSpin == AwareLock::EnterHelperResult_Entered) + result = obj->EnterObjMonitorHelperSpin(pCurThread); + if (result == AwareLock::EnterHelperResult_Entered) { *pbLockTaken = 1; return; } - if (resultSpin == AwareLock::EnterHelperResult_Contention) + if (result == AwareLock::EnterHelperResult_Contention) { FC_INNER_RETURN_VOID(JIT_MonContention_Helper(obj, pbLockTaken, GetEEFuncEntryPointMacro(JIT_MonReliableEnter))); } @@ -4649,14 +4646,15 @@ HCIMPL3(void, JIT_MonTryEnter_Portable, Object* obj, INT32 timeOut, BYTE* pbLock *pbLockTaken = 1; return; } - else if (result == AwareLock::EnterHelperResult_Contention) { if (timeOut == 0) + { return; + } - AwareLock::EnterHelperResult resultSpin = obj->EnterObjMonitorHelperSpin(pCurThread); - if (resultSpin == AwareLock::EnterHelperResult_Entered) + result = obj->EnterObjMonitorHelperSpin(pCurThread); + if (result == AwareLock::EnterHelperResult_Entered) { *pbLockTaken = 1; return; @@ -4741,7 +4739,6 @@ FCIMPL1(void, JIT_MonExit_Portable, Object* obj) { return; } - else if (action == AwareLock::LeaveHelperAction_Signal) { FC_INNER_RETURN_VOID(JIT_MonExit_Signal(obj)); @@ -4773,7 +4770,6 @@ HCIMPL_MONHELPER(JIT_MonExitWorker_Portable, Object* obj) MONHELPER_STATE(*pbLockTaken = 0;) return; } - else if (action == AwareLock::LeaveHelperAction_Signal) { MONHELPER_STATE(*pbLockTaken 
= 0;) @@ -4821,7 +4817,7 @@ HCIMPL_MONHELPER(JIT_MonEnterStatic_Portable, AwareLock *lock) goto FramedLockHelper; } - if (lock->EnterHelper(pCurThread) == AwareLock::EnterHelperResult_Entered) + if (lock->EnterHelper(pCurThread, true /* checkRecursiveCase */)) { #if defined(_DEBUG) && defined(TRACK_SYNC) // The best place to grab this is from the ECall frame @@ -4909,289 +4905,6 @@ HCIMPL_MONHELPER(JIT_MonExitStatic_Portable, AwareLock *lock) HCIMPLEND #include <optdefault.h> -/*********************************************************************/ -// JITutil_Mon* are helpers than handle slow paths for JIT_Mon* methods -// implemented in assembly. They are not doing any spinning compared -// to the full fledged portable implementations above. -/*********************************************************************/ - -/*********************************************************************/ -HCIMPL_MONHELPER(JITutil_MonEnterWorker, Object* obj) -{ - CONTRACTL - { - FCALL_CHECK; - } - CONTRACTL_END; - - OBJECTREF objRef = ObjectToOBJECTREF(obj); - - // The following makes sure that Monitor.Enter shows up on thread abort - // stack walks (otherwise Monitor.Enter called within a CER can block a - // thread abort indefinitely). Setting the __me internal variable (normally - // only set for fcalls) will cause the helper frame below to be able to - // backtranslate into the method desc for the Monitor.Enter fcall. - // - // Note that we need explicitly initialize Monitor.Enter fcall in - // code:SystemDomain::LoadBaseSystemClasses to make this work in the case - // where the first call ever to Monitor.Enter is done as JIT helper - // for synchronized method. - __me = GetEEFuncEntryPointMacro(JIT_MonEnter); - - // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth. - HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_EXACT_DEPTH, objRef); - - if (objRef == NULL) - COMPlusThrow(kArgumentNullException); - - MONHELPER_STATE(GCPROTECT_BEGININTERIOR(pbLockTaken);) - -#ifdef _DEBUG - Thread *pThread = GetThread(); - DWORD lockCount = pThread->m_dwLockCount; -#endif - if (GET_THREAD()->CatchAtSafePointOpportunistic()) - { - GET_THREAD()->PulseGCMode(); - } - objRef->EnterObjMonitor(); - _ASSERTE ((objRef->GetSyncBlock()->GetMonitor()->m_Recursion == 1 && pThread->m_dwLockCount == lockCount + 1) || - pThread->m_dwLockCount == lockCount); - MONHELPER_STATE(if (pbLockTaken != 0) *pbLockTaken = 1;) - - MONHELPER_STATE(GCPROTECT_END();) - HELPER_METHOD_FRAME_END(); -} -HCIMPLEND - -/*********************************************************************/ - -// This helper is only ever used as part of FCall, but it is implemented using HCIMPL macro -// so that it can be tail called from assembly helper without triggering asserts in debug. -HCIMPL2(void, JITutil_MonReliableEnter, Object* obj, BYTE* pbLockTaken) -{ - CONTRACTL - { - FCALL_CHECK; - } - CONTRACTL_END; - - OBJECTREF objRef = ObjectToOBJECTREF(obj); - - // The following makes sure that Monitor.Enter shows up on thread abort - // stack walks (otherwise Monitor.Enter called within a CER can block a - // thread abort indefinitely). Setting the __me internal variable (normally - // only set for fcalls) will cause the helper frame below to be able to - // backtranslate into the method desc for the Monitor.Enter fcall. - __me = GetEEFuncEntryPointMacro(JIT_MonReliableEnter); - - // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth. 
- HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_EXACT_DEPTH, objRef); - - if (objRef == NULL) - COMPlusThrow(kArgumentNullException); - - GCPROTECT_BEGININTERIOR(pbLockTaken); - -#ifdef _DEBUG - Thread *pThread = GetThread(); - DWORD lockCount = pThread->m_dwLockCount; -#endif - if (GET_THREAD()->CatchAtSafePointOpportunistic()) - { - GET_THREAD()->PulseGCMode(); - } - objRef->EnterObjMonitor(); - _ASSERTE ((objRef->GetSyncBlock()->GetMonitor()->m_Recursion == 1 && pThread->m_dwLockCount == lockCount + 1) || - pThread->m_dwLockCount == lockCount); - *pbLockTaken = 1; - - GCPROTECT_END(); - HELPER_METHOD_FRAME_END(); -} -HCIMPLEND - - -/*********************************************************************/ - -// This helper is only ever used as part of FCall, but it is implemented using HCIMPL macro -// so that it can be tail called from assembly helper without triggering asserts in debug. -HCIMPL3(void, JITutil_MonTryEnter, Object* obj, INT32 timeOut, BYTE* pbLockTaken) -{ - CONTRACTL - { - FCALL_CHECK; - } - CONTRACTL_END; - - BOOL result = FALSE; - - OBJECTREF objRef = ObjectToOBJECTREF(obj); - - // The following makes sure that Monitor.TryEnter shows up on thread - // abort stack walks (otherwise Monitor.TryEnter called within a CER can - // block a thread abort for long periods of time). Setting the __me internal - // variable (normally only set for fcalls) will cause the helper frame below - // to be able to backtranslate into the method desc for the Monitor.TryEnter - // fcall. - __me = GetEEFuncEntryPointMacro(JIT_MonTryEnter); - - // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth. - HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_EXACT_DEPTH, objRef); - - if (objRef == NULL) - COMPlusThrow(kArgumentNullException); - - if (timeOut < -1) - COMPlusThrow(kArgumentOutOfRangeException); - - GCPROTECT_BEGININTERIOR(pbLockTaken); - - if (GET_THREAD()->CatchAtSafePointOpportunistic()) - { - GET_THREAD()->PulseGCMode(); - } - - result = objRef->TryEnterObjMonitor(timeOut); - *pbLockTaken = result != FALSE; - - GCPROTECT_END(); - HELPER_METHOD_FRAME_END(); -} -HCIMPLEND - -/*********************************************************************/ -HCIMPL_MONHELPER(JITutil_MonExitWorker, Object* obj) -{ - CONTRACTL - { - FCALL_CHECK; - } - CONTRACTL_END; - - MONHELPER_STATE(if (pbLockTaken != NULL && *pbLockTaken == 0) return;) - - OBJECTREF objRef = ObjectToOBJECTREF(obj); - - // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth. - HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_NO_THREAD_ABORT|Frame::FRAME_ATTR_EXACT_DEPTH, objRef); - - if (objRef == NULL) - COMPlusThrow(kArgumentNullException); - - if (!objRef->LeaveObjMonitor()) - COMPlusThrow(kSynchronizationLockException); - - MONHELPER_STATE(if (pbLockTaken != 0) *pbLockTaken = 0;) - - TESTHOOKCALL(AppDomainCanBeUnloaded(GET_THREAD()->GetDomain()->GetId().m_dwId,FALSE)); - - if (GET_THREAD()->IsAbortRequested()) { - GET_THREAD()->HandleThreadAbort(); - } - - HELPER_METHOD_FRAME_END(); -} -HCIMPLEND - -/*********************************************************************/ -// A helper for JIT_MonEnter that is on the callee side of an ecall -// frame and handles the contention case. 
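This contention helper, like the other JITutil_* routines in this hunk, is the blocking tail of a three-stage design that the portable helpers earlier in the file now express directly: an inline fast attempt, a bounded spin, and only then a framed call that may block, throw, or trigger a GC. A compilable outline of that layering, with simplified stand-ins for the runtime's ObjHeader and AwareLock types (these names are illustrative, not the runtime's):

    #include <atomic>

    struct Thread {};  // opaque here

    enum class EnterResult { Entered, Contention };

    // Illustrative monitor; the real fast paths operate on the object header
    // and AwareLock shown elsewhere in this diff.
    struct Monitor {
        std::atomic<int> held{0};

        EnterResult FastTryEnter(Thread*) {        // one CAS, no loop
            int expected = 0;
            return held.compare_exchange_strong(expected, 1,
                                                std::memory_order_acquire)
                ? EnterResult::Entered : EnterResult::Contention;
        }
        EnterResult SpinTryEnter(Thread* self) {   // bounded spin before giving up
            for (int spins = 0; spins < 64; ++spins)
                if (FastTryEnter(self) == EnterResult::Entered)
                    return EnterResult::Entered;
            return EnterResult::Contention;
        }
    };

    void MonitorEnterFramed(Monitor& m, Thread* self)
    {
        // Placeholder for the framed slow path: the real helper waits on an OS
        // event, honors thread aborts, and can throw; here we just keep trying.
        while (m.FastTryEnter(self) != EnterResult::Entered) { }
    }

    void MonitorEnter(Monitor& m, Thread* self)
    {
        if (m.FastTryEnter(self) == EnterResult::Entered) return;
        if (m.SpinTryEnter(self) == EnterResult::Entered) return;
        MonitorEnterFramed(m, self);
    }

Keeping the first two stages frameless is the whole point of the hand-written assembly being deleted here; once the same structure exists in portable C++ (JIT_MonEnter_Portable and friends), the assembly fast paths are redundant.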
- -HCIMPL_MONHELPER(JITutil_MonContention, AwareLock* lock) -{ - CONTRACTL - { - FCALL_CHECK; - } - CONTRACTL_END; - - // The following makes sure that Monitor.Enter shows up on thread abort - // stack walks (otherwise Monitor.Enter called within a CER can block a - // thread abort indefinitely). Setting the __me internal variable (normally - // only set for fcalls) will cause the helper frame below to be able to - // backtranslate into the method desc for the Monitor.Enter fcall. - __me = GetEEFuncEntryPointMacro(JIT_MonEnter); - - // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth. - HELPER_METHOD_FRAME_BEGIN_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH); - MONHELPER_STATE(GCPROTECT_BEGININTERIOR(pbLockTaken);) - -#ifdef _DEBUG - Thread *pThread = GetThread(); - DWORD lockCount = pThread->m_dwLockCount; -#endif - lock->Contention(); - _ASSERTE (pThread->m_dwLockCount == lockCount + 1); - MONHELPER_STATE(if (pbLockTaken != 0) *pbLockTaken = 1;) - - MONHELPER_STATE(GCPROTECT_END();) - HELPER_METHOD_FRAME_END(); -} -HCIMPLEND - -// This helper is only ever used as part of FCall, but it is implemented using HCIMPL macro -// so that it can be tail called from assembly helper without triggering asserts in debug. -HCIMPL2(void, JITutil_MonReliableContention, AwareLock* lock, BYTE* pbLockTaken) -{ - CONTRACTL - { - FCALL_CHECK; - } - CONTRACTL_END; - - // The following makes sure that Monitor.Enter shows up on thread abort - // stack walks (otherwise Monitor.Enter called within a CER can block a - // thread abort indefinitely). Setting the __me internal variable (normally - // only set for fcalls) will cause the helper frame below to be able to - // backtranslate into the method desc for the Monitor.Enter fcall. - __me = GetEEFuncEntryPointMacro(JIT_MonReliableEnter); - - // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth. - HELPER_METHOD_FRAME_BEGIN_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH); - GCPROTECT_BEGININTERIOR(pbLockTaken); - -#ifdef _DEBUG - Thread *pThread = GetThread(); - DWORD lockCount = pThread->m_dwLockCount; -#endif - lock->Contention(); - _ASSERTE (pThread->m_dwLockCount == lockCount + 1); - *pbLockTaken = 1; - - GCPROTECT_END(); - HELPER_METHOD_FRAME_END(); -} -HCIMPLEND - -/*********************************************************************/ -// A helper for JIT_MonExit and JIT_MonExitStatic that is on the -// callee side of an ecall frame and handles cases that might allocate, -// throw or block. -HCIMPL_MONHELPER(JITutil_MonSignal, AwareLock* lock) -{ - CONTRACTL - { - FCALL_CHECK; - } - CONTRACTL_END; - - // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth. 
- HELPER_METHOD_FRAME_BEGIN_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH | Frame::FRAME_ATTR_NO_THREAD_ABORT); - - lock->Signal(); - MONHELPER_STATE(if (pbLockTaken != 0) *pbLockTaken = 0;) - - TESTHOOKCALL(AppDomainCanBeUnloaded(GET_THREAD()->GetDomain()->GetId().m_dwId,FALSE)); - - if (GET_THREAD()->IsAbortRequested()) { - GET_THREAD()->HandleThreadAbort(); - } - - HELPER_METHOD_FRAME_END(); -} -HCIMPLEND - HCIMPL1(void *, JIT_GetSyncFromClassHandle, CORINFO_CLASS_HANDLE typeHnd_) CONTRACTL { FCALL_CHECK; diff --git a/src/vm/jitinterfacegen.cpp b/src/vm/jitinterfacegen.cpp index 26387405b7..38f1a7436a 100644 --- a/src/vm/jitinterfacegen.cpp +++ b/src/vm/jitinterfacegen.cpp @@ -47,15 +47,6 @@ EXTERN_C Object* JIT_NewArr1OBJ_UP (CORINFO_CLASS_HANDLE arrayMT, INT_PTR size); EXTERN_C Object* JIT_NewArr1VC_MP (CORINFO_CLASS_HANDLE arrayMT, INT_PTR size); EXTERN_C Object* JIT_NewArr1VC_UP (CORINFO_CLASS_HANDLE arrayMT, INT_PTR size); -//For the optimized JIT_Mon helpers -#if defined(_TARGET_AMD64_) -EXTERN_C void JIT_MonEnterWorker_Slow(Object* obj, BYTE* pbLockTaken); -EXTERN_C void JIT_MonExitWorker_Slow(Object* obj, BYTE* pbLockTaken); -EXTERN_C void JIT_MonTryEnter_Slow(Object* obj, INT32 timeOut, BYTE* pbLockTaken); -EXTERN_C void JIT_MonEnterStatic_Slow(AwareLock* lock, BYTE* pbLockTaken); -EXTERN_C void JIT_MonExitStatic_Slow(AwareLock* lock, BYTE* pbLockTaken); -#endif // _TARGET_AMD64_ - extern "C" void* JIT_GetSharedNonGCStaticBase_Slow(SIZE_T moduleDomainID, DWORD dwModuleClassID); extern "C" void* JIT_GetSharedNonGCStaticBaseNoCtor_Slow(SIZE_T moduleDomainID, DWORD dwModuleClassID); extern "C" void* JIT_GetSharedGCStaticBase_Slow(SIZE_T moduleDomainID, DWORD dwModuleClassID); @@ -114,12 +105,6 @@ EXTERN_C void JIT_BoxFastMPIGT__PatchTLSLabel(); EXTERN_C void AllocateStringFastMP_InlineGetThread__PatchTLSOffset(); EXTERN_C void JIT_NewArr1VC_MP_InlineGetThread__PatchTLSOffset(); EXTERN_C void JIT_NewArr1OBJ_MP_InlineGetThread__PatchTLSOffset(); -EXTERN_C void JIT_MonEnterWorker_InlineGetThread_GetThread_PatchLabel(); -EXTERN_C void JIT_MonExitWorker_InlineGetThread_GetThread_PatchLabel(); -EXTERN_C void JIT_MonTryEnter_GetThread_PatchLabel(); -EXTERN_C void JIT_MonEnterStaticWorker_InlineGetThread_GetThread_PatchLabel_1(); -EXTERN_C void JIT_MonEnterStaticWorker_InlineGetThread_GetThread_PatchLabel_2(); -EXTERN_C void JIT_MonExitStaticWorker_InlineGetThread_GetThread_PatchLabel(); static const LPVOID InlineGetThreadLocations[] = { @@ -128,12 +113,6 @@ static const LPVOID InlineGetThreadLocations[] = { (PVOID)AllocateStringFastMP_InlineGetThread__PatchTLSOffset, (PVOID)JIT_NewArr1VC_MP_InlineGetThread__PatchTLSOffset, (PVOID)JIT_NewArr1OBJ_MP_InlineGetThread__PatchTLSOffset, - (PVOID)JIT_MonEnterWorker_InlineGetThread_GetThread_PatchLabel, - (PVOID)JIT_MonExitWorker_InlineGetThread_GetThread_PatchLabel, - (PVOID)JIT_MonTryEnter_GetThread_PatchLabel, - (PVOID)JIT_MonEnterStaticWorker_InlineGetThread_GetThread_PatchLabel_1, - (PVOID)JIT_MonEnterStaticWorker_InlineGetThread_GetThread_PatchLabel_2, - (PVOID)JIT_MonExitStaticWorker_InlineGetThread_GetThread_PatchLabel, }; EXTERN_C void JIT_GetSharedNonGCStaticBase__PatchTLSLabel(); @@ -187,11 +166,6 @@ void FixupInlineGetters(DWORD tlsSlot, const LPVOID * pLocations, int nLocations } #endif // defined(_WIN64) && !defined(FEATURE_IMPLICIT_TLS) -#if defined(_TARGET_AMD64_) -EXTERN_C void JIT_MonEnterStaticWorker(); -EXTERN_C void JIT_MonExitStaticWorker(); -#endif - void InitJITHelpers1() { STANDARD_VM_CONTRACT; @@ -274,22 +248,6 @@ void 
diff --git a/src/vm/syncblk.cpp b/src/vm/syncblk.cpp
index 50eec9b068..9c512675b6 100644
--- a/src/vm/syncblk.cpp
+++ b/src/vm/syncblk.cpp
@@ -1901,6 +1901,90 @@ BOOL ObjHeader::TryEnterObjMonitor(INT32 timeOut)
 return GetSyncBlock()->TryEnterMonitor(timeOut);
 }
 
+AwareLock::EnterHelperResult ObjHeader::EnterObjMonitorHelperSpin(Thread* pCurThread)
+{
+ CONTRACTL{
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ // Note: EnterObjMonitorHelper must be called before this function (see below)
+
+ if (g_SystemInfo.dwNumberOfProcessors == 1)
+ {
+ return AwareLock::EnterHelperResult_Contention;
+ }
+
+ for (DWORD spinCount = g_SpinConstants.dwInitialDuration; AwareLock::SpinWaitAndBackOffBeforeOperation(&spinCount);)
+ {
+ LONG oldValue = m_SyncBlockValue.LoadWithoutBarrier();
+
+ // Since spinning has begun, chances are good that the monitor has already switched to AwareLock mode, so check for that
+ // case first
+ if (oldValue & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
+ {
+ // If we have a hash code already, we need to create a sync block
+ if (oldValue & BIT_SBLK_IS_HASHCODE)
+ {
+ return AwareLock::EnterHelperResult_UseSlowPath;
+ }
+
+ // Check the recursive case once before the spin loop. If it's not the recursive case in the beginning, it will not
+ // be in the future, so the spin loop can avoid checking the recursive case.
+ SyncBlock *syncBlock = g_pSyncTable[oldValue & MASK_SYNCBLOCKINDEX].m_SyncBlock;
+ _ASSERTE(syncBlock != NULL);
+ AwareLock *awareLock = &syncBlock->m_Monitor;
+ if (awareLock->EnterHelper(pCurThread, true /* checkRecursiveCase */))
+ {
+ return AwareLock::EnterHelperResult_Entered;
+ }
+ while (AwareLock::SpinWaitAndBackOffBeforeOperation(&spinCount))
+ {
+ if (awareLock->EnterHelper(pCurThread, false /* checkRecursiveCase */))
+ {
+ return AwareLock::EnterHelperResult_Entered;
+ }
+ }
+ break;
+ }
+
+ DWORD tid = pCurThread->GetThreadId();
+ if ((oldValue & (BIT_SBLK_SPIN_LOCK +
+ SBLK_MASK_LOCK_THREADID +
+ SBLK_MASK_LOCK_RECLEVEL)) == 0)
+ {
+ if (tid > SBLK_MASK_LOCK_THREADID)
+ {
+ return AwareLock::EnterHelperResult_UseSlowPath;
+ }
+
+ LONG newValue = oldValue | tid;
+ if (InterlockedCompareExchangeAcquire((LONG*)&m_SyncBlockValue, newValue, oldValue) == oldValue)
+ {
+ pCurThread->IncLockCount();
+ return AwareLock::EnterHelperResult_Entered;
+ }
+
+ continue;
+ }
+
+ // EnterObjMonitorHelper handles the thin lock recursion case. If it's not that case, it won't become that case. If
+ // EnterObjMonitorHelper failed to increment the recursion level, it will go down the slow path and won't come here. So,
+ // no need to check the recursion case here.
+ _ASSERTE(
+ // The header is transitioning - treat this as if the lock was taken
+ oldValue & BIT_SBLK_SPIN_LOCK ||
+ // Here we know we have the "thin lock" layout, but the lock is not free.
+ // It can't be the recursion case though, because the call to EnterObjMonitorHelper prior to this would have taken
+ // the slow path in the recursive case.
+ tid != (DWORD)(oldValue & SBLK_MASK_LOCK_THREADID));
+ }
+
+ return AwareLock::EnterHelperResult_Contention;
+}
+
 BOOL ObjHeader::LeaveObjMonitor()
 {
 CONTRACTL
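
[Annotation] The thin-lock acquisition inside the spin loop above is a single acquire compare-exchange that ORs the current thread id into a free header word. The following is a minimal model of just that CAS using std::atomic in place of InterlockedCompareExchangeAcquire; the mask value mirrors SBLK_MASK_LOCK_THREADID but is an assumption here, and the "header must be fully zero" check is a simplification of the real bit tests.

    #include <atomic>
    #include <cstdint>

    constexpr uint32_t kThreadIdMask = 0x3FF; // assumed stand-in for SBLK_MASK_LOCK_THREADID

    bool TryAcquireThinLock(std::atomic<uint32_t>& header, uint32_t tid)
    {
        uint32_t oldValue = header.load(std::memory_order_relaxed);
        if (oldValue != 0 || tid > kThreadIdMask)
            return false; // header busy or thread id does not fit in the thin lock
        // Acquire ordering on success pairs with the release on exit, so the
        // owner's memory accesses cannot float above lock acquisition.
        return header.compare_exchange_strong(oldValue, oldValue | tid,
                                              std::memory_order_acquire,
                                              std::memory_order_relaxed);
    }
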
diff --git a/src/vm/syncblk.h b/src/vm/syncblk.h
index c6c63aefc7..9dcd57c497 100644
--- a/src/vm/syncblk.h
+++ b/src/vm/syncblk.h
@@ -251,9 +251,10 @@ public:
 LeaveHelperAction_Error,
 };
 
+ static bool SpinWaitAndBackOffBeforeOperation(DWORD *spinCountRef);
+
 // Helper encapsulating the fast path entering monitor. Returns what kind of result was achieved.
- AwareLock::EnterHelperResult EnterHelper(Thread* pCurThread);
- AwareLock::EnterHelperResult EnterHelperSpin(Thread* pCurThread, INT32 timeOut = -1);
+ bool EnterHelper(Thread* pCurThread, bool checkRecursiveCase);
 
 // Helper encapsulating the core logic for leaving monitor. Returns what kind of
 // follow up action is necessary
@@ -1265,8 +1266,11 @@ class ObjHeader
 // non-blocking version of above
 BOOL TryEnterObjMonitor(INT32 timeOut = 0);
 
- // Inlineable fast path of EnterObjMonitor/TryEnterObjMonitor
+ // Inlineable fast path of EnterObjMonitor/TryEnterObjMonitor. Must be called before EnterObjMonitorHelperSpin.
 AwareLock::EnterHelperResult EnterObjMonitorHelper(Thread* pCurThread);
+
+ // Typically non-inlined spin loop for some fast paths of EnterObjMonitor/TryEnterObjMonitor. EnterObjMonitorHelper must be
+ // called before this function.
+ AwareLock::EnterHelperResult EnterObjMonitorHelperSpin(Thread* pCurThread);
 
 // leaves the monitor of an object
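
[Annotation] The header comments above fix a call-order contract: the inlineable helper runs first, the spin helper runs only after it reports contention, and everything else falls to the framed slow path. The sketch below shows that composition with toy stand-ins (the enum and stub functions are invented, not the CoreCLR declarations).

    #include <cstdio>

    enum class EnterResult { Entered, Contention, UseSlowPath };

    EnterResult EnterObjMonitorHelper()     { return EnterResult::Contention; } // stub
    EnterResult EnterObjMonitorHelperSpin() { return EnterResult::Contention; } // stub
    void EnterMonitorSlow()                 { std::puts("blocking slow path"); } // stub

    int main()
    {
        EnterResult result = EnterObjMonitorHelper();
        if (result == EnterResult::Contention)
            result = EnterObjMonitorHelperSpin(); // only valid after the helper above
        if (result != EnterResult::Entered)
            EnterMonitorSlow(); // UseSlowPath or persistent contention
    }
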
diff --git a/src/vm/syncblk.inl b/src/vm/syncblk.inl
index cb6b280228..376cd4c2e7 100644
--- a/src/vm/syncblk.inl
+++ b/src/vm/syncblk.inl
@@ -8,157 +8,141 @@
 
 #ifndef DACCESS_COMPILE
 
-FORCEINLINE AwareLock::EnterHelperResult AwareLock::EnterHelper(Thread* pCurThread)
+FORCEINLINE bool AwareLock::SpinWaitAndBackOffBeforeOperation(DWORD *spinCountRef)
 {
- CONTRACTL {
+ CONTRACTL{
 SO_TOLERANT;
 NOTHROW;
 GC_NOTRIGGER;
- MODE_ANY;
+ MODE_COOPERATIVE;
 } CONTRACTL_END;
 
- for (;;)
+ _ASSERTE(spinCountRef != nullptr);
+ DWORD &spinCount = *spinCountRef;
+ _ASSERTE(g_SystemInfo.dwNumberOfProcessors != 1);
+
+ if (spinCount > g_SpinConstants.dwMaximumDuration)
 {
- LONG state = m_MonitorHeld.LoadWithoutBarrier();
+ return false;
+ }
 
- if (state == 0)
- {
- if (InterlockedCompareExchangeAcquire((LONG*)&m_MonitorHeld, 1, 0) == 0)
- {
- m_HoldingThread = pCurThread;
- m_Recursion = 1;
- pCurThread->IncLockCount();
- return AwareLock::EnterHelperResult_Entered;
- }
- }
- else
- {
- if (GetOwningThread() == pCurThread) /* monitor is held, but it could be a recursive case */
- {
- m_Recursion++;
- return AwareLock::EnterHelperResult_Entered;
- }
+ for (DWORD i = 0; i < spinCount; i++)
+ {
+ YieldProcessor();
+ }
+
+ spinCount *= g_SpinConstants.dwBackoffFactor;
+ return true;
+}
+
+FORCEINLINE bool AwareLock::EnterHelper(Thread* pCurThread, bool checkRecursiveCase)
+{
+ CONTRACTL{
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
 
- return AwareLock::EnterHelperResult_Contention;
+ LONG state = m_MonitorHeld.LoadWithoutBarrier();
+ if (state == 0)
+ {
+ if (InterlockedCompareExchangeAcquire((LONG*)&m_MonitorHeld, 1, 0) == 0)
+ {
+ m_HoldingThread = pCurThread;
+ m_Recursion = 1;
+ pCurThread->IncLockCount();
+ return true;
 }
 }
+ else if (checkRecursiveCase && GetOwningThread() == pCurThread) /* monitor is held, but it could be a recursive case */
+ {
+ m_Recursion++;
+ return true;
+ }
+ return false;
 }
 
 FORCEINLINE AwareLock::EnterHelperResult ObjHeader::EnterObjMonitorHelper(Thread* pCurThread)
 {
- CONTRACTL {
+ CONTRACTL{
 SO_TOLERANT;
 NOTHROW;
 GC_NOTRIGGER;
 MODE_COOPERATIVE;
 } CONTRACTL_END;
 
- DWORD tid = pCurThread->GetThreadId();
+ LONG oldValue = m_SyncBlockValue.LoadWithoutBarrier();
 
- LONG oldvalue = m_SyncBlockValue.LoadWithoutBarrier();
-
- if ((oldvalue & (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX +
- BIT_SBLK_SPIN_LOCK +
- SBLK_MASK_LOCK_THREADID +
- SBLK_MASK_LOCK_RECLEVEL)) == 0)
+ if ((oldValue & (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX +
+ BIT_SBLK_SPIN_LOCK +
+ SBLK_MASK_LOCK_THREADID +
+ SBLK_MASK_LOCK_RECLEVEL)) == 0)
 {
+ DWORD tid = pCurThread->GetThreadId();
 if (tid > SBLK_MASK_LOCK_THREADID)
 {
 return AwareLock::EnterHelperResult_UseSlowPath;
 }
 
- LONG newvalue = oldvalue | tid;
- if (InterlockedCompareExchangeAcquire((LONG*)&m_SyncBlockValue, newvalue, oldvalue) == oldvalue)
+ LONG newValue = oldValue | tid;
+ if (InterlockedCompareExchangeAcquire((LONG*)&m_SyncBlockValue, newValue, oldValue) == oldValue)
 {
 pCurThread->IncLockCount();
 return AwareLock::EnterHelperResult_Entered;
 }
- }
- else
- if (oldvalue & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
- {
- // If we have a hash code already, we need to create a sync block
- if (oldvalue & BIT_SBLK_IS_HASHCODE)
- {
- return AwareLock::EnterHelperResult_UseSlowPath;
- }
-
- SyncBlock *syncBlock = g_pSyncTable [oldvalue & MASK_SYNCBLOCKINDEX].m_SyncBlock;
- _ASSERTE(syncBlock != NULL);
- return syncBlock->m_Monitor.EnterHelper(pCurThread);
+ return AwareLock::EnterHelperResult_Contention;
 }
- else
+
+ if (oldValue & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
 {
- // The header is transitioning - treat this as if the lock was taken
- if (oldvalue & BIT_SBLK_SPIN_LOCK)
+ // If we have a hash code already, we need to create a sync block
+ if (oldValue & BIT_SBLK_IS_HASHCODE)
 {
- return AwareLock::EnterHelperResult_Contention;
+ return AwareLock::EnterHelperResult_UseSlowPath;
 }
 
- // Here we know we have the "thin lock" layout, but the lock is not free.
- // It could still be the recursion case - compare the thread id to check
- if (tid == (DWORD) (oldvalue & SBLK_MASK_LOCK_THREADID))
+ SyncBlock *syncBlock = g_pSyncTable[oldValue & MASK_SYNCBLOCKINDEX].m_SyncBlock;
+ _ASSERTE(syncBlock != NULL);
+ if (syncBlock->m_Monitor.EnterHelper(pCurThread, true /* checkRecursiveCase */))
 {
- // Ok, the thread id matches, it's the recursion case.
- // Bump up the recursion level and check for overflow
- LONG newvalue = oldvalue + SBLK_LOCK_RECLEVEL_INC;
-
- if ((newvalue & SBLK_MASK_LOCK_RECLEVEL) == 0)
- {
- return AwareLock::EnterHelperResult_UseSlowPath;
- }
-
- if (InterlockedCompareExchangeAcquire((LONG*)&m_SyncBlockValue, newvalue, oldvalue) == oldvalue)
- {
- return AwareLock::EnterHelperResult_Entered;
- }
+ return AwareLock::EnterHelperResult_Entered;
 }
- }
 
- return AwareLock::EnterHelperResult_Contention;
-}
+ return AwareLock::EnterHelperResult_Contention;
+ }
 
-inline AwareLock::EnterHelperResult ObjHeader::EnterObjMonitorHelperSpin(Thread* pCurThread)
-{
- CONTRACTL {
- SO_TOLERANT;
- NOTHROW;
- GC_NOTRIGGER;
- MODE_COOPERATIVE;
- } CONTRACTL_END;
+ // The header is transitioning - treat this as if the lock was taken
+ if (oldValue & BIT_SBLK_SPIN_LOCK)
+ {
+ return AwareLock::EnterHelperResult_Contention;
+ }
 
- if (1 == g_SystemInfo.dwNumberOfProcessors)
+ // Here we know we have the "thin lock" layout, but the lock is not free.
+ // It could still be the recursion case - compare the thread id to check
+ if (pCurThread->GetThreadId() != (DWORD)(oldValue & SBLK_MASK_LOCK_THREADID))
 {
 return AwareLock::EnterHelperResult_Contention;
 }
 
- DWORD spincount = g_SpinConstants.dwInitialDuration;
+ // Ok, the thread id matches, it's the recursion case.
+ // Bump up the recursion level and check for overflow
+ LONG newValue = oldValue + SBLK_LOCK_RECLEVEL_INC;
 
- for (;;)
+ if ((newValue & SBLK_MASK_LOCK_RECLEVEL) == 0)
 {
- //
- // exponential backoff
- //
- for (DWORD i = 0; i < spincount; i++)
- {
- YieldProcessor();
- }
-
- AwareLock::EnterHelperResult result = EnterObjMonitorHelper(pCurThread);
- if (result != AwareLock::EnterHelperResult_Contention)
- {
- return result;
- }
+ return AwareLock::EnterHelperResult_UseSlowPath;
+ }
 
- spincount *= g_SpinConstants.dwBackoffFactor;
- if (spincount > g_SpinConstants.dwMaximumDuration)
- {
- break;
- }
+ if (InterlockedCompareExchangeAcquire((LONG*)&m_SyncBlockValue, newValue, oldValue) == oldValue)
+ {
+ return AwareLock::EnterHelperResult_Entered;
 }
 
- return AwareLock::EnterHelperResult_Contention;
+ // Use the slow path instead of spinning. The compare-exchange above would not fail often, and it's not worth forcing the
+ // spin loop that typically follows the call to this function to check the recursive case, so just bail to the slow path.
+ return AwareLock::EnterHelperResult_UseSlowPath;
 }
 
 // Helper encapsulating the core logic for releasing monitor. Returns what kind of
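
[Annotation] SpinWaitAndBackOffBeforeOperation above manages a geometric spin budget: give up once the budget exceeds the maximum, otherwise busy-wait for the current budget and multiply it by the backoff factor. The standalone model below reproduces only that budget arithmetic; the tuning values are invented (the real ones come from g_SpinConstants) and the YieldProcessor() busy-wait is elided.

    #include <cstdio>

    int main()
    {
        const unsigned initialDuration = 50, backoffFactor = 3, maximumDuration = 20000;
        unsigned spinCount = initialDuration;
        int rounds = 0;
        while (spinCount <= maximumDuration) // give up once the budget is exhausted
        {
            // here the real code executes YieldProcessor() 'spinCount' times
            ++rounds;
            spinCount *= backoffFactor;      // exponential backoff between attempts
        }
        std::printf("%d geometric spin rounds before falling back to contention\n", rounds);
    }
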
@@ -185,31 +169,20 @@ FORCEINLINE AwareLock::LeaveHelperAction AwareLock::LeaveHelper(Thread* pCurThre
 pCurThread->m_pTrackSync->LeaveSync(caller, this);
 #endif
 
- if (--m_Recursion != 0)
- {
- return AwareLock::LeaveHelperAction_None;
- }
-
- m_HoldingThread->DecLockCount();
- m_HoldingThread = NULL;
-
- for (;;)
+ if (--m_Recursion == 0)
 {
- // Read existing lock state
- LONG state = m_MonitorHeld.LoadWithoutBarrier();
+ m_HoldingThread->DecLockCount();
+ m_HoldingThread = NULL;
 
 // Clear lock bit.
- if (InterlockedCompareExchangeRelease((LONG*)&m_MonitorHeld, state - 1, state) == state)
+ LONG state = InterlockedDecrementRelease((LONG*)&m_MonitorHeld);
+
+ // If wait count is non-zero on successful clear, we must signal the event.
+ if (state & ~1)
 {
- // If wait count is non-zero on successful clear, we must signal the event.
- if (state & ~1)
- {
- return AwareLock::LeaveHelperAction_Signal;
- }
- break;
+ return AwareLock::LeaveHelperAction_Signal;
 }
 }
-
 return AwareLock::LeaveHelperAction_None;
 }
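
[Annotation] The rewrite above replaces a compare-exchange retry loop with a single InterlockedDecrementRelease: bit 0 of the monitor word is the lock bit and the upper bits count waiters, so one release-ordered decrement both frees the lock and, via the returned value, reports whether waiters remain. A minimal model with std::atomic standing in for the Win32 interlocked call:

    #include <atomic>

    // Returns true when waiters remain and the event must be signaled.
    bool ReleaseAndCheckForWaiters(std::atomic<long>& monitorHeld)
    {
        // fetch_sub returns the value *before* the decrement; subtract 1 to
        // match InterlockedDecrementRelease, which returns the resulting value.
        long state = monitorHeld.fetch_sub(1, std::memory_order_release) - 1;
        return (state & ~1L) != 0; // non-zero wait count => signal the event
    }
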
 
@@ -234,34 +207,34 @@ FORCEINLINE AwareLock::LeaveHelperAction ObjHeader::LeaveObjMonitorHelper(Thread
 return AwareLock::LeaveHelperAction_Error;
 }
 
- if (syncBlockValue & SBLK_MASK_LOCK_RECLEVEL)
+ if (!(syncBlockValue & SBLK_MASK_LOCK_RECLEVEL))
 {
- // recursion and ThinLock
- DWORD newValue = syncBlockValue - SBLK_LOCK_RECLEVEL_INC;
+ // We are leaving the lock
+ DWORD newValue = (syncBlockValue & (~SBLK_MASK_LOCK_THREADID));
 if (InterlockedCompareExchangeRelease((LONG*)&m_SyncBlockValue, newValue, syncBlockValue) != (LONG)syncBlockValue)
 {
 return AwareLock::LeaveHelperAction_Yield;
 }
+ pCurThread->DecLockCount();
 }
 else
 {
- // We are leaving the lock
- DWORD newValue = (syncBlockValue & (~SBLK_MASK_LOCK_THREADID));
+ // recursion and ThinLock
+ DWORD newValue = syncBlockValue - SBLK_LOCK_RECLEVEL_INC;
 if (InterlockedCompareExchangeRelease((LONG*)&m_SyncBlockValue, newValue, syncBlockValue) != (LONG)syncBlockValue)
 {
 return AwareLock::LeaveHelperAction_Yield;
 }
- pCurThread->DecLockCount();
 }
 
 return AwareLock::LeaveHelperAction_None;
 }
 
- if ((syncBlockValue & (BIT_SBLK_SPIN_LOCK + BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_IS_HASHCODE)) == BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
+ if ((syncBlockValue & (BIT_SBLK_SPIN_LOCK + BIT_SBLK_IS_HASHCODE)) == 0)
 {
- SyncBlock *syncBlock = g_pSyncTable [syncBlockValue & MASK_SYNCBLOCKINDEX].m_SyncBlock;
+ _ASSERTE((syncBlockValue & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) != 0);
+ SyncBlock *syncBlock = g_pSyncTable[syncBlockValue & MASK_SYNCBLOCKINDEX].m_SyncBlock;
 _ASSERTE(syncBlock != NULL);
-
 return syncBlock->m_Monitor.LeaveHelper(pCurThread);
 }
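
[Annotation] The reordered LeaveObjMonitorHelper above now tests the common non-recursive case first: releasing a thin lock clears only the thread-id bits with a release-ordered CAS, leaving the hash/syncblock-index and spin-lock bits untouched. A minimal model of that release step (the mask value mirrors SBLK_MASK_LOCK_THREADID and is an assumption here):

    #include <atomic>
    #include <cstdint>

    constexpr uint32_t kThreadIdMask = 0x3FF; // assumed stand-in for SBLK_MASK_LOCK_THREADID

    // Returns false when the CAS loses a race; the caller yields and retries,
    // matching LeaveHelperAction_Yield above.
    bool TryReleaseThinLock(std::atomic<uint32_t>& header, uint32_t observedValue)
    {
        uint32_t newValue = observedValue & ~kThreadIdMask;
        // Release ordering pairs with the acquire CAS used on entry, so the
        // owner's writes are visible to the next acquirer.
        return header.compare_exchange_strong(observedValue, newValue,
                                              std::memory_order_release,
                                              std::memory_order_relaxed);
    }
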