Diffstat (limited to 'src/vm/threads.cpp')
-rw-r--r--  src/vm/threads.cpp  38
1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/src/vm/threads.cpp b/src/vm/threads.cpp
index 1eeadf7ead..59fec2bdc3 100644
--- a/src/vm/threads.cpp
+++ b/src/vm/threads.cpp
@@ -1369,7 +1369,7 @@ void InitThreadManager()
// All patched helpers should fit into one page.
// If you hit this assert on a retail build, there is most likely a problem with the BBT script.
- _ASSERTE_ALL_BUILDS("clr/src/VM/threads.cpp", (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart < PAGE_SIZE);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/threads.cpp", (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart < (ptrdiff_t)GetOsPageSize());
// I am using virtual protect to cover the entire range that this code falls in.
//
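Throughout this patch the compile-time PAGE_SIZE / OS_PAGE_SIZE constants are replaced by the runtime query GetOsPageSize(), so checks like the assert above stay valid on systems whose page size differs from the build-time assumption (for example, 64 KB pages on some ARM64 Linux kernels). The helper below is only a hedged sketch of what such a cached query typically looks like; the name OsPageSizeSketch and the structure are assumptions, not CoreCLR's actual implementation.

#ifdef _WIN32
#include <windows.h>
#else
#include <unistd.h>
#endif
#include <cstddef>

// Hypothetical sketch of a cached runtime page-size query.
static size_t OsPageSizeSketch()
{
    static size_t s_pageSize = 0;   // cached after the first query
    if (s_pageSize == 0)
    {
#ifdef _WIN32
        SYSTEM_INFO si;
        GetSystemInfo(&si);
        s_pageSize = si.dwPageSize;
#else
        s_pageSize = (size_t)sysconf(_SC_PAGESIZE);
#endif
    }
    return s_pageSize;
}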
@@ -2570,7 +2570,7 @@ DWORD WINAPI Thread::intermediateThreadProc(PVOID arg)
WRAPPER_NO_CONTRACT;
m_offset_counter++;
- if (m_offset_counter * offset_multiplier > PAGE_SIZE)
+ if (m_offset_counter * offset_multiplier > (int) GetOsPageSize())
m_offset_counter = 0;
(void)_alloca(m_offset_counter * offset_multiplier);
@@ -2685,11 +2685,11 @@ BOOL Thread::CreateNewOSThread(SIZE_T sizeToCommitOrReserve, LPTHREAD_START_ROUT
dwCreationFlags |= STACK_SIZE_PARAM_IS_A_RESERVATION;
#ifndef FEATURE_PAL // the PAL does its own adjustments as necessary
- if (sizeToCommitOrReserve != 0 && sizeToCommitOrReserve <= OS_PAGE_SIZE)
+ if (sizeToCommitOrReserve != 0 && sizeToCommitOrReserve <= GetOsPageSize())
{
// On Windows, passing a value that is <= one page size bizarrely causes the OS to use the default stack size instead of
// a minimum, which is undesirable. This adjustment fixes that issue to use a minimum stack size (typically 64 KB).
- sizeToCommitOrReserve = OS_PAGE_SIZE + 1;
+ sizeToCommitOrReserve = GetOsPageSize() + 1;
}
#endif // !FEATURE_PAL
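As a standalone illustration of the adjustment above: passing a reservation of at most one page to CreateThread with STACK_SIZE_PARAM_IS_A_RESERVATION makes Windows fall back to the executable's default stack size (typically much larger), so bumping the request one byte past a page keeps it at the OS minimum instead. This is a sketch with an assumed 4 KB page size and a hypothetical wrapper name, not the VM's code.

#include <windows.h>

// Hypothetical wrapper showing why the "+ 1" matters for tiny reservations.
HANDLE CreateWorkerSketch(LPTHREAD_START_ROUTINE start, void* arg, SIZE_T sizeToReserve)
{
    SIZE_T pageSize = 0x1000;                        // stand-in for GetOsPageSize()
    if (sizeToReserve != 0 && sizeToReserve <= pageSize)
    {
        // A request of <= one page would silently fall back to the default
        // stack size; one byte more keeps it at the OS minimum instead.
        sizeToReserve = pageSize + 1;
    }
    return CreateThread(NULL, sizeToReserve, start, arg,
                        STACK_SIZE_PARAM_IS_A_RESERVATION, NULL);
}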
@@ -6518,7 +6518,7 @@ void Thread::HandleThreadInterrupt (BOOL fWaitForADUnload)
}
#ifdef _DEBUG
-#define MAXSTACKBYTES (2 * PAGE_SIZE)
+#define MAXSTACKBYTES (2 * GetOsPageSize())
void CleanStackForFastGCStress ()
{
CONTRACTL {
@@ -7112,16 +7112,16 @@ HRESULT Thread::CLRSetThreadStackGuarantee(SetThreadStackGuaranteeScope fScope)
int ThreadGuardPages = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ThreadGuardPages);
if (ThreadGuardPages == 0)
{
- uGuardSize += (EXTRA_PAGES * PAGE_SIZE);
+ uGuardSize += (EXTRA_PAGES * GetOsPageSize());
}
else
{
- uGuardSize += (ThreadGuardPages * PAGE_SIZE);
+ uGuardSize += (ThreadGuardPages * GetOsPageSize());
}
#else // _WIN64
#ifdef _DEBUG
- uGuardSize += (1 * PAGE_SIZE); // one extra page for debug infrastructure
+ uGuardSize += (1 * GetOsPageSize()); // one extra page for debug infrastructure
#endif // _DEBUG
#endif // _WIN64
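A worked example of the arithmetic above, under stated assumptions (4 KB pages; EXTRA_PAGES taken as 2 purely for illustration, its real value is not reproduced here): the guarantee grows by whole pages, either the built-in extra or the ThreadGuardPages config override.

#include <cstddef>

// Hypothetical sketch of how the requested stack guarantee scales with page size.
size_t ComputeGuardSizeSketch(size_t baseGuarantee, int threadGuardPagesConfig)
{
    const size_t pageSize   = 0x1000;   // stand-in for GetOsPageSize()
    const size_t extraPages = 2;        // stand-in for EXTRA_PAGES, value assumed

    size_t guard = baseGuarantee;
    if (threadGuardPagesConfig == 0)
        guard += extraPages * pageSize;                       // 2 * 4 KB = 8 KB extra
    else
        guard += (size_t)threadGuardPagesConfig * pageSize;   // config override
    return guard;
}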
@@ -7165,14 +7165,14 @@ UINT_PTR Thread::GetLastNormalStackAddress(UINT_PTR StackLimit)
UINT_PTR cbStackGuarantee = GetStackGuarantee();
// Here we take the "hard guard region size", the "stack guarantee" and the "fault page" and add them
- // all together. Note that the "fault page" is the reason for the extra OS_PAGE_SIZE below. The OS
+ // all together. Note that the "fault page" is the reason for the extra GetOsPageSize() below. The OS
// will guarantee us a certain amount of stack remaining after a stack overflow. This is called the
// "stack guarantee". But to do this, it has to fault on the page before that region as the app is
// allowed to fault at the very end of that page. So, as a result, the last normal stack address is
// one page sooner.
return StackLimit + (cbStackGuarantee
#ifndef FEATURE_PAL
- + OS_PAGE_SIZE
+ + GetOsPageSize()
#endif // !FEATURE_PAL
+ HARD_GUARD_REGION_SIZE);
}
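A minimal sketch of the address arithmetic described in that comment, with assumed sizes (4 KB pages, a three-page stack guarantee, a one-page hard guard region); the real code uses GetStackGuarantee(), GetOsPageSize() and HARD_GUARD_REGION_SIZE.

#include <cstdint>

// Hypothetical illustration of the last-normal-stack-address computation.
uintptr_t LastNormalStackAddressSketch(uintptr_t stackLimit)
{
    const uintptr_t pageSize        = 0x1000;        // stand-in for GetOsPageSize()
    const uintptr_t stackGuarantee  = 3 * pageSize;  // stand-in for GetStackGuarantee()
    const uintptr_t hardGuardRegion = 1 * pageSize;  // stand-in for HARD_GUARD_REGION_SIZE

    // Everything below this address is reserved for stack-overflow handling:
    // the hard guard region, the guaranteed scratch area, and the one fault
    // page the OS needs to trip in order to deliver the guarantee.
    return stackLimit + stackGuarantee + pageSize + hardGuardRegion;
}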
@@ -7273,7 +7273,7 @@ static void DebugLogStackRegionMBIs(UINT_PTR uLowAddress, UINT_PTR uHighAddress)
UINT_PTR uRegionSize = uStartOfNextRegion - uStartOfThisRegion;
- LOG((LF_EH, LL_INFO1000, "0x%p -> 0x%p (%d pg) ", uStartOfThisRegion, uStartOfNextRegion - 1, uRegionSize / OS_PAGE_SIZE));
+ LOG((LF_EH, LL_INFO1000, "0x%p -> 0x%p (%d pg) ", uStartOfThisRegion, uStartOfNextRegion - 1, uRegionSize / GetOsPageSize()));
DebugLogMBIFlags(meminfo.State, meminfo.Protect);
LOG((LF_EH, LL_INFO1000, "\n"));
@@ -7312,7 +7312,7 @@ void Thread::DebugLogStackMBIs()
UINT_PTR uStackSize = uStackBase - uStackLimit;
LOG((LF_EH, LL_INFO1000, "----------------------------------------------------------------------\n"));
- LOG((LF_EH, LL_INFO1000, "Stack Snapshot 0x%p -> 0x%p (%d pg)\n", uStackLimit, uStackBase, uStackSize / OS_PAGE_SIZE));
+ LOG((LF_EH, LL_INFO1000, "Stack Snapshot 0x%p -> 0x%p (%d pg)\n", uStackLimit, uStackBase, uStackSize / GetOsPageSize()));
if (pThread)
{
LOG((LF_EH, LL_INFO1000, "Last normal addr: 0x%p\n", pThread->GetLastNormalStackAddress()));
@@ -7534,7 +7534,7 @@ BOOL Thread::CanResetStackTo(LPCVOID stackPointer)
// We need to have enough space to call back into the EE from the handler, so we use twice the entry point amount.
// We need enough to do work and enough that partway through that work we won't probe and COMPlusThrowSO.
- const INT_PTR iStackSizeThreshold = (ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT * 2) * OS_PAGE_SIZE);
+ const INT_PTR iStackSizeThreshold = (ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT * 2) * GetOsPageSize());
if (iStackSpaceLeft > iStackSizeThreshold)
{
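A sketch of that threshold check, with stand-in values for the probe macros (ADJUST_PROBE and DEFAULT_ENTRY_PROBE_AMOUNT are not reproduced here) and an assumed 4 KB page size.

#include <cstdint>

// Hypothetical illustration: require twice the entry-probe amount so the SO
// handler can call back into the EE and finish without tripping another probe.
bool CanResetStackToSketch(intptr_t stackSpaceLeft)
{
    const intptr_t pageSize        = 0x1000;   // stand-in for GetOsPageSize()
    const intptr_t entryProbePages = 4;        // stand-in for the adjusted probe amount, value assumed

    const intptr_t threshold = 2 * entryProbePages * pageSize;
    return stackSpaceLeft > threshold;
}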
@@ -7577,7 +7577,7 @@ BOOL Thread::IsStackSpaceAvailable(float numPages)
// If we have access to the stack guarantee (either in the guard region or we've tripped the guard page), then
// use that.
- if ((iStackSpaceLeft/OS_PAGE_SIZE) < numPages && !DetermineIfGuardPagePresent())
+ if ((iStackSpaceLeft/GetOsPageSize()) < numPages && !DetermineIfGuardPagePresent())
{
UINT_PTR stackGuarantee = GetStackGuarantee();
// GetLastNormalStackAddress actually returns the 2nd to last stack page on the stack. We'll add that to our available
@@ -7585,9 +7585,9 @@ BOOL Thread::IsStackSpaceAvailable(float numPages)
//
// All these values are OS supplied, and will never overflow. (If they do, that means the stack is on the order
// of a GB or more, which isn't possible.)
- iStackSpaceLeft += stackGuarantee + OS_PAGE_SIZE;
+ iStackSpaceLeft += stackGuarantee + GetOsPageSize();
}
- if ((iStackSpaceLeft/OS_PAGE_SIZE) < numPages)
+ if ((iStackSpaceLeft/GetOsPageSize()) < numPages)
{
return FALSE;
}
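A hedged sketch of the availability check above (the names are stand-ins, not the real Thread members): the stack guarantee is credited only when the guard page has already been consumed, and the result is compared in whole pages.

#include <cstdint>

// Hypothetical illustration of the page-count comparison.
bool StackSpaceAvailableSketch(intptr_t spaceLeft, float numPages,
                               bool guardPagePresent, intptr_t stackGuarantee)
{
    const intptr_t pageSize = 0x1000;                // stand-in for GetOsPageSize()

    if ((spaceLeft / pageSize) < numPages && !guardPagePresent)
    {
        // The guard page has been tripped, so the OS-guaranteed region (plus the
        // fault page in front of it) is usable stack too.
        spaceLeft += stackGuarantee + pageSize;
    }
    return (spaceLeft / pageSize) >= numPages;
}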
@@ -7723,13 +7723,13 @@ VOID Thread::RestoreGuardPage()
// to change the size of the guard region, we'll just go ahead and protect the next page down from where we are
// now. The guard page will get pushed forward again, just like normal, until the next stack overflow.
approxStackPointer = (UINT_PTR)GetCurrentSP();
- guardPageBase = (UINT_PTR)ALIGN_DOWN(approxStackPointer, OS_PAGE_SIZE) - OS_PAGE_SIZE;
+ guardPageBase = (UINT_PTR)ALIGN_DOWN(approxStackPointer, GetOsPageSize()) - GetOsPageSize();
// The OS uses a soft guard page to update the stack info in the TEB. If our guard page is not beyond the current stack,
// the TEB will not be updated, and the OS's check of the stack during an exception will fail.
if (approxStackPointer >= guardPageBase)
{
- guardPageBase -= OS_PAGE_SIZE;
+ guardPageBase -= GetOsPageSize();
}
// If we're currently "too close" to the page we want to mark as a guard then the call to VirtualProtect to set
// PAGE_GUARD will fail, but it won't return an error. Therefore, we protect the page, then query it to make
@@ -7759,7 +7759,7 @@ VOID Thread::RestoreGuardPage()
}
else
{
- guardPageBase -= OS_PAGE_SIZE;
+ guardPageBase -= GetOsPageSize();
}
}
}
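A sketch of the guard-page placement above, with a local AlignDown standing in for the VM's ALIGN_DOWN macro and an assumed 4 KB page size; the function name and structure are illustrative only.

#include <cstdint>

static uintptr_t AlignDown(uintptr_t value, uintptr_t alignment)
{
    return value & ~(alignment - 1);                 // alignment must be a power of two
}

// Hypothetical illustration of where the new guard page is placed.
uintptr_t GuardPageBaseSketch(uintptr_t approxStackPointer)
{
    const uintptr_t pageSize = 0x1000;               // stand-in for GetOsPageSize()

    // Start one full page below the page the stack pointer currently lives on.
    uintptr_t guardPageBase = AlignDown(approxStackPointer, pageSize) - pageSize;

    // Back off one more page if SP is at or above the candidate, so the OS's
    // soft guard page can update the stack bounds in the TEB before ours is hit.
    if (approxStackPointer >= guardPageBase)
    {
        guardPageBase -= pageSize;
    }
    return guardPageBase;
}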