author    Steve MacLean <sdmaclea.qdt@qualcommdatacenter.com>  2017-06-12 17:14:12 -0400
committer Maoni Stephens <Maoni0@users.noreply.github.com>     2017-06-12 14:14:12 -0700
commit    0ee3b5e64a98dc71aefed2304fe4bcf7f66ca9f5 (patch)
tree      8f099e2391d8990de1c7d0f4ca4c3fcae6839e4d /src/vm
parent    c655981474be1d3aa0165408e5c3914c5cfc35a1 (diff)
download  coreclr-0ee3b5e64a98dc71aefed2304fe4bcf7f66ca9f5.tar.gz
          coreclr-0ee3b5e64a98dc71aefed2304fe4bcf7f66ca9f5.tar.bz2
          coreclr-0ee3b5e64a98dc71aefed2304fe4bcf7f66ca9f5.zip
[Arm64/Unix] Add 64K page support (#10981)
* [Arm64/Unix] Support 64K pages
* GC: move GCToOSInterface::Initialize() into InitializeGarbageCollector()
Diffstat (limited to 'src/vm')
-rw-r--r--  src/vm/appdomain.hpp             | 12
-rw-r--r--  src/vm/ceemain.cpp               |  4
-rw-r--r--  src/vm/codeman.h                 |  8
-rw-r--r--  src/vm/corhost.cpp               |  4
-rw-r--r--  src/vm/debughelp.cpp             |  6
-rw-r--r--  src/vm/dynamicmethod.cpp         |  6
-rw-r--r--  src/vm/eetwain.cpp               |  8
-rw-r--r--  src/vm/excep.cpp                 |  2
-rw-r--r--  src/vm/exceptionhandling.h       |  2
-rw-r--r--  src/vm/frames.cpp                |  6
-rw-r--r--  src/vm/gcenv.h                   |  6
-rw-r--r--  src/vm/gcenv.os.cpp              | 12
-rw-r--r--  src/vm/gcenv.unix.inl            |  5
-rw-r--r--  src/vm/gcenv.windows.inl         |  5
-rw-r--r--  src/vm/generics.cpp              |  4
-rw-r--r--  src/vm/i386/jitinterfacex86.cpp  |  4
-rw-r--r--  src/vm/jitinterface.cpp          |  2
-rw-r--r--  src/vm/jitinterface.h            |  2
-rw-r--r--  src/vm/loaderallocator.cpp       | 16
-rw-r--r--  src/vm/peimagelayout.cpp         |  6
-rw-r--r--  src/vm/reflectioninvocation.cpp  |  2
-rw-r--r--  src/vm/siginfo.cpp               |  8
-rw-r--r--  src/vm/stackprobe.cpp            | 16
-rw-r--r--  src/vm/syncblk.cpp               |  4
-rw-r--r--  src/vm/threads.cpp               | 38
-rw-r--r--  src/vm/threads.h                 |  6
-rw-r--r--  src/vm/virtualcallstub.cpp       | 54
-rw-r--r--  src/vm/win32threadpool.cpp       |  2
28 files changed, 137 insertions(+), 113 deletions(-)
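
Note: the change is mechanical but pervasive — every compile-time PAGE_SIZE / OS_PAGE_SIZE constant below becomes a runtime GetOsPageSize() call, because Arm64 Linux kernels may be configured with 4K, 16K, or 64K pages. The following is a minimal sketch of the query-once-and-cache pattern such a helper typically uses; it is an illustration only, not the CoreCLR implementation (the "Sketch" name is hypothetical):

#include <cstdint>
#include <cstdio>
#ifdef _WIN32
#include <windows.h>
#else
#include <unistd.h>
#endif

// Query the OS page size once, then serve every later call from the cache.
static uint32_t GetOsPageSizeSketch()
{
    static const uint32_t s_pageSize = []() -> uint32_t {
#ifdef _WIN32
        SYSTEM_INFO si;
        GetSystemInfo(&si);                      // page size arrives in dwPageSize
        return si.dwPageSize;
#else
        return (uint32_t)sysconf(_SC_PAGESIZE);  // 4K/16K/64K depending on the kernel
#endif
    }();
    return s_pageSize;
}

int main()
{
    printf("page size: %u bytes\n", GetOsPageSizeSketch());
    return 0;
}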
diff --git a/src/vm/appdomain.hpp b/src/vm/appdomain.hpp
index adf668413c..edd638ed31 100644
--- a/src/vm/appdomain.hpp
+++ b/src/vm/appdomain.hpp
@@ -808,14 +808,14 @@ private:
// set) and being able to specify specific versions.
//
-#define LOW_FREQUENCY_HEAP_RESERVE_SIZE (3 * PAGE_SIZE)
-#define LOW_FREQUENCY_HEAP_COMMIT_SIZE (1 * PAGE_SIZE)
+#define LOW_FREQUENCY_HEAP_RESERVE_SIZE (3 * GetOsPageSize())
+#define LOW_FREQUENCY_HEAP_COMMIT_SIZE (1 * GetOsPageSize())
-#define HIGH_FREQUENCY_HEAP_RESERVE_SIZE (10 * PAGE_SIZE)
-#define HIGH_FREQUENCY_HEAP_COMMIT_SIZE (1 * PAGE_SIZE)
+#define HIGH_FREQUENCY_HEAP_RESERVE_SIZE (10 * GetOsPageSize())
+#define HIGH_FREQUENCY_HEAP_COMMIT_SIZE (1 * GetOsPageSize())
-#define STUB_HEAP_RESERVE_SIZE (3 * PAGE_SIZE)
-#define STUB_HEAP_COMMIT_SIZE (1 * PAGE_SIZE)
+#define STUB_HEAP_RESERVE_SIZE (3 * GetOsPageSize())
+#define STUB_HEAP_COMMIT_SIZE (1 * GetOsPageSize())
// --------------------------------------------------------------------------------
// PE File List lock - for creating list locks on PE files
diff --git a/src/vm/ceemain.cpp b/src/vm/ceemain.cpp
index ec16bdd153..a618492637 100644
--- a/src/vm/ceemain.cpp
+++ b/src/vm/ceemain.cpp
@@ -1081,8 +1081,8 @@ void EEStartupHelper(COINITIEE fFlags)
#ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS
// retrieve configured max size for the mini-metadata buffer (defaults to 64KB)
g_MiniMetaDataBuffMaxSize = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_MiniMdBufferCapacity);
- // align up to OS_PAGE_SIZE, with a maximum of 1 MB
- g_MiniMetaDataBuffMaxSize = (DWORD) min(ALIGN_UP(g_MiniMetaDataBuffMaxSize, OS_PAGE_SIZE), 1024 * 1024);
+ // align up to GetOsPageSize(), with a maximum of 1 MB
+ g_MiniMetaDataBuffMaxSize = (DWORD) min(ALIGN_UP(g_MiniMetaDataBuffMaxSize, GetOsPageSize()), 1024 * 1024);
// allocate the buffer. this is never touched while the process is running, so it doesn't
// contribute to the process' working set. it is needed only as a "shadow" for a mini-metadata
// buffer that will be set up and reported / updated in the Watson process (the
diff --git a/src/vm/codeman.h b/src/vm/codeman.h
index cca5f5e2d2..f85eeb59db 100644
--- a/src/vm/codeman.h
+++ b/src/vm/codeman.h
@@ -91,10 +91,8 @@ typedef struct
} EH_CLAUSE_ENUMERATOR;
class EECodeInfo;
-#define PAGE_MASK (PAGE_SIZE-1)
-#define PAGE_ALIGN ~(PAGE_MASK)
-#define ROUND_DOWN_TO_PAGE(x) ( (size_t) (x) & PAGE_ALIGN)
-#define ROUND_UP_TO_PAGE(x) (((size_t) (x) + PAGE_MASK) & PAGE_ALIGN)
+#define ROUND_DOWN_TO_PAGE(x) ( (size_t) (x) & ~((size_t)GetOsPageSize()-1))
+#define ROUND_UP_TO_PAGE(x) (((size_t) (x) + (GetOsPageSize()-1)) & ~((size_t)GetOsPageSize()-1))
enum StubCodeBlockKind : int
{
@@ -463,7 +461,7 @@ typedef struct _HeapList
TADDR startAddress;
TADDR endAddress; // the current end of the used portion of the Heap
- TADDR mapBase; // "startAddress" rounded down to PAGE_SIZE. pHdrMap is relative to this address
+ TADDR mapBase; // "startAddress" rounded down to GetOsPageSize(). pHdrMap is relative to this address
PTR_DWORD pHdrMap; // bit array used to find the start of methods
size_t maxCodeHeapSize;// Size of the entire contiguous block of memory
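
Note: with PAGE_MASK and PAGE_ALIGN gone, the rewritten ROUND_DOWN_TO_PAGE / ROUND_UP_TO_PAGE recompute the mask from the runtime page size on each use, which is valid because page sizes are powers of two. A worked example of the same arithmetic, assuming a 64K page in place of GetOsPageSize():

#include <cstddef>
#include <cstdio>

int main()
{
    const size_t pageSize = 0x10000;              // stand-in for GetOsPageSize() == 64K
    const size_t mask     = ~(pageSize - 1);      // high bits only; power-of-two trick

    size_t x       = 0x12345;
    size_t down    = x & mask;                    // 0x10000: ROUND_DOWN_TO_PAGE(x)
    size_t up      = (x + (pageSize - 1)) & mask; // 0x20000: ROUND_UP_TO_PAGE(x)
    bool   aligned = (x & (pageSize - 1)) == 0;   // the IS_ALIGNED test used in dynamicmethod.cpp

    printf("down=%#zx up=%#zx aligned=%d\n", down, up, (int)aligned);
    return 0;
}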
diff --git a/src/vm/corhost.cpp b/src/vm/corhost.cpp
index 9eb895e86d..74c42d3a85 100644
--- a/src/vm/corhost.cpp
+++ b/src/vm/corhost.cpp
@@ -4227,9 +4227,9 @@ BOOL STDMETHODCALLTYPE CExecutionEngine::ClrVirtualProtect(LPVOID lpAddress,
//
// because the section following UEF will also be included in the region size
// if it has the same protection as the UEF section.
- DWORD dwUEFSectionPageCount = ((pUEFSection->Misc.VirtualSize + OS_PAGE_SIZE - 1)/OS_PAGE_SIZE);
+ DWORD dwUEFSectionPageCount = ((pUEFSection->Misc.VirtualSize + GetOsPageSize() - 1)/GetOsPageSize());
- BYTE* pAddressOfFollowingSection = pStartOfUEFSection + (OS_PAGE_SIZE * dwUEFSectionPageCount);
+ BYTE* pAddressOfFollowingSection = pStartOfUEFSection + (GetOsPageSize() * dwUEFSectionPageCount);
// Ensure that the section following us is having different memory protection
MEMORY_BASIC_INFORMATION nextSectionInfo;
diff --git a/src/vm/debughelp.cpp b/src/vm/debughelp.cpp
index 3e66f14047..376b88cd42 100644
--- a/src/vm/debughelp.cpp
+++ b/src/vm/debughelp.cpp
@@ -73,10 +73,10 @@ BOOL isMemoryReadable(const TADDR start, unsigned len)
// Now we have to loop thru each and every page in between and touch them.
//
location = start;
- while (len > PAGE_SIZE)
+ while (len > GetOsPageSize())
{
- location += PAGE_SIZE;
- len -= PAGE_SIZE;
+ location += GetOsPageSize();
+ len -= GetOsPageSize();
#ifdef DACCESS_COMPILE
if (DacReadAll(location, &buff, 1, false) != S_OK)
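
Note: isMemoryReadable strides through the range one page at a time, touching a byte per page, so the only change needed was the stride itself. A self-contained sketch of that shape — the Probe helper and the test buffer are hypothetical, and the real code reads through DacReadAll under DACCESS_COMPILE as shown above:

#include <cstddef>
#include <cstdint>
#include <cstdio>

static char g_buf[4 * 4096];                     // pretend address range to probe

// Hypothetical reader: a byte is "readable" iff it falls inside g_buf.
static bool Probe(uintptr_t p)
{
    return p >= (uintptr_t)g_buf && p < (uintptr_t)g_buf + sizeof(g_buf);
}

// First byte, one byte per subsequent page, then the final byte of the range.
static bool TouchEveryPage(uintptr_t start, size_t len, size_t pageSize)
{
    if (len == 0 || !Probe(start))
        return false;
    uintptr_t location = start;
    while (len > pageSize)
    {
        location += pageSize;
        len -= pageSize;
        if (!Probe(location))
            return false;
    }
    return Probe(location + len - 1);
}

int main()
{
    printf("in range: %d\n", (int)TouchEveryPage((uintptr_t)g_buf, sizeof(g_buf), 4096));
    printf("overrun:  %d\n", (int)TouchEveryPage((uintptr_t)g_buf, sizeof(g_buf) + 1, 4096));
    return 0;
}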
diff --git a/src/vm/dynamicmethod.cpp b/src/vm/dynamicmethod.cpp
index 43f4c696df..acfea3e7f6 100644
--- a/src/vm/dynamicmethod.cpp
+++ b/src/vm/dynamicmethod.cpp
@@ -330,7 +330,7 @@ HeapList* HostCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, EEJitManager
size_t MaxCodeHeapSize = pInfo->getRequestSize();
size_t ReserveBlockSize = MaxCodeHeapSize + sizeof(HeapList);
- ReserveBlockSize += sizeof(TrackAllocation) + PAGE_SIZE; // make sure we have enough for the allocation
+ ReserveBlockSize += sizeof(TrackAllocation) + GetOsPageSize(); // make sure we have enough for the allocation
// take a conservative size for the nibble map, we may change that later if appropriate
size_t nibbleMapSize = ROUND_UP_TO_PAGE(HEAP2MAPSIZE(ROUND_UP_TO_PAGE(ALIGN_UP(ReserveBlockSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY))));
size_t heapListSize = (sizeof(HeapList) + CODE_SIZE_ALIGN - 1) & (~(CODE_SIZE_ALIGN - 1));
@@ -343,7 +343,7 @@ HeapList* HostCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, EEJitManager
(HostCodeHeap*)pCodeHeap, ReserveBlockSize, pCodeHeap->m_TotalBytesAvailable, reservedData, nibbleMapSize));
BYTE *pBuffer = pCodeHeap->InitCodeHeapPrivateData(ReserveBlockSize, reservedData, nibbleMapSize);
- _ASSERTE(((size_t)pBuffer & PAGE_MASK) == 0);
+ _ASSERTE(IS_ALIGNED(pBuffer, GetOsPageSize()));
LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap creation {0x%p} - base addr 0x%p, size available 0x%p, nibble map ptr 0x%p\n",
(HostCodeHeap*)pCodeHeap, pCodeHeap->m_pBaseAddr, pCodeHeap->m_TotalBytesAvailable, pBuffer));
@@ -754,7 +754,7 @@ void* HostCodeHeap::AllocMemory_NoThrow(size_t size, DWORD alignment)
}
_ASSERTE(size > availableInFreeList);
size_t sizeToCommit = size - availableInFreeList;
- sizeToCommit = (size + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1)); // round up to page
+ sizeToCommit = ROUND_UP_TO_PAGE(size); // round up to page
if (m_pLastAvailableCommittedAddr + sizeToCommit <= m_pBaseAddr + m_TotalBytesAvailable)
{
diff --git a/src/vm/eetwain.cpp b/src/vm/eetwain.cpp
index 4a02be7b82..99e9107189 100644
--- a/src/vm/eetwain.cpp
+++ b/src/vm/eetwain.cpp
@@ -3019,12 +3019,12 @@ unsigned SKIP_ALLOC_FRAME(int size, PTR_CBYTE base, unsigned offset)
return (SKIP_PUSH_REG(base, offset));
}
- if (size >= OS_PAGE_SIZE)
+ if (size >= (int)GetOsPageSize())
{
- if (size < (3 * OS_PAGE_SIZE))
+ if (size < int(3 * GetOsPageSize()))
{
- // add 7 bytes for one or two TEST EAX, [ESP+OS_PAGE_SIZE]
- offset += (size / OS_PAGE_SIZE) * 7;
+ // add 7 bytes for one or two TEST EAX, [ESP+GetOsPageSize()]
+ offset += (size / GetOsPageSize()) * 7;
}
else
{
diff --git a/src/vm/excep.cpp b/src/vm/excep.cpp
index 99ebe6d8ea..af2554e324 100644
--- a/src/vm/excep.cpp
+++ b/src/vm/excep.cpp
@@ -57,7 +57,7 @@
// Windows uses 64kB as the null-reference area
#define NULL_AREA_SIZE (64 * 1024)
#else // !FEATURE_PAL
-#define NULL_AREA_SIZE OS_PAGE_SIZE
+#define NULL_AREA_SIZE GetOsPageSize()
#endif // !FEATURE_PAL
#ifndef CROSSGEN_COMPILE
diff --git a/src/vm/exceptionhandling.h b/src/vm/exceptionhandling.h
index 02788e7ef8..27981e6c32 100644
--- a/src/vm/exceptionhandling.h
+++ b/src/vm/exceptionhandling.h
@@ -797,7 +797,7 @@ private:
{
//
// Due to the unexpected growth of the ExceptionTracker struct,
- // OS_PAGE_SIZE does not seem appropriate anymore on x64, and
+ // GetOsPageSize() does not seem appropriate anymore on x64, and
// we should behave the same on x64 as on ia64 regardless of
// the difference between the page sizes on the platforms.
//
diff --git a/src/vm/frames.cpp b/src/vm/frames.cpp
index fa5c7875eb..6598357a6a 100644
--- a/src/vm/frames.cpp
+++ b/src/vm/frames.cpp
@@ -417,13 +417,13 @@ VOID Frame::Push(Thread *pThread)
m_Next = pThread->GetFrame();
- // PAGE_SIZE is used to relax the assert for cases where two Frames are
+ // GetOsPageSize() is used to relax the assert for cases where two Frames are
// declared in the same source function. We cannot predict the order
// in which the C compiler will lay them out in the stack frame.
- // So PAGE_SIZE is a guess of the maximum stack frame size of any method
+ // So GetOsPageSize() is a guess of the maximum stack frame size of any method
// with multiple Frames in mscorwks.dll
_ASSERTE(((m_Next == FRAME_TOP) ||
- (PBYTE(m_Next) + (2 * PAGE_SIZE)) > PBYTE(this)) &&
+ (PBYTE(m_Next) + (2 * GetOsPageSize())) > PBYTE(this)) &&
"Pushing a frame out of order ?");
_ASSERTE(// If AssertOnFailFast is set, the test expects to do stack overrun
diff --git a/src/vm/gcenv.h b/src/vm/gcenv.h
index 865eb288e2..767adb892c 100644
--- a/src/vm/gcenv.h
+++ b/src/vm/gcenv.h
@@ -44,6 +44,12 @@
#include "gcenv.interlocked.h"
#include "gcenv.interlocked.inl"
+#ifdef PLATFORM_UNIX
+#include "gcenv.unix.inl"
+#else
+#include "gcenv.windows.inl"
+#endif
+
namespace ETW
{
typedef enum _GC_ROOT_KIND {
diff --git a/src/vm/gcenv.os.cpp b/src/vm/gcenv.os.cpp
index abacc3c76d..8572551a06 100644
--- a/src/vm/gcenv.os.cpp
+++ b/src/vm/gcenv.os.cpp
@@ -26,12 +26,22 @@
#define MAX_PTR ((uint8_t*)(~(ptrdiff_t)0))
+#ifdef FEATURE_PAL
+uint32_t g_pageSizeUnixInl = 0;
+#endif
+
+
// Initialize the interface implementation
// Return:
// true if it has succeeded, false if it has failed
bool GCToOSInterface::Initialize()
{
LIMITED_METHOD_CONTRACT;
+
+#ifdef FEATURE_PAL
+ g_pageSizeUnixInl = GetOsPageSize();
+#endif
+
return true;
}
@@ -299,7 +309,7 @@ bool GCToOSInterface::GetWriteWatch(bool resetState, void* address, size_t size,
ULONG granularity;
bool success = ::GetWriteWatch(flags, address, size, pageAddresses, (ULONG_PTR*)pageAddressesCount, &granularity) == 0;
- _ASSERTE (granularity == OS_PAGE_SIZE);
+ _ASSERTE (granularity == GetOsPageSize());
return success;
}
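
Note: on FEATURE_PAL builds, GCToOSInterface::Initialize() now snapshots the page size into g_pageSizeUnixInl, and per the commit message this initialization moved into InitializeGarbageCollector() so the cache is filled before the GC needs it. A plausible shape for the pattern — an assumption, since the real inline reader lives in src/gc/env/gcenv.unix.inl, pulled in through the new src/vm shims below:

#include <cstdint>
#include <cstdio>

// Stand-in for the global added by this patch; written once at startup.
static uint32_t g_pageSizeUnixInl = 0;

// Sketch of the inline reader: a plain load, no syscall per query.
static inline uint32_t CachedPageSize()
{
    return g_pageSizeUnixInl;
}

// Stand-in for the caching GCToOSInterface::Initialize() now performs.
static void InitializeSketch(uint32_t osPageSize)
{
    g_pageSizeUnixInl = osPageSize;
}

int main()
{
    InitializeSketch(64 * 1024);                 // pretend a 64K-page Arm64 kernel
    printf("cached page size: %u\n", CachedPageSize());
    return 0;
}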
diff --git a/src/vm/gcenv.unix.inl b/src/vm/gcenv.unix.inl
new file mode 100644
index 0000000000..7523864c7d
--- /dev/null
+++ b/src/vm/gcenv.unix.inl
@@ -0,0 +1,5 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include "../gc/env/gcenv.unix.inl"
diff --git a/src/vm/gcenv.windows.inl b/src/vm/gcenv.windows.inl
new file mode 100644
index 0000000000..aeb35f6b20
--- /dev/null
+++ b/src/vm/gcenv.windows.inl
@@ -0,0 +1,5 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include "../gc/env/gcenv.windows.inl"
diff --git a/src/vm/generics.cpp b/src/vm/generics.cpp
index 63d95a0e61..51e6d7bbac 100644
--- a/src/vm/generics.cpp
+++ b/src/vm/generics.cpp
@@ -146,9 +146,9 @@ TypeHandle ClassLoader::LoadCanonicalGenericInstantiation(TypeKey *pTypeKey,
TypeHandle ret = TypeHandle();
DECLARE_INTERIOR_STACK_PROBE;
#ifndef DACCESS_COMPILE
- if ((dwAllocSize/PAGE_SIZE+1) >= 2)
+ if ((dwAllocSize/GetOsPageSize()+1) >= 2)
{
- DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD((10+dwAllocSize/PAGE_SIZE+1), NO_FORBIDGC_LOADER_USE_ThrowSO(););
+ DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD((10+dwAllocSize/GetOsPageSize()+1), NO_FORBIDGC_LOADER_USE_ThrowSO(););
}
#endif // DACCESS_COMPILE
TypeHandle *repInst = (TypeHandle*) _alloca(dwAllocSize);
diff --git a/src/vm/i386/jitinterfacex86.cpp b/src/vm/i386/jitinterfacex86.cpp
index 18acbf0126..4ffed2d2bd 100644
--- a/src/vm/i386/jitinterfacex86.cpp
+++ b/src/vm/i386/jitinterfacex86.cpp
@@ -1530,8 +1530,8 @@ void InitJITHelpers1()
// All write barrier helpers should fit into one page.
// If you hit this assert on retail build, there is most likely problem with BBT script.
- _ASSERTE_ALL_BUILDS("clr/src/VM/i386/JITinterfaceX86.cpp", (BYTE*)JIT_WriteBarrierGroup_End - (BYTE*)JIT_WriteBarrierGroup < PAGE_SIZE);
- _ASSERTE_ALL_BUILDS("clr/src/VM/i386/JITinterfaceX86.cpp", (BYTE*)JIT_PatchedWriteBarrierGroup_End - (BYTE*)JIT_PatchedWriteBarrierGroup < PAGE_SIZE);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/i386/JITinterfaceX86.cpp", (BYTE*)JIT_WriteBarrierGroup_End - (BYTE*)JIT_WriteBarrierGroup < (ptrdiff_t)GetOsPageSize());
+ _ASSERTE_ALL_BUILDS("clr/src/VM/i386/JITinterfaceX86.cpp", (BYTE*)JIT_PatchedWriteBarrierGroup_End - (BYTE*)JIT_PatchedWriteBarrierGroup < (ptrdiff_t)GetOsPageSize());
// Copy the write barriers to their final resting place.
for (int iBarrier = 0; iBarrier < NUM_WRITE_BARRIERS; iBarrier++)
diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
index e5d6efe483..23f8b7f836 100644
--- a/src/vm/jitinterface.cpp
+++ b/src/vm/jitinterface.cpp
@@ -9767,7 +9767,7 @@ void CEEInfo::getEEInfo(CORINFO_EE_INFO *pEEInfoOut)
pEEInfoOut->sizeOfReversePInvokeFrame = (DWORD)-1;
- pEEInfoOut->osPageSize = OS_PAGE_SIZE;
+ pEEInfoOut->osPageSize = GetOsPageSize();
pEEInfoOut->maxUncheckedOffsetForNullObject = MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT;
pEEInfoOut->targetAbi = CORINFO_CORECLR_ABI;
diff --git a/src/vm/jitinterface.h b/src/vm/jitinterface.h
index 182e797737..26f32bec3a 100644
--- a/src/vm/jitinterface.h
+++ b/src/vm/jitinterface.h
@@ -19,7 +19,7 @@
#ifndef FEATURE_PAL
#define MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT ((32*1024)-1) // when generating JIT code
#else // !FEATURE_PAL
-#define MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT ((OS_PAGE_SIZE / 2) - 1)
+#define MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT ((GetOsPageSize() / 2) - 1)
#endif // !FEATURE_PAL
class Stub;
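
Note: MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT is the largest field offset for which the JIT may skip an explicit null check and rely on the hardware fault from touching the unmapped region near address zero (NULL_AREA_SIZE — one page on PAL, per the excep.cpp hunk above); the PAL definition keeps the limit to half a page. A quick computation of the resulting limits for 4K and 64K pages (sketch):

#include <cstdio>
#include <initializer_list>

int main()
{
    for (unsigned pageSize : { 4096u, 65536u })
    {
        // PAL definition from the hunk above: (GetOsPageSize() / 2) - 1
        unsigned maxUnchecked = (pageSize / 2) - 1;
        printf("page=%2uK -> max unchecked offset=%u\n", pageSize / 1024, maxUnchecked);
    }
    return 0;
}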
diff --git a/src/vm/loaderallocator.cpp b/src/vm/loaderallocator.cpp
index 70c8cabb79..1a05bf2c05 100644
--- a/src/vm/loaderallocator.cpp
+++ b/src/vm/loaderallocator.cpp
@@ -887,11 +887,11 @@ void LoaderAllocator::ActivateManagedTracking()
// We don't actually allocate a low frequency heap for collectible types
-#define COLLECTIBLE_LOW_FREQUENCY_HEAP_SIZE (0 * PAGE_SIZE)
-#define COLLECTIBLE_HIGH_FREQUENCY_HEAP_SIZE (3 * PAGE_SIZE)
-#define COLLECTIBLE_STUB_HEAP_SIZE PAGE_SIZE
-#define COLLECTIBLE_CODEHEAP_SIZE (7 * PAGE_SIZE)
-#define COLLECTIBLE_VIRTUALSTUBDISPATCH_HEAP_SPACE (5 * PAGE_SIZE)
+#define COLLECTIBLE_LOW_FREQUENCY_HEAP_SIZE (0 * GetOsPageSize())
+#define COLLECTIBLE_HIGH_FREQUENCY_HEAP_SIZE (3 * GetOsPageSize())
+#define COLLECTIBLE_STUB_HEAP_SIZE GetOsPageSize()
+#define COLLECTIBLE_CODEHEAP_SIZE (7 * GetOsPageSize())
+#define COLLECTIBLE_VIRTUALSTUBDISPATCH_HEAP_SPACE (5 * GetOsPageSize())
void LoaderAllocator::Init(BaseDomain *pDomain, BYTE *pExecutableHeapMemory)
{
@@ -940,9 +940,9 @@ void LoaderAllocator::Init(BaseDomain *pDomain, BYTE *pExecutableHeapMemory)
#ifdef FEATURE_WINDOWSPHONE
// code:UMEntryThunk::CreateUMEntryThunk allocates memory on executable loader heap for phone.
// Reserve enough for a typical phone app to fit.
- dwExecutableHeapReserveSize = 3 * PAGE_SIZE;
+ dwExecutableHeapReserveSize = 3 * GetOsPageSize();
#else
- dwExecutableHeapReserveSize = PAGE_SIZE;
+ dwExecutableHeapReserveSize = GetOsPageSize();
#endif
_ASSERTE(dwExecutableHeapReserveSize < dwHighFrequencyHeapReserveSize);
@@ -1038,7 +1038,7 @@ void LoaderAllocator::Init(BaseDomain *pDomain, BYTE *pExecutableHeapMemory)
#endif
#ifdef CROSSGEN_COMPILE
- m_pPrecodeHeap = new (&m_PrecodeHeapInstance) LoaderHeap(PAGE_SIZE, PAGE_SIZE);
+ m_pPrecodeHeap = new (&m_PrecodeHeapInstance) LoaderHeap(GetOsPageSize(), GetOsPageSize());
#else
m_pPrecodeHeap = new (&m_PrecodeHeapInstance) CodeFragmentHeap(this, STUB_CODE_BLOCK_PRECODE);
#endif
diff --git a/src/vm/peimagelayout.cpp b/src/vm/peimagelayout.cpp
index 34ba4d8215..93ab77cc78 100644
--- a/src/vm/peimagelayout.cpp
+++ b/src/vm/peimagelayout.cpp
@@ -637,7 +637,7 @@ bool PEImageLayout::ConvertILOnlyPE32ToPE64Worker()
+ VAL16(pHeader32->FileHeader.NumberOfSections));
// On AMD64, used for a 12-byte jump thunk + the original entry point offset.
- if (((pEnd32 + IMAGE_HEADER_3264_SIZE_DIFF /* delta in headers to compute end of 64bit header */) - pImage) > OS_PAGE_SIZE ) {
+ if (((pEnd32 + IMAGE_HEADER_3264_SIZE_DIFF /* delta in headers to compute end of 64bit header */) - pImage) > GetOsPageSize() ) {
// This should never happen. An IL_ONLY image should at most 3 sections.
_ASSERTE(!"ConvertILOnlyPE32ToPE64Worker: Insufficient room to rewrite headers as PE64");
return false;
@@ -693,7 +693,7 @@ bool PEImageLayout::ConvertILOnlyPE32ToPE64()
PBYTE pageBase = (PBYTE)GetBase();
DWORD oldProtect;
- if (!ClrVirtualProtect(pageBase, OS_PAGE_SIZE, PAGE_READWRITE, &oldProtect))
+ if (!ClrVirtualProtect(pageBase, GetOsPageSize(), PAGE_READWRITE, &oldProtect))
{
// We are not going to be able to update header.
return false;
@@ -702,7 +702,7 @@ bool PEImageLayout::ConvertILOnlyPE32ToPE64()
fConvertedToPE64 = ConvertILOnlyPE32ToPE64Worker();
DWORD ignore;
- if (!ClrVirtualProtect(pageBase, OS_PAGE_SIZE, oldProtect, &ignore))
+ if (!ClrVirtualProtect(pageBase, GetOsPageSize(), oldProtect, &ignore))
{
// This is not so bad; just ignore it
}
diff --git a/src/vm/reflectioninvocation.cpp b/src/vm/reflectioninvocation.cpp
index 05c4adf3d3..7f8a9e0075 100644
--- a/src/vm/reflectioninvocation.cpp
+++ b/src/vm/reflectioninvocation.cpp
@@ -1208,7 +1208,7 @@ FCIMPL4(Object*, RuntimeMethodHandle::InvokeMethod,
// Make sure we have enough room on the stack for this. Note that we will need the stack amount twice - once to build the stack
// and second time to actually make the call.
- INTERIOR_STACK_PROBE_FOR(pThread, 1 + static_cast<UINT>((2 * nAllocaSize) / OS_PAGE_SIZE) + static_cast<UINT>(HOLDER_CODE_NORMAL_STACK_LIMIT));
+ INTERIOR_STACK_PROBE_FOR(pThread, 1 + static_cast<UINT>((2 * nAllocaSize) / GetOsPageSize()) + static_cast<UINT>(HOLDER_CODE_NORMAL_STACK_LIMIT));
LPBYTE pAlloc = (LPBYTE)_alloca(nAllocaSize);
diff --git a/src/vm/siginfo.cpp b/src/vm/siginfo.cpp
index cf0cceaf53..fc73b94487 100644
--- a/src/vm/siginfo.cpp
+++ b/src/vm/siginfo.cpp
@@ -1348,9 +1348,9 @@ TypeHandle SigPointer::GetTypeHandleThrowing(
if (!ClrSafeInt<DWORD>::multiply(ntypars, sizeof(TypeHandle), dwAllocaSize))
ThrowHR(COR_E_OVERFLOW);
- if ((dwAllocaSize/PAGE_SIZE+1) >= 2)
+ if ((dwAllocaSize/GetOsPageSize()+1) >= 2)
{
- DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD((10+dwAllocaSize/PAGE_SIZE+1), NO_FORBIDGC_LOADER_USE_ThrowSO(););
+ DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD((10+dwAllocaSize/GetOsPageSize()+1), NO_FORBIDGC_LOADER_USE_ThrowSO(););
}
TypeHandle *thisinst = (TypeHandle*) _alloca(dwAllocaSize);
@@ -1634,9 +1634,9 @@ TypeHandle SigPointer::GetTypeHandleThrowing(
ThrowHR(COR_E_OVERFLOW);
}
- if ((cAllocaSize/PAGE_SIZE+1) >= 2)
+ if ((cAllocaSize/GetOsPageSize()+1) >= 2)
{
- DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD((10+cAllocaSize/PAGE_SIZE+1), NO_FORBIDGC_LOADER_USE_ThrowSO(););
+ DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD((10+cAllocaSize/GetOsPageSize()+1), NO_FORBIDGC_LOADER_USE_ThrowSO(););
}
TypeHandle *retAndArgTypes = (TypeHandle*) _alloca(cAllocaSize);
diff --git a/src/vm/stackprobe.cpp b/src/vm/stackprobe.cpp
index 695f355d10..ef997ff5e5 100644
--- a/src/vm/stackprobe.cpp
+++ b/src/vm/stackprobe.cpp
@@ -155,8 +155,8 @@ void ReportStackOverflow()
// We expect the stackGuarantee to be a multiple of the page size for
// the call to IsStackSpaceAvailable.
- _ASSERTE(stackGuarantee%OS_PAGE_SIZE == 0);
- if (pThread->IsStackSpaceAvailable(static_cast<float>(stackGuarantee)/OS_PAGE_SIZE))
+ _ASSERTE(stackGuarantee%GetOsPageSize() == 0);
+ if (pThread->IsStackSpaceAvailable(static_cast<float>(stackGuarantee)/GetOsPageSize()))
{
COMPlusThrowSO();
}
@@ -296,7 +296,7 @@ FORCEINLINE BOOL RetailStackProbeHelper(unsigned int n, Thread *pThread)
{
probeLimit = pThread->GetProbeLimit();
}
- UINT_PTR probeAddress = (UINT_PTR)(&pThread) - (n * OS_PAGE_SIZE);
+ UINT_PTR probeAddress = (UINT_PTR)(&pThread) - (n * GetOsPageSize());
// If the address we want to probe to is beyond the precalculated limit we fail
// Note that we don't check for stack probing being disabled. This is encoded in
@@ -761,7 +761,7 @@ void BaseStackGuard::HandleOverwrittenPreviousStackGuard(int probeShortFall, __i
"The stack requested by the previous guard is at least %d pages (%d bytes) short.\n"
MORE_INFO_STRING, stackID ? stackID : "", m_szFunction, m_szFile, m_lineNum,
m_pPrevGuard->m_szFunction, m_pPrevGuard->m_szFile, m_pPrevGuard->m_lineNum, m_pPrevGuard->m_numPages,
- probeShortFall/OS_PAGE_SIZE + (probeShortFall%OS_PAGE_SIZE ? 1 : 0), probeShortFall);
+ probeShortFall/GetOsPageSize() + (probeShortFall%GetOsPageSize() ? 1 : 0), probeShortFall);
LOG((LF_EH, LL_INFO100000, "%s", buff));
@@ -796,7 +796,7 @@ void BaseStackGuard::HandleOverwrittenCurrentStackGuard(int probeShortFall, __in
"The%s stack guard installed in %s at \"%s\" @ %d has been violated\n\n"
"The guard requested %d pages of stack and is at least %d pages (%d bytes) short.\n"
MORE_INFO_STRING, stackID ? stackID : "", m_szFunction, m_szFile, m_lineNum, m_numPages,
- probeShortFall/OS_PAGE_SIZE + (probeShortFall%OS_PAGE_SIZE ? 1 : 0), probeShortFall);
+ probeShortFall/GetOsPageSize() + (probeShortFall%GetOsPageSize() ? 1 : 0), probeShortFall);
LOG((LF_EH, LL_INFO100000, buff));
@@ -1044,8 +1044,8 @@ BOOL BaseStackGuard::RequiresNStackPagesInternal(unsigned int n, BOOL fThrowOnSO
// Get the address of the last few bytes on the penultimate page we probed for. This is slightly early than the probe point,
// but gives us more conservatism in our overrun checking. ("Last" here means the bytes with the smallest address.)
- m_pMarker = ((UINT_PTR*)pStack) - (OS_PAGE_SIZE / sizeof(UINT_PTR) * (n-1));
- m_pMarker = (UINT_PTR*)((UINT_PTR)m_pMarker & ~(OS_PAGE_SIZE - 1));
+ m_pMarker = ((UINT_PTR*)pStack) - (GetOsPageSize() / sizeof(UINT_PTR) * (n-1));
+ m_pMarker = (UINT_PTR*)((UINT_PTR)m_pMarker & ~(GetOsPageSize() - 1));
// Grab the previous guard, if any, and update our depth.
m_pPrevGuard = GetCurrentGuard();
@@ -1166,7 +1166,7 @@ BOOL BaseStackGuard::DoProbe(unsigned int n, BOOL fThrowOnSO)
UINT_PTR *sp = (UINT_PTR*)GetCurrentSP();
while (sp >= m_pMarker)
{
- sp -= (OS_PAGE_SIZE / sizeof(UINT_PTR));
+ sp -= (GetOsPageSize() / sizeof(UINT_PTR));
*sp = NULL;
}
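
Note: the shortfall messages above convert bytes to pages with the divide-plus-remainder ceiling idiom; for positive sizes it agrees with the more common add-before-divide form, as this small check shows (pageSize stands in for GetOsPageSize()):

#include <cstdio>
#include <initializer_list>

int main()
{
    const int pageSize = 65536;                  // pretend GetOsPageSize() == 64K
    for (int bytes : { 1, 65536, 65537, 200000 })
    {
        int a = bytes / pageSize + (bytes % pageSize ? 1 : 0); // idiom in the diff
        int b = (bytes + pageSize - 1) / pageSize;             // equivalent ceiling form
        printf("%6d bytes -> %d page(s) (check: %d)\n", bytes, a, b);
    }
    return 0;
}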
diff --git a/src/vm/syncblk.cpp b/src/vm/syncblk.cpp
index 78e455a580..50eec9b068 100644
--- a/src/vm/syncblk.cpp
+++ b/src/vm/syncblk.cpp
@@ -35,8 +35,8 @@
#include "runtimecallablewrapper.h"
#endif // FEATURE_COMINTEROP
-// Allocate 1 page worth. Typically enough
-#define MAXSYNCBLOCK (PAGE_SIZE-sizeof(void*))/sizeof(SyncBlock)
+// Allocate 4K worth. Typically enough
+#define MAXSYNCBLOCK (0x1000-sizeof(void*))/sizeof(SyncBlock)
#define SYNC_TABLE_INITIAL_SIZE 250
//#define DUMP_SB
diff --git a/src/vm/threads.cpp b/src/vm/threads.cpp
index 1eeadf7ead..59fec2bdc3 100644
--- a/src/vm/threads.cpp
+++ b/src/vm/threads.cpp
@@ -1369,7 +1369,7 @@ void InitThreadManager()
// All patched helpers should fit into one page.
// If you hit this assert on retail build, there is most likely problem with BBT script.
- _ASSERTE_ALL_BUILDS("clr/src/VM/threads.cpp", (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart < PAGE_SIZE);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/threads.cpp", (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart < (ptrdiff_t)GetOsPageSize());
// I am using virtual protect to cover the entire range that this code falls in.
//
@@ -2570,7 +2570,7 @@ DWORD WINAPI Thread::intermediateThreadProc(PVOID arg)
WRAPPER_NO_CONTRACT;
m_offset_counter++;
- if (m_offset_counter * offset_multiplier > PAGE_SIZE)
+ if (m_offset_counter * offset_multiplier > (int) GetOsPageSize())
m_offset_counter = 0;
(void)_alloca(m_offset_counter * offset_multiplier);
@@ -2685,11 +2685,11 @@ BOOL Thread::CreateNewOSThread(SIZE_T sizeToCommitOrReserve, LPTHREAD_START_ROUT
dwCreationFlags |= STACK_SIZE_PARAM_IS_A_RESERVATION;
#ifndef FEATURE_PAL // the PAL does its own adjustments as necessary
- if (sizeToCommitOrReserve != 0 && sizeToCommitOrReserve <= OS_PAGE_SIZE)
+ if (sizeToCommitOrReserve != 0 && sizeToCommitOrReserve <= GetOsPageSize())
{
// On Windows, passing a value that is <= one page size bizarrely causes the OS to use the default stack size instead of
// a minimum, which is undesirable. This adjustment fixes that issue to use a minimum stack size (typically 64 KB).
- sizeToCommitOrReserve = OS_PAGE_SIZE + 1;
+ sizeToCommitOrReserve = GetOsPageSize() + 1;
}
#endif // !FEATURE_PAL
@@ -6518,7 +6518,7 @@ void Thread::HandleThreadInterrupt (BOOL fWaitForADUnload)
}
#ifdef _DEBUG
-#define MAXSTACKBYTES (2 * PAGE_SIZE)
+#define MAXSTACKBYTES (2 * GetOsPageSize())
void CleanStackForFastGCStress ()
{
CONTRACTL {
@@ -7112,16 +7112,16 @@ HRESULT Thread::CLRSetThreadStackGuarantee(SetThreadStackGuaranteeScope fScope)
int ThreadGuardPages = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ThreadGuardPages);
if (ThreadGuardPages == 0)
{
- uGuardSize += (EXTRA_PAGES * PAGE_SIZE);
+ uGuardSize += (EXTRA_PAGES * GetOsPageSize());
}
else
{
- uGuardSize += (ThreadGuardPages * PAGE_SIZE);
+ uGuardSize += (ThreadGuardPages * GetOsPageSize());
}
#else // _WIN64
#ifdef _DEBUG
- uGuardSize += (1 * PAGE_SIZE); // one extra page for debug infrastructure
+ uGuardSize += (1 * GetOsPageSize()); // one extra page for debug infrastructure
#endif // _DEBUG
#endif // _WIN64
@@ -7165,14 +7165,14 @@ UINT_PTR Thread::GetLastNormalStackAddress(UINT_PTR StackLimit)
UINT_PTR cbStackGuarantee = GetStackGuarantee();
// Here we take the "hard guard region size", the "stack guarantee" and the "fault page" and add them
- // all together. Note that the "fault page" is the reason for the extra OS_PAGE_SIZE below. The OS
+ // all together. Note that the "fault page" is the reason for the extra GetOsPageSize() below. The OS
// will guarantee us a certain amount of stack remaining after a stack overflow. This is called the
// "stack guarantee". But to do this, it has to fault on the page before that region as the app is
// allowed to fault at the very end of that page. So, as a result, the last normal stack address is
// one page sooner.
return StackLimit + (cbStackGuarantee
#ifndef FEATURE_PAL
- + OS_PAGE_SIZE
+ + GetOsPageSize()
#endif // !FEATURE_PAL
+ HARD_GUARD_REGION_SIZE);
}
@@ -7273,7 +7273,7 @@ static void DebugLogStackRegionMBIs(UINT_PTR uLowAddress, UINT_PTR uHighAddress)
UINT_PTR uRegionSize = uStartOfNextRegion - uStartOfThisRegion;
- LOG((LF_EH, LL_INFO1000, "0x%p -> 0x%p (%d pg) ", uStartOfThisRegion, uStartOfNextRegion - 1, uRegionSize / OS_PAGE_SIZE));
+ LOG((LF_EH, LL_INFO1000, "0x%p -> 0x%p (%d pg) ", uStartOfThisRegion, uStartOfNextRegion - 1, uRegionSize / GetOsPageSize()));
DebugLogMBIFlags(meminfo.State, meminfo.Protect);
LOG((LF_EH, LL_INFO1000, "\n"));
@@ -7312,7 +7312,7 @@ void Thread::DebugLogStackMBIs()
UINT_PTR uStackSize = uStackBase - uStackLimit;
LOG((LF_EH, LL_INFO1000, "----------------------------------------------------------------------\n"));
- LOG((LF_EH, LL_INFO1000, "Stack Snapshot 0x%p -> 0x%p (%d pg)\n", uStackLimit, uStackBase, uStackSize / OS_PAGE_SIZE));
+ LOG((LF_EH, LL_INFO1000, "Stack Snapshot 0x%p -> 0x%p (%d pg)\n", uStackLimit, uStackBase, uStackSize / GetOsPageSize()));
if (pThread)
{
LOG((LF_EH, LL_INFO1000, "Last normal addr: 0x%p\n", pThread->GetLastNormalStackAddress()));
@@ -7534,7 +7534,7 @@ BOOL Thread::CanResetStackTo(LPCVOID stackPointer)
// We need to have enough space to call back into the EE from the handler, so we use the twice the entry point amount.
// We need enough to do work and enough that partway through that work we won't probe and COMPlusThrowSO.
- const INT_PTR iStackSizeThreshold = (ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT * 2) * OS_PAGE_SIZE);
+ const INT_PTR iStackSizeThreshold = (ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT * 2) * GetOsPageSize());
if (iStackSpaceLeft > iStackSizeThreshold)
{
@@ -7577,7 +7577,7 @@ BOOL Thread::IsStackSpaceAvailable(float numPages)
// If we have access to the stack guarantee (either in the guard region or we've tripped the guard page), then
// use that.
- if ((iStackSpaceLeft/OS_PAGE_SIZE) < numPages && !DetermineIfGuardPagePresent())
+ if ((iStackSpaceLeft/GetOsPageSize()) < numPages && !DetermineIfGuardPagePresent())
{
UINT_PTR stackGuarantee = GetStackGuarantee();
// GetLastNormalStackAddress actually returns the 2nd to last stack page on the stack. We'll add that to our available
@@ -7585,9 +7585,9 @@ BOOL Thread::IsStackSpaceAvailable(float numPages)
//
// All these values are OS supplied, and will never overflow. (If they do, that means the stack is on the order
// over GB, which isn't possible.
- iStackSpaceLeft += stackGuarantee + OS_PAGE_SIZE;
+ iStackSpaceLeft += stackGuarantee + GetOsPageSize();
}
- if ((iStackSpaceLeft/OS_PAGE_SIZE) < numPages)
+ if ((iStackSpaceLeft/GetOsPageSize()) < numPages)
{
return FALSE;
}
@@ -7723,13 +7723,13 @@ VOID Thread::RestoreGuardPage()
// to change the size of the guard region, we'll just go ahead and protect the next page down from where we are
// now. The guard page will get pushed forward again, just like normal, until the next stack overflow.
approxStackPointer = (UINT_PTR)GetCurrentSP();
- guardPageBase = (UINT_PTR)ALIGN_DOWN(approxStackPointer, OS_PAGE_SIZE) - OS_PAGE_SIZE;
+ guardPageBase = (UINT_PTR)ALIGN_DOWN(approxStackPointer, GetOsPageSize()) - GetOsPageSize();
// OS uses soft guard page to update the stack info in TEB. If our guard page is not beyond the current stack, the TEB
// will not be updated, and then OS's check of stack during exception will fail.
if (approxStackPointer >= guardPageBase)
{
- guardPageBase -= OS_PAGE_SIZE;
+ guardPageBase -= GetOsPageSize();
}
// If we're currently "too close" to the page we want to mark as a guard then the call to VirtualProtect to set
// PAGE_GUARD will fail, but it won't return an error. Therefore, we protect the page, then query it to make
@@ -7759,7 +7759,7 @@ VOID Thread::RestoreGuardPage()
}
else
{
- guardPageBase -= OS_PAGE_SIZE;
+ guardPageBase -= GetOsPageSize();
}
}
}
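
Note: RestoreGuardPage aims the new guard one page below the current stack pointer, rounded down to a page boundary, and backs off one further page when the candidate is not strictly below the live stack (the comment in the hunk above explains the OS's soft guard page and the TEB). A sketch of that address arithmetic with hypothetical values:

#include <cstdint>
#include <cstdio>

int main()
{
    const uintptr_t pageSize = 0x10000;          // pretend GetOsPageSize() == 64K
    uintptr_t sp = 0x7ff12348;                   // hypothetical stack pointer

    // ALIGN_DOWN(sp, pageSize) - pageSize, as in the hunk above
    uintptr_t guardPageBase = (sp & ~(pageSize - 1)) - pageSize;
    if (sp >= guardPageBase)                     // keep the guard below the live stack
        guardPageBase -= pageSize;               // room for the OS soft guard page

    printf("sp=%#lx guard=%#lx\n", (unsigned long)sp, (unsigned long)guardPageBase);
    return 0;
}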
diff --git a/src/vm/threads.h b/src/vm/threads.h
index a53a4a1932..801b22e901 100644
--- a/src/vm/threads.h
+++ b/src/vm/threads.h
@@ -3573,7 +3573,7 @@ private:
PTR_VOID m_CacheStackLimit;
UINT_PTR m_CacheStackSufficientExecutionLimit;
-#define HARD_GUARD_REGION_SIZE OS_PAGE_SIZE
+#define HARD_GUARD_REGION_SIZE GetOsPageSize()
private:
//
@@ -3587,8 +3587,8 @@ private:
// Every stack has a single reserved page at its limit that we call the 'hard guard page'. This page is never
// committed, and access to it after a stack overflow will terminate the thread.
-#define HARD_GUARD_REGION_SIZE OS_PAGE_SIZE
-#define SIZEOF_DEFAULT_STACK_GUARANTEE 1 * OS_PAGE_SIZE
+#define HARD_GUARD_REGION_SIZE GetOsPageSize()
+#define SIZEOF_DEFAULT_STACK_GUARANTEE 1 * GetOsPageSize()
public:
// This will return the last stack address that one could write to before a stack overflow.
diff --git a/src/vm/virtualcallstub.cpp b/src/vm/virtualcallstub.cpp
index e753860fd1..c230f254c6 100644
--- a/src/vm/virtualcallstub.cpp
+++ b/src/vm/virtualcallstub.cpp
@@ -592,20 +592,20 @@ void VirtualCallStubManager::Init(BaseDomain *pDomain, LoaderAllocator *pLoaderA
//
// Align up all of the commit and reserve sizes
//
- indcell_heap_reserve_size = (DWORD) ALIGN_UP(indcell_heap_reserve_size, PAGE_SIZE);
- indcell_heap_commit_size = (DWORD) ALIGN_UP(indcell_heap_commit_size, PAGE_SIZE);
+ indcell_heap_reserve_size = (DWORD) ALIGN_UP(indcell_heap_reserve_size, GetOsPageSize());
+ indcell_heap_commit_size = (DWORD) ALIGN_UP(indcell_heap_commit_size, GetOsPageSize());
- cache_entry_heap_reserve_size = (DWORD) ALIGN_UP(cache_entry_heap_reserve_size, PAGE_SIZE);
- cache_entry_heap_commit_size = (DWORD) ALIGN_UP(cache_entry_heap_commit_size, PAGE_SIZE);
+ cache_entry_heap_reserve_size = (DWORD) ALIGN_UP(cache_entry_heap_reserve_size, GetOsPageSize());
+ cache_entry_heap_commit_size = (DWORD) ALIGN_UP(cache_entry_heap_commit_size, GetOsPageSize());
- lookup_heap_reserve_size = (DWORD) ALIGN_UP(lookup_heap_reserve_size, PAGE_SIZE);
- lookup_heap_commit_size = (DWORD) ALIGN_UP(lookup_heap_commit_size, PAGE_SIZE);
+ lookup_heap_reserve_size = (DWORD) ALIGN_UP(lookup_heap_reserve_size, GetOsPageSize());
+ lookup_heap_commit_size = (DWORD) ALIGN_UP(lookup_heap_commit_size, GetOsPageSize());
- dispatch_heap_reserve_size = (DWORD) ALIGN_UP(dispatch_heap_reserve_size, PAGE_SIZE);
- dispatch_heap_commit_size = (DWORD) ALIGN_UP(dispatch_heap_commit_size, PAGE_SIZE);
+ dispatch_heap_reserve_size = (DWORD) ALIGN_UP(dispatch_heap_reserve_size, GetOsPageSize());
+ dispatch_heap_commit_size = (DWORD) ALIGN_UP(dispatch_heap_commit_size, GetOsPageSize());
- resolve_heap_reserve_size = (DWORD) ALIGN_UP(resolve_heap_reserve_size, PAGE_SIZE);
- resolve_heap_commit_size = (DWORD) ALIGN_UP(resolve_heap_commit_size, PAGE_SIZE);
+ resolve_heap_reserve_size = (DWORD) ALIGN_UP(resolve_heap_reserve_size, GetOsPageSize());
+ resolve_heap_commit_size = (DWORD) ALIGN_UP(resolve_heap_commit_size, GetOsPageSize());
BYTE * initReservedMem = NULL;
@@ -624,16 +624,16 @@ void VirtualCallStubManager::Init(BaseDomain *pDomain, LoaderAllocator *pLoaderA
DWORD dwWastedReserveMemSize = dwTotalReserveMemSize - dwTotalReserveMemSizeCalc;
if (dwWastedReserveMemSize != 0)
{
- DWORD cWastedPages = dwWastedReserveMemSize / PAGE_SIZE;
+ DWORD cWastedPages = dwWastedReserveMemSize / GetOsPageSize();
DWORD cPagesPerHeap = cWastedPages / 5;
DWORD cPagesRemainder = cWastedPages % 5; // We'll throw this at the resolve heap
- indcell_heap_reserve_size += cPagesPerHeap * PAGE_SIZE;
- cache_entry_heap_reserve_size += cPagesPerHeap * PAGE_SIZE;
- lookup_heap_reserve_size += cPagesPerHeap * PAGE_SIZE;
- dispatch_heap_reserve_size += cPagesPerHeap * PAGE_SIZE;
- resolve_heap_reserve_size += cPagesPerHeap * PAGE_SIZE;
- resolve_heap_reserve_size += cPagesRemainder * PAGE_SIZE;
+ indcell_heap_reserve_size += cPagesPerHeap * GetOsPageSize();
+ cache_entry_heap_reserve_size += cPagesPerHeap * GetOsPageSize();
+ lookup_heap_reserve_size += cPagesPerHeap * GetOsPageSize();
+ dispatch_heap_reserve_size += cPagesPerHeap * GetOsPageSize();
+ resolve_heap_reserve_size += cPagesPerHeap * GetOsPageSize();
+ resolve_heap_reserve_size += cPagesRemainder * GetOsPageSize();
}
CONSISTENCY_CHECK((indcell_heap_reserve_size +
@@ -653,20 +653,20 @@ void VirtualCallStubManager::Init(BaseDomain *pDomain, LoaderAllocator *pLoaderA
}
else
{
- indcell_heap_reserve_size = PAGE_SIZE;
- indcell_heap_commit_size = PAGE_SIZE;
+ indcell_heap_reserve_size = GetOsPageSize();
+ indcell_heap_commit_size = GetOsPageSize();
- cache_entry_heap_reserve_size = PAGE_SIZE;
- cache_entry_heap_commit_size = PAGE_SIZE;
+ cache_entry_heap_reserve_size = GetOsPageSize();
+ cache_entry_heap_commit_size = GetOsPageSize();
- lookup_heap_reserve_size = PAGE_SIZE;
- lookup_heap_commit_size = PAGE_SIZE;
+ lookup_heap_reserve_size = GetOsPageSize();
+ lookup_heap_commit_size = GetOsPageSize();
- dispatch_heap_reserve_size = PAGE_SIZE;
- dispatch_heap_commit_size = PAGE_SIZE;
+ dispatch_heap_reserve_size = GetOsPageSize();
+ dispatch_heap_commit_size = GetOsPageSize();
- resolve_heap_reserve_size = PAGE_SIZE;
- resolve_heap_commit_size = PAGE_SIZE;
+ resolve_heap_reserve_size = GetOsPageSize();
+ resolve_heap_commit_size = GetOsPageSize();
#ifdef _DEBUG
DWORD dwTotalReserveMemSizeCalc = indcell_heap_reserve_size +
diff --git a/src/vm/win32threadpool.cpp b/src/vm/win32threadpool.cpp
index a79656e745..18df0dc76e 100644
--- a/src/vm/win32threadpool.cpp
+++ b/src/vm/win32threadpool.cpp
@@ -1758,7 +1758,7 @@ DWORD WINAPI ThreadpoolMgr::intermediateThreadProc(PVOID arg)
STATIC_CONTRACT_SO_INTOLERANT;
offset_counter++;
- if (offset_counter * offset_multiplier > PAGE_SIZE)
+ if (offset_counter * offset_multiplier > (int)GetOsPageSize())
offset_counter = 0;
(void)_alloca(offset_counter * offset_multiplier);