-rw-r--r-- Documentation/coding-guidelines/clr-jit-coding-conventions.md | 43
-rw-r--r-- src/jit/alloc.cpp | 428
-rw-r--r-- src/jit/alloc.h | 235
-rw-r--r-- src/jit/arraystack.h | 16
-rw-r--r-- src/jit/bitset.cpp | 18
-rw-r--r-- src/jit/bitset.h | 2
-rw-r--r-- src/jit/block.cpp | 2
-rw-r--r-- src/jit/codegencommon.cpp | 8
-rw-r--r-- src/jit/compiler.cpp | 174
-rw-r--r-- src/jit/compiler.h | 185
-rw-r--r-- src/jit/compiler.hpp | 88
-rw-r--r-- src/jit/compilerbitsettraits.hpp | 8
-rw-r--r-- src/jit/copyprop.cpp | 18
-rw-r--r-- src/jit/ee_il_dll.cpp | 2
-rw-r--r-- src/jit/eeinterface.cpp | 2
-rw-r--r-- src/jit/emit.cpp | 2
-rw-r--r-- src/jit/flowgraph.cpp | 16
-rw-r--r-- src/jit/gcencode.cpp | 2
-rw-r--r-- src/jit/gentree.cpp | 10
-rw-r--r-- src/jit/gentree.h | 4
-rw-r--r-- src/jit/hostallocator.cpp | 26
-rw-r--r-- src/jit/hostallocator.h | 38
-rw-r--r-- src/jit/importer.cpp | 6
-rw-r--r-- src/jit/jit.h | 102
-rw-r--r-- src/jit/jitexpandarray.h | 29
-rw-r--r-- src/jit/jithashtable.h | 22
-rw-r--r-- src/jit/jitstd/allocator.h | 32
-rw-r--r-- src/jit/jitstd/utility.h | 13
-rw-r--r-- src/jit/lclvars.cpp | 4
-rw-r--r-- src/jit/lir.cpp | 4
-rw-r--r-- src/jit/loopcloning.cpp | 2
-rw-r--r-- src/jit/loopcloning.h | 10
-rw-r--r-- src/jit/lower.cpp | 2
-rw-r--r-- src/jit/lsra.cpp | 7
-rw-r--r-- src/jit/lsra.h | 62
-rw-r--r-- src/jit/lsrabuild.cpp | 6
-rw-r--r-- src/jit/morph.cpp | 14
-rw-r--r-- src/jit/rangecheck.cpp | 16
-rw-r--r-- src/jit/rangecheck.h | 8
-rw-r--r-- src/jit/regset.cpp | 2
-rw-r--r-- src/jit/scopeinfo.cpp | 4
-rw-r--r-- src/jit/smallhash.h | 55
-rw-r--r-- src/jit/ssabuilder.cpp | 21
-rw-r--r-- src/jit/ssarenamestate.cpp | 10
-rw-r--r-- src/jit/ssarenamestate.h | 4
-rw-r--r-- src/jit/stacklevelsetter.cpp | 4
-rw-r--r-- src/jit/unwind.cpp | 10
-rw-r--r-- src/jit/utils.cpp | 10
-rw-r--r-- src/jit/utils.h | 6
-rw-r--r-- src/jit/valuenum.cpp | 10
-rw-r--r-- src/jit/valuenum.h | 8
51 files changed, 741 insertions, 1069 deletions
diff --git a/Documentation/coding-guidelines/clr-jit-coding-conventions.md b/Documentation/coding-guidelines/clr-jit-coding-conventions.md
index 0bab27616a..4873e1f274 100644
--- a/Documentation/coding-guidelines/clr-jit-coding-conventions.md
+++ b/Documentation/coding-guidelines/clr-jit-coding-conventions.md
@@ -125,7 +125,8 @@ Note that these conventions are different from the CLR C++ Coding Conventions, d
* [15.5.9 Global class objects](#15.5.9)
* [15.6 Exceptions](#15.6)
* [15.7 Code tuning for performance optimization](#15.7)
- * [15.8 Obsoleting functions, classes and macros](#15.8)
+ * [15.8 Memory allocation](#15.8)
+ * [15.9 Obsoleting functions, classes and macros](#15.9)
# <a name="4"/>4 Principles
@@ -1938,7 +1939,45 @@ In general, code should be written to be readable first, and optimized for perfo
In the case of tight loops and code that has been analyzed to be a performance bottleneck, performance optimizations take a higher priority. Talk to the performance team if in doubt.
-## <a name="15.8"/>15.8 Obsoleting functions, classes and macros
+## <a name="15.8"/>15.8 Memory allocation
+
+All memory required during the compilation of a method must be allocated using the `Compiler`'s arena allocator. This allocator takes care of deallocating all the memory when compilation ends, avoiding memory leaks and simplifying memory management.
+
+However, the use of an arena allocator can increase memory usage, and it's worth considering its impact when writing JIT code. Simple code changes can have a significant effect, such as hoisting a `std::vector` variable out of a loop:
+```c++
+std::vector<int> outer; // same memory gets used for all iterations
+for (...)
+{
+ std::vector<int> inner; // this will allocate memory on every iteration
+ // and previously allocated memory is simply wasted
+}
+```
+Node-based data structures (e.g. linked lists) may benefit from retaining and reusing removed nodes, provided that maintaining free lists doesn't add significant cost.
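+For example, a node could be obtained from a free list before falling back to the arena (a minimal sketch; `Node`, `m_freeList` and `m_alloc` are illustrative names):
+```c++
+Node* allocNode()
+{
+    // Reuse a previously removed node when one is available...
+    if (m_freeList != nullptr)
+    {
+        Node* node = m_freeList;
+        m_freeList = node->m_next;
+        return node;
+    }
+    // ... otherwise fall back to the arena allocator.
+    return new (m_alloc) Node;
+}
+```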
+
+The arena allocator should not be used directly. `Compiler::getAllocator(CompMemKind)` returns a `CompAllocator` object that wraps the arena allocator and supports memory usage tracking when `MEASURE_MEM_ALLOC` is enabled. It's best to use a meaningful memory kind (e.g. not `CMK_Generic`), but exceptions can be made for small allocations. `CompAllocator` objects are always pointer sized and can be freely copied and stored (useful to avoid repeated `CompMemKind` references).
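+For example, an allocator can be captured once and reused (a sketch; `Foo` and `Bar` are placeholder types):
+```c++
+// capture the allocator once instead of repeating the memory kind
+CompAllocator alloc = compiler->getAllocator(CMK_LoopHoist);
+Foo* foo = new (alloc) Foo();
+Bar* bar = new (alloc) Bar();
+```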
+
+The `new (CompAllocator)` operator should be preferred over `CompAllocator::allocate(size_t)`. The latter is intended to be used only when constructors must not be run, such as when allocating arrays for containers like `std::vector`.
+```c++
+// typical object allocation
+RangeCheck* p = new (compiler->getAllocator(CMK_RangeCheck)) RangeCheck(compiler);
+// slightly shorter alternative
+RangeCheck* p = new (compiler, CMK_RangeCheck) RangeCheck(compiler);
+// allocate an array with default initialized elements
+LclVarDsc* p = new (compiler->getAllocator(CMK_LvaTable)) LclVarDsc[lvaCount];
+// use list initialization to zero out an array
+unsigned* p = new (compiler->getAllocator(CMK_LvaTable)) unsigned[lvaTrackedCount] { };
+// use CompAllocator::allocate to allocate memory without doing any initialization...
+LclVarDsc* p = compiler->getAllocator(CMK_LvaTable).allocate<LclVarDsc>(lvaCount);
+// ... and construct elements in place as needed
+new (&p[i], jitstd::placement_t()) LclVarDsc(compiler);
+```
+Note that certain classes (e.g. `GenTree`) provide their own `new` operator overloads; these should be used instead of the general purpose `new (CompAllocator)` operator.
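+For example, `GenTree` nodes are typically obtained through `Compiler` factory methods that use the node-specific overloads internally:
+```c++
+// gtNewIconNode allocates via GenTree's own operator new
+GenTree* cns = compiler->gtNewIconNode(42, TYP_INT);
+```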
+
+`jitstd` container classes accept a `CompAllocator` object directly, thanks to the implicit conversion from `CompAllocator` to `jitstd::allocator`.
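+For example (a minimal sketch):
+```c++
+// construct a jitstd container directly from a CompAllocator
+jitstd::vector<unsigned> blockNums(compiler->getAllocator(CMK_Generic));
+blockNums.push_back(42);
+```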
+
+Debug/checked code that needs to allocate memory outside of method compilation can use the `HostAllocator` class and the associated `new` operator. This is a normal memory allocator that requires manual memory deallocation.
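+For example (a sketch, assuming `HostAllocator` also exposes a matching `deallocate` method):
+```c++
+// debug-only allocation from the host; must be freed manually
+char* buffer = HostAllocator::getHostAllocator().allocate<char>(64);
+// ... use buffer ...
+HostAllocator::getHostAllocator().deallocate(buffer);
+```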
+
+## <a name="15.9"/>15.9 Obsoleting functions, classes and macros
The Visual C++ compiler has support built in for marking various user defined constructs as deprecated. This functionality is accessed via one of two mechanisms:
diff --git a/src/jit/alloc.cpp b/src/jit/alloc.cpp
index 4cde8664dc..8f1e0cc12e 100644
--- a/src/jit/alloc.cpp
+++ b/src/jit/alloc.cpp
@@ -9,44 +9,108 @@
#endif // defined(_MSC_VER)
//------------------------------------------------------------------------
-// PooledAllocator:
-// This subclass of `ArenaAllocator` is a singleton that always keeps
-// a single default-sized page allocated. We try to use the singleton
-// allocator as often as possible (i.e. for all non-concurrent
-// method compilations).
-class PooledAllocator : public ArenaAllocator
+// SinglePagePool: Manage a single, default-sized page pool for ArenaAllocator.
+//
+// Allocating a page is slightly costly as it involves the JIT host and
+// possibly the operating system as well. This pool avoids allocation
+// in many cases (i.e. for all non-concurrent method compilations).
+//
+class ArenaAllocator::SinglePagePool
{
-private:
- enum
+ // The page maintained by this pool
+ PageDescriptor* m_page;
+ // The page available for allocation (either m_page or &m_shutdownPage if shutdown was called)
+ PageDescriptor* m_availablePage;
+ // A dummy page that is made available during shutdown
+ PageDescriptor m_shutdownPage;
+
+public:
+ // Attempt to acquire the page managed by this pool.
+ PageDescriptor* tryAcquirePage(IEEMemoryManager* memoryManager)
{
- POOLED_ALLOCATOR_NOTINITIALIZED = 0,
- POOLED_ALLOCATOR_IN_USE = 1,
- POOLED_ALLOCATOR_AVAILABLE = 2,
- POOLED_ALLOCATOR_SHUTDOWN = 3,
- };
+ assert(memoryManager != nullptr);
- static PooledAllocator s_pooledAllocator;
- static LONG s_pooledAllocatorState;
+ PageDescriptor* page = InterlockedExchangeT(&m_availablePage, nullptr);
+ if ((page != nullptr) && (page->m_memoryManager != memoryManager))
+ {
+ // The pool page belongs to a different memory manager, release it.
+ releasePage(page, page->m_memoryManager);
+ page = nullptr;
+ }
- PooledAllocator() : ArenaAllocator()
+ assert((page == nullptr) || isPoolPage(page));
+
+ return page;
+ }
+
+ // Attempt to pool the specified page.
+ void tryPoolPage(PageDescriptor* page)
{
+ assert(page != &m_shutdownPage);
+
+ // Try to pool this page, give up if another thread has already pooled a page.
+ InterlockedCompareExchangeT(&m_page, page, nullptr);
}
- PooledAllocator(IEEMemoryManager* memoryManager);
- PooledAllocator(const PooledAllocator& other) = delete;
- PooledAllocator& operator=(const PooledAllocator& other) = delete;
+ // Check if a page is pooled.
+ bool isEmpty()
+ {
+ return (m_page == nullptr);
+ }
-public:
- PooledAllocator& operator=(PooledAllocator&& other);
+ // Check if the specified page is pooled.
+ bool isPoolPage(PageDescriptor* page)
+ {
+ return (m_page == page);
+ }
- void destroy() override;
+ // Release the specified page.
+ PageDescriptor* releasePage(PageDescriptor* page, IEEMemoryManager* memoryManager)
+ {
+ // tryAcquirePage may end up releasing the shutdown page if shutdown was called.
+ assert((page == &m_shutdownPage) || isPoolPage(page));
+ assert((page == &m_shutdownPage) || (memoryManager != nullptr));
+
+ // Normally m_availablePage should be null when releasePage is called but it can
+ // be the shutdown page if shutdown is called while the pool page is in use.
+ assert((m_availablePage == nullptr) || (m_availablePage == &m_shutdownPage));
+
+ PageDescriptor* next = page->m_next;
+ // Update the page's memory manager (replaces m_next that's not needed in this state).
+ page->m_memoryManager = memoryManager;
+ // Try to make the page available. This will fail if the pool was shut down,
+ // in which case we need to free the page here.
+ PageDescriptor* shutdownPage = InterlockedCompareExchangeT(&m_availablePage, page, nullptr);
+ if (shutdownPage != nullptr)
+ {
+ assert(shutdownPage == &m_shutdownPage);
+ freeHostMemory(memoryManager, page);
+ }
+
+ // Return the next page for caller's convenience.
+ return next;
+ }
+
+ // Free the pooled page.
+ void shutdown()
+ {
+ // If the pool page is available then acquire it now so it can be freed.
+ // Also make the shutdown page available so that:
+ // - tryAcquirePage won't return it because it has a null memory manager
+ // - releasePage won't be able to make the pool page available and instead will free it
+ PageDescriptor* page = InterlockedExchangeT(&m_availablePage, &m_shutdownPage);
- static void shutdown();
+ assert(page != &m_shutdownPage);
+ assert((page == nullptr) || isPoolPage(page));
- static ArenaAllocator* getPooledAllocator(IEEMemoryManager* memoryManager);
+ if ((page != nullptr) && (page->m_memoryManager != nullptr))
+ {
+ freeHostMemory(page->m_memoryManager, page);
+ }
+ }
};
-size_t ArenaAllocator::s_defaultPageSize = 0;
+ArenaAllocator::SinglePagePool ArenaAllocator::s_pagePool;
//------------------------------------------------------------------------
// ArenaAllocator::bypassHostAllocator:
@@ -76,7 +140,7 @@ bool ArenaAllocator::bypassHostAllocator()
// The default size of an arena page.
size_t ArenaAllocator::getDefaultPageSize()
{
- return s_defaultPageSize;
+ return DEFAULT_PAGE_SIZE;
}
//------------------------------------------------------------------------
@@ -89,46 +153,26 @@ ArenaAllocator::ArenaAllocator()
, m_nextFreeByte(nullptr)
, m_lastFreeByte(nullptr)
{
+ assert(!isInitialized());
}
//------------------------------------------------------------------------
-// ArenaAllocator::ArenaAllocator:
-// Constructs an arena allocator.
+// ArenaAllocator::initialize:
+// Initializes the arena allocator.
//
// Arguments:
// memoryManager - The `IEEMemoryManager` instance that will be used to
// allocate memory for arena pages.
-ArenaAllocator::ArenaAllocator(IEEMemoryManager* memoryManager)
- : m_memoryManager(memoryManager)
- , m_firstPage(nullptr)
- , m_lastPage(nullptr)
- , m_nextFreeByte(nullptr)
- , m_lastFreeByte(nullptr)
-{
- assert(getDefaultPageSize() != 0);
- assert(isInitialized());
-}
-
-//------------------------------------------------------------------------
-// ArenaAllocator::operator=:
-// Move-assigns a `ArenaAllocator`.
-ArenaAllocator& ArenaAllocator::operator=(ArenaAllocator&& other)
+void ArenaAllocator::initialize(IEEMemoryManager* memoryManager)
{
assert(!isInitialized());
+ m_memoryManager = memoryManager;
+ assert(isInitialized());
- m_memoryManager = other.m_memoryManager;
- m_firstPage = other.m_firstPage;
- m_lastPage = other.m_lastPage;
- m_nextFreeByte = other.m_nextFreeByte;
- m_lastFreeByte = other.m_lastFreeByte;
-
- other.m_memoryManager = nullptr;
- other.m_firstPage = nullptr;
- other.m_lastPage = nullptr;
- other.m_nextFreeByte = nullptr;
- other.m_lastFreeByte = nullptr;
-
- return *this;
+#if MEASURE_MEM_ALLOC
+ memset(&m_stats, 0, sizeof(m_stats));
+ memset(&m_statsAllocators, 0, sizeof(m_statsAllocators));
+#endif // MEASURE_MEM_ALLOC
}
bool ArenaAllocator::isInitialized()
@@ -146,7 +190,7 @@ bool ArenaAllocator::isInitialized()
//
// Return Value:
// A pointer to the first usable byte of the newly allocated page.
-void* ArenaAllocator::allocateNewPage(size_t size, bool canThrow)
+void* ArenaAllocator::allocateNewPage(size_t size)
{
assert(isInitialized());
@@ -155,11 +199,7 @@ void* ArenaAllocator::allocateNewPage(size_t size, bool canThrow)
// Check for integer overflow
if (pageSize < size)
{
- if (canThrow)
- {
- NOMEM();
- }
-
+ NOMEM();
return nullptr;
}
@@ -173,34 +213,52 @@ void* ArenaAllocator::allocateNewPage(size_t size, bool canThrow)
m_lastPage->m_usedBytes = m_nextFreeByte - m_lastPage->m_contents;
}
- // Round up to a default-sized page if necessary
- if (pageSize <= s_defaultPageSize)
- {
- pageSize = s_defaultPageSize;
- }
+ PageDescriptor* newPage = nullptr;
+ bool tryPoolNewPage = false;
- // Round to the nearest multiple of OS page size if necessary
if (!bypassHostAllocator())
{
+ // Round to the nearest multiple of OS page size
pageSize = roundUp(pageSize, DEFAULT_PAGE_SIZE);
+
+ // If this is the first time we allocate a page then try to use the pool page.
+ if ((m_firstPage == nullptr) && (pageSize == DEFAULT_PAGE_SIZE))
+ {
+ newPage = s_pagePool.tryAcquirePage(m_memoryManager);
+
+ if (newPage == nullptr)
+ {
+ // If there's no pool page yet then try to pool the newly allocated page.
+ tryPoolNewPage = s_pagePool.isEmpty();
+ }
+ else
+ {
+ assert(newPage->m_memoryManager == m_memoryManager);
+ assert(newPage->m_pageBytes == DEFAULT_PAGE_SIZE);
+ }
+ }
}
- // Allocate the new page
- PageDescriptor* newPage = (PageDescriptor*)allocateHostMemory(pageSize);
if (newPage == nullptr)
{
- if (canThrow)
+ // Allocate the new page
+ newPage = static_cast<PageDescriptor*>(allocateHostMemory(m_memoryManager, pageSize));
+
+ if (newPage == nullptr)
{
NOMEM();
+ return nullptr;
}
- return nullptr;
+ if (tryPoolNewPage)
+ {
+ s_pagePool.tryPoolPage(newPage);
+ }
}
// Append the new page to the end of the list
newPage->m_next = nullptr;
newPage->m_pageBytes = pageSize;
- newPage->m_previous = m_lastPage;
newPage->m_usedBytes = 0; // m_usedBytes is meaningless until a new page is allocated.
// Instead of letting it contain garbage (so to confuse us),
// set it to zero.
@@ -231,11 +289,20 @@ void ArenaAllocator::destroy()
{
assert(isInitialized());
+ PageDescriptor* page = m_firstPage;
+
+ // If the first page is the pool page then return it to the pool.
+ if ((page != nullptr) && s_pagePool.isPoolPage(page))
+ {
+ page = s_pagePool.releasePage(page, m_memoryManager);
+ }
+
// Free all of the allocated pages
- for (PageDescriptor *page = m_firstPage, *next; page != nullptr; page = next)
+ for (PageDescriptor* next; page != nullptr; page = next)
{
+ assert(!s_pagePool.isPoolPage(page));
next = page->m_next;
- freeHostMemory(page);
+ freeHostMemory(m_memoryManager, page);
}
// Clear out the allocator's fields
@@ -244,6 +311,7 @@ void ArenaAllocator::destroy()
m_lastPage = nullptr;
m_nextFreeByte = nullptr;
m_lastFreeByte = nullptr;
+ assert(!isInitialized());
}
// The debug version of the allocator may allocate directly from the
@@ -266,9 +334,9 @@ void ArenaAllocator::destroy()
//
// Return Value:
// A pointer to the allocated memory.
-void* ArenaAllocator::allocateHostMemory(size_t size)
+void* ArenaAllocator::allocateHostMemory(IEEMemoryManager* memoryManager, size_t size)
{
- assert(isInitialized());
+ assert(memoryManager != nullptr);
#if defined(DEBUG)
if (bypassHostAllocator())
@@ -280,7 +348,7 @@ void* ArenaAllocator::allocateHostMemory(size_t size)
return ClrAllocInProcessHeap(0, S_SIZE_T(size));
}
#else // defined(DEBUG)
- return m_memoryManager->ClrVirtualAlloc(nullptr, size, MEM_COMMIT, PAGE_READWRITE);
+ return memoryManager->ClrVirtualAlloc(nullptr, size, MEM_COMMIT, PAGE_READWRITE);
#endif // !defined(DEBUG)
}
@@ -290,9 +358,9 @@ void* ArenaAllocator::allocateHostMemory(size_t size)
//
// Arguments:
// block - A pointer to the memory to free.
-void ArenaAllocator::freeHostMemory(void* block)
+void ArenaAllocator::freeHostMemory(IEEMemoryManager* memoryManager, void* block)
{
- assert(isInitialized());
+ assert(memoryManager != nullptr);
#if defined(DEBUG)
if (bypassHostAllocator())
@@ -304,7 +372,7 @@ void ArenaAllocator::freeHostMemory(void* block)
ClrFreeInProcessHeap(0, block);
}
#else // defined(DEBUG)
- m_memoryManager->ClrVirtualFree(block, 0, MEM_RELEASE);
+ memoryManager->ClrVirtualFree(block, 0, MEM_RELEASE);
#endif // !defined(DEBUG)
}
@@ -362,175 +430,97 @@ size_t ArenaAllocator::getTotalBytesUsed()
}
//------------------------------------------------------------------------
-// ArenaAllocator::startup:
-// Performs any necessary initialization for the arena allocator
-// subsystem.
-void ArenaAllocator::startup()
-{
- s_defaultPageSize = bypassHostAllocator() ? (size_t)MIN_PAGE_SIZE : (size_t)DEFAULT_PAGE_SIZE;
-}
-
-//------------------------------------------------------------------------
// ArenaAllocator::shutdown:
// Performs any necessary teardown for the arena allocator subsystem.
void ArenaAllocator::shutdown()
{
- PooledAllocator::shutdown();
+ s_pagePool.shutdown();
}
-PooledAllocator PooledAllocator::s_pooledAllocator;
-LONG PooledAllocator::s_pooledAllocatorState = POOLED_ALLOCATOR_NOTINITIALIZED;
+#if MEASURE_MEM_ALLOC
+CritSecObject ArenaAllocator::s_statsLock;
+ArenaAllocator::AggregateMemStats ArenaAllocator::s_aggStats;
+ArenaAllocator::MemStats ArenaAllocator::s_maxStats;
-//------------------------------------------------------------------------
-// PooledAllocator::PooledAllocator:
-// Constructs a `PooledAllocator`.
-PooledAllocator::PooledAllocator(IEEMemoryManager* memoryManager) : ArenaAllocator(memoryManager)
-{
-}
+const char* ArenaAllocator::MemStats::s_CompMemKindNames[] = {
+#define CompMemKindMacro(kind) #kind,
+#include "compmemkind.h"
+};
-//------------------------------------------------------------------------
-// PooledAllocator::operator=:
-// Move-assigns a `PooledAllocator`.
-PooledAllocator& PooledAllocator::operator=(PooledAllocator&& other)
+void ArenaAllocator::MemStats::Print(FILE* f)
{
- *((ArenaAllocator*)this) = std::move((ArenaAllocator &&)other);
- return *this;
+ fprintf(f, "count: %10u, size: %10llu, max = %10llu\n", allocCnt, allocSz, allocSzMax);
+ fprintf(f, "allocateMemory: %10llu, nraUsed: %10llu\n", nraTotalSizeAlloc, nraTotalSizeUsed);
+ PrintByKind(f);
}
-//------------------------------------------------------------------------
-// PooledAllocator::shutdown:
-// Performs any necessary teardown for the pooled allocator.
-//
-// Notes:
-// If the allocator has been initialized and is in use when this method is called,
-// it is up to whatever is using the pooled allocator to call `destroy` in order
-// to free its memory.
-void PooledAllocator::shutdown()
+void ArenaAllocator::MemStats::PrintByKind(FILE* f)
{
- LONG oldState = InterlockedExchange(&s_pooledAllocatorState, POOLED_ALLOCATOR_SHUTDOWN);
- switch (oldState)
+ fprintf(f, "\nAlloc'd bytes by kind:\n %20s | %10s | %7s\n", "kind", "size", "pct");
+ fprintf(f, " %20s-+-%10s-+-%7s\n", "--------------------", "----------", "-------");
+ float allocSzF = static_cast<float>(allocSz);
+ for (int cmk = 0; cmk < CMK_Count; cmk++)
{
- case POOLED_ALLOCATOR_NOTINITIALIZED:
- case POOLED_ALLOCATOR_SHUTDOWN:
- case POOLED_ALLOCATOR_IN_USE:
- return;
-
- case POOLED_ALLOCATOR_AVAILABLE:
- // The pooled allocator was initialized and not in use; we must destroy it.
- s_pooledAllocator.destroy();
- break;
+ float pct = 100.0f * static_cast<float>(allocSzByKind[cmk]) / allocSzF;
+ fprintf(f, " %20s | %10llu | %6.2f%%\n", s_CompMemKindNames[cmk], allocSzByKind[cmk], pct);
}
+ fprintf(f, "\n");
}
-//------------------------------------------------------------------------
-// PooledAllocator::getPooledAllocator:
-// Returns the pooled allocator if it is not already in use.
-//
-// Arguments:
-// memoryManager: The `IEEMemoryManager` instance in use by the caller.
-//
-// Return Value:
-// A pointer to the pooled allocator if it is available or `nullptr`
-// if it is already in use.
-//
-// Notes:
-// Calling `destroy` on the returned allocator will return it to the
-// pool.
-ArenaAllocator* PooledAllocator::getPooledAllocator(IEEMemoryManager* memoryManager)
+void ArenaAllocator::AggregateMemStats::Print(FILE* f)
{
- LONG oldState = InterlockedExchange(&s_pooledAllocatorState, POOLED_ALLOCATOR_IN_USE);
- switch (oldState)
+ fprintf(f, "For %9u methods:\n", nMethods);
+ if (nMethods == 0)
{
- case POOLED_ALLOCATOR_IN_USE:
- case POOLED_ALLOCATOR_SHUTDOWN:
- // Either the allocator is in use or this call raced with a call to `shutdown`.
- // Return `nullptr`.
- return nullptr;
-
- case POOLED_ALLOCATOR_AVAILABLE:
- if (s_pooledAllocator.m_memoryManager != memoryManager)
- {
- // The allocator is available, but it was initialized with a different
- // memory manager. Release it and return `nullptr`.
- InterlockedExchange(&s_pooledAllocatorState, POOLED_ALLOCATOR_AVAILABLE);
- return nullptr;
- }
-
- return &s_pooledAllocator;
-
- case POOLED_ALLOCATOR_NOTINITIALIZED:
- {
- PooledAllocator allocator(memoryManager);
- if (allocator.allocateNewPage(0, false) == nullptr)
- {
- // Failed to grab the initial memory page.
- InterlockedExchange(&s_pooledAllocatorState, POOLED_ALLOCATOR_NOTINITIALIZED);
- return nullptr;
- }
-
- s_pooledAllocator = std::move(allocator);
- }
-
- return &s_pooledAllocator;
-
- default:
- assert(!"Unknown pooled allocator state");
- unreached();
+ return;
}
+ fprintf(f, " count: %12u (avg %7u per method)\n", allocCnt, allocCnt / nMethods);
+ fprintf(f, " alloc size : %12llu (avg %7llu per method)\n", allocSz, allocSz / nMethods);
+ fprintf(f, " max alloc : %12llu\n", allocSzMax);
+ fprintf(f, "\n");
+ fprintf(f, " allocateMemory : %12llu (avg %7llu per method)\n", nraTotalSizeAlloc, nraTotalSizeAlloc / nMethods);
+ fprintf(f, " nraUsed : %12llu (avg %7llu per method)\n", nraTotalSizeUsed, nraTotalSizeUsed / nMethods);
+ PrintByKind(f);
}
-//------------------------------------------------------------------------
-// PooledAllocator::destroy:
-// Performs any necessary teardown for an `PooledAllocator` and returns the allocator
-// to the pool.
-void PooledAllocator::destroy()
+ArenaAllocator::MemStatsAllocator* ArenaAllocator::getMemStatsAllocator(CompMemKind kind)
{
- assert(isInitialized());
- assert(this == &s_pooledAllocator);
- assert(s_pooledAllocatorState == POOLED_ALLOCATOR_IN_USE || s_pooledAllocatorState == POOLED_ALLOCATOR_SHUTDOWN);
- assert(m_firstPage != nullptr);
+ assert(kind < CMK_Count);
- // Free all but the first allocated page
- for (PageDescriptor *page = m_firstPage->m_next, *next; page != nullptr; page = next)
+ if (m_statsAllocators[kind].m_arena == nullptr)
{
- next = page->m_next;
- freeHostMemory(page);
+ m_statsAllocators[kind].m_arena = this;
+ m_statsAllocators[kind].m_kind = kind;
}
- // Reset the relevant state to point back to the first byte of the first page
- m_firstPage->m_next = nullptr;
- m_lastPage = m_firstPage;
- m_nextFreeByte = m_firstPage->m_contents;
- m_lastFreeByte = (BYTE*)m_firstPage + m_firstPage->m_pageBytes;
+ return &m_statsAllocators[kind];
+}
- assert(getTotalBytesAllocated() == s_defaultPageSize);
+void ArenaAllocator::finishMemStats()
+{
+ m_stats.nraTotalSizeAlloc = getTotalBytesAllocated();
+ m_stats.nraTotalSizeUsed = getTotalBytesUsed();
- // If we've already been shut down, free the first page. Otherwise, return the allocator to the pool.
- if (s_pooledAllocatorState == POOLED_ALLOCATOR_SHUTDOWN)
+ CritSecHolder statsLock(s_statsLock);
+ s_aggStats.Add(m_stats);
+ if (m_stats.allocSz > s_maxStats.allocSz)
{
- ArenaAllocator::destroy();
- }
- else
- {
- InterlockedExchange(&s_pooledAllocatorState, POOLED_ALLOCATOR_AVAILABLE);
+ s_maxStats = m_stats;
}
}
-//------------------------------------------------------------------------
-// ArenaAllocator::getPooledAllocator:
-// Returns the pooled allocator if it is not already in use.
-//
-// Arguments:
-// memoryManager: The `IEEMemoryManager` instance in use by the caller.
-//
-// Return Value:
-// A pointer to the pooled allocator if it is available or `nullptr`
-// if it is already in use.
-//
-// Notes:
-// Calling `destroy` on the returned allocator will return it to the
-// pool.
-ArenaAllocator* ArenaAllocator::getPooledAllocator(IEEMemoryManager* memoryManager)
+void ArenaAllocator::dumpMemStats(FILE* file)
+{
+ m_stats.Print(file);
+}
+
+void ArenaAllocator::dumpAggregateMemStats(FILE* file)
+{
+ s_aggStats.Print(file);
+}
+
+void ArenaAllocator::dumpMaxMemStats(FILE* file)
{
- return PooledAllocator::getPooledAllocator(memoryManager);
+ s_maxStats.Print(file);
}
+#endif // MEASURE_MEM_ALLOC
diff --git a/src/jit/alloc.h b/src/jit/alloc.h
index 62e5bc0848..7090c7fa43 100644
--- a/src/jit/alloc.h
+++ b/src/jit/alloc.h
@@ -9,17 +9,32 @@
#include "host.h"
#endif // defined(_HOST_H_)
+// CompMemKind values are used to tag memory allocations performed via
+// the compiler's allocator so that the memory usage of various compiler
+// components can be tracked separately (when MEASURE_MEM_ALLOC is defined).
+
+enum CompMemKind
+{
+#define CompMemKindMacro(kind) CMK_##kind,
+#include "compmemkind.h"
+ CMK_Count
+};
+
class ArenaAllocator
{
private:
ArenaAllocator(const ArenaAllocator& other) = delete;
ArenaAllocator& operator=(const ArenaAllocator& other) = delete;
+ ArenaAllocator& operator=(ArenaAllocator&& other) = delete;
-protected:
struct PageDescriptor
{
- PageDescriptor* m_next;
- PageDescriptor* m_previous;
+ union {
+ // Used when the page is allocated
+ PageDescriptor* m_next;
+ // Used by the pooled page when available
+ IEEMemoryManager* m_memoryManager;
+ };
size_t m_pageBytes; // # of bytes allocated
size_t m_usedBytes; // # of bytes actually used. (This is only valid when we've allocated a new page.)
@@ -33,10 +48,11 @@ protected:
enum
{
DEFAULT_PAGE_SIZE = 16 * OS_page_size,
- MIN_PAGE_SIZE = sizeof(PageDescriptor)
};
- static size_t s_defaultPageSize;
+ class SinglePagePool;
+
+ static SinglePagePool s_pagePool;
IEEMemoryManager* m_memoryManager;
@@ -49,15 +65,93 @@ protected:
bool isInitialized();
- void* allocateNewPage(size_t size, bool canThrow);
+ void* allocateNewPage(size_t size);
+
+ static void* allocateHostMemory(IEEMemoryManager* memoryManager, size_t size);
+ static void freeHostMemory(IEEMemoryManager* memoryManager, void* block);
+
+#if MEASURE_MEM_ALLOC
+ struct MemStats
+ {
+ unsigned allocCnt; // # of allocs
+ UINT64 allocSz; // total size of those alloc.
+ UINT64 allocSzMax; // Maximum single allocation.
+ UINT64 allocSzByKind[CMK_Count]; // Classified by "kind".
+ UINT64 nraTotalSizeAlloc;
+ UINT64 nraTotalSizeUsed;
+
+ static const char* s_CompMemKindNames[]; // Names of the kinds.
+
+ void AddAlloc(size_t sz, CompMemKind cmk)
+ {
+ allocCnt += 1;
+ allocSz += sz;
+ if (sz > allocSzMax)
+ {
+ allocSzMax = sz;
+ }
+ allocSzByKind[cmk] += sz;
+ }
+
+ void Print(FILE* f); // Print these stats to file.
+ void PrintByKind(FILE* f); // Do just the by-kind histogram part.
+ };
+
+ struct AggregateMemStats : public MemStats
+ {
+ unsigned nMethods;
+
+ void Add(const MemStats& ms)
+ {
+ nMethods++;
+ allocCnt += ms.allocCnt;
+ allocSz += ms.allocSz;
+ allocSzMax = max(allocSzMax, ms.allocSzMax);
+ for (int i = 0; i < CMK_Count; i++)
+ {
+ allocSzByKind[i] += ms.allocSzByKind[i];
+ }
+ nraTotalSizeAlloc += ms.nraTotalSizeAlloc;
+ nraTotalSizeUsed += ms.nraTotalSizeUsed;
+ }
+
+ void Print(FILE* f); // Print these stats to file.
+ };
+
+public:
+ struct MemStatsAllocator
+ {
+ ArenaAllocator* m_arena;
+ CompMemKind m_kind;
+
+ void* allocateMemory(size_t sz)
+ {
+ m_arena->m_stats.AddAlloc(sz, m_kind);
+ return m_arena->allocateMemory(sz);
+ }
+ };
+
+private:
+ static CritSecObject s_statsLock; // This lock protects the data structures below.
+ static MemStats s_maxStats; // Stats for the allocator with the largest amount allocated.
+ static AggregateMemStats s_aggStats; // Aggregates statistics for all allocators.
+
+ MemStats m_stats;
+ MemStatsAllocator m_statsAllocators[CMK_Count];
+
+public:
+ MemStatsAllocator* getMemStatsAllocator(CompMemKind kind);
+ void finishMemStats();
+ void dumpMemStats(FILE* file);
- void* allocateHostMemory(size_t size);
- void freeHostMemory(void* block);
+ static void dumpMaxMemStats(FILE* file);
+ static void dumpAggregateMemStats(FILE* file);
+#endif // MEASURE_MEM_ALLOC
public:
ArenaAllocator();
- ArenaAllocator(IEEMemoryManager* memoryManager);
- ArenaAllocator& operator=(ArenaAllocator&& other);
+
+ void initialize(IEEMemoryManager* memoryManager);
// NOTE: it would be nice to have a destructor on this type to ensure that any value that
// goes out of scope is either uninitialized or has been torn down via a call to
@@ -65,7 +159,7 @@ public:
// revisiting EH in the JIT; such a destructor could be added if SEH is removed
// as part of that work.
- virtual void destroy();
+ void destroy();
inline void* allocateMemory(size_t sz);
@@ -75,10 +169,7 @@ public:
static bool bypassHostAllocator();
static size_t getDefaultPageSize();
- static void startup();
static void shutdown();
-
- static ArenaAllocator* getPooledAllocator(IEEMemoryManager* memoryManager);
};
//------------------------------------------------------------------------
@@ -103,7 +194,7 @@ inline void* ArenaAllocator::allocateMemory(size_t size)
assert(size != 0);
// Ensure that we always allocate in pointer sized increments.
- size = (size_t)roundUp(size, sizeof(size_t));
+ size = roundUp(size, sizeof(size_t));
#if defined(DEBUG)
if (JitConfig.ShouldInjectFault() != 0)
@@ -127,7 +218,7 @@ inline void* ArenaAllocator::allocateMemory(size_t size)
if (m_nextFreeByte > m_lastFreeByte)
{
- block = allocateNewPage(size, true);
+ block = allocateNewPage(size);
}
#if defined(DEBUG)
@@ -137,4 +228,116 @@ inline void* ArenaAllocator::allocateMemory(size_t size)
return block;
}
+// Allows general purpose code (e.g. collection classes) to allocate
+// memory of a pre-determined kind via an arena allocator.
+
+class CompAllocator
+{
+#if MEASURE_MEM_ALLOC
+ ArenaAllocator::MemStatsAllocator* m_arena;
+#else
+ ArenaAllocator* m_arena;
+#endif
+
+public:
+ CompAllocator(ArenaAllocator* arena, CompMemKind cmk)
+#if MEASURE_MEM_ALLOC
+ : m_arena(arena->getMemStatsAllocator(cmk))
+#else
+ : m_arena(arena)
+#endif
+ {
+ }
+
+ // Allocate a block of memory suitable to store `count` objects of type `T`.
+ // Zero-length allocations are not allowed.
+ template <typename T>
+ T* allocate(size_t count)
+ {
+ // Ensure that count * sizeof(T) does not overflow.
+ if (count > (SIZE_MAX / sizeof(T)))
+ {
+ NOMEM();
+ }
+
+ void* p = m_arena->allocateMemory(count * sizeof(T));
+
+ // Ensure that the allocator returned sizeof(size_t) aligned memory.
+ assert((size_t(p) & (sizeof(size_t) - 1)) == 0);
+
+ return static_cast<T*>(p);
+ }
+
+ // Deallocate a block of memory previously allocated by `allocate`.
+ // The arena allocator does not release memory so this doesn't do anything.
+ void deallocate(void* p)
+ {
+ }
+};
+
+// Global operator new overloads that work with CompAllocator
+
+inline void* __cdecl operator new(size_t n, CompAllocator alloc)
+{
+ return alloc.allocate<char>(n);
+}
+
+inline void* __cdecl operator new[](size_t n, CompAllocator alloc)
+{
+ return alloc.allocate<char>(n);
+}
+
+// A CompAllocator wrapper that implements IAllocator and allows zero-length
+// memory allocations (the arena allocator does not support zero-length
+// allocation).
+
+class CompIAllocator : public IAllocator
+{
+ CompAllocator m_alloc;
+ char m_zeroLenAllocTarg;
+
+public:
+ CompIAllocator(CompAllocator alloc) : m_alloc(alloc)
+ {
+ }
+
+ // Allocates a block of memory at least `sz` in size.
+ virtual void* Alloc(size_t sz) override
+ {
+ if (sz == 0)
+ {
+ return &m_zeroLenAllocTarg;
+ }
+ else
+ {
+ return m_alloc.allocate<char>(sz);
+ }
+ }
+
+ // Allocates a block of memory at least `elems * elemSize` in size.
+ virtual void* ArrayAlloc(size_t elems, size_t elemSize) override
+ {
+ if ((elems == 0) || (elemSize == 0))
+ {
+ return &m_zeroLenAllocTarg;
+ }
+ else
+ {
+ // Ensure that elems * elemSize does not overflow.
+ if (elems > (SIZE_MAX / elemSize))
+ {
+ NOMEM();
+ }
+
+ return m_alloc.allocate<char>(elems * elemSize);
+ }
+ }
+
+ // Frees the block of memory pointed to by p.
+ virtual void Free(void* p) override
+ {
+ m_alloc.deallocate(p);
+ }
+};
+
#endif // _ALLOC_H_
diff --git a/src/jit/arraystack.h b/src/jit/arraystack.h
index c6ac6b2628..2565e19856 100644
--- a/src/jit/arraystack.h
+++ b/src/jit/arraystack.h
@@ -11,14 +11,12 @@ class ArrayStack
static const int builtinSize = 8;
public:
- ArrayStack(Compiler* comp, int initialSize = builtinSize)
+ ArrayStack(CompAllocator alloc, int initialSize = builtinSize) : m_alloc(alloc)
{
- compiler = comp;
-
if (initialSize > builtinSize)
{
maxIndex = initialSize;
- data = new (compiler, CMK_ArrayStack) T[initialSize];
+ data = new (alloc) T[initialSize];
}
else
{
@@ -58,7 +56,7 @@ public:
// and copy over
T* oldData = data;
noway_assert(maxIndex * 2 > maxIndex);
- data = new (compiler, CMK_ArrayStack) T[maxIndex * 2];
+ data = new (m_alloc) T[maxIndex * 2];
for (int i = 0; i < maxIndex; i++)
{
data[i] = oldData[i];
@@ -149,10 +147,10 @@ public:
}
private:
- Compiler* compiler; // needed for allocation
- int tosIndex; // first free location
- int maxIndex;
- T* data;
+ CompAllocator m_alloc;
+ int tosIndex; // first free location
+ int maxIndex;
+ T* data;
// initial allocation
T builtinData[builtinSize];
};
diff --git a/src/jit/bitset.cpp b/src/jit/bitset.cpp
index 6802c1a717..5771e1858a 100644
--- a/src/jit/bitset.cpp
+++ b/src/jit/bitset.cpp
@@ -98,30 +98,30 @@ void BitSetSupport::RunTests(Env env)
class TestBitSetTraits
{
public:
- static void* Alloc(CompAllocator* alloc, size_t byteSize)
+ static void* Alloc(CompAllocator alloc, size_t byteSize)
{
- return alloc->Alloc(byteSize);
+ return alloc.allocate<char>(byteSize);
}
- static unsigned GetSize(CompAllocator* alloc)
+ static unsigned GetSize(CompAllocator alloc)
{
return 64;
}
- static unsigned GetArrSize(CompAllocator* alloc, unsigned elemSize)
+ static unsigned GetArrSize(CompAllocator alloc, unsigned elemSize)
{
assert(elemSize == sizeof(size_t));
return (64 / 8) / sizeof(size_t);
}
- static unsigned GetEpoch(CompAllocator* alloc)
+ static unsigned GetEpoch(CompAllocator alloc)
{
return 0;
}
};
-void BitSetSupport::TestSuite(CompAllocator* env)
+void BitSetSupport::TestSuite(CompAllocator env)
{
- BitSetSupport::RunTests<UINT64, BSUInt64, CompAllocator*, TestBitSetTraits>(env);
- BitSetSupport::RunTests<BitSetShortLongRep, BSShortLong, CompAllocator*, TestBitSetTraits>(env);
- BitSetSupport::RunTests<BitSetUint64<CompAllocator*, TestBitSetTraits>, BSUInt64Class, CompAllocator*,
+ BitSetSupport::RunTests<UINT64, BSUInt64, CompAllocator, TestBitSetTraits>(env);
+ BitSetSupport::RunTests<BitSetShortLongRep, BSShortLong, CompAllocator, TestBitSetTraits>(env);
+ BitSetSupport::RunTests<BitSetUint64<CompAllocator, TestBitSetTraits>, BSUInt64Class, CompAllocator,
TestBitSetTraits>(env);
}
#endif
diff --git a/src/jit/bitset.h b/src/jit/bitset.h
index df03dee50e..a0192e62e8 100644
--- a/src/jit/bitset.h
+++ b/src/jit/bitset.h
@@ -40,7 +40,7 @@ public:
#ifdef DEBUG
// This runs the "TestSuite" method for a few important instantiations of BitSet.
- static void TestSuite(CompAllocator* env);
+ static void TestSuite(CompAllocator env);
#endif
enum Operation
diff --git a/src/jit/block.cpp b/src/jit/block.cpp
index 265369802b..00c70afd75 100644
--- a/src/jit/block.cpp
+++ b/src/jit/block.cpp
@@ -588,7 +588,7 @@ const char* BasicBlock::dspToString(int blockNumPadding /* = 2*/)
// Allocation function for MemoryPhiArg.
void* BasicBlock::MemoryPhiArg::operator new(size_t sz, Compiler* comp)
{
- return comp->compGetMem(sz, CMK_MemoryPhiArg);
+ return comp->getAllocator(CMK_MemoryPhiArg).allocate<char>(sz);
}
//------------------------------------------------------------------------
diff --git a/src/jit/codegencommon.cpp b/src/jit/codegencommon.cpp
index fcc0676ea8..56cebe0efd 100644
--- a/src/jit/codegencommon.cpp
+++ b/src/jit/codegencommon.cpp
@@ -11239,9 +11239,7 @@ void CodeGen::genIPmappingAdd(IL_OFFSETX offsx, bool isLabel)
/* Create a mapping entry and append it to the list */
- Compiler::IPmappingDsc* addMapping =
- (Compiler::IPmappingDsc*)compiler->compGetMem(sizeof(*addMapping), CMK_DebugInfo);
-
+ Compiler::IPmappingDsc* addMapping = compiler->getAllocator(CMK_DebugInfo).allocate<Compiler::IPmappingDsc>(1);
addMapping->ipmdNativeLoc.CaptureLocation(getEmitter());
addMapping->ipmdILoffsx = offsx;
addMapping->ipmdIsLabel = isLabel;
@@ -11300,9 +11298,7 @@ void CodeGen::genIPmappingAddToFront(IL_OFFSETX offsx)
/* Create a mapping entry and prepend it to the list */
- Compiler::IPmappingDsc* addMapping =
- (Compiler::IPmappingDsc*)compiler->compGetMem(sizeof(*addMapping), CMK_DebugInfo);
-
+ Compiler::IPmappingDsc* addMapping = compiler->getAllocator(CMK_DebugInfo).allocate<Compiler::IPmappingDsc>(1);
addMapping->ipmdNativeLoc.CaptureLocation(getEmitter());
addMapping->ipmdILoffsx = offsx;
addMapping->ipmdIsLabel = true;
diff --git a/src/jit/compiler.cpp b/src/jit/compiler.cpp
index 170cf38997..4bafdfd2c4 100644
--- a/src/jit/compiler.cpp
+++ b/src/jit/compiler.cpp
@@ -1212,11 +1212,11 @@ struct FileLine
FileLine(const char* file, unsigned line, const char* condStr) : m_line(line)
{
size_t newSize = (strlen(file) + 1) * sizeof(char);
- m_file = (char*)HostAllocator::getHostAllocator()->Alloc(newSize);
+ m_file = HostAllocator::getHostAllocator().allocate<char>(newSize);
strcpy_s(m_file, newSize, file);
newSize = (strlen(condStr) + 1) * sizeof(char);
- m_condStr = (char*)HostAllocator::getHostAllocator()->Alloc(newSize);
+ m_condStr = HostAllocator::getHostAllocator().allocate<char>(newSize);
strcpy_s(m_condStr, newSize, condStr);
}
@@ -1401,9 +1401,6 @@ void Compiler::compStartup()
grossVMsize = grossNCsize = totalNCsize = 0;
#endif // DISPLAY_SIZES
- // Initialize the JIT's allocator.
- ArenaAllocator::startup();
-
/* Initialize the table of tree node sizes */
GenTree::InitNodeSize();
@@ -1709,10 +1706,10 @@ void Compiler::compShutdown()
if (s_dspMemStats)
{
fprintf(fout, "\nAll allocations:\n");
- s_aggMemStats.Print(jitstdout);
+ ArenaAllocator::dumpAggregateMemStats(jitstdout);
fprintf(fout, "\nLargest method:\n");
- s_maxCompMemStats.Print(jitstdout);
+ ArenaAllocator::dumpMaxMemStats(jitstdout);
fprintf(fout, "\n");
fprintf(fout, "---------------------------------------------------\n");
@@ -1918,7 +1915,7 @@ void Compiler::compDisplayStaticSizes(FILE* fout)
void Compiler::compInit(ArenaAllocator* pAlloc, InlineInfo* inlineInfo)
{
assert(pAlloc);
- compAllocator = pAlloc;
+ compArenaAllocator = pAlloc;
// Inlinee Compile object will only be allocated when needed for the 1st time.
InlineeCompiler = nullptr;
@@ -1934,32 +1931,11 @@ void Compiler::compInit(ArenaAllocator* pAlloc, InlineInfo* inlineInfo)
{
m_inlineStrategy = nullptr;
compInlineResult = inlineInfo->inlineResult;
-
- // We shouldn't be using the compAllocatorGeneric for other than the root compiler.
- compAllocatorGeneric = nullptr;
-#if MEASURE_MEM_ALLOC
- compAllocatorBitset = nullptr;
- compAllocatorGC = nullptr;
- compAllocatorLoopHoist = nullptr;
-#ifdef DEBUG
- compAllocatorDebugOnly = nullptr;
-#endif // DEBUG
-#endif // MEASURE_MEM_ALLOC
}
else
{
m_inlineStrategy = new (this, CMK_Inlining) InlineStrategy(this);
compInlineResult = nullptr;
-
- compAllocatorGeneric = new (this, CMK_Unknown) CompAllocator(this, CMK_Generic);
-#if MEASURE_MEM_ALLOC
- compAllocatorBitset = new (this, CMK_Unknown) CompAllocator(this, CMK_bitset);
- compAllocatorGC = new (this, CMK_Unknown) CompAllocator(this, CMK_GC);
- compAllocatorLoopHoist = new (this, CMK_Unknown) CompAllocator(this, CMK_LoopHoist);
-#ifdef DEBUG
- compAllocatorDebugOnly = new (this, CMK_Unknown) CompAllocator(this, CMK_DebugOnly);
-#endif // DEBUG
-#endif // MEASURE_MEM_ALLOC
}
#ifdef FEATURE_TRACELOGGING
@@ -2007,9 +1983,6 @@ void Compiler::compInit(ArenaAllocator* pAlloc, InlineInfo* inlineInfo)
optLoopsCloned = 0;
-#if MEASURE_MEM_ALLOC
- genMemStats.Init();
-#endif // MEASURE_MEM_ALLOC
#if LOOP_HOIST_STATS
m_loopsConsidered = 0;
m_curLoopHasHoistedExpression = false;
@@ -2256,47 +2229,6 @@ unsigned char Compiler::compGetJitDefaultFill()
#endif // DEBUG
-/*****************************************************************************
- *
- * The central memory allocation routine used by the compiler. Normally this
- * is a simple inline method defined in compiler.hpp, but for debugging it's
- * often convenient to keep it non-inline.
- */
-
-#ifdef DEBUG
-
-void* Compiler::compGetMem(size_t sz, CompMemKind cmk)
-{
-#if 0
-#if SMALL_TREE_NODES
- if (sz != TREE_NODE_SZ_SMALL &&
- sz != TREE_NODE_SZ_LARGE && sz > 32)
- {
- printf("Alloc %3u bytes\n", sz);
- }
-#else
- if (sz != sizeof(GenTree) && sz > 32)
- {
- printf("Alloc %3u bytes\n", sz);
- }
-#endif
-#endif // 0
-
-#if MEASURE_MEM_ALLOC
- genMemStats.AddAlloc(sz, cmk);
-#endif
-
- void* ptr = compAllocator->allocateMemory(sz);
-
- // Verify that the current block is aligned. Only then will the next
- // block allocated be on an aligned boundary.
- assert((size_t(ptr) & (sizeof(size_t) - 1)) == 0);
-
- return ptr;
-}
-
-#endif
-
/*****************************************************************************/
#ifdef DEBUG
/*****************************************************************************/
@@ -5239,7 +5171,7 @@ bool Compiler::compQuirkForPPP()
assert((varDscExposedStruct->lvExactSize / TARGET_POINTER_SIZE) == 8);
BYTE* oldGCPtrs = varDscExposedStruct->lvGcLayout;
- BYTE* newGCPtrs = (BYTE*)compGetMem(8, CMK_LvaTable);
+ BYTE* newGCPtrs = getAllocator(CMK_LvaTable).allocate<BYTE>(8);
for (int i = 0; i < 4; i++)
{
@@ -5432,7 +5364,7 @@ int Compiler::compCompile(CORINFO_METHOD_HANDLE methodHnd,
info.compMethodName = eeGetMethodName(methodHnd, &classNamePtr);
unsigned len = (unsigned)roundUp(strlen(classNamePtr) + 1);
- info.compClassName = (char*)compGetMem(len, CMK_DebugOnly);
+ info.compClassName = getAllocator(CMK_DebugOnly).allocate<char>(len);
strcpy_s((char*)info.compClassName, len, classNamePtr);
info.compFullName = eeGetMethodFullName(methodHnd);
@@ -5599,26 +5531,16 @@ void Compiler::compCompileFinish()
#if MEASURE_MEM_ALLOC
{
- // Grab the relevant lock.
- CritSecHolder statsLock(s_memStatsLock);
-
- // Make the updates.
- genMemStats.nraTotalSizeAlloc = compGetAllocator()->getTotalBytesAllocated();
- genMemStats.nraTotalSizeUsed = compGetAllocator()->getTotalBytesUsed();
- memAllocHist.record((unsigned)((genMemStats.nraTotalSizeAlloc + 1023) / 1024));
- memUsedHist.record((unsigned)((genMemStats.nraTotalSizeUsed + 1023) / 1024));
- s_aggMemStats.Add(genMemStats);
- if (genMemStats.allocSz > s_maxCompMemStats.allocSz)
- {
- s_maxCompMemStats = genMemStats;
- }
+ compArenaAllocator->finishMemStats();
+ memAllocHist.record((unsigned)((compArenaAllocator->getTotalBytesAllocated() + 1023) / 1024));
+ memUsedHist.record((unsigned)((compArenaAllocator->getTotalBytesUsed() + 1023) / 1024));
}
#ifdef DEBUG
if (s_dspMemStats || verbose)
{
printf("\nAllocations for %s (MethodHash=%08x)\n", info.compFullName, info.compMethodHash());
- genMemStats.Print(jitstdout);
+ compArenaAllocator->dumpMemStats(jitstdout);
}
#endif // DEBUG
#endif // MEASURE_MEM_ALLOC
@@ -5646,12 +5568,12 @@ void Compiler::compCompileFinish()
(info.compLocalsCount <= 32) && (!opts.MinOpts()) && // We may have too many local variables, etc
(getJitStressLevel() == 0) && // We need extra memory for stress
!opts.optRepeat && // We need extra memory to repeat opts
- !compAllocator->bypassHostAllocator() && // ArenaAllocator::getDefaultPageSize() is artificially low for
- // DirectAlloc
+ !compArenaAllocator->bypassHostAllocator() && // ArenaAllocator::getDefaultPageSize() is artificially low for
+ // DirectAlloc
// Factor of 2x is because data-structures are bigger under DEBUG
- (compAllocator->getTotalBytesAllocated() > (2 * ArenaAllocator::getDefaultPageSize())) &&
+ (compArenaAllocator->getTotalBytesAllocated() > (2 * ArenaAllocator::getDefaultPageSize())) &&
// RyuJIT backend needs memory tuning! TODO-Cleanup: remove this case when memory tuning is complete.
- (compAllocator->getTotalBytesAllocated() > (10 * ArenaAllocator::getDefaultPageSize())) &&
+ (compArenaAllocator->getTotalBytesAllocated() > (10 * ArenaAllocator::getDefaultPageSize())) &&
!verbose) // We allocate lots of memory to convert sets to strings for JitDump
{
genSmallMethodsNeedingExtraMemoryCnt++;
@@ -6754,20 +6676,12 @@ START:
if (inlineInfo)
{
// Use inliner's memory allocator when compiling the inlinee.
- pAlloc = inlineInfo->InlinerCompiler->compGetAllocator();
+ pAlloc = inlineInfo->InlinerCompiler->compGetArenaAllocator();
}
else
{
- IEEMemoryManager* pMemoryManager = compHnd->getMemoryManager();
-
- // Try to reuse the pre-inited allocator
- pAlloc = ArenaAllocator::getPooledAllocator(pMemoryManager);
-
- if (pAlloc == nullptr)
- {
- alloc = ArenaAllocator(pMemoryManager);
- pAlloc = &alloc;
- }
+ alloc.initialize(compHnd->getMemoryManager());
+ pAlloc = &alloc;
}
Compiler* pComp;
@@ -6777,7 +6691,6 @@ START:
{
Compiler* pComp;
ArenaAllocator* pAlloc;
- ArenaAllocator* alloc;
bool jitFallbackCompile;
CORINFO_METHOD_HANDLE methodHnd;
@@ -6796,7 +6709,6 @@ START:
} param;
param.pComp = nullptr;
param.pAlloc = pAlloc;
- param.alloc = &alloc;
param.jitFallbackCompile = jitFallbackCompile;
param.methodHnd = methodHnd;
param.classPtr = classPtr;
@@ -8241,7 +8153,7 @@ void JitTimer::PrintCsvMethodStats(Compiler* comp)
fprintf(fp, "%Iu,", comp->info.compNativeCodeSize);
fprintf(fp, "%Iu,", comp->compInfoBlkSize);
- fprintf(fp, "%Iu,", comp->compGetAllocator()->getTotalBytesAllocated());
+ fprintf(fp, "%Iu,", comp->compGetArenaAllocator()->getTotalBytesAllocated());
fprintf(fp, "%I64u,", m_info.m_totalCycles);
fprintf(fp, "%f\n", CycleTimer::CyclesPerSecond());
fclose(fp);
@@ -8259,54 +8171,6 @@ void JitTimer::Terminate(Compiler* comp, CompTimeSummaryInfo& sum, bool includeP
}
#endif // FEATURE_JIT_METHOD_PERF
-#if MEASURE_MEM_ALLOC
-// static vars.
-CritSecObject Compiler::s_memStatsLock; // Default constructor.
-Compiler::AggregateMemStats Compiler::s_aggMemStats; // Default constructor.
-Compiler::MemStats Compiler::s_maxCompMemStats; // Default constructor.
-
-const char* Compiler::MemStats::s_CompMemKindNames[] = {
-#define CompMemKindMacro(kind) #kind,
-#include "compmemkind.h"
-};
-
-void Compiler::MemStats::Print(FILE* f)
-{
- fprintf(f, "count: %10u, size: %10llu, max = %10llu\n", allocCnt, allocSz, allocSzMax);
- fprintf(f, "allocateMemory: %10llu, nraUsed: %10llu\n", nraTotalSizeAlloc, nraTotalSizeUsed);
- PrintByKind(f);
-}
-
-void Compiler::MemStats::PrintByKind(FILE* f)
-{
- fprintf(f, "\nAlloc'd bytes by kind:\n %20s | %10s | %7s\n", "kind", "size", "pct");
- fprintf(f, " %20s-+-%10s-+-%7s\n", "--------------------", "----------", "-------");
- float allocSzF = static_cast<float>(allocSz);
- for (int cmk = 0; cmk < CMK_Count; cmk++)
- {
- float pct = 100.0f * static_cast<float>(allocSzByKind[cmk]) / allocSzF;
- fprintf(f, " %20s | %10llu | %6.2f%%\n", s_CompMemKindNames[cmk], allocSzByKind[cmk], pct);
- }
- fprintf(f, "\n");
-}
-
-void Compiler::AggregateMemStats::Print(FILE* f)
-{
- fprintf(f, "For %9u methods:\n", nMethods);
- if (nMethods == 0)
- {
- return;
- }
- fprintf(f, " count: %12u (avg %7u per method)\n", allocCnt, allocCnt / nMethods);
- fprintf(f, " alloc size : %12llu (avg %7llu per method)\n", allocSz, allocSz / nMethods);
- fprintf(f, " max alloc : %12llu\n", allocSzMax);
- fprintf(f, "\n");
- fprintf(f, " allocateMemory : %12llu (avg %7llu per method)\n", nraTotalSizeAlloc, nraTotalSizeAlloc / nMethods);
- fprintf(f, " nraUsed : %12llu (avg %7llu per method)\n", nraTotalSizeUsed, nraTotalSizeUsed / nMethods);
- PrintByKind(f);
-}
-#endif // MEASURE_MEM_ALLOC
-
#if LOOP_HOIST_STATS
// Static fields.
CritSecObject Compiler::s_loopHoistStatsLock; // Default constructor.
diff --git a/src/jit/compiler.h b/src/jit/compiler.h
index 7de944035f..2cbf668157 100644
--- a/src/jit/compiler.h
+++ b/src/jit/compiler.h
@@ -98,7 +98,7 @@ class Compiler;
/*****************************************************************************/
//
-// Declare global operator new overloads that use the Compiler::compGetMem() function for allocation.
+// Declare global operator new overloads that use the compiler's arena allocator
//
// I wanted to make the second argument optional, with default = CMK_Unknown, but that
@@ -3665,7 +3665,7 @@ public:
template <typename T>
T* fgAllocateTypeForEachBlk(CompMemKind cmk = CMK_Unknown)
{
- return (T*)compGetMem((fgBBNumMax + 1) * sizeof(T), cmk);
+ return getAllocator(cmk).allocate<T>(fgBBNumMax + 1);
}
// BlockSets are relative to a specific set of BasicBlock numbers. If that changes
@@ -4394,7 +4394,7 @@ public:
// The switch block "switchBlk" just had an entry with value "from" modified to the value "to".
// Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk",
// remove it from "this", and ensure that "to" is a member. Use "alloc" to do any required allocation.
- void UpdateTarget(CompAllocator* alloc, BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to);
+ void UpdateTarget(CompAllocator alloc, BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to);
};
typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, SwitchUniqueSuccSet> BlockToSwitchDescMap;
@@ -8615,95 +8615,11 @@ public:
JitFlags* compileFlags,
CorInfoInstantiationVerification instVerInfo);
- ArenaAllocator* compGetAllocator();
+ ArenaAllocator* compGetArenaAllocator();
#if MEASURE_MEM_ALLOC
-
static bool s_dspMemStats; // Display per-phase memory statistics for every function
-
- struct MemStats
- {
- unsigned allocCnt; // # of allocs
- UINT64 allocSz; // total size of those alloc.
- UINT64 allocSzMax; // Maximum single allocation.
- UINT64 allocSzByKind[CMK_Count]; // Classified by "kind".
- UINT64 nraTotalSizeAlloc;
- UINT64 nraTotalSizeUsed;
-
- static const char* s_CompMemKindNames[]; // Names of the kinds.
-
- MemStats() : allocCnt(0), allocSz(0), allocSzMax(0), nraTotalSizeAlloc(0), nraTotalSizeUsed(0)
- {
- for (int i = 0; i < CMK_Count; i++)
- {
- allocSzByKind[i] = 0;
- }
- }
- MemStats(const MemStats& ms)
- : allocCnt(ms.allocCnt)
- , allocSz(ms.allocSz)
- , allocSzMax(ms.allocSzMax)
- , nraTotalSizeAlloc(ms.nraTotalSizeAlloc)
- , nraTotalSizeUsed(ms.nraTotalSizeUsed)
- {
- for (int i = 0; i < CMK_Count; i++)
- {
- allocSzByKind[i] = ms.allocSzByKind[i];
- }
- }
-
- // Until we have ubiquitous constructors.
- void Init()
- {
- this->MemStats::MemStats();
- }
-
- void AddAlloc(size_t sz, CompMemKind cmk)
- {
- allocCnt += 1;
- allocSz += sz;
- if (sz > allocSzMax)
- {
- allocSzMax = sz;
- }
- allocSzByKind[cmk] += sz;
- }
-
- void Print(FILE* f); // Print these stats to f.
- void PrintByKind(FILE* f); // Do just the by-kind histogram part.
- };
- MemStats genMemStats;
-
- struct AggregateMemStats : public MemStats
- {
- unsigned nMethods;
-
- AggregateMemStats() : MemStats(), nMethods(0)
- {
- }
-
- void Add(const MemStats& ms)
- {
- nMethods++;
- allocCnt += ms.allocCnt;
- allocSz += ms.allocSz;
- allocSzMax = max(allocSzMax, ms.allocSzMax);
- for (int i = 0; i < CMK_Count; i++)
- {
- allocSzByKind[i] += ms.allocSzByKind[i];
- }
- nraTotalSizeAlloc += ms.nraTotalSizeAlloc;
- nraTotalSizeUsed += ms.nraTotalSizeUsed;
- }
-
- void Print(FILE* f); // Print these stats to jitstdout.
- };
-
- static CritSecObject s_memStatsLock; // This lock protects the data structures below.
- static MemStats s_maxCompMemStats; // Stats for the compilation with the largest amount allocated.
- static AggregateMemStats s_aggMemStats; // Aggregates statistics for all compilations.
-
-#endif // MEASURE_MEM_ALLOC
+#endif // MEASURE_MEM_ALLOC
#if LOOP_HOIST_STATS
unsigned m_loopsConsidered;
@@ -8722,10 +8638,6 @@ public:
static void PrintAggregateLoopHoistStats(FILE* f);
#endif // LOOP_HOIST_STATS
- void* compGetMemArray(size_t numElem, size_t elemSize, CompMemKind cmk = CMK_Unknown);
- void* compGetMem(size_t sz, CompMemKind cmk = CMK_Unknown);
- void compFreeMem(void*);
-
bool compIsForImportOnly();
bool compIsForInlining();
bool compDonotInline();
@@ -8749,7 +8661,7 @@ public:
{
VarScopeDsc* data;
VarScopeListNode* next;
- static VarScopeListNode* Create(VarScopeDsc* value, CompAllocator* alloc)
+ static VarScopeListNode* Create(VarScopeDsc* value, CompAllocator alloc)
{
VarScopeListNode* node = new (alloc) VarScopeListNode;
node->data = value;
@@ -8762,7 +8674,7 @@ public:
{
VarScopeListNode* head;
VarScopeListNode* tail;
- static VarScopeMapInfo* Create(VarScopeListNode* node, CompAllocator* alloc)
+ static VarScopeMapInfo* Create(VarScopeListNode* node, CompAllocator alloc)
{
VarScopeMapInfo* info = new (alloc) VarScopeMapInfo;
info->head = node;
@@ -8832,19 +8744,9 @@ protected:
bool skipMethod();
#endif
- ArenaAllocator* compAllocator;
+ ArenaAllocator* compArenaAllocator;
public:
- CompAllocator* compAllocatorGeneric; // An allocator that uses the CMK_Generic tracker.
-#if MEASURE_MEM_ALLOC
- CompAllocator* compAllocatorBitset; // An allocator that uses the CMK_bitset tracker.
- CompAllocator* compAllocatorGC; // An allocator that uses the CMK_GC tracker.
- CompAllocator* compAllocatorLoopHoist; // An allocator that uses the CMK_LoopHoist tracker.
-#ifdef DEBUG
- CompAllocator* compAllocatorDebugOnly; // An allocator that uses the CMK_DebugOnly tracker.
-#endif // DEBUG
-#endif // MEASURE_MEM_ALLOC
-
void compFunctionTraceStart();
void compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI);
@@ -8882,47 +8784,25 @@ public:
// Assumes called as part of process shutdown; does any compiler-specific work associated with that.
static void ProcessShutdownWork(ICorStaticInfo* statInfo);
- CompAllocator* getAllocator()
+ CompAllocator getAllocator(CompMemKind cmk = CMK_Generic)
{
- return compAllocatorGeneric;
+ return CompAllocator(compArenaAllocator, cmk);
}
-#if MEASURE_MEM_ALLOC
- CompAllocator* getAllocatorBitset()
- {
- return compAllocatorBitset;
- }
- CompAllocator* getAllocatorGC()
- {
- return compAllocatorGC;
- }
- CompAllocator* getAllocatorLoopHoist()
- {
- return compAllocatorLoopHoist;
- }
-#else // !MEASURE_MEM_ALLOC
- CompAllocator* getAllocatorBitset()
+ CompAllocator getAllocatorGC()
{
- return compAllocatorGeneric;
+ return getAllocator(CMK_GC);
}
- CompAllocator* getAllocatorGC()
- {
- return compAllocatorGeneric;
- }
- CompAllocator* getAllocatorLoopHoist()
+
+ CompAllocator getAllocatorLoopHoist()
{
- return compAllocatorGeneric;
+ return getAllocator(CMK_LoopHoist);
}
-#endif // !MEASURE_MEM_ALLOC
#ifdef DEBUG
- CompAllocator* getAllocatorDebugOnly()
+ CompAllocator getAllocatorDebugOnly()
{
-#if MEASURE_MEM_ALLOC
- return compAllocatorDebugOnly;
-#else // !MEASURE_MEM_ALLOC
- return compAllocatorGeneric;
-#endif // !MEASURE_MEM_ALLOC
+ return getAllocator(CMK_DebugOnly);
}
#endif // DEBUG
@@ -9293,7 +9173,7 @@ public:
if (compRoot->m_fieldSeqStore == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_FieldSeqStore, and use that for allocation.
- CompAllocator* ialloc = new (this, CMK_FieldSeqStore) CompAllocator(this, CMK_FieldSeqStore);
+ CompAllocator ialloc(getAllocator(CMK_FieldSeqStore));
compRoot->m_fieldSeqStore = new (ialloc) FieldSeqStore(ialloc);
}
return compRoot->m_fieldSeqStore;
@@ -9314,8 +9194,8 @@ public:
{
// Create a CompAllocator that labels sub-structure with CMK_ZeroOffsetFieldMap, and use that for
// allocation.
- CompAllocator* ialloc = new (this, CMK_ZeroOffsetFieldMap) CompAllocator(this, CMK_ZeroOffsetFieldMap);
- m_zeroOffsetFieldMap = new (ialloc) NodeToFieldSeqMap(ialloc);
+ CompAllocator ialloc(getAllocator(CMK_ZeroOffsetFieldMap));
+ m_zeroOffsetFieldMap = new (ialloc) NodeToFieldSeqMap(ialloc);
}
return m_zeroOffsetFieldMap;
}
@@ -9341,7 +9221,7 @@ public:
if (compRoot->m_arrayInfoMap == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation.
- CompAllocator* ialloc = new (this, CMK_ArrayInfoMap) CompAllocator(this, CMK_ArrayInfoMap);
+ CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap));
compRoot->m_arrayInfoMap = new (ialloc) NodeToArrayInfoMap(ialloc);
}
return compRoot->m_arrayInfoMap;
@@ -9396,7 +9276,7 @@ public:
if (compRoot->m_memorySsaMap[memoryKind] == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation.
- CompAllocator* ialloc = new (this, CMK_ArrayInfoMap) CompAllocator(this, CMK_ArrayInfoMap);
+ CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap));
compRoot->m_memorySsaMap[memoryKind] = new (ialloc) NodeToUnsignedMap(ialloc);
}
return compRoot->m_memorySsaMap[memoryKind];
@@ -9456,25 +9336,6 @@ public:
}; // end of class Compiler
-// Inline methods of CompAllocator.
-void* CompAllocator::Alloc(size_t sz)
-{
-#if MEASURE_MEM_ALLOC
- return m_comp->compGetMem(sz, m_cmk);
-#else
- return m_comp->compGetMem(sz);
-#endif
-}
-
-void* CompAllocator::ArrayAlloc(size_t elems, size_t elemSize)
-{
-#if MEASURE_MEM_ALLOC
- return m_comp->compGetMemArray(elems, elemSize, m_cmk);
-#else
- return m_comp->compGetMemArray(elems, elemSize);
-#endif
-}
-
// LclVarDsc constructor. Uses Compiler, so must come after Compiler definition.
inline LclVarDsc::LclVarDsc(Compiler* comp)
: // Initialize the ArgRegs to REG_STK.
@@ -9564,7 +9425,7 @@ protected:
Compiler* m_compiler;
ArrayStack<GenTree*> m_ancestors;
- GenTreeVisitor(Compiler* compiler) : m_compiler(compiler), m_ancestors(compiler)
+ GenTreeVisitor(Compiler* compiler) : m_compiler(compiler), m_ancestors(compiler->getAllocator(CMK_ArrayStack))
{
assert(compiler != nullptr);
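
The hunks above replace the per-kind allocator fields and getters with a single `getAllocator(CompMemKind)` that returns a `CompAllocator` by value. A minimal usage sketch under that interface (the variable name `pComp` and the element count are illustrative, not part of this change):

```c++
// Sketch: obtain a tagged allocator by value, then allocate typed arena memory.
// `pComp` stands for any Compiler*; the tag feeds MEASURE_MEM_ALLOC accounting.
CompAllocator alloc = pComp->getAllocator(CMK_LoopHoist);
unsigned*     temps = alloc.allocate<unsigned>(64); // 64 unsigned slots from the arena
```
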
diff --git a/src/jit/compiler.hpp b/src/jit/compiler.hpp
index 92ea539508..d873eadaa8 100644
--- a/src/jit/compiler.hpp
+++ b/src/jit/compiler.hpp
@@ -888,7 +888,7 @@ void* GenTree::operator new(size_t sz, Compiler* comp, genTreeOps oper)
#endif // MEASURE_NODE_SIZE
assert(size >= sz);
- return comp->compGetMem(size, CMK_ASTNode);
+ return comp->getAllocator(CMK_ASTNode).allocate<char>(size);
}
// GenTree constructor
@@ -1663,8 +1663,7 @@ inline unsigned Compiler::lvaGrabTemp(bool shortLifetime DEBUGARG(const char* re
IMPL_LIMITATION("too many locals");
}
- // Note: compGetMemArray might throw.
- LclVarDsc* newLvaTable = (LclVarDsc*)compGetMemArray(newLvaTableCnt, sizeof(*lvaTable), CMK_LvaTable);
+ LclVarDsc* newLvaTable = getAllocator(CMK_LvaTable).allocate<LclVarDsc>(newLvaTableCnt);
memcpy(newLvaTable, lvaTable, lvaCount * sizeof(*lvaTable));
memset(newLvaTable + lvaCount, 0, (newLvaTableCnt - lvaCount) * sizeof(*lvaTable));
@@ -1738,8 +1737,7 @@ inline unsigned Compiler::lvaGrabTemps(unsigned cnt DEBUGARG(const char* reason)
IMPL_LIMITATION("too many locals");
}
- // Note: compGetMemArray might throw.
- LclVarDsc* newLvaTable = (LclVarDsc*)compGetMemArray(newLvaTableCnt, sizeof(*lvaTable), CMK_LvaTable);
+ LclVarDsc* newLvaTable = getAllocator(CMK_LvaTable).allocate<LclVarDsc>(newLvaTableCnt);
memcpy(newLvaTable, lvaTable, lvaCount * sizeof(*lvaTable));
memset(newLvaTable + lvaCount, 0, (newLvaTableCnt - lvaCount) * sizeof(*lvaTable));
@@ -4069,75 +4067,9 @@ inline bool Compiler::compStressCompile(compStressArea stressArea, unsigned weig
}
#endif
-inline ArenaAllocator* Compiler::compGetAllocator()
-{
- return compAllocator;
-}
-
-/*****************************************************************************
- *
- * Allocate memory from the no-release allocator. All such memory will be
- * freed up simultaneously at the end of the procedure
- */
-
-#ifndef DEBUG
-
-inline void* Compiler::compGetMem(size_t sz, CompMemKind cmk)
-{
- assert(sz);
-
-#if MEASURE_MEM_ALLOC
- genMemStats.AddAlloc(sz, cmk);
-#endif
-
- return compAllocator->allocateMemory(sz);
-}
-
-#endif
-
-// Wrapper for Compiler::compGetMem that can be forward-declared for use in template
-// types which Compiler depends on but which need to allocate heap memory.
-inline void* compGetMem(Compiler* comp, size_t sz)
-{
- return comp->compGetMem(sz);
-}
-
-/*****************************************************************************
- *
- * A common memory allocation for arrays of structures involves the
- * multiplication of the number of elements with the size of each element.
- * If this computation overflows, then the memory allocation might succeed,
- * but not allocate sufficient memory for all the elements. This can cause
- * us to overwrite the allocation, and AV or worse, corrupt memory.
- *
- * This method checks for overflow, and succeeds only when it detects
- * that there's no overflow. It should be cheap, because when inlined with
- * a constant elemSize, the division should be done in compile time, and so
- * at run time we simply have a check of numElem against some number (this
- * is why we __forceinline).
- */
-
-#define MAX_MEMORY_PER_ALLOCATION (512 * 1024 * 1024)
-
-__forceinline void* Compiler::compGetMemArray(size_t numElem, size_t elemSize, CompMemKind cmk)
-{
- if (numElem > (MAX_MEMORY_PER_ALLOCATION / elemSize))
- {
- NOMEM();
- }
-
- return compGetMem(numElem * elemSize, cmk);
-}
-
-/******************************************************************************
- *
- * Roundup the allocated size so that if this memory block is aligned,
- * then the next block allocated too will be aligned.
- * The JIT will always try to keep all the blocks aligned.
- */
-
-inline void Compiler::compFreeMem(void* ptr)
+inline ArenaAllocator* Compiler::compGetArenaAllocator()
{
+ return compArenaAllocator;
}
inline bool Compiler::compIsProfilerHookNeeded()
@@ -4909,18 +4841,18 @@ void GenTree::VisitBinOpOperands(TVisitor visitor)
/*****************************************************************************
* operator new
*
- * Note that compGetMem is an arena allocator that returns memory that is
+ * Note that the compiler's allocator is an arena allocator that returns memory that is
* not zero-initialized and can contain data from a prior allocation lifetime.
*/
-inline void* __cdecl operator new(size_t sz, Compiler* context, CompMemKind cmk)
+inline void* __cdecl operator new(size_t sz, Compiler* compiler, CompMemKind cmk)
{
- return context->compGetMem(sz, cmk);
+ return compiler->getAllocator(cmk).allocate<char>(sz);
}
-inline void* __cdecl operator new[](size_t sz, Compiler* context, CompMemKind cmk)
+inline void* __cdecl operator new[](size_t sz, Compiler* compiler, CompMemKind cmk)
{
- return context->compGetMem(sz, cmk);
+ return compiler->getAllocator(cmk).allocate<char>(sz);
}
inline void* __cdecl operator new(size_t sz, void* p, const jitstd::placement_t& /* syntax_difference */)
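
With the `operator new` overloads above, any JIT type can be placement-allocated from the arena under an explicit memory kind; since arena memory is not zero-initialized, the constructor must set every field. A hedged sketch (`Node` is a stand-in type, not part of this change):

```c++
struct Node
{
    Node* next;
    int   value;

    Node() : next(nullptr), value(0) // arena memory may hold stale data, so initialize explicitly
    {
    }
};

// `comp` stands for any Compiler*.
Node* n = new (comp, CMK_Generic) Node();
```
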
diff --git a/src/jit/compilerbitsettraits.hpp b/src/jit/compilerbitsettraits.hpp
index be30564701..e6c6b1326b 100644
--- a/src/jit/compilerbitsettraits.hpp
+++ b/src/jit/compilerbitsettraits.hpp
@@ -17,14 +17,14 @@
// static
void* CompAllocBitSetTraits::Alloc(Compiler* comp, size_t byteSize)
{
- return comp->compGetMem(byteSize, CMK_bitset);
+ return comp->getAllocator(CMK_bitset).allocate<char>(byteSize);
}
#ifdef DEBUG
// static
void* CompAllocBitSetTraits::DebugAlloc(Compiler* comp, size_t byteSize)
{
- return comp->compGetMem(byteSize, CMK_DebugOnly);
+ return comp->getAllocator(CMK_DebugOnly).allocate<char>(byteSize);
}
#endif // DEBUG
@@ -141,14 +141,14 @@ BitSetSupport::BitSetOpCounter* BasicBlockBitSetTraits::GetOpCounter(Compiler* c
// static
void* BitVecTraits::Alloc(BitVecTraits* b, size_t byteSize)
{
- return b->comp->compGetMem(byteSize, CMK_bitset);
+ return b->comp->getAllocator(CMK_bitset).allocate<char>(byteSize);
}
#ifdef DEBUG
// static
void* BitVecTraits::DebugAlloc(BitVecTraits* b, size_t byteSize)
{
- return b->comp->compGetMem(byteSize, CMK_DebugOnly);
+ return b->comp->getAllocator(CMK_DebugOnly).allocate<char>(byteSize);
}
#endif // DEBUG
diff --git a/src/jit/copyprop.cpp b/src/jit/copyprop.cpp
index 6c21d45c1f..c09fa91509 100644
--- a/src/jit/copyprop.cpp
+++ b/src/jit/copyprop.cpp
@@ -21,12 +21,6 @@
#include "ssabuilder.h"
#include "treelifeupdater.h"
-template <typename T>
-inline static T* allocate_any(jitstd::allocator<void>& alloc, size_t count = 1)
-{
- return jitstd::allocator<T>(alloc).allocate(count);
-}
-
/**************************************************************************************
*
* Corresponding to the live definition pushes, pop the stack as we finish a sub-paths
@@ -370,7 +364,7 @@ void Compiler::optBlockCopyProp(BasicBlock* block, LclNumToGenTreePtrStack* curS
GenTreePtrStack* stack;
if (!curSsaName->Lookup(lclNum, &stack))
{
- stack = new (curSsaName->GetAllocator()) GenTreePtrStack(this);
+ stack = new (curSsaName->GetAllocator()) GenTreePtrStack(curSsaName->GetAllocator());
}
stack->Push(tree);
curSsaName->Set(lclNum, stack);
@@ -383,7 +377,7 @@ void Compiler::optBlockCopyProp(BasicBlock* block, LclNumToGenTreePtrStack* curS
GenTreePtrStack* stack;
if (!curSsaName->Lookup(lclNum, &stack))
{
- stack = new (curSsaName->GetAllocator()) GenTreePtrStack(this);
+ stack = new (curSsaName->GetAllocator()) GenTreePtrStack(curSsaName->GetAllocator());
stack->Push(tree);
curSsaName->Set(lclNum, stack);
}
@@ -431,10 +425,10 @@ void Compiler::optVnCopyProp()
return;
}
- CompAllocator allocator(this, CMK_CopyProp);
+ CompAllocator allocator(getAllocator(CMK_CopyProp));
// Compute the domTree to use.
- BlkToBlkVectorMap* domTree = new (&allocator) BlkToBlkVectorMap(&allocator);
+ BlkToBlkVectorMap* domTree = new (allocator) BlkToBlkVectorMap(allocator);
domTree->Reallocate(fgBBcount * 3 / 2); // Prime the allocation
SsaBuilder::ComputeDominators(this, domTree);
@@ -453,9 +447,9 @@ void Compiler::optVnCopyProp()
VarSetOps::AssignNoCopy(this, optCopyPropKillSet, VarSetOps::MakeEmpty(this));
// The map from lclNum to its recently live definitions as a stack.
- LclNumToGenTreePtrStack curSsaName(&allocator);
+ LclNumToGenTreePtrStack curSsaName(allocator);
- BlockWorkStack* worklist = new (&allocator) BlockWorkStack(&allocator);
+ BlockWorkStack* worklist = new (allocator) BlockWorkStack(allocator);
worklist->push_back(BlockWork(fgFirstBB));
while (!worklist->empty())
diff --git a/src/jit/ee_il_dll.cpp b/src/jit/ee_il_dll.cpp
index 94cf81cdf0..88612bcfda 100644
--- a/src/jit/ee_il_dll.cpp
+++ b/src/jit/ee_il_dll.cpp
@@ -733,7 +733,7 @@ void Compiler::eeGetVars()
{
// Allocate a bit-array for all the variables and initialize to false
- bool* varInfoProvided = (bool*)compGetMem(info.compLocalsCount * sizeof(varInfoProvided[0]));
+ bool* varInfoProvided = getAllocator(CMK_Unknown).allocate<bool>(info.compLocalsCount);
unsigned i;
for (i = 0; i < info.compLocalsCount; i++)
{
diff --git a/src/jit/eeinterface.cpp b/src/jit/eeinterface.cpp
index 6581230978..5a0e412afa 100644
--- a/src/jit/eeinterface.cpp
+++ b/src/jit/eeinterface.cpp
@@ -149,7 +149,7 @@ const char* Compiler::eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd)
length += param.siglength + 2;
- char* retName = (char*)compGetMem(length, CMK_DebugOnly);
+ char* retName = getAllocator(CMK_DebugOnly).allocate<char>(length);
/* Now generate the full signature string in the allocated buffer */
diff --git a/src/jit/emit.cpp b/src/jit/emit.cpp
index e51c5690fa..3492f2fee4 100644
--- a/src/jit/emit.cpp
+++ b/src/jit/emit.cpp
@@ -514,7 +514,7 @@ void* emitter::emitGetMem(size_t sz)
emitTotMemAlloc += sz;
#endif
- return emitComp->compGetMem(sz, CMK_InstDesc);
+ return emitComp->getAllocator(CMK_InstDesc).allocate<char>(sz);
}
/*****************************************************************************
diff --git a/src/jit/flowgraph.cpp b/src/jit/flowgraph.cpp
index 197af3100b..7516792b3f 100644
--- a/src/jit/flowgraph.cpp
+++ b/src/jit/flowgraph.cpp
@@ -2337,7 +2337,7 @@ void Compiler::fgDfsInvPostOrderHelper(BasicBlock* block, BlockSet& visited, uns
// Allocate a local stack to hold the DFS traversal actions necessary
// to compute pre/post-ordering of the control flowgraph.
- ArrayStack<DfsBlockEntry> stack(this);
+ ArrayStack<DfsBlockEntry> stack(getAllocator(CMK_ArrayStack));
// Push the first block on the stack to seed the traversal.
stack.Push(DfsBlockEntry(DSS_Pre, block));
@@ -2778,7 +2778,7 @@ void Compiler::fgTraverseDomTree(unsigned bbNum, BasicBlockList** domTree, unsig
// Allocate a local stack to hold the Dfs traversal actions necessary
// to compute pre/post-ordering of the dominator tree.
- ArrayStack<DfsNumEntry> stack(this);
+ ArrayStack<DfsNumEntry> stack(getAllocator(CMK_ArrayStack));
// Push the first entry number on the stack to seed the traversal.
stack.Push(DfsNumEntry(DSS_Pre, bbNum));
@@ -3350,10 +3350,10 @@ Compiler::SwitchUniqueSuccSet Compiler::GetDescriptorForSwitch(BasicBlock* switc
}
}
-void Compiler::SwitchUniqueSuccSet::UpdateTarget(CompAllocator* alloc,
- BasicBlock* switchBlk,
- BasicBlock* from,
- BasicBlock* to)
+void Compiler::SwitchUniqueSuccSet::UpdateTarget(CompAllocator alloc,
+ BasicBlock* switchBlk,
+ BasicBlock* from,
+ BasicBlock* to)
{
assert(switchBlk->bbJumpKind == BBJ_SWITCH); // Precondition.
unsigned jmpTabCnt = switchBlk->bbJumpSwt->bbsCount;
@@ -19287,7 +19287,7 @@ const char* Compiler::fgProcessEscapes(const char* nameIn, escapeMapping_t* map)
if (subsitutionRequired)
{
- char* newName = (char*)compGetMem(lengthOut, CMK_DebugOnly);
+ char* newName = getAllocator(CMK_DebugOnly).allocate<char>(lengthOut);
char* pDest;
pDest = newName;
pChar = nameIn;
@@ -21159,7 +21159,7 @@ void Compiler::fgDebugCheckFlags(GenTree* tree)
case GT_FIELD_LIST:
if ((op2 != nullptr) && op2->OperIsAnyList())
{
- ArrayStack<GenTree*> stack(this);
+ ArrayStack<GenTree*> stack(getAllocator(CMK_DebugOnly));
while ((tree->gtGetOp2() != nullptr) && tree->gtGetOp2()->OperIsAnyList())
{
stack.Push(tree);
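
`ArrayStack` no longer captures a `Compiler*`; callers hand it the allocator, and with it the memory kind, directly. A short sketch of the resulting pattern, with illustrative names:

```c++
// Sketch: a scratch stack for a tree walk, drawn from the CMK_ArrayStack arena.
ArrayStack<GenTree*> stack(compiler->getAllocator(CMK_ArrayStack));
stack.Push(tree);
while (stack.Height() > 0)
{
    GenTree* node = stack.Pop();
    // ... visit node, push its operands ...
}
```
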
diff --git a/src/jit/gcencode.cpp b/src/jit/gcencode.cpp
index a05e8eb08d..9cd478bb3a 100644
--- a/src/jit/gcencode.cpp
+++ b/src/jit/gcencode.cpp
@@ -1906,7 +1906,7 @@ PendingArgsStack::PendingArgsStack(unsigned maxDepth, Compiler* pComp)
/* Do we need an array as well as the mask ? */
if (pasMaxDepth > BITS_IN_pasMask)
- pasTopArray = (BYTE*)pComp->compGetMem(pasMaxDepth - BITS_IN_pasMask);
+ pasTopArray = pComp->getAllocator(CMK_Unknown).allocate<BYTE>(pasMaxDepth - BITS_IN_pasMask);
}
//-----------------------------------------------------------------------------
diff --git a/src/jit/gentree.cpp b/src/jit/gentree.cpp
index bba58cbecf..08aa2efabc 100644
--- a/src/jit/gentree.cpp
+++ b/src/jit/gentree.cpp
@@ -73,7 +73,7 @@ struct IndentStack
const char** indents;
// Constructor for IndentStack. Uses 'compiler' to determine the mode of printing.
- IndentStack(Compiler* compiler) : stack(compiler)
+ IndentStack(Compiler* compiler) : stack(compiler->getAllocator(CMK_DebugOnly))
{
if (compiler->asciiTrees)
{
@@ -2722,7 +2722,7 @@ unsigned Compiler::gtSetListOrder(GenTree* list, bool isListCallArgs, bool callA
assert((list != nullptr) && list->OperIsAnyList());
assert(!callArgsInRegs || isListCallArgs);
- ArrayStack<GenTree*> listNodes(this);
+ ArrayStack<GenTree*> listNodes(getAllocator(CMK_ArrayStack));
do
{
@@ -17193,7 +17193,7 @@ void GenTree::LabelIndex(Compiler* comp, bool isConst)
FieldSeqNode FieldSeqStore::s_notAField(nullptr, nullptr);
// FieldSeqStore methods.
-FieldSeqStore::FieldSeqStore(CompAllocator* alloc) : m_alloc(alloc), m_canonMap(new (alloc) FieldSeqNodeCanonMap(alloc))
+FieldSeqStore::FieldSeqStore(CompAllocator alloc) : m_alloc(alloc), m_canonMap(new (alloc) FieldSeqNodeCanonMap(alloc))
{
}
@@ -17207,7 +17207,7 @@ FieldSeqNode* FieldSeqStore::CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd)
}
else
{
- res = reinterpret_cast<FieldSeqNode*>(m_alloc->Alloc(sizeof(FieldSeqNode)));
+ res = m_alloc.allocate<FieldSeqNode>(1);
*res = fsn;
m_canonMap->Set(fsn, res);
return res;
@@ -17250,7 +17250,7 @@ FieldSeqNode* FieldSeqStore::Append(FieldSeqNode* a, FieldSeqNode* b)
}
else
{
- res = reinterpret_cast<FieldSeqNode*>(m_alloc->Alloc(sizeof(FieldSeqNode)));
+ res = m_alloc.allocate<FieldSeqNode>(1);
*res = fsn;
m_canonMap->Set(fsn, res);
return res;
diff --git a/src/jit/gentree.h b/src/jit/gentree.h
index e59b79d9c8..3078b77728 100644
--- a/src/jit/gentree.h
+++ b/src/jit/gentree.h
@@ -258,7 +258,7 @@ class FieldSeqStore
{
typedef JitHashTable<FieldSeqNode, /*KeyFuncs*/ FieldSeqNode, FieldSeqNode*> FieldSeqNodeCanonMap;
- CompAllocator* m_alloc;
+ CompAllocator m_alloc;
FieldSeqNodeCanonMap* m_canonMap;
static FieldSeqNode s_notAField; // No value, just exists to provide an address.
@@ -268,7 +268,7 @@ class FieldSeqStore
static int ConstantIndexPseudoFieldStruct;
public:
- FieldSeqStore(CompAllocator* alloc);
+ FieldSeqStore(CompAllocator alloc);
// Returns the (canonical in the store) singleton field sequence for the given handle.
FieldSeqNode* CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd);
diff --git a/src/jit/hostallocator.cpp b/src/jit/hostallocator.cpp
index b737424ee8..c9afd1a2bc 100644
--- a/src/jit/hostallocator.cpp
+++ b/src/jit/hostallocator.cpp
@@ -5,36 +5,14 @@
#include "jitpch.h"
#include "hostallocator.h"
-HostAllocator HostAllocator::s_hostAllocator;
-
-void* HostAllocator::Alloc(size_t size)
+void* HostAllocator::allocateHostMemory(size_t size)
{
assert(g_jitHost != nullptr);
return g_jitHost->allocateMemory(size, false);
}
-void* HostAllocator::ArrayAlloc(size_t elemSize, size_t numElems)
-{
- assert(g_jitHost != nullptr);
-
- ClrSafeInt<size_t> safeElemSize(elemSize);
- ClrSafeInt<size_t> safeNumElems(numElems);
- ClrSafeInt<size_t> size = safeElemSize * safeNumElems;
- if (size.IsOverflow())
- {
- return nullptr;
- }
-
- return g_jitHost->allocateMemory(size.Value(), false);
-}
-
-void HostAllocator::Free(void* p)
+void HostAllocator::freeHostMemory(void* p)
{
assert(g_jitHost != nullptr);
g_jitHost->freeMemory(p, false);
}
-
-HostAllocator* HostAllocator::getHostAllocator()
-{
- return &s_hostAllocator;
-}
diff --git a/src/jit/hostallocator.h b/src/jit/hostallocator.h
index 39a32ef49b..447fc67eb1 100644
--- a/src/jit/hostallocator.h
+++ b/src/jit/hostallocator.h
@@ -7,30 +7,48 @@
class HostAllocator final
{
private:
- static HostAllocator s_hostAllocator;
-
HostAllocator()
{
}
public:
- void* Alloc(size_t size);
+ template <typename T>
+ T* allocate(size_t count)
+ {
+ ClrSafeInt<size_t> safeElemSize(sizeof(T));
+ ClrSafeInt<size_t> safeCount(count);
+ ClrSafeInt<size_t> size = safeElemSize * safeCount;
+ if (size.IsOverflow())
+ {
+ return nullptr;
+ }
+
+ return static_cast<T*>(allocateHostMemory(size.Value()));
+ }
- void* ArrayAlloc(size_t elemSize, size_t numElems);
+ void deallocate(void* p)
+ {
+ freeHostMemory(p);
+ }
- void Free(void* p);
+ static HostAllocator getHostAllocator()
+ {
+ return HostAllocator();
+ }
- static HostAllocator* getHostAllocator();
+private:
+ void* allocateHostMemory(size_t size);
+ void freeHostMemory(void* p);
};
// Global operator new overloads that work with HostAllocator
-inline void* __cdecl operator new(size_t n, HostAllocator* alloc)
+inline void* __cdecl operator new(size_t n, HostAllocator alloc)
{
- return alloc->Alloc(n);
+ return alloc.allocate<char>(n);
}
-inline void* __cdecl operator new[](size_t n, HostAllocator* alloc)
+inline void* __cdecl operator new[](size_t n, HostAllocator alloc)
{
- return alloc->Alloc(n);
+ return alloc.allocate<char>(n);
}
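
The host allocator keeps its own typed `allocate<T>`, now with the overflow check folded in; unlike `CompAllocator`, this memory is process-lived and must be freed explicitly. A usage sketch:

```c++
// Sketch: host (non-arena) memory, e.g. for state that outlives one compilation.
HostAllocator hostAlloc = HostAllocator::getHostAllocator();

int* counters = hostAlloc.allocate<int>(128); // nullptr if 128 * sizeof(int) overflows
if (counters != nullptr)
{
    // ... use counters ...
    hostAlloc.deallocate(counters);
}
```
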
diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp
index 222e787f5f..8ec930385a 100644
--- a/src/jit/importer.cpp
+++ b/src/jit/importer.cpp
@@ -17045,7 +17045,7 @@ void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
{
if (comp->impBlockListNodeFreeList == nullptr)
{
- return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
+ return comp->getAllocator(CMK_BasicBlock).allocate<BlockListNode>(1);
}
else
{
@@ -17272,7 +17272,7 @@ void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
return;
}
- block->bbEntryState = (EntryState*)compGetMem(sizeof(EntryState));
+ block->bbEntryState = getAllocator(CMK_Unknown).allocate<EntryState>(1);
// block->bbEntryState.esRefcount = 1;
@@ -19839,7 +19839,7 @@ CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_MET
// pointer to token into jit-allocated memory.
CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(CORINFO_RESOLVED_TOKEN token)
{
- CORINFO_RESOLVED_TOKEN* memory = (CORINFO_RESOLVED_TOKEN*)compGetMem(sizeof(token));
+ CORINFO_RESOLVED_TOKEN* memory = getAllocator(CMK_Unknown).allocate<CORINFO_RESOLVED_TOKEN>(1);
*memory = token;
return memory;
}
diff --git a/src/jit/jit.h b/src/jit/jit.h
index 0e3ec9295f..f6a575d262 100644
--- a/src/jit/jit.h
+++ b/src/jit/jit.h
@@ -830,110 +830,8 @@ const int MIN_SHORT_AS_INT = -32768;
/*****************************************************************************/
-// CompMemKind values are used to tag memory allocations performed via
-// the compiler's allocator so that the memory usage of various compiler
-// components can be tracked separately (when MEASURE_MEM_ALLOC is defined).
-
-enum CompMemKind
-{
-#define CompMemKindMacro(kind) CMK_##kind,
-#include "compmemkind.h"
- CMK_Count
-};
-
class Compiler;
-// Allows general purpose code (e.g. collection classes) to allocate memory
-// of a pre-determined kind via the compiler's allocator.
-
-class CompAllocator
-{
- Compiler* const m_comp;
-#if MEASURE_MEM_ALLOC
- CompMemKind const m_cmk;
-#endif
-public:
- CompAllocator(Compiler* comp, CompMemKind cmk)
- : m_comp(comp)
-#if MEASURE_MEM_ALLOC
- , m_cmk(cmk)
-#endif
- {
- }
-
- // Allocates a block of memory at least `sz` in size.
- // Zero-length allocations are not allowed.
- inline void* Alloc(size_t sz);
-
- // Allocates a block of memory at least `elems * elemSize` in size.
- // Zero-length allocations are not allowed.
- inline void* ArrayAlloc(size_t elems, size_t elemSize);
-
- // For the compiler's ArenaAllocator, free operations are no-ops.
- void Free(void* p)
- {
- }
-};
-
-// Global operator new overloads that work with CompAllocator
-
-inline void* __cdecl operator new(size_t n, CompAllocator* alloc)
-{
- return alloc->Alloc(n);
-}
-
-inline void* __cdecl operator new[](size_t n, CompAllocator* alloc)
-{
- return alloc->Alloc(n);
-}
-
-// A CompAllocator wrapper that implements IAllocator and allows zero-length
-// memory allocations (the compiler's ArenaAllocator does not support zero-length
-// allocation).
-
-class CompIAllocator : public IAllocator
-{
- CompAllocator* const m_alloc;
- char m_zeroLenAllocTarg;
-
-public:
- CompIAllocator(CompAllocator* alloc) : m_alloc(alloc)
- {
- }
-
- // Allocates a block of memory at least `sz` in size.
- virtual void* Alloc(size_t sz) override
- {
- if (sz == 0)
- {
- return &m_zeroLenAllocTarg;
- }
- else
- {
- return m_alloc->Alloc(sz);
- }
- }
-
- // Allocates a block of memory at least `elems * elemSize` in size.
- virtual void* ArrayAlloc(size_t elemSize, size_t numElems) override
- {
- if ((elemSize == 0) || (numElems == 0))
- {
- return &m_zeroLenAllocTarg;
- }
- else
- {
- return m_alloc->ArrayAlloc(elemSize, numElems);
- }
- }
-
- // Frees the block of memory pointed to by p.
- virtual void Free(void* p) override
- {
- m_alloc->Free(p);
- }
-};
-
class JitTls
{
#ifdef DEBUG
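
This hunk deletes the old pointer-style `CompAllocator` (and the `CompIAllocator` shim) from jit.h; judging by the diffstat and the call sites throughout this diff, the value-type replacement lives in src/jit/alloc.h. Its rough shape, inferred from those call sites (an approximation, not the actual header):

```c++
// Approximate shape, inferred from call sites in this diff; see src/jit/alloc.h.
class CompAllocator
{
    ArenaAllocator* m_arena;
    // The CompMemKind tag is retained only when MEASURE_MEM_ALLOC is defined.

public:
    CompAllocator(ArenaAllocator* arena, CompMemKind cmk);

    template <typename T>
    T* allocate(size_t count); // overflow-checked allocation of count T's

    void deallocate(void* p); // no-op: arena memory is released in bulk
};

// The global placement-new overloads move along with it:
void* operator new(size_t n, CompAllocator alloc);
void* operator new[](size_t n, CompAllocator alloc);
```
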
diff --git a/src/jit/jitexpandarray.h b/src/jit/jitexpandarray.h
index 03d81f50f3..abe086c337 100644
--- a/src/jit/jitexpandarray.h
+++ b/src/jit/jitexpandarray.h
@@ -11,10 +11,10 @@ template <class T>
class JitExpandArray
{
protected:
- CompAllocator* m_alloc; // The allocator object that should be used to allocate members.
- T* m_members; // Pointer to the element array.
- unsigned m_size; // The size of the element array.
- unsigned m_minSize; // The minimum size of the element array.
+ CompAllocator m_alloc; // The allocator object that should be used to allocate members.
+ T* m_members; // Pointer to the element array.
+ unsigned m_size; // The size of the element array.
+ unsigned m_minSize; // The minimum size of the element array.
// Ensure that the element array is large enough for the specified index to be valid.
void EnsureCoversInd(unsigned idx);
@@ -54,7 +54,7 @@ public:
// time an array element (having index `idx`) is accessed, an array
// of size max(`minSize`, `idx`) is allocated.
//
- JitExpandArray(CompAllocator* alloc, unsigned minSize = 1)
+ JitExpandArray(CompAllocator alloc, unsigned minSize = 1)
: m_alloc(alloc), m_members(nullptr), m_size(0), m_minSize(minSize)
{
assert(minSize > 0);
@@ -71,7 +71,7 @@ public:
{
if (m_members != nullptr)
{
- m_alloc->Free(m_members);
+ m_alloc.deallocate(m_members);
}
}
@@ -86,11 +86,11 @@ public:
// This is equivalent to calling the destructor and then constructing
// the array again.
//
- void Init(CompAllocator* alloc, unsigned minSize = 1)
+ void Init(CompAllocator alloc, unsigned minSize = 1)
{
if (m_members != nullptr)
{
- m_alloc->Free(m_members);
+ m_alloc.deallocate(m_members);
}
m_alloc = alloc;
m_members = nullptr;
@@ -220,7 +220,7 @@ public:
// Notes:
// See JitExpandArray constructor notes.
//
- JitExpandArrayStack(CompAllocator* alloc, unsigned minSize = 1) : JitExpandArray<T>(alloc, minSize), m_used(0)
+ JitExpandArrayStack(CompAllocator alloc, unsigned minSize = 1) : JitExpandArray<T>(alloc, minSize), m_used(0)
{
}
@@ -391,18 +391,11 @@ void JitExpandArray<T>::EnsureCoversInd(unsigned idx)
unsigned oldSize = m_size;
T* oldMembers = m_members;
m_size = max(idx + 1, max(m_minSize, m_size * 2));
- if (sizeof(T) < sizeof(int))
- {
- m_members = (T*)m_alloc->ArrayAlloc(ALIGN_UP(m_size * sizeof(T), sizeof(int)), sizeof(BYTE));
- }
- else
- {
- m_members = (T*)m_alloc->ArrayAlloc(m_size, sizeof(T));
- }
+ m_members = m_alloc.allocate<T>(m_size);
if (oldMembers != nullptr)
{
memcpy(m_members, oldMembers, oldSize * sizeof(T));
- m_alloc->Free(oldMembers);
+ m_alloc.deallocate(oldMembers);
}
InitializeRange(oldSize, m_size);
}
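
`EnsureCoversInd` now relies on `allocate<T>` to size and overflow-check the array, replacing the hand-rolled `ArrayAlloc` alignment special case. A usage sketch with illustrative names:

```c++
// Sketch: a self-growing array over a by-value allocator; `comp` is a Compiler*.
JitExpandArray<unsigned> arr(comp->getAllocator(CMK_Generic), /* minSize */ 8);
arr.Set(3, 42);          // first access grows the backing store via allocate<T>
unsigned v = arr.Get(3); // unset elements are default-initialized
```
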
diff --git a/src/jit/jithashtable.h b/src/jit/jithashtable.h
index e47fa04d35..d411a2b870 100644
--- a/src/jit/jithashtable.h
+++ b/src/jit/jithashtable.h
@@ -147,10 +147,8 @@ public:
// JitHashTable always starts out empty, with no allocation overhead.
// Call Reallocate to prime with an initial size if desired.
//
- JitHashTable(Allocator* alloc) : m_alloc(alloc), m_table(nullptr), m_tableSizeInfo(), m_tableCount(0), m_tableMax(0)
+ JitHashTable(Allocator alloc) : m_alloc(alloc), m_table(nullptr), m_tableSizeInfo(), m_tableCount(0), m_tableMax(0)
{
- assert(m_alloc != nullptr);
-
#ifndef __GNUC__ // these crash GCC
static_assert_no_msg(Behavior::s_growth_factor_numerator > Behavior::s_growth_factor_denominator);
static_assert_no_msg(Behavior::s_density_factor_numerator < Behavior::s_density_factor_denominator);
@@ -361,7 +359,7 @@ public:
pN = pNext;
}
}
- m_alloc->Free(m_table);
+ m_alloc.deallocate(m_table);
m_table = nullptr;
m_tableSizeInfo = JitPrimeInfo();
@@ -391,7 +389,7 @@ public:
}
// Get the allocator used by this hash table.
- Allocator* GetAllocator()
+ Allocator GetAllocator()
{
return m_alloc;
}
@@ -513,7 +511,7 @@ public:
JitPrimeInfo newPrime = NextPrime(newTableSize);
newTableSize = newPrime.prime;
- Node** newTable = (Node**)m_alloc->ArrayAlloc(newTableSize, sizeof(Node*));
+ Node** newTable = m_alloc.template allocate<Node*>(newTableSize);
for (unsigned i = 0; i < newTableSize; i++)
{
@@ -539,7 +537,7 @@ public:
if (m_table != nullptr)
{
- m_alloc->Free(m_table);
+ m_alloc.deallocate(m_table);
}
m_table = newTable;
@@ -763,19 +761,19 @@ private:
{
}
- void* operator new(size_t sz, Allocator* alloc)
+ void* operator new(size_t sz, Allocator alloc)
{
- return alloc->Alloc(sz);
+ return alloc.template allocate<unsigned char>(sz);
}
- void operator delete(void* p, Allocator* alloc)
+ void operator delete(void* p, Allocator alloc)
{
- alloc->Free(p);
+ alloc.deallocate(p);
}
};
// Instance members
- Allocator* m_alloc; // Allocator to use in this table.
+ Allocator m_alloc; // Allocator to use in this table.
Node** m_table; // pointer to table
JitPrimeInfo m_tableSizeInfo; // size of table (a prime) and information about it
unsigned m_tableCount; // number of elements in table
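
`JitHashTable` now stores its `Allocator` by value, with `CompAllocator` as the typical argument. A construction sketch, assuming the existing `JitSmallPrimitiveKeyFuncs` helper and illustrative names:

```c++
// Sketch: map from local number to tree, allocator passed by value.
typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, GenTree*> LclNumToTreeMap;

LclNumToTreeMap map(comp->getAllocator(CMK_Generic));
map.Set(lclNum, tree);

GenTree* found;
bool haveIt = map.Lookup(lclNum, &found);
```
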
diff --git a/src/jit/jitstd/allocator.h b/src/jit/jitstd/allocator.h
index f370af8e9d..a6a25deae4 100644
--- a/src/jit/jitstd/allocator.h
+++ b/src/jit/jitstd/allocator.h
@@ -32,7 +32,7 @@ private:
allocator();
public:
- inline allocator(CompAllocator* pAlloc);
+ inline allocator(CompAllocator alloc);
template <typename U>
inline allocator(const allocator<U>& alloc);
@@ -43,31 +43,31 @@ public:
inline allocator& operator=(const allocator<U>& alloc);
private:
- CompAllocator* m_pAlloc;
+ CompAllocator m_alloc;
template <typename U>
friend class allocator;
};
-allocator<void>::allocator(CompAllocator* pAlloc)
- : m_pAlloc(pAlloc)
+allocator<void>::allocator(CompAllocator alloc)
+ : m_alloc(alloc)
{
}
allocator<void>::allocator(const allocator& alloc)
- : m_pAlloc(alloc.m_pAlloc)
+ : m_alloc(alloc.m_alloc)
{
}
template <typename U>
allocator<void>::allocator(const allocator<U>& alloc)
- : m_pAlloc(alloc.m_pAlloc)
+ : m_alloc(alloc.m_alloc)
{
}
template <typename U>
allocator<void>& allocator<void>::operator=(const allocator<U>& alloc)
{
- m_pAlloc = alloc.m_pAlloc;
+ m_alloc = alloc.m_alloc;
return *this;
}
@@ -86,7 +86,7 @@ public:
private:
allocator();
public:
- allocator(CompAllocator* pAlloc);
+ allocator(CompAllocator alloc);
template <typename U>
allocator(const allocator<U>& alloc);
@@ -110,7 +110,7 @@ public:
};
private:
- CompAllocator* m_pAlloc;
+ CompAllocator m_alloc;
template <typename U>
friend class allocator;
};
@@ -122,21 +122,21 @@ namespace jitstd
{
template <typename T>
-allocator<T>::allocator(CompAllocator* pAlloc)
- : m_pAlloc(pAlloc)
+allocator<T>::allocator(CompAllocator alloc)
+ : m_alloc(alloc)
{
}
template <typename T>
template <typename U>
allocator<T>::allocator(const allocator<U>& alloc)
- : m_pAlloc(alloc.m_pAlloc)
+ : m_alloc(alloc.m_alloc)
{
}
template <typename T>
allocator<T>::allocator(const allocator<T>& alloc)
- : m_pAlloc(alloc.m_pAlloc)
+ : m_alloc(alloc.m_alloc)
{
}
@@ -144,7 +144,7 @@ template <typename T>
template <typename U>
allocator<T>& allocator<T>::operator=(const allocator<U>& alloc)
{
- m_pAlloc = alloc.m_pAlloc;
+ m_alloc = alloc.m_alloc;
return *this;
}
@@ -163,7 +163,7 @@ typename allocator<T>::const_pointer allocator<T>::address(const_reference val)
template <typename T>
T* allocator<T>::allocate(size_type count, allocator<void>::const_pointer hint)
{
- return (pointer) m_pAlloc->Alloc(sizeof(value_type) * count);
+ return m_alloc.allocate<value_type>(count);
}
template <typename T>
@@ -175,7 +175,7 @@ void allocator<T>::construct(pointer ptr, const_reference val)
template <typename T>
void allocator<T>::deallocate(pointer ptr, size_type size)
{
- // m_pAlloc->Free(ptr);
+ m_alloc.deallocate(ptr);
}
template <typename T>
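
`jitstd::allocator` now holds a `CompAllocator` by value, and `deallocate` forwards to it instead of staying commented out (harmless either way, since arena frees are no-ops). Because the constructor is implicit, jitstd containers can be built straight from `getAllocator(...)`, as in this sketch:

```c++
// Sketch: a jitstd container constructed directly from a CompAllocator.
jitstd::vector<BasicBlock*> blocks(comp->getAllocator(CMK_Generic));
blocks.push_back(block);
```
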
diff --git a/src/jit/jitstd/utility.h b/src/jit/jitstd/utility.h
index 80ce58e4d7..1930be8fbe 100644
--- a/src/jit/jitstd/utility.h
+++ b/src/jit/jitstd/utility.h
@@ -45,19 +45,6 @@ namespace utility
};
- // Helper to allocate objects of any type, given an allocator of void type.
- //
- // @param alloc An allocator of void type used to create an allocator of type T.
- // @param count The number of objects of type T that need to be allocated.
- //
- // @return A pointer to an object or an array of objects that was allocated.
- template <typename T>
- inline
- static T* allocate(jitstd::allocator<void>& alloc, size_t count = 1)
- {
- return jitstd::allocator<T>(alloc).allocate(count);
- }
-
// Ensures that "wset" is the union of the initial state of "wset" and "rset".
// Elements from "rset" that were not in "wset" are added to "cset."
template <typename Set>
diff --git a/src/jit/lclvars.cpp b/src/jit/lclvars.cpp
index 4b31bd6a66..0e141fe549 100644
--- a/src/jit/lclvars.cpp
+++ b/src/jit/lclvars.cpp
@@ -214,7 +214,7 @@ void Compiler::lvaInitTypeRef()
lvaTableCnt = 16;
}
- lvaTable = (LclVarDsc*)compGetMemArray(lvaTableCnt, sizeof(*lvaTable), CMK_LvaTable);
+ lvaTable = getAllocator(CMK_LvaTable).allocate<LclVarDsc>(lvaTableCnt);
size_t tableSize = lvaTableCnt * sizeof(*lvaTable);
memset(lvaTable, 0, tableSize);
for (unsigned i = 0; i < lvaTableCnt; i++)
@@ -2314,7 +2314,7 @@ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool
size_t lvSize = varDsc->lvSize();
assert((lvSize % TARGET_POINTER_SIZE) ==
0); // The struct needs to be a multiple of TARGET_POINTER_SIZE bytes for getClassGClayout() to be valid.
- varDsc->lvGcLayout = (BYTE*)compGetMem((lvSize / TARGET_POINTER_SIZE) * sizeof(BYTE), CMK_LvaTable);
+ varDsc->lvGcLayout = getAllocator(CMK_LvaTable).allocate<BYTE>(lvSize / TARGET_POINTER_SIZE);
unsigned numGCVars;
var_types simdBaseType = TYP_UNKNOWN;
varDsc->lvType = impNormStructType(typeHnd, varDsc->lvGcLayout, &numGCVars, &simdBaseType);
diff --git a/src/jit/lir.cpp b/src/jit/lir.cpp
index 2d45af4177..0ba18a2d6d 100644
--- a/src/jit/lir.cpp
+++ b/src/jit/lir.cpp
@@ -1435,7 +1435,7 @@ public:
CheckLclVarSemanticsHelper(Compiler* compiler,
const LIR::Range* range,
SmallHashTable<GenTree*, bool, 32U>& unusedDefs)
- : compiler(compiler), range(range), unusedDefs(unusedDefs), unusedLclVarReads(compiler)
+ : compiler(compiler), range(range), unusedDefs(unusedDefs), unusedLclVarReads(compiler->getAllocator())
{
}
@@ -1572,7 +1572,7 @@ bool LIR::Range::CheckLIR(Compiler* compiler, bool checkUnusedValues) const
slowNode = slowNode->gtNext;
}
- SmallHashTable<GenTree*, bool, 32> unusedDefs(compiler);
+ SmallHashTable<GenTree*, bool, 32> unusedDefs(compiler->getAllocator());
bool pastPhis = false;
GenTree* prev = nullptr;
diff --git a/src/jit/loopcloning.cpp b/src/jit/loopcloning.cpp
index cc988b14e8..f741ff81b7 100644
--- a/src/jit/loopcloning.cpp
+++ b/src/jit/loopcloning.cpp
@@ -789,7 +789,7 @@ void LC_Deref::DeriveLevelConditions(JitExpandArrayStack<JitExpandArrayStack<LC_
// Return Values:
// None
//
-void LC_Deref::EnsureChildren(CompAllocator* alloc)
+void LC_Deref::EnsureChildren(CompAllocator alloc)
{
if (children == nullptr)
{
diff --git a/src/jit/loopcloning.h b/src/jit/loopcloning.h
index b5986510b3..cd9aa9f946 100644
--- a/src/jit/loopcloning.h
+++ b/src/jit/loopcloning.h
@@ -105,7 +105,7 @@ struct ArrIndex
unsigned rank; // Rank of the array
BasicBlock* useBlock; // Block where the [] occurs
- ArrIndex(CompAllocator* alloc) : arrLcl(BAD_VAR_NUM), indLcls(alloc), bndsChks(alloc), rank(0), useBlock(nullptr)
+ ArrIndex(CompAllocator alloc) : arrLcl(BAD_VAR_NUM), indLcls(alloc), bndsChks(alloc), rank(0), useBlock(nullptr)
{
}
@@ -186,7 +186,7 @@ struct LcMdArrayOptInfo : public LcOptInfo
{
}
- ArrIndex* GetArrIndexForDim(CompAllocator* alloc)
+ ArrIndex* GetArrIndexForDim(CompAllocator alloc)
{
if (index == nullptr)
{
@@ -513,7 +513,7 @@ struct LC_Deref
unsigned Lcl();
bool HasChildren();
- void EnsureChildren(CompAllocator* alloc);
+ void EnsureChildren(CompAllocator alloc);
static LC_Deref* Find(JitExpandArrayStack<LC_Deref*>* children, unsigned lcl);
void DeriveLevelConditions(JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* len);
@@ -560,7 +560,7 @@ struct LC_Deref
*/
struct LoopCloneContext
{
- CompAllocator* alloc; // The allocator
+ CompAllocator alloc; // The allocator
JitExpandArrayStack<LcOptInfo*>** optInfo; // The array of optimization opportunities found in each loop. (loop x
// optimization-opportunities)
JitExpandArrayStack<LC_Condition>** conditions; // The array of conditions that influence which path to take for
@@ -572,7 +572,7 @@ struct LoopCloneContext
// conditions for
// each loop. (loop x level x conditions)
- LoopCloneContext(unsigned loopCount, CompAllocator* alloc) : alloc(alloc)
+ LoopCloneContext(unsigned loopCount, CompAllocator alloc) : alloc(alloc)
{
optInfo = new (alloc) JitExpandArrayStack<LcOptInfo*>*[loopCount];
conditions = new (alloc) JitExpandArrayStack<LC_Condition>*[loopCount];
diff --git a/src/jit/lower.cpp b/src/jit/lower.cpp
index b477e20e82..8859c2fe62 100644
--- a/src/jit/lower.cpp
+++ b/src/jit/lower.cpp
@@ -1935,7 +1935,7 @@ void Lowering::LowerFastTailCall(GenTreeCall* call)
// call could over-write the stack arg that is setup earlier.
GenTree* firstPutArgStk = nullptr;
GenTreeArgList* args;
- ArrayStack<GenTree*> putargs(comp);
+ ArrayStack<GenTree*> putargs(comp->getAllocator(CMK_ArrayStack));
for (args = call->gtCallArgs; args; args = args->Rest())
{
diff --git a/src/jit/lsra.cpp b/src/jit/lsra.cpp
index a4ce963f28..01192b26e3 100644
--- a/src/jit/lsra.cpp
+++ b/src/jit/lsra.cpp
@@ -605,11 +605,8 @@ LinearScanInterface* getLinearScanAllocator(Compiler* comp)
LinearScan::LinearScan(Compiler* theCompiler)
: compiler(theCompiler)
-#if MEASURE_MEM_ALLOC
- , lsraAllocator(nullptr)
-#endif // MEASURE_MEM_ALLOC
- , intervals(LinearScanMemoryAllocatorInterval(theCompiler))
- , refPositions(LinearScanMemoryAllocatorRefPosition(theCompiler))
+ , intervals(theCompiler->getAllocator(CMK_LSRA_Interval))
+ , refPositions(theCompiler->getAllocator(CMK_LSRA_RefPosition))
, listNodePool(theCompiler)
{
#ifdef DEBUG
diff --git a/src/jit/lsra.h b/src/jit/lsra.h
index 84ff2e2cef..e646294f3f 100644
--- a/src/jit/lsra.h
+++ b/src/jit/lsra.h
@@ -433,48 +433,10 @@ inline bool RefTypeIsDef(RefType refType)
typedef regNumberSmall* VarToRegMap;
-template <typename ElementType, CompMemKind MemKind>
-class ListElementAllocator
-{
-private:
- template <typename U, CompMemKind CMK>
- friend class ListElementAllocator;
-
- Compiler* m_compiler;
-
-public:
- ListElementAllocator(Compiler* compiler) : m_compiler(compiler)
- {
- }
-
- template <typename U>
- ListElementAllocator(const ListElementAllocator<U, MemKind>& other) : m_compiler(other.m_compiler)
- {
- }
-
- ElementType* allocate(size_t count)
- {
- return reinterpret_cast<ElementType*>(m_compiler->compGetMem(sizeof(ElementType) * count, MemKind));
- }
-
- void deallocate(ElementType* pointer, size_t count)
- {
- }
-
- template <typename U>
- struct rebind
- {
- typedef ListElementAllocator<U, MemKind> allocator;
- };
-};
-
-typedef ListElementAllocator<Interval, CMK_LSRA_Interval> LinearScanMemoryAllocatorInterval;
-typedef ListElementAllocator<RefPosition, CMK_LSRA_RefPosition> LinearScanMemoryAllocatorRefPosition;
-
-typedef jitstd::list<Interval, LinearScanMemoryAllocatorInterval> IntervalList;
-typedef jitstd::list<RefPosition, LinearScanMemoryAllocatorRefPosition> RefPositionList;
-typedef jitstd::list<RefPosition, LinearScanMemoryAllocatorRefPosition>::iterator RefPositionIterator;
-typedef jitstd::list<RefPosition, LinearScanMemoryAllocatorRefPosition>::reverse_iterator RefPositionReverseIterator;
+typedef jitstd::list<Interval> IntervalList;
+typedef jitstd::list<RefPosition> RefPositionList;
+typedef jitstd::list<RefPosition>::iterator RefPositionIterator;
+typedef jitstd::list<RefPosition>::reverse_iterator RefPositionReverseIterator;
class Referenceable
{
@@ -1399,21 +1361,9 @@ private:
Compiler* compiler;
private:
-#if MEASURE_MEM_ALLOC
- CompAllocator* lsraAllocator;
-#endif
-
- CompAllocator* getAllocator(Compiler* comp)
+ CompAllocator getAllocator(Compiler* comp)
{
-#if MEASURE_MEM_ALLOC
- if (lsraAllocator == nullptr)
- {
- lsraAllocator = new (comp, CMK_LSRA) CompAllocator(comp, CMK_LSRA);
- }
- return lsraAllocator;
-#else
- return comp->getAllocator();
-#endif
+ return comp->getAllocator(CMK_LSRA);
}
#ifdef DEBUG
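
With `CompAllocator` serving as the default allocator for `jitstd::list`, the per-kind `ListElementAllocator` template becomes dead weight; the memory kind is now picked where the list is constructed, mirroring the lsra.cpp hunk above:

```c++
// Sketch mirroring the LinearScan constructor above.
IntervalList    intervals(theCompiler->getAllocator(CMK_LSRA_Interval));
RefPositionList refPositions(theCompiler->getAllocator(CMK_LSRA_RefPosition));
```
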
diff --git a/src/jit/lsrabuild.cpp b/src/jit/lsrabuild.cpp
index 0d0efa9efa..3cd3008d5b 100644
--- a/src/jit/lsrabuild.cpp
+++ b/src/jit/lsrabuild.cpp
@@ -83,9 +83,7 @@ RefInfoListNodePool::RefInfoListNodePool(Compiler* compiler, unsigned preallocat
{
if (preallocate > 0)
{
- size_t preallocateSize = sizeof(RefInfoListNode) * preallocate;
- RefInfoListNode* preallocatedNodes =
- static_cast<RefInfoListNode*>(compiler->compGetMem(preallocateSize, CMK_LSRA));
+ RefInfoListNode* preallocatedNodes = compiler->getAllocator(CMK_LSRA).allocate<RefInfoListNode>(preallocate);
RefInfoListNode* head = preallocatedNodes;
head->m_next = nullptr;
@@ -119,7 +117,7 @@ RefInfoListNode* RefInfoListNodePool::GetNode(RefPosition* r, GenTree* t, unsign
RefInfoListNode* head = m_freeList;
if (head == nullptr)
{
- head = reinterpret_cast<RefInfoListNode*>(m_compiler->compGetMem(sizeof(RefInfoListNode)));
+ head = m_compiler->getAllocator(CMK_LSRA).allocate<RefInfoListNode>(1);
}
else
{
diff --git a/src/jit/morph.cpp b/src/jit/morph.cpp
index 4dcc591f9c..6fd800eadc 100644
--- a/src/jit/morph.cpp
+++ b/src/jit/morph.cpp
@@ -2787,7 +2787,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
ArrayStack<NonStandardArg> args;
public:
- NonStandardArgs(Compiler* compiler) : args(compiler, 3) // We will have at most 3 non-standard arguments
+ NonStandardArgs(CompAllocator alloc) : args(alloc, 3) // We will have at most 3 non-standard arguments
{
}
@@ -2874,7 +2874,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
args.IndexRef(index).node = node;
}
- } nonStandardArgs(this);
+ } nonStandardArgs(getAllocator(CMK_ArrayStack));
// Count of args. On first morph, this is counted before we've filled in the arg table.
// On remorph, we grab it from the arg table.
@@ -5122,7 +5122,7 @@ GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntry
// We need to propagate any GTF_ALL_EFFECT flags from the end of the list back to the beginning.
// This is verified in fgDebugCheckFlags().
- ArrayStack<GenTree*> stack(this);
+ ArrayStack<GenTree*> stack(getAllocator(CMK_ArrayStack));
GenTree* tree;
for (tree = newArg; (tree->gtGetOp2() != nullptr) && tree->gtGetOp2()->OperIsFieldList(); tree = tree->gtGetOp2())
{
@@ -9872,7 +9872,7 @@ GenTree* Compiler::fgMorphBlkNode(GenTree* tree, bool isDest)
addr = tree;
GenTree* effectiveVal = tree->gtEffectiveVal();
- GenTreePtrStack commas(this);
+ GenTreePtrStack commas(getAllocator(CMK_ArrayStack));
for (GenTree* comma = tree; comma != nullptr && comma->gtOper == GT_COMMA; comma = comma->gtGetOp2())
{
commas.Push(comma);
@@ -13639,7 +13639,7 @@ DONE_MORPHING_CHILDREN:
// Perform the transform ADDR(COMMA(x, ..., z)) == COMMA(x, ..., ADDR(z)).
// (Be sure to mark "z" as an l-value...)
- GenTreePtrStack commas(this);
+ GenTreePtrStack commas(getAllocator(CMK_ArrayStack));
for (GenTree* comma = op1; comma != nullptr && comma->gtOper == GT_COMMA; comma = comma->gtGetOp2())
{
commas.Push(comma);
@@ -18540,7 +18540,7 @@ void Compiler::fgMarkAddressExposedLocals()
for (stmt = block->bbTreeList; stmt; stmt = stmt->gtNext)
{
// Call Compiler::fgMarkAddrTakenLocalsCB on each node
- AXCStack stk(this);
+ AXCStack stk(getAllocator(CMK_ArrayStack));
stk.Push(AXC_None); // We start in neither an addr or ind context.
fgWalkTree(&stmt->gtStmt.gtStmtExpr, fgMarkAddrTakenLocalsPreCB, fgMarkAddrTakenLocalsPostCB, &stk);
}
@@ -18746,7 +18746,7 @@ bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, GenTree* st
// Since we generated a new address node which didn't exist before,
// we should expose this address manually here.
- AXCStack stk(this);
+ AXCStack stk(getAllocator(CMK_ArrayStack));
stk.Push(AXC_None);
fgWalkTree(&stmt->gtStmt.gtStmtExpr, fgMarkAddrTakenLocalsPreCB, fgMarkAddrTakenLocalsPostCB, &stk);
diff --git a/src/jit/rangecheck.cpp b/src/jit/rangecheck.cpp
index d80576d97c..0c1107c9aa 100644
--- a/src/jit/rangecheck.cpp
+++ b/src/jit/rangecheck.cpp
@@ -23,7 +23,7 @@ RangeCheck::RangeCheck(Compiler* pCompiler)
, m_pDefTable(nullptr)
#endif
, m_pCompiler(pCompiler)
- , m_alloc(pCompiler, CMK_RangeCheck)
+ , m_alloc(pCompiler->getAllocator(CMK_RangeCheck))
, m_nVisitBudget(MAX_VISIT_BUDGET)
{
}
@@ -38,7 +38,7 @@ RangeCheck::RangeMap* RangeCheck::GetRangeMap()
{
if (m_pRangeMap == nullptr)
{
- m_pRangeMap = new (&m_alloc) RangeMap(&m_alloc);
+ m_pRangeMap = new (m_alloc) RangeMap(m_alloc);
}
return m_pRangeMap;
}
@@ -48,7 +48,7 @@ RangeCheck::OverflowMap* RangeCheck::GetOverflowMap()
{
if (m_pOverflowMap == nullptr)
{
- m_pOverflowMap = new (&m_alloc) OverflowMap(&m_alloc);
+ m_pOverflowMap = new (m_alloc) OverflowMap(m_alloc);
}
return m_pOverflowMap;
}
@@ -256,7 +256,7 @@ void RangeCheck::OptimizeRangeCheck(BasicBlock* block, GenTree* stmt, GenTree* t
GetRangeMap()->RemoveAll();
GetOverflowMap()->RemoveAll();
- m_pSearchPath = new (&m_alloc) SearchPath(&m_alloc);
+ m_pSearchPath = new (m_alloc) SearchPath(m_alloc);
// Get the range for this index.
Range range = GetRange(block, treeIndex, false DEBUGARG(0));
@@ -517,7 +517,7 @@ void RangeCheck::SetDef(UINT64 hash, Location* loc)
{
if (m_pDefTable == nullptr)
{
- m_pDefTable = new (&m_alloc) VarToLocMap(&m_alloc);
+ m_pDefTable = new (m_alloc) VarToLocMap(m_alloc);
}
#ifdef DEBUG
Location* loc2;
@@ -1186,7 +1186,7 @@ Range RangeCheck::ComputeRange(BasicBlock* block, GenTree* expr, bool monotonic
range = Range(Limit(Limit::keUnknown));
}
- GetRangeMap()->Set(expr, new (&m_alloc) Range(range));
+ GetRangeMap()->Set(expr, new (m_alloc) Range(range));
m_pSearchPath->Remove(expr);
return range;
}
@@ -1254,7 +1254,7 @@ void RangeCheck::MapStmtDefs(const Location& loc)
// To avoid ind(addr) use asgs
if (loc.parent->OperIsAssignment())
{
- SetDef(HashCode(lclNum, ssaNum), new (&m_alloc) Location(loc));
+ SetDef(HashCode(lclNum, ssaNum), new (m_alloc) Location(loc));
}
}
}
@@ -1263,7 +1263,7 @@ void RangeCheck::MapStmtDefs(const Location& loc)
{
if (loc.parent->OperGet() == GT_ASG)
{
- SetDef(HashCode(lclNum, ssaNum), new (&m_alloc) Location(loc));
+ SetDef(HashCode(lclNum, ssaNum), new (m_alloc) Location(loc));
}
}
}
diff --git a/src/jit/rangecheck.h b/src/jit/rangecheck.h
index 8b97308502..35372a1d24 100644
--- a/src/jit/rangecheck.h
+++ b/src/jit/rangecheck.h
@@ -175,10 +175,10 @@ struct Limit
return false;
}
#ifdef DEBUG
- const char* ToString(CompAllocator* alloc)
+ const char* ToString(CompAllocator alloc)
{
unsigned size = 64;
- char* buf = (char*)alloc->Alloc(size);
+ char* buf = alloc.allocate<char>(size);
switch (type)
{
case keUndef:
@@ -231,10 +231,10 @@ struct Range
}
#ifdef DEBUG
- char* ToString(CompAllocator* alloc)
+ char* ToString(CompAllocator alloc)
{
size_t size = 64;
- char* buf = (char*)alloc->Alloc(size);
+ char* buf = alloc.allocate<char>(size);
sprintf_s(buf, size, "<%s, %s>", lLimit.ToString(alloc), uLimit.ToString(alloc));
return buf;
}
diff --git a/src/jit/regset.cpp b/src/jit/regset.cpp
index 1ff5ee023f..7896fa25b8 100644
--- a/src/jit/regset.cpp
+++ b/src/jit/regset.cpp
@@ -1065,7 +1065,7 @@ RegSet::SpillDsc* RegSet::SpillDsc::alloc(Compiler* pComp, RegSet* regSet, var_t
}
else
{
- spill = (RegSet::SpillDsc*)pComp->compGetMem(sizeof(SpillDsc));
+ spill = pComp->getAllocator().allocate<SpillDsc>(1);
}
return spill;
}
diff --git a/src/jit/scopeinfo.cpp b/src/jit/scopeinfo.cpp
index 5a3f704cfc..366a060d54 100644
--- a/src/jit/scopeinfo.cpp
+++ b/src/jit/scopeinfo.cpp
@@ -160,7 +160,7 @@ CodeGen::siScope* CodeGen::siNewScope(unsigned LVnum, unsigned varNum)
siEndTrackedScope(varIndex);
}
- siScope* newScope = (siScope*)compiler->compGetMem(sizeof(*newScope), CMK_SiScope);
+ siScope* newScope = compiler->getAllocator(CMK_SiScope).allocate<siScope>(1);
newScope->scStartLoc.CaptureLocation(getEmitter());
assert(newScope->scStartLoc.Valid());
@@ -825,7 +825,7 @@ void CodeGen::siDispOpenScopes()
CodeGen::psiScope* CodeGen::psiNewPrologScope(unsigned LVnum, unsigned slotNum)
{
- psiScope* newScope = (psiScope*)compiler->compGetMem(sizeof(*newScope), CMK_SiScope);
+ psiScope* newScope = compiler->getAllocator(CMK_SiScope).allocate<psiScope>(1);
newScope->scStartLoc.CaptureLocation(getEmitter());
assert(newScope->scStartLoc.Valid());
diff --git a/src/jit/smallhash.h b/src/jit/smallhash.h
index 65c5eeda65..5900d286b6 100644
--- a/src/jit/smallhash.h
+++ b/src/jit/smallhash.h
@@ -5,11 +5,6 @@
#ifndef _SMALLHASHTABLE_H_
#define _SMALLHASHTABLE_H_
-// Since compiler depends on valuenum which depends on smallhash, forward declare
-// a wrapper for comp->compGetMem here (implemented in compiler.hpp) that can be used below.
-class Compiler;
-void* compGetMem(Compiler* comp, size_t sz);
-
// genLog2 is defined in compiler.hpp
unsigned genLog2(unsigned value);
@@ -109,7 +104,7 @@ struct HashTableInfo<unsigned>
// TKey - The type of the table's keys.
// TValue - The type of the table's values.
// TKeyInfo - A type that conforms to the HashTableInfo<TKey> concept.
-template <typename TKey, typename TValue, typename TKeyInfo = HashTableInfo<TKey>>
+template <typename TKey, typename TValue, typename TKeyInfo = HashTableInfo<TKey>, typename TAllocator = CompAllocator>
class HashTableBase
{
friend class KeyValuePair;
@@ -151,10 +146,10 @@ protected:
};
private:
- Compiler* m_compiler; // The compiler context to use for allocations.
- Bucket* m_buckets; // The bucket array.
- unsigned m_numBuckets; // The number of buckets in the bucket array.
- unsigned m_numFullBuckets; // The number of occupied buckets.
+ TAllocator m_alloc; // The memory allocator.
+ Bucket* m_buckets; // The bucket array.
+ unsigned m_numBuckets; // The number of buckets in the bucket array.
+ unsigned m_numFullBuckets; // The number of occupied buckets.
//------------------------------------------------------------------------
// HashTableBase::Insert: inserts a key-value pair into a bucket array.
@@ -302,11 +297,8 @@ private:
Bucket* currentBuckets = m_buckets;
unsigned newNumBuckets = m_numBuckets == 0 ? InitialNumBuckets : m_numBuckets * 2;
- size_t allocSize = sizeof(Bucket) * newNumBuckets;
- assert((sizeof(Bucket) * m_numBuckets) < allocSize);
-
- auto* newBuckets = reinterpret_cast<Bucket*>(compGetMem(m_compiler, allocSize));
- memset(newBuckets, 0, allocSize);
+ Bucket* newBuckets = m_alloc.template allocate<Bucket>(newNumBuckets);
+ memset(newBuckets, 0, sizeof(Bucket) * newNumBuckets);
for (unsigned currentIndex = 0; currentIndex < m_numBuckets; currentIndex++)
{
@@ -326,11 +318,9 @@ private:
}
protected:
- HashTableBase(Compiler* compiler, Bucket* buckets, unsigned numBuckets)
- : m_compiler(compiler), m_buckets(buckets), m_numBuckets(numBuckets), m_numFullBuckets(0)
+ HashTableBase(TAllocator alloc, Bucket* buckets, unsigned numBuckets)
+ : m_alloc(alloc), m_buckets(buckets), m_numBuckets(numBuckets), m_numFullBuckets(0)
{
- assert(compiler != nullptr);
-
if (numBuckets > 0)
{
assert((numBuckets & (numBuckets - 1)) == 0); // Size must be a power of 2
@@ -599,10 +589,10 @@ public:
//------------------------------------------------------------------------
// HashTable: a simple subclass of `HashTableBase` that always uses heap
// storage for its bucket array.
-template <typename TKey, typename TValue, typename TKeyInfo = HashTableInfo<TKey>>
-class HashTable final : public HashTableBase<TKey, TValue, TKeyInfo>
+template <typename TKey, typename TValue, typename TKeyInfo = HashTableInfo<TKey>, typename TAllocator = CompAllocator>
+class HashTable final : public HashTableBase<TKey, TValue, TKeyInfo, TAllocator>
{
- typedef HashTableBase<TKey, TValue, TKeyInfo> TBase;
+ typedef HashTableBase<TKey, TValue, TKeyInfo, TAllocator> TBase;
static unsigned RoundUp(unsigned initialSize)
{
@@ -610,15 +600,12 @@ class HashTable final : public HashTableBase<TKey, TValue, TKeyInfo>
}
public:
- HashTable(Compiler* compiler) : TBase(compiler, nullptr, 0)
+ HashTable(TAllocator alloc) : TBase(alloc, nullptr, 0)
{
}
- HashTable(Compiler* compiler, unsigned initialSize)
- : TBase(compiler,
- reinterpret_cast<typename TBase::Bucket*>(
- compGetMem(compiler, RoundUp(initialSize) * sizeof(typename TBase::Bucket))),
- RoundUp(initialSize))
+ HashTable(TAllocator alloc, unsigned initialSize)
+ : TBase(alloc, alloc.template allocate<TBase::Bucket>(RoundUp(initialSize)), RoundUp(initialSize))
{
}
};
@@ -630,10 +617,14 @@ public:
// the map at any given time falls below a certain
// threshold. Switches to heap storage once the initial
// inline storage is exhausted.
-template <typename TKey, typename TValue, unsigned NumInlineBuckets = 8, typename TKeyInfo = HashTableInfo<TKey>>
-class SmallHashTable final : public HashTableBase<TKey, TValue, TKeyInfo>
+template <typename TKey,
+ typename TValue,
+ unsigned NumInlineBuckets = 8,
+ typename TKeyInfo = HashTableInfo<TKey>,
+ typename TAllocator = CompAllocator>
+class SmallHashTable final : public HashTableBase<TKey, TValue, TKeyInfo, TAllocator>
{
- typedef HashTableBase<TKey, TValue, TKeyInfo> TBase;
+ typedef HashTableBase<TKey, TValue, TKeyInfo, TAllocator> TBase;
enum : unsigned
{
@@ -643,7 +634,7 @@ class SmallHashTable final : public HashTableBase<TKey, TValue, TKeyInfo>
typename TBase::Bucket m_inlineBuckets[RoundedNumInlineBuckets];
public:
- SmallHashTable(Compiler* compiler) : TBase(compiler, m_inlineBuckets, RoundedNumInlineBuckets)
+ SmallHashTable(TAllocator alloc) : TBase(alloc, m_inlineBuckets, RoundedNumInlineBuckets)
{
}
};
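
`HashTableBase` and its subclasses take the allocator as a template parameter defaulting to `CompAllocator`, which removes the `compGetMem` forward-declaration hack. A sketch of the common case, matching the lir.cpp usage earlier in this diff:

```c++
// Sketch: 32 inline buckets, arena-backed growth once they fill up.
SmallHashTable<GenTree*, bool, 32> unusedDefs(compiler->getAllocator());
unusedDefs.AddOrUpdate(tree, true);

bool value;
bool found = unusedDefs.TryGetValue(tree, &value);
```
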
diff --git a/src/jit/ssabuilder.cpp b/src/jit/ssabuilder.cpp
index 9845aa98fe..fbd7d90e34 100644
--- a/src/jit/ssabuilder.cpp
+++ b/src/jit/ssabuilder.cpp
@@ -134,7 +134,7 @@ void Compiler::fgResetForSsa()
*/
SsaBuilder::SsaBuilder(Compiler* pCompiler)
: m_pCompiler(pCompiler)
- , m_allocator(pCompiler, CMK_SSA)
+ , m_allocator(pCompiler->getAllocator(CMK_SSA))
, m_visitedTraits(0, pCompiler) // at this point we do not know the size, SetupBBRoot can add a block
#ifdef SSA_FEATURE_DOMARR
, m_pDomPreOrder(nullptr)
@@ -193,8 +193,7 @@ int SsaBuilder::TopologicalSort(BasicBlock** postOrder, int count)
BasicBlock* block = comp->fgFirstBB;
BitVecOps::AddElemD(&m_visitedTraits, m_visited, block->bbNum);
- ArrayStack<AllSuccessorEnumerator> blocks(comp);
-
+ ArrayStack<AllSuccessorEnumerator> blocks(m_allocator);
blocks.Emplace(comp, block);
DumpBlockAndSuccessors(comp, block);
@@ -538,7 +537,7 @@ void SsaBuilder::ComputeDominanceFrontiers(BasicBlock** postOrder, int count, Bl
{
DBG_SSA_JITDUMP(" Adding BB%02u to dom frontier of pred dom BB%02u.\n", block->bbNum, b1->bbNum);
- BlkVector& b1DF = *mapDF->Emplace(b1, &m_allocator);
+ BlkVector& b1DF = *mapDF->Emplace(b1, m_allocator);
// It's possible to encounter the same DF multiple times, ensure that we don't add duplicates.
if (b1DF.empty() || (b1DF.back() != block))
{
@@ -692,12 +691,12 @@ void SsaBuilder::InsertPhiFunctions(BasicBlock** postOrder, int count)
EndPhase(PHASE_BUILD_SSA_LIVENESS);
// Compute dominance frontier.
- BlkToBlkVectorMap mapDF(&m_allocator);
+ BlkToBlkVectorMap mapDF(m_allocator);
ComputeDominanceFrontiers(postOrder, count, &mapDF);
EndPhase(PHASE_BUILD_SSA_DF);
// Use the same IDF vector for all blocks to avoid unnecessary memory allocations
- BlkVector blockIDF(&m_allocator);
+ BlkVector blockIDF(m_allocator);
JITDUMP("Inserting phi functions:\n");
@@ -1614,7 +1613,7 @@ void SsaBuilder::RenameVariables(BlkToBlkVectorMap* domTree, SsaRenameState* pRe
};
typedef jitstd::vector<BlockWork> BlockWorkStack;
- BlockWorkStack* blocksToDo = new (&m_allocator) BlockWorkStack(&m_allocator);
+ BlockWorkStack* blocksToDo = new (m_allocator) BlockWorkStack(m_allocator);
blocksToDo->push_back(BlockWork(m_pCompiler->fgFirstBB)); // Probably have to include other roots of dom tree.
while (blocksToDo->size() != 0)
@@ -1739,7 +1738,7 @@ void SsaBuilder::Build()
if (blockCount > DEFAULT_MIN_OPTS_BB_COUNT)
{
- postOrder = new (&m_allocator) BasicBlock*[blockCount];
+ postOrder = new (m_allocator) BasicBlock*[blockCount];
}
else
{
@@ -1758,7 +1757,7 @@ void SsaBuilder::Build()
ComputeImmediateDom(postOrder, count);
// Compute the dominator tree.
- BlkToBlkVectorMap* domTree = new (&m_allocator) BlkToBlkVectorMap(&m_allocator);
+ BlkToBlkVectorMap* domTree = new (m_allocator) BlkToBlkVectorMap(m_allocator);
ComputeDominators(postOrder, count, domTree);
EndPhase(PHASE_BUILD_SSA_DOMS);
@@ -1766,8 +1765,8 @@ void SsaBuilder::Build()
InsertPhiFunctions(postOrder, count);
// Rename local variables and collect UD information for each ssa var.
- SsaRenameState* pRenameState = new (&m_allocator)
- SsaRenameState(&m_allocator, m_pCompiler->lvaCount, m_pCompiler->byrefStatesMatchGcHeapStates);
+ SsaRenameState* pRenameState =
+ new (m_allocator) SsaRenameState(m_allocator, m_pCompiler->lvaCount, m_pCompiler->byrefStatesMatchGcHeapStates);
RenameVariables(domTree, pRenameState);
EndPhase(PHASE_BUILD_SSA_RENAME);
diff --git a/src/jit/ssarenamestate.cpp b/src/jit/ssarenamestate.cpp
index 4ccac05a48..9ec0770199 100644
--- a/src/jit/ssarenamestate.cpp
+++ b/src/jit/ssarenamestate.cpp
@@ -28,9 +28,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*
* @param alloc The allocator class used to allocate jitstd data.
*/
-SsaRenameState::SsaRenameState(const jitstd::allocator<int>& alloc,
- unsigned lvaCount,
- bool byrefStatesMatchGcHeapStates)
+SsaRenameState::SsaRenameState(CompAllocator alloc, unsigned lvaCount, bool byrefStatesMatchGcHeapStates)
: counts(nullptr)
, stacks(nullptr)
, definedLocs(alloc)
@@ -51,7 +49,7 @@ void SsaRenameState::EnsureCounts()
{
if (counts == nullptr)
{
- counts = jitstd::utility::allocate<unsigned>(m_alloc, lvaCount);
+ counts = m_alloc.allocate<unsigned>(lvaCount);
for (unsigned i = 0; i < lvaCount; ++i)
{
counts[i] = SsaConfig::FIRST_SSA_NUM;
@@ -68,7 +66,7 @@ void SsaRenameState::EnsureStacks()
{
if (stacks == nullptr)
{
- stacks = jitstd::utility::allocate<Stack*>(m_alloc, lvaCount);
+ stacks = m_alloc.allocate<Stack*>(lvaCount);
for (unsigned i = 0; i < lvaCount; ++i)
{
stacks[i] = nullptr;
@@ -141,7 +139,7 @@ void SsaRenameState::Push(BasicBlock* bb, unsigned lclNum, unsigned count)
if (stack == nullptr)
{
DBG_SSA_JITDUMP("\tCreating a new stack\n");
- stack = stacks[lclNum] = new (jitstd::utility::allocate<Stack>(m_alloc), jitstd::placement_t()) Stack(m_alloc);
+ stack = stacks[lclNum] = new (m_alloc) Stack(m_alloc);
}
if (stack->empty() || stack->back().m_bb != bb)
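The `EnsureCounts`/`EnsureStacks` hunks replace `jitstd::utility::allocate<T>(m_alloc, n)`, which had to rebind a `jitstd::allocator<void>` to the element type, with a typed `allocate<T>` on the allocator itself. A rough sketch of the value-type `CompAllocator` these calls assume; the member names and arena interface are illustrative, not quoted from the patch:
```c++
// Sketch: CompAllocator as a small copyable handle (arena pointer plus
// a memory-kind tag), not an allocator carrying its own state.
class CompAllocator
{
    ArenaAllocator* m_arena; // per-compilation arena (assumed member)
    CompMemKind     m_kind;  // memory accounting tag (assumed member)

public:
    CompAllocator(ArenaAllocator* arena, CompMemKind kind) : m_arena(arena), m_kind(kind)
    {
    }

    // Allocate uninitialized storage for `count` objects of type T.
    // Real code would also guard the multiplication against overflow.
    template <typename T>
    T* allocate(size_t count)
    {
        return static_cast<T*>(m_arena->allocateMemory(count * sizeof(T)));
    }

    // The arena is released wholesale when compilation ends, so
    // per-object deallocation is deliberately a no-op.
    void deallocate(void* p)
    {
    }
};
```
Note that `new (m_alloc) Stack(m_alloc)` in the `Push` hunk uses the allocator twice for two different jobs: the placement argument supplies storage for the `Stack` object itself, while the constructor argument is kept by the stack for its element allocations.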
diff --git a/src/jit/ssarenamestate.h b/src/jit/ssarenamestate.h
index a8496b6386..a17b572aad 100644
--- a/src/jit/ssarenamestate.h
+++ b/src/jit/ssarenamestate.h
@@ -101,7 +101,7 @@ struct SsaRenameState
typedef unsigned* Counts;
typedef jitstd::list<SsaRenameStateLocDef> DefStack;
- SsaRenameState(const jitstd::allocator<int>& allocator, unsigned lvaCount, bool byrefStatesMatchGcHeapStates);
+ SsaRenameState(CompAllocator allocator, unsigned lvaCount, bool byrefStatesMatchGcHeapStates);
void EnsureCounts();
void EnsureStacks();
@@ -182,7 +182,7 @@ private:
unsigned lvaCount;
// Allocator to allocate stacks.
- jitstd::allocator<void> m_alloc;
+ CompAllocator m_alloc;
// Indicates whether GcHeap and ByrefExposed use the same state.
bool byrefStatesMatchGcHeapStates;
diff --git a/src/jit/stacklevelsetter.cpp b/src/jit/stacklevelsetter.cpp
index b0b6324f86..393eb25daf 100644
--- a/src/jit/stacklevelsetter.cpp
+++ b/src/jit/stacklevelsetter.cpp
@@ -13,8 +13,8 @@ StackLevelSetter::StackLevelSetter(Compiler* compiler)
: Phase(compiler, "StackLevelSetter", PHASE_STACK_LEVEL_SETTER)
, currentStackLevel(0)
, maxStackLevel(0)
- , memAllocator(compiler, CMK_fgArgInfoPtrArr)
- , putArgNumSlots(&memAllocator)
+ , memAllocator(compiler->getAllocator(CMK_fgArgInfoPtrArr))
+ , putArgNumSlots(memAllocator)
#if !FEATURE_FIXED_OUT_ARGS
, framePointerRequired(compiler->codeGen->isFramePointerRequired())
, throwHelperBlocksUsed(comp->fgUseThrowHelperBlocks() && comp->compUsesThrowHelper)
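`StackLevelSetter` previously constructed its own `CompAllocator(compiler, CMK_fgArgInfoPtrArr)` member; it now requests one from the compiler, and `putArgNumSlots` receives a copy rather than a pointer. A plausible shape for the accessor, sketched rather than quoted (the member name is assumed):
```c++
class Compiler
{
    ArenaAllocator* compArenaAllocator; // assumed member name

public:
    // Sketch: hand out allocator values tagged with a memory kind.
    // Returning by value is cheap; CompAllocator is a small handle.
    CompAllocator getAllocator(CompMemKind cmk = CMK_Generic)
    {
        return CompAllocator(compArenaAllocator, cmk);
    }
};
```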
diff --git a/src/jit/unwind.cpp b/src/jit/unwind.cpp
index 1f090b20b6..db120c5b3b 100644
--- a/src/jit/unwind.cpp
+++ b/src/jit/unwind.cpp
@@ -160,12 +160,6 @@ void Compiler::unwindPushPopCFI(regNumber reg)
}
}
-template <typename T>
-inline static T* allocate_any(jitstd::allocator<void>& alloc, size_t count = 5)
-{
- return jitstd::allocator<T>(alloc).allocate(count);
-}
-
typedef jitstd::vector<CFI_CODE> CFICodeVector;
void Compiler::unwindBegPrologCFI()
@@ -185,9 +179,7 @@ void Compiler::unwindBegPrologCFI()
unwindGetFuncLocations(func, false, &func->coldStartLoc, &func->coldEndLoc);
}
- jitstd::allocator<void> allocator(getAllocator());
-
- func->cfiCodes = new (allocate_any<CFICodeVector>(allocator), jitstd::placement_t()) CFICodeVector(allocator);
+ func->cfiCodes = new (getAllocator()) CFICodeVector(getAllocator());
#endif // FEATURE_EH_FUNCLETS
}
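The deleted `allocate_any` helper existed only to separate raw allocation from construction so the `jitstd::placement_t` form of placement new could be used; incidentally, its defaulted `count = 5` meant each call reserved space for five objects just to construct one. With the `CompAllocator` placement `operator new`, allocation and construction collapse into a single expression:
```c++
// Before (from the hunk above): allocate raw storage, then
// placement-construct into it. The default count of 5 over-allocates.
jitstd::allocator<void> allocator(getAllocator());
func->cfiCodes = new (allocate_any<CFICodeVector>(allocator), jitstd::placement_t()) CFICodeVector(allocator);

// After: one new-expression; the placement operator new supplies
// arena storage and the constructor runs in place.
func->cfiCodes = new (getAllocator()) CFICodeVector(getAllocator());
```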
diff --git a/src/jit/utils.cpp b/src/jit/utils.cpp
index 7a8854f9fc..2b120dd347 100644
--- a/src/jit/utils.cpp
+++ b/src/jit/utils.cpp
@@ -953,7 +953,7 @@ FixedBitVect* FixedBitVect::bitVectInit(UINT size, Compiler* comp)
assert(bitVectMemSize * bitChunkSize() >= size);
- bv = (FixedBitVect*)comp->compGetMem(sizeof(FixedBitVect) + bitVectMemSize, CMK_FixedBitVect);
+ bv = (FixedBitVect*)comp->getAllocator(CMK_FixedBitVect).allocate<char>(sizeof(FixedBitVect) + bitVectMemSize);
memset(bv->bitVect, 0, bitVectMemSize);
bv->bitVectSize = size;
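One idiom worth noting in the `FixedBitVect` hunk: the structure is variable-length (a fixed header followed by a bit array sized at run time), so a typed `allocate<FixedBitVect>(1)` would be too small. Asking for raw bytes via `allocate<char>` replaces the old untyped `compGetMem`; a sketch of the pattern, assuming a trailing-array layout:
```c++
// Sketch: arena-allocating a header-plus-trailing-array object.
// FixedBitVect is assumed to end in a UINT bitVect[]-style array,
// as the sizeof arithmetic in the hunk above implies.
size_t totalBytes = sizeof(FixedBitVect) + bitVectMemSize;
FixedBitVect* bv =
    (FixedBitVect*)comp->getAllocator(CMK_FixedBitVect).allocate<char>(totalBytes);
memset(bv->bitVect, 0, bitVectMemSize); // zero only the trailing array
```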
@@ -1491,10 +1491,8 @@ void HelperCallProperties::init()
// MyAssembly;mscorlib;System
// MyAssembly;mscorlib System
-AssemblyNamesList2::AssemblyNamesList2(const wchar_t* list, HostAllocator* alloc) : m_alloc(alloc)
+AssemblyNamesList2::AssemblyNamesList2(const wchar_t* list, HostAllocator alloc) : m_alloc(alloc)
{
- assert(m_alloc != nullptr);
-
WCHAR prevChar = '?'; // dummy
LPWSTR nameStart = nullptr; // start of the name currently being processed. nullptr if no current name
AssemblyName** ppPrevLink = &m_pNames;
@@ -1561,8 +1559,8 @@ AssemblyNamesList2::~AssemblyNamesList2()
AssemblyName* cur = pName;
pName = pName->m_next;
- m_alloc->Free(cur->m_assemblyName);
- m_alloc->Free(cur);
+ m_alloc.deallocate(cur->m_assemblyName);
+ m_alloc.deallocate(cur);
}
}
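The `AssemblyNamesList2` changes apply the same conversion to `HostAllocator`, which was previously heap-allocated and passed by pointer; that is why the `assert(m_alloc != nullptr)` disappears — a stateless value has no null state to check. A sketch of the by-value class, with the host-interface calls assumed for illustration:
```c++
// Sketch: a stateless allocator over the JIT host's memory hooks.
// All instances are interchangeable, so copying is free and safe.
class HostAllocator final
{
public:
    template <typename T>
    T* allocate(size_t count)
    {
        return static_cast<T*>(allocateHostMemory(count * sizeof(T)));
    }

    // Unlike CompAllocator, host memory is not arena-backed, so
    // deallocate really does free (see the destructor hunk above).
    void deallocate(void* p)
    {
        freeHostMemory(p);
    }

private:
    // Assumed wrappers over the JIT host interface.
    static void* allocateHostMemory(size_t size);
    static void freeHostMemory(void* p);
};
```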
diff --git a/src/jit/utils.h b/src/jit/utils.h
index 8dadabb3bc..fb6e3459ee 100644
--- a/src/jit/utils.h
+++ b/src/jit/utils.h
@@ -539,12 +539,12 @@ class AssemblyNamesList2
AssemblyName* m_next;
};
- AssemblyName* m_pNames; // List of names
- HostAllocator* m_alloc; // HostAllocator to use in this class
+ AssemblyName* m_pNames; // List of names
+ HostAllocator m_alloc; // HostAllocator to use in this class
public:
// Take a Unicode string list of assembly names, parse it, and store it.
- AssemblyNamesList2(const wchar_t* list, __in HostAllocator* alloc);
+ AssemblyNamesList2(const wchar_t* list, HostAllocator alloc);
~AssemblyNamesList2();
diff --git a/src/jit/valuenum.cpp b/src/jit/valuenum.cpp
index 825d6ec6bd..8b70757592 100644
--- a/src/jit/valuenum.cpp
+++ b/src/jit/valuenum.cpp
@@ -50,7 +50,7 @@ VNFunc GetVNFuncForOper(genTreeOps oper, bool isUnsigned)
}
}
-ValueNumStore::ValueNumStore(Compiler* comp, CompAllocator* alloc)
+ValueNumStore::ValueNumStore(Compiler* comp, CompAllocator alloc)
: m_pComp(comp)
, m_alloc(alloc)
,
@@ -60,7 +60,7 @@ ValueNumStore::ValueNumStore(Compiler* comp, CompAllocator* alloc)
#endif
m_nextChunkBase(0)
, m_fixedPointMapSels(alloc, 8)
- , m_checkedBoundVNs(comp)
+ , m_checkedBoundVNs(alloc)
, m_chunks(alloc, 8)
, m_intCnsMap(nullptr)
, m_longCnsMap(nullptr)
@@ -672,7 +672,7 @@ bool ValueNumStore::IsSharedStatic(ValueNum vn)
return GetVNFunc(vn, &funcAttr) && (s_vnfOpAttribs[funcAttr.m_func] & VNFOA_SharedStatic) != 0;
}
-ValueNumStore::Chunk::Chunk(CompAllocator* alloc,
+ValueNumStore::Chunk::Chunk(CompAllocator alloc,
ValueNum* pNextBaseVN,
var_types typ,
ChunkExtraAttribs attribs,
@@ -4538,8 +4538,8 @@ void Compiler::fgValueNumber()
assert(fgVNPassesCompleted > 0 || vnStore == nullptr);
if (fgVNPassesCompleted == 0)
{
- CompAllocator* allocator = new (this, CMK_ValueNumber) CompAllocator(this, CMK_ValueNumber);
- vnStore = new (this, CMK_ValueNumber) ValueNumStore(this, allocator);
+ CompAllocator allocator(getAllocator(CMK_ValueNumber));
+ vnStore = new (allocator) ValueNumStore(this, allocator);
}
else
{
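The `fgValueNumber` hunk shows most clearly why value semantics simplify ownership: the old code heap-allocated a `CompAllocator` purely so that the `CompAllocator*` member of `ValueNumStore` (converted to a value in the header changes below) had something long-lived to point at. With a value member, each consumer keeps its own copy:
```c++
// Before (sketched from the hunk): a pointer member forces a separate,
// arena-allocated CompAllocator object with a stable address.
CompAllocator* allocatorPtr = new (this, CMK_ValueNumber) CompAllocator(this, CMK_ValueNumber);
vnStore = new (this, CMK_ValueNumber) ValueNumStore(this, allocatorPtr);

// After: ValueNumStore copies the small allocator handle into its own
// m_alloc member, so this local may safely go out of scope.
CompAllocator allocator(getAllocator(CMK_ValueNumber));
vnStore = new (allocator) ValueNumStore(this, allocator);
```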
diff --git a/src/jit/valuenum.h b/src/jit/valuenum.h
index a8aabef5b1..b0c1580462 100644
--- a/src/jit/valuenum.h
+++ b/src/jit/valuenum.h
@@ -103,7 +103,7 @@ public:
class VNMap : public JitHashTable<fromType, keyfuncs, ValueNum>
{
public:
- VNMap(CompAllocator* alloc) : JitHashTable<fromType, keyfuncs, ValueNum>(alloc)
+ VNMap(CompAllocator alloc) : JitHashTable<fromType, keyfuncs, ValueNum>(alloc)
{
}
~VNMap()
@@ -128,7 +128,7 @@ private:
Compiler* m_pComp;
// For allocations. (Other things?)
- CompAllocator* m_alloc;
+ CompAllocator m_alloc;
// TODO-Cleanup: should transform "attribs" into a struct with bit fields. That would be simpler...
@@ -237,7 +237,7 @@ public:
static void InitValueNumStoreStatics();
// Initialize an empty ValueNumStore.
- ValueNumStore(Compiler* comp, CompAllocator* allocator);
+ ValueNumStore(Compiler* comp, CompAllocator allocator);
// Returns "true" iff "vnf" (which may have been created by a cast from an integral value) represents
// a legal value number function.
@@ -916,7 +916,7 @@ private:
// Initialize a chunk, starting at "*baseVN", for the given "typ", "attribs", and "loopNum" (using "alloc" for
// allocations).
// (Increments "*baseVN" by ChunkSize.)
- Chunk(CompAllocator* alloc,
+ Chunk(CompAllocator alloc,
ValueNum* baseVN,
var_types typ,
ChunkExtraAttribs attribs,