author     Pat Gavlin <pgavlin@gmail.com>  2016-02-25 18:07:05 -0800
committer  Pat Gavlin <pgavlin@gmail.com>  2016-02-25 18:07:05 -0800
commit     795f1e3d0dabc42c4dd06a2129635f31b2866230 (patch)
tree       fdf0dee40e265f62c7c03a457a71a4dcabcc506c
parent     d5e103809a060948e3bdac52c78a99bb5b54d118 (diff)
parent     62ce0e505571896f3a63023c0074c79be1c26a60 (diff)
Merge pull request #3359 from pgavlin/AllocatorCleanup2
Clean up norls_allocator.
-rw-r--r--  src/jit/alloc.cpp     689
-rw-r--r--  src/jit/alloc.h       265
-rw-r--r--  src/jit/compiler.cpp   54
-rw-r--r--  src/jit/compiler.h      6
-rw-r--r--  src/jit/compiler.hpp    6
-rw-r--r--  src/jit/importer.cpp    2
-rw-r--r--  src/jit/lsra.h          2
7 files changed, 506 insertions(+), 518 deletions(-)
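For orientation before the file-by-file diff: this change renames norls_allocator to ArenaAllocator and gives its members descriptive names, but the underlying technique is unchanged, namely a linked list of large pages with a bump-pointer fast path and a slow path that commits a new page. The sketch below is a minimal standalone C++ illustration of that technique only; the class name, page size, and use of malloc/free are illustrative assumptions and are not part of this commit.

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    // Illustrative bump-pointer arena (not the CoreCLR code). Pages are kept in a
    // singly linked list; allocations bump a pointer within the most recent page.
    class ToyArena
    {
        struct Page
        {
            Page*  m_next;
            size_t m_pageBytes;
            char   m_contents[1]; // payload starts here (fixed member for portability)
        };

        Page* m_firstPage    = nullptr;
        Page* m_lastPage     = nullptr;
        char* m_nextFreeByte = nullptr; // next free byte in m_lastPage
        char* m_lastFreeByte = nullptr; // one past the last usable byte in m_lastPage

        static const size_t kDefaultPageSize = 64 * 1024; // hypothetical page size

        // Slow path: commit a new page large enough for `size` bytes and link it in.
        void* allocateNewPage(size_t size)
        {
            size_t pageSize = sizeof(Page) + size;
            if (pageSize < kDefaultPageSize)
            {
                pageSize = kDefaultPageSize;
            }

            Page* newPage = static_cast<Page*>(std::malloc(pageSize));
            if (newPage == nullptr)
            {
                throw std::bad_alloc();
            }

            newPage->m_next      = nullptr;
            newPage->m_pageBytes = pageSize;
            if (m_lastPage != nullptr)
            {
                m_lastPage->m_next = newPage;
            }
            else
            {
                m_firstPage = newPage;
            }
            m_lastPage = newPage;

            m_nextFreeByte = newPage->m_contents + size;
            m_lastFreeByte = reinterpret_cast<char*>(newPage) + pageSize;
            return newPage->m_contents;
        }

    public:
        // Fast path: bump the free pointer; fall back to a new page otherwise.
        void* allocate(size_t size)
        {
            if (m_nextFreeByte != nullptr &&
                size <= static_cast<size_t>(m_lastFreeByte - m_nextFreeByte))
            {
                void* block = m_nextFreeByte;
                m_nextFreeByte += size;
                return block;
            }
            return allocateNewPage(size);
        }

        // Teardown: walk the page list and release every page.
        ~ToyArena()
        {
            for (Page* page = m_firstPage, *next; page != nullptr; page = next)
            {
                next = page->m_next;
                std::free(page);
            }
        }
    };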
diff --git a/src/jit/alloc.cpp b/src/jit/alloc.cpp
index cc27c83fd0..4f0a948ef9 100644
--- a/src/jit/alloc.cpp
+++ b/src/jit/alloc.cpp
@@ -1,410 +1,539 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-/*****************************************************************************/
-
#include "jitpch.h"
-#ifdef _MSC_VER
-#pragma hdrstop
-#endif
-/*****************************************************************************/
-
-/*****************************************************************************/
-void allocatorCodeSizeBeg(){}
-/*****************************************************************************/
-#ifdef DEBUG
-/*****************************************************************************/
-void __cdecl debugStop(const char *why, ...)
+#if defined(_MSC_VER)
+#pragma hdrstop
+#endif // defined(_MSC_VER)
+
+size_t ArenaAllocator::s_defaultPageSize = 0;
+ArenaAllocator* ArenaAllocator::s_pooledAllocator;
+ArenaAllocator::MarkDescriptor ArenaAllocator::s_pooledAllocatorMark;
+LONG ArenaAllocator::s_isPooledAllocatorInUse = 0;
+
+//------------------------------------------------------------------------
+// ArenaAllocator::bypassHostAllocator:
+// Indicates whether or not the ArenaAllocator should bypass the JIT
+// host when allocating memory for arena pages.
+//
+// Return Value:
+// True if the JIT should bypass the JIT host; false otherwise.
+bool ArenaAllocator::bypassHostAllocator()
{
- va_list args;
-
- va_start(args, why);
-
- printf("NOTIFICATION: ");
- if (why)
- vprintf(why, args);
- else
- printf("debugStop(0)");
-
- printf("\n");
-
- va_end(args);
-
- BreakIfDebuggerPresent();
+#if defined(DEBUG)
+    // When JitDirectAlloc is set, all JIT allocation requests are forwarded
+ // directly to the OS. This allows taking advantage of pageheap and other gflag
+ // knobs for ensuring that we do not have buffer overruns in the JIT.
+
+ return JitConfig.JitDirectAlloc() != 0;
+#else // defined(DEBUG)
+ return false;
+#endif // !defined(DEBUG)
}
-/*****************************************************************************/
-
-/*
- * Does this constant need to be bigger?
- */
-static size_t blockStop = 99999999;
-
-/*****************************************************************************/
-#endif // DEBUG
-/*****************************************************************************/
-
-size_t THE_ALLOCATOR_BASE_SIZE = 0;
-
-bool norls_allocator::nraInit(IEEMemoryManager* pMemoryManager, size_t pageSize, int preAlloc)
+//------------------------------------------------------------------------
+// ArenaAllocator::getDefaultPageSize:
+// Returns the default size of an arena page.
+//
+// Return Value:
+// The default size of an arena page.
+size_t ArenaAllocator::getDefaultPageSize()
{
- bool result = false;
-
- nraMemoryManager = pMemoryManager;
-
- nraPageList =
- nraPageLast = 0;
-
- nraFreeNext =
- nraFreeLast = 0;
+ return s_defaultPageSize;
+}
- assert(THE_ALLOCATOR_BASE_SIZE != 0);
+//------------------------------------------------------------------------
+// ArenaAllocator::initialize:
+//    Initializes an arena allocator.
+//
+// Arguments:
+// memoryManager - The `IEEMemoryManager` instance that will be used to
+// allocate memory for arena pages.
+//
+//    shouldPreallocate - True if the allocator should allocate an initial
+// arena page as part of initialization.
+//
+// Return Value:
+// True if initialization succeeded; false otherwise.
+bool ArenaAllocator::initialize(IEEMemoryManager* memoryManager, bool shouldPreallocate)
+{
+ assert(s_defaultPageSize != 0);
- nraPageSize = pageSize ? pageSize : THE_ALLOCATOR_BASE_SIZE;
+ m_memoryManager = memoryManager;
-#ifdef DEBUG
- nraShouldInjectFault = JitConfig.ShouldInjectFault() != 0;
-#endif
+ m_firstPage = nullptr;
+ m_lastPage = nullptr;
+ m_nextFreeByte = nullptr;
+ m_lastFreeByte = nullptr;
- if (preAlloc)
+ bool result = true;
+ if (shouldPreallocate)
{
- /* Grab the initial page(s) */
-
- setErrorTrap(NULL, norls_allocator *, pThis, this) // ERROR TRAP: Start normal block
+ // Grab the initial page.
+ setErrorTrap(NULL, ArenaAllocator*, thisPtr, this) // ERROR TRAP: Start normal block
{
- pThis->nraAllocNewPage(0);
+ thisPtr->allocateNewPage(0);
}
impJitErrorTrap() // ERROR TRAP: The following block handles errors
{
- result = true;
+ result = false;
}
endErrorTrap() // ERROR TRAP: End
}
- return result;
+ return result;
}
-/*---------------------------------------------------------------------------*/
-
-void * norls_allocator::nraAllocNewPage(size_t sz)
+//------------------------------------------------------------------------
+// ArenaAllocator::allocateNewPage:
+// Allocates a new arena page.
+//
+// Arguments:
+// size - The number of bytes that were requested by the allocation
+// that triggered this request to allocate a new arena page.
+//
+// Return Value:
+// A pointer to the first usable byte of the newly allocated page.
+void* ArenaAllocator::allocateNewPage(size_t size)
{
- norls_pagdesc * newPage;
- size_t sizPage;
-
- size_t realSize = sz + sizeof(norls_pagdesc);
- if (realSize < sz)
- NOMEM(); // Integer overflow
+ size_t pageSize = sizeof(PageDescriptor) + size;
- /* Do we have a page that's now full? */
-
- if (nraPageLast)
+ // Check for integer overflow
+ if (pageSize < size)
{
- /* Undo the "+=" done in nraAlloc() */
-
- nraFreeNext -= sz;
-
- /* Save the actual used size of the page */
-
- nraPageLast->nrpUsedSize = nraFreeNext - nraPageLast->nrpContents;
+ NOMEM();
}
- /* Make sure we grab enough to satisfy the allocation request */
-
- sizPage = nraPageSize;
-
- if (sizPage < realSize)
+ // If the current page is now full, update a few statistics
+ if (m_lastPage != nullptr)
{
- /* The allocation doesn't fit in a default-sized page */
+ // Undo the "+=" done in allocateMemory()
+ m_nextFreeByte -= size;
-#ifdef DEBUG
-// if (nraPageLast) printf("NOTE: wasted %u bytes in last page\n", nraPageLast->nrpPageSize - nraPageLast->nrpUsedSize);
-#endif
-
- sizPage = realSize;
+ // Save the actual used size of the page
+ m_lastPage->m_usedBytes = m_nextFreeByte - m_lastPage->m_contents;
}
- /* Round to the nearest multiple of OS page size */
-
- if (!nraDirectAlloc())
+ // Round up to a default-sized page if necessary
+ if (pageSize <= s_defaultPageSize)
{
- sizPage += (DEFAULT_PAGE_SIZE - 1);
- sizPage &= ~(DEFAULT_PAGE_SIZE - 1);
+ pageSize = s_defaultPageSize;
}
- /* Allocate the new page */
+    // Round up to the next multiple of the OS page size if necessary
+ if (!bypassHostAllocator())
+ {
+ pageSize = roundUp(pageSize, DEFAULT_PAGE_SIZE);
+ }
- newPage = (norls_pagdesc *)nraVirtualAlloc(0, sizPage, MEM_COMMIT, PAGE_READWRITE);
- if (!newPage)
+ // Allocate the new page
+ PageDescriptor* newPage = (PageDescriptor*)allocateHostMemory(pageSize);
+ if (newPage == nullptr)
+ {
NOMEM();
+ }
-#ifdef DEBUG
- newPage->nrpSelfPtr = newPage;
-#endif
-
- /* Append the new page to the end of the list */
-
- newPage->nrpNextPage = 0;
- newPage->nrpPageSize = sizPage;
- newPage->nrpPrevPage = nraPageLast;
- newPage->nrpUsedSize = 0; // nrpUsedSize is meaningless until a new page is allocated.
+ // Append the new page to the end of the list
+ newPage->m_next = nullptr;
+ newPage->m_pageBytes = pageSize;
+ newPage->m_previous = m_lastPage;
+ newPage->m_usedBytes = 0; // m_usedBytes is meaningless until a new page is allocated.
// Instead of letting it contain garbage (so to confuse us),
// set it to zero.
- if (nraPageLast)
- nraPageLast->nrpNextPage = newPage;
+ if (m_lastPage != nullptr)
+ {
+ m_lastPage->m_next = newPage;
+ }
else
- nraPageList = newPage;
- nraPageLast = newPage;
-
- /* Set up the 'next' and 'last' pointers */
+ {
+ m_firstPage = newPage;
+ }
- nraFreeNext = newPage->nrpContents + sz;
- nraFreeLast = newPage->nrpPageSize + (BYTE *)newPage;
+ m_lastPage = newPage;
- assert(nraFreeNext <= nraFreeLast);
+ // Adjust the next/last free byte pointers
+ m_nextFreeByte = newPage->m_contents + size;
+ m_lastFreeByte = (BYTE*)newPage + pageSize;
+ assert((m_lastFreeByte - m_nextFreeByte) >= 0);
- return newPage->nrpContents;
+ return newPage->m_contents;
}
-// This method walks the nraPageList forward and release the pages.
-// Be careful no other thread is doing nraToss at the same time.
-// Otherwise, the page specified by temp could be double-freed (VSW 600919).
-
-void norls_allocator::nraFree(void)
+//------------------------------------------------------------------------
+// ArenaAllocator::destroy:
+// Performs any necessary teardown for an `ArenaAllocator`.
+//
+// Notes:
+// This method walks from `m_firstPage` forward and releases the pages.
+// Be careful no other thread has called `reset` at the same time.
+// Otherwise, the page specified by `page` could be double-freed
+// (VSW 600919).
+void ArenaAllocator::destroy()
{
- /* Free all of the allocated pages */
-
- while (nraPageList)
+ // Free all of the allocated pages
+ for (PageDescriptor* page = m_firstPage, *next; page != nullptr; page = next)
{
- norls_pagdesc * temp;
+ next = page->m_next;
+ freeHostMemory(page);
+ }
- temp = nraPageList;
- nraPageList = temp->nrpNextPage;
+ // Clear out the allocator's fields
+ m_firstPage = nullptr;
+ m_lastPage = nullptr;
+ m_nextFreeByte = nullptr;
+ m_lastFreeByte = nullptr;
+}
- nraVirtualFree(temp, 0, MEM_RELEASE);
- }
+//------------------------------------------------------------------------
+// ArenaAllocator::mark:
+// Stores the current position of an `ArenaAllocator` in the given mark.
+//
+// Arguments:
+// mark - The mark that will store the current position of the
+// allocator.
+void ArenaAllocator::mark(MarkDescriptor& mark)
+{
+ mark.m_page = m_lastPage;
+ mark.m_next = m_nextFreeByte;
+ mark.m_last = m_lastFreeByte;
}
-// This method walks the nraPageList backward and release the pages.
-// Be careful no other thread is doing nraFree as the same time.
-// Otherwise, the page specified by temp could be double-freed (VSW 600919).
-void norls_allocator::nraToss(nraMarkDsc &mark)
+//------------------------------------------------------------------------
+// ArenaAllocator::reset:
+// Resets the current position of an `ArenaAllocator` to the given
+// mark, freeing any unused pages.
+//
+// Arguments:
+// mark - The mark that stores the desired position for the allocator.
+//
+// Notes:
+// This method may walk the page list backward and release the pages.
+//    Be careful that no other thread calls `destroy` at the same time;
+//    otherwise, the page referenced by `temp` could be double-freed
+// (VSW 600919).
+void ArenaAllocator::reset(MarkDescriptor& mark)
{
- void * last = mark.nmPage;
+ // If the active page hasn't changed, just reset the position into the
+ // page and return.
+ if (m_lastPage == mark.m_page)
+ {
+ m_nextFreeByte = mark.m_next;
+ m_lastFreeByte = mark.m_last;
+ return;
+ }
+
+ // Otherwise, free any new pages that were added.
+ void* last = mark.m_page;
- if (!last)
+ if (last == nullptr)
{
- if (!nraPageList)
+ if (m_firstPage == nullptr)
+ {
return;
+ }
- nraFreeNext = nraPageList->nrpContents;
- nraFreeLast = nraPageList->nrpPageSize + (BYTE *)nraPageList;
-
+ m_nextFreeByte = m_firstPage->m_contents;
+ m_lastFreeByte = m_firstPage->m_pageBytes + (BYTE*)m_firstPage;
return;
}
- /* Free up all the new pages we've added at the end of the list */
-
- while (nraPageLast != last)
+ while (m_lastPage != last)
{
- norls_pagdesc * temp;
+ // Remove the last page from the end of the list
+ PageDescriptor* temp = m_lastPage;
+ m_lastPage = temp->m_previous;
- /* Remove the last page from the end of the list */
+ // The new last page has no next page
+ m_lastPage->m_next = nullptr;
- temp = nraPageLast;
- nraPageLast = temp->nrpPrevPage;
+ freeHostMemory(temp);
+ }
- /* The new last page has no 'next' page */
+ m_nextFreeByte = mark.m_next;
+ m_lastFreeByte = mark.m_last;
+}
- nraPageLast->nrpNextPage = 0;
+// The debug version of the allocator may allocate directly from the
+// OS rather than going through the hosting APIs. In order to do so,
+// it must undef the macros that are usually in place to prevent
+// accidental uses of the OS allocator.
+#if defined(DEBUG)
+#undef GetProcessHeap
+#undef HeapAlloc
+#undef HeapFree
+#endif
- nraVirtualFree(temp, 0, MEM_RELEASE);
+//------------------------------------------------------------------------
+// ArenaAllocator::allocateHostMemory:
+// Allocates memory from the host (or the OS if `bypassHostAllocator()`
+// returns `true`).
+//
+// Arguments:
+// size - The number of bytes to allocate.
+//
+// Return Value:
+// A pointer to the allocated memory.
+void* ArenaAllocator::allocateHostMemory(size_t size)
+{
+#if defined(DEBUG)
+ if (bypassHostAllocator())
+ {
+ return ::HeapAlloc(GetProcessHeap(), 0, size);
}
+ else
+ {
+ return ClrAllocInProcessHeap(0, S_SIZE_T(size));
+ }
+#else // defined(DEBUG)
+ return m_memoryManager->ClrVirtualAlloc(nullptr, size, MEM_COMMIT, PAGE_READWRITE);
+#endif // !defined(DEBUG)
+}
- nraFreeNext = mark.nmNext;
- nraFreeLast = mark.nmLast;
+//------------------------------------------------------------------------
+// ArenaAllocator::freeHostMemory:
+// Frees memory allocated by a previous call to `allocateHostMemory`.
+//
+// Arguments:
+// block - A pointer to the memory to free.
+void ArenaAllocator::freeHostMemory(void* block)
+{
+#if defined(DEBUG)
+ if (bypassHostAllocator())
+ {
+ ::HeapFree(GetProcessHeap(), 0, block);
+ }
+ else
+ {
+ ClrFreeInProcessHeap(0, block);
+ }
+#else // defined(DEBUG)
+ m_memoryManager->ClrVirtualFree(block, 0, MEM_RELEASE);
+#endif // !defined(DEBUG)
}
-/*****************************************************************************/
-#ifdef DEBUG
-/*****************************************************************************/
-void * norls_allocator::nraAlloc(size_t sz)
+#if defined(DEBUG)
+//------------------------------------------------------------------------
+// ArenaAllocator::allocateMemory:
+// Allocates memory using an `ArenaAllocator`.
+//
+// Arguments:
+// size - The number of bytes to allocate.
+//
+// Return Value:
+// A pointer to the allocated memory.
+//
+// Notes:
+//    This is the DEBUG-only version of `allocateMemory`; the release
+//    version of this method is defined in the corresponding header file.
+//    This version of the method has some abilities that the release
+//    version does not: it may inject faults into the allocator, and it
+//    seeds all allocations with a specified pattern to help catch
+//    use-before-init problems.
+void* ArenaAllocator::allocateMemory(size_t size)
{
- void * block;
-
- assert(sz != 0 && (sz & (sizeof(int) - 1)) == 0);
-#ifdef _WIN64
- //Ensure that we always allocate in pointer sized increments.
- /* TODO-Cleanup:
- * This is wasteful. We should add alignment requirements to the allocations so we don't waste space in
- * the heap.
- */
- sz = (unsigned)roundUp(sz, sizeof(size_t));
-#endif
+ assert(size != 0 && (size & (sizeof(int) - 1)) == 0);
+
+ // Ensure that we always allocate in pointer sized increments.
+ size = (size_t)roundUp(size, sizeof(size_t));
-#ifdef DEBUG
- if (nraShouldInjectFault)
+ if (JitConfig.ShouldInjectFault() != 0)
{
// Force the underlying memory allocator (either the OS or the CLR hoster)
// to allocate the memory. Any fault injection will kick in.
- void * p = DbgNew(1);
- if (p)
+ void* p = ClrAllocInProcessHeap(0, S_SIZE_T(1));
+ if (p != nullptr)
{
- DbgDelete(p);
+ ClrFreeInProcessHeap(0, p);
}
else
{
NOMEM(); // Throw!
}
}
-#endif
-
- block = nraFreeNext;
- nraFreeNext += sz;
- if ((size_t)block == blockStop) debugStop("Block at %08X allocated", block);
+ void* block = m_nextFreeByte;
+ m_nextFreeByte += size;
- if (nraFreeNext > nraFreeLast)
- block = nraAllocNewPage(sz);
-
-#ifdef DEBUG
- memset(block, UninitializedWord<char>(), sz);
-#endif
+ if (m_nextFreeByte > m_lastFreeByte)
+ {
+ block = allocateNewPage(size);
+ }
- return block;
+ memset(block, UninitializedWord<char>(), size);
+ return block;
}
-
-/*****************************************************************************/
-#endif
-/*****************************************************************************/
-
-size_t norls_allocator::nraTotalSizeAlloc()
+#endif // defined(DEBUG)
+
+//------------------------------------------------------------------------
+// ArenaAllocator::getTotalBytesAllocated:
+// Gets the total number of bytes allocated for all of the arena pages
+// for an `ArenaAllocator`.
+//
+// Return Value:
+// See above.
+size_t ArenaAllocator::getTotalBytesAllocated()
{
- norls_pagdesc * page;
- size_t size = 0;
-
- for (page = nraPageList; page; page = page->nrpNextPage)
- size += page->nrpPageSize;
+ size_t bytes = 0;
+ for (PageDescriptor* page = m_firstPage; page != nullptr; page = page->m_next)
+ {
+ bytes += page->m_pageBytes;
+ }
- return size;
+ return bytes;
}
-size_t norls_allocator::nraTotalSizeUsed()
+//------------------------------------------------------------------------
+// ArenaAllocator::getTotalBytesUsed:
+// Gets the total number of bytes used in all of the arena pages for
+// an `ArenaAllocator`.
+//
+// Return Value:
+// See above.
+//
+// Notes:
+// An arena page may have unused space at the very end. This happens
+// when an allocation request comes in (via a call to `allocateMemory`)
+// that will not fit in the remaining bytes for the current page.
+// Another way to understand this method is as returning the total
+// number of bytes allocated for arena pages minus the number of bytes
+//    that are unused across all arena pages.
+size_t ArenaAllocator::getTotalBytesUsed()
{
- norls_pagdesc * page;
- size_t size = 0;
-
- if (nraPageLast)
- nraPageLast->nrpUsedSize = nraFreeNext - nraPageLast->nrpContents;
+ if (m_lastPage != nullptr)
+ {
+ m_lastPage->m_usedBytes = m_nextFreeByte - m_lastPage->m_contents;
+ }
- for (page = nraPageList; page; page = page->nrpNextPage)
- size += page->nrpUsedSize;
+ size_t bytes = 0;
+ for (PageDescriptor* page = m_firstPage; page != nullptr; page = page->m_next)
+ {
+ bytes += page->m_usedBytes;
+ }
- return size;
+ return bytes;
}
-/*****************************************************************************
- * We try to use this allocator instance as much as possible. It will always
- * keep a page handy so small methods won't have to call VirtualAlloc()
- * But we may not be able to use it if another thread/reentrant call
- * is already using it.
- */
-
-static norls_allocator *nraTheAllocator;
-static nraMarkDsc nraTheAllocatorMark;
-static LONG nraTheAllocatorIsInUse = 0;
-
-// The static instance which we try to reuse for all non-simultaneous requests
-
-static norls_allocator theAllocator;
-
-/*****************************************************************************/
-
-void nraInitTheAllocator()
+//------------------------------------------------------------------------
+// ArenaAllocator::startup:
+// Performs any necessary initialization for the arena allocator
+// subsystem.
+void ArenaAllocator::startup()
{
- THE_ALLOCATOR_BASE_SIZE = norls_allocator::nraDirectAlloc() ?
- (size_t)norls_allocator::MIN_PAGE_SIZE : (size_t)norls_allocator::DEFAULT_PAGE_SIZE;
+ s_defaultPageSize = bypassHostAllocator()
+ ? (size_t)MIN_PAGE_SIZE
+ : (size_t)DEFAULT_PAGE_SIZE;
}
-void nraTheAllocatorDone()
-{
- // We chose not to call nraTheAllocator->nraFree() and let the memory leak.
- // Below is the reason (VSW 600919).
-
- // The following race-condition exists during ExitProcess.
- // Thread A calls ExitProcess, which causes thread B to terminate.
- // Thread B terminated in the middle of nraToss()
- // (through the call-chain of nraFreeTheAllocator() ==> nraRlsm() ==> nraToss())
- // And then thread A comes along to call nraTheAllocator->nraFree() which will cause the double-free
- // of page specified by "temp".
-
- // These are possible fixes:
- // 1. Thread A tries to get hold on nraTheAllocatorIsInUse lock before
- // calling theAllocator.nraFree(). However, this could cause the deadlock because thread B
- // has already gone and therefore it can't release nraTheAllocatorIsInUse.
- // 2. Fix the logic in nraToss() and nraFree() to update nraPageList and nraPageLast in a thread safe way.
- // But it needs careful work to make it high performant (e.g. not holding a lock?)
- // 3. The scenario of dynamically unloading clrjit.dll cleanly is unimportant at this time.
- // We will leak the memory associated with other instances of morls_allocator anyway.
-
- // Therefore we decided not to call the cleanup code when unloading the jit.
-
+//------------------------------------------------------------------------
+// ArenaAllocator::shutdown:
+// Performs any necessary teardown for the arena allocator subsystem.
+//
+// Notes:
+//    We chose not to call s_pooledAllocator->destroy() and instead let the memory leak.
+//    Below is the reason (VSW 600919).
+//
+//    The following race condition exists during ExitProcess.
+//    Thread A calls ExitProcess, which causes thread B to terminate.
+//    Thread B is terminated in the middle of reset()
+//    (through the call chain returnPooledAllocator() ==> reset()).
+//    Thread A then calls s_pooledAllocator->destroy(), which causes the double-free
+//    of the page referenced by `temp`.
+//
+//    These are possible fixes:
+//    1. Thread A takes the s_isPooledAllocatorInUse lock before
+//       calling s_theAllocator.destroy(). However, this could deadlock because thread B
+//       is already gone and therefore can never release s_isPooledAllocatorInUse.
+//    2. Fix the logic in reset() and destroy() to update m_firstPage and m_lastPage in a thread-safe way.
+//       This needs careful work to keep it performant (e.g. not holding a lock?).
+//    3. The scenario of dynamically unloading clrjit.dll cleanly is unimportant at this time.
+//       We will leak the memory associated with other instances of ArenaAllocator anyway.
+//
+// Therefore we decided not to call the cleanup code when unloading the jit.
+void ArenaAllocator::shutdown()
+{
}
-/*****************************************************************************/
-
-norls_allocator * nraGetTheAllocator(IEEMemoryManager* pMemoryManager)
+// The static instance which we try to reuse for all non-simultaneous requests.
+//
+// We try to use this allocator instance as much as possible. It will always
+// keep a page handy so small methods won't have to call VirtualAlloc()
+// But we may not be able to use it if another thread/reentrant call
+// is already using it.
+static ArenaAllocator s_theAllocator;
+
+//------------------------------------------------------------------------
+// ArenaAllocator::getPooledAllocator:
+// Returns the pooled allocator if it is not already in use.
+//
+// Arguments:
+//    memoryManager - The `IEEMemoryManager` instance in use by the caller.
+//
+// Return Value:
+// A pointer to the pooled allocator if it is available or `nullptr`
+// if it is already in use.
+//
+// Notes:
+// The returned `ArenaAllocator` should be given back to the pool by
+// calling `ArenaAllocator::returnPooledAllocator` when the caller has
+// finished using it.
+ArenaAllocator* ArenaAllocator::getPooledAllocator(IEEMemoryManager* memoryManager)
{
- if (InterlockedExchange(&nraTheAllocatorIsInUse, 1))
+ if (InterlockedExchange(&s_isPooledAllocatorInUse, 1))
{
// Its being used by another Compiler instance
- return NULL;
+ return nullptr;
}
- if (nraTheAllocator == NULL)
+ if (s_pooledAllocator == nullptr)
{
// Not initialized yet
- bool res = theAllocator.nraInit(pMemoryManager, 0, 1);
-
- if (res)
+ bool res = s_theAllocator.initialize(memoryManager, true);
+ if (!res)
{
// failed to initialize
- InterlockedExchange(&nraTheAllocatorIsInUse, 0);
- return NULL;
+ InterlockedExchange(&s_isPooledAllocatorInUse, 0);
+ return nullptr;
}
- nraTheAllocator = &theAllocator;
+ s_pooledAllocator = &s_theAllocator;
- assert(nraTheAllocator->nraTotalSizeAlloc() == THE_ALLOCATOR_BASE_SIZE);
- nraTheAllocator->nraMark(nraTheAllocatorMark);
+ assert(s_pooledAllocator->getTotalBytesAllocated() == s_defaultPageSize);
+ s_pooledAllocator->mark(s_pooledAllocatorMark);
}
else
{
- if (nraTheAllocator->nraGetMemoryManager() != pMemoryManager)
+ if (s_pooledAllocator->m_memoryManager != memoryManager)
{
// already initialize with a different memory manager
- InterlockedExchange(&nraTheAllocatorIsInUse, 0);
- return NULL;
+ InterlockedExchange(&s_isPooledAllocatorInUse, 0);
+ return nullptr;
}
}
- assert(nraTheAllocator->nraTotalSizeAlloc() == THE_ALLOCATOR_BASE_SIZE);
- return nraTheAllocator;
+ assert(s_pooledAllocator->getTotalBytesAllocated() == s_defaultPageSize);
+ return s_pooledAllocator;
}
-
-void nraFreeTheAllocator()
+//------------------------------------------------------------------------
+// ArenaAllocator::returnPooledAllocator:
+//    Gives the pooled allocator back to the pool after the caller has finished using it.
+//
+// Arguments:
+// allocator - The pooled allocator instance. This must be an instance
+// that was obtained by a previous call to
+// `ArenaAllocator::getPooledAllocator`.
+void ArenaAllocator::returnPooledAllocator(ArenaAllocator* allocator)
{
- assert (nraTheAllocator != NULL);
- assert(nraTheAllocatorIsInUse == 1);
+ assert(s_pooledAllocator != nullptr);
+ assert(s_isPooledAllocatorInUse == 1);
+ assert(allocator == s_pooledAllocator);
- nraTheAllocator->nraRlsm(nraTheAllocatorMark);
- assert(nraTheAllocator->nraTotalSizeAlloc() == THE_ALLOCATOR_BASE_SIZE);
+ s_pooledAllocator->reset(s_pooledAllocatorMark);
+ assert(s_pooledAllocator->getTotalBytesAllocated() == s_defaultPageSize);
- InterlockedExchange(&nraTheAllocatorIsInUse, 0);
+ InterlockedExchange(&s_isPooledAllocatorInUse, 0);
}
-
-/*****************************************************************************/
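To make the page sizing in allocateNewPage() above concrete, here is a worked example under illustrative assumptions (a 64-bit build with a 4 KB OS page, so DEFAULT_PAGE_SIZE = 16 * 4 KB = 64 KB, and a 32-byte PageDescriptor): a 100-byte request yields pageSize = 32 + 100 = 132, which is at or below the default page size, so a full 64 KB page is committed; a 100,000-byte request yields pageSize = 100,032, which exceeds the default and is therefore only rounded up to the next 64 KB multiple, i.e. roundUp(100032, 65536) = 131,072 bytes (128 KB). When bypassHostAllocator() returns true (DEBUG builds with JitDirectAlloc), s_defaultPageSize is MIN_PAGE_SIZE and the OS-page rounding is skipped, so pages are sized close to the exact request, which is what lets pageheap-style tools catch overruns.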
diff --git a/src/jit/alloc.h b/src/jit/alloc.h
index 11bb1f8d72..aa59433640 100644
--- a/src/jit/alloc.h
+++ b/src/jit/alloc.h
@@ -1,240 +1,103 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-/*****************************************************************************/
#ifndef _ALLOC_H_
#define _ALLOC_H_
-/*****************************************************************************/
-#ifndef _HOST_H_
-#include "host.h"
-#endif
-/*****************************************************************************/
-
-#ifdef _MSC_VER
-#pragma warning(disable:4200)
-#endif
-
-/*****************************************************************************/
-#if defined(DEBUG)
-
-#include "malloc.h"
-
-inline void * DbgNew(size_t size)
-{
- return ClrAllocInProcessHeap(0, S_SIZE_T(size));
-}
-
-inline void DbgDelete(void * ptr)
-{
- (void)ClrFreeInProcessHeap(0, ptr);
-}
-#endif // DEBUG
-
-/*****************************************************************************/
-
-struct nraMarkDsc
-
-{
- void * nmPage;
- BYTE * nmNext;
- BYTE * nmLast;
-};
+#if !defined(_HOST_H_)
+#include "host.h"
+#endif // defined(_HOST_H_)
-struct norls_allocator
+struct ArenaAllocator
{
private:
- struct norls_pagdesc
+ struct MarkDescriptor
{
- norls_pagdesc * nrpNextPage;
- norls_pagdesc * nrpPrevPage;
-#ifdef DEBUG
- void * nrpSelfPtr;
-#endif
- size_t nrpPageSize; // # of bytes allocated
- size_t nrpUsedSize; // # of bytes actually used. (This is only valid when we've allocated a new page.)
- // See norls_allocator::nraAllocNewPage.
- BYTE nrpContents[];
+ void* m_page;
+ BYTE* m_next;
+ BYTE* m_last;
};
- norls_pagdesc * nraPageList;
- norls_pagdesc * nraPageLast;
-
- BYTE * nraFreeNext; // these two (when non-zero) will
- BYTE * nraFreeLast; // always point into 'nraPageLast'
-
- size_t nraPageSize;
-
-#ifdef DEBUG
- bool nraShouldInjectFault; // Should we inject fault?
-#endif
+ struct PageDescriptor
+ {
+ PageDescriptor* m_next;
+ PageDescriptor* m_previous;
- IEEMemoryManager* nraMemoryManager;
+ size_t m_pageBytes; // # of bytes allocated
+ size_t m_usedBytes; // # of bytes actually used. (This is only valid when we've allocated a new page.)
+ // See ArenaAllocator::allocateNewPage.
- void * nraAllocNewPage(size_t sz);
+ BYTE m_contents[];
+ };
-public:
// Anything less than 64K leaves VM holes since the OS allocates address space in this size.
// Thus if we want to make this smaller, we need to do a reserve / commit scheme
- enum { DEFAULT_PAGE_SIZE = (16 * OS_page_size) };
- enum { MIN_PAGE_SIZE = sizeof(norls_pagdesc) };
-
- bool nraInit (IEEMemoryManager* pMemoryManager, size_t pageSize = 0, int preAlloc = 0);
+ enum
+ {
+ DEFAULT_PAGE_SIZE = 16 * OS_page_size,
+ MIN_PAGE_SIZE = sizeof(PageDescriptor)
+ };
- void nraFree (void);
+ static size_t s_defaultPageSize;
+ static ArenaAllocator* s_pooledAllocator;
+ static MarkDescriptor s_pooledAllocatorMark;
+ static LONG s_isPooledAllocatorInUse;
- void * nraAlloc(size_t sz);
+ PageDescriptor* m_firstPage;
+ PageDescriptor* m_lastPage;
- /* The following used for mark/release operation */
+ // These two pointers (when non-null) will always point into 'm_lastPage'.
+ BYTE* m_nextFreeByte;
+ BYTE* m_lastFreeByte;
- void nraMark(nraMarkDsc &mark)
- {
- mark.nmPage = nraPageLast;
- mark.nmNext = nraFreeNext;
- mark.nmLast = nraFreeLast;
- }
+ IEEMemoryManager* m_memoryManager;
-private:
+ void* allocateNewPage(size_t size);
- void nraToss(nraMarkDsc &mark);
+ // The following methods are used for mark/release operation.
+ void mark(MarkDescriptor& mark);
+ void reset(MarkDescriptor& mark);
- LPVOID nraVirtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect)
- {
-#if defined(DEBUG)
- assert(lpAddress == 0 && flAllocationType == MEM_COMMIT && flProtect == PAGE_READWRITE);
- if (nraDirectAlloc())
- {
-#undef GetProcessHeap
-#undef HeapAlloc
- return ::HeapAlloc(GetProcessHeap(), 0, dwSize);
- }
- else
- return DbgNew(dwSize);
-#else
- return nraMemoryManager->ClrVirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);
-#endif
- }
-
- void nraVirtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD dwFreeType)
- {
-#if defined(DEBUG)
- assert(dwSize == 0 && dwFreeType == MEM_RELEASE);
- if (nraDirectAlloc())
- {
-#undef GetProcessHeap
-#undef HeapFree
- ::HeapFree(GetProcessHeap(), 0, lpAddress);
- }
- else
- DbgDelete(lpAddress);
-#else
- nraMemoryManager->ClrVirtualFree(lpAddress, dwSize, dwFreeType);
-#endif
- }
+ void* allocateHostMemory(size_t size);
+ void freeHostMemory(void* block);
public:
+ bool initialize(IEEMemoryManager* memoryManager, bool shouldPreallocate);
+ void destroy();
- void nraRlsm(nraMarkDsc &mark)
+#if defined(DEBUG)
+    void* allocateMemory(size_t size);
+#else // defined(DEBUG)
+ inline void* allocateMemory(size_t size)
{
- if (nraPageLast != mark.nmPage)
- {
- nraToss(mark);
- }
- else
+ void* block = m_nextFreeByte;
+ m_nextFreeByte += size;
+
+ if (m_nextFreeByte > m_lastFreeByte)
{
- nraFreeNext = mark.nmNext;
- nraFreeLast = mark.nmLast;
+ block = allocateNewPage(size);
}
- }
-
- size_t nraTotalSizeAlloc();
- size_t nraTotalSizeUsed ();
- IEEMemoryManager * nraGetMemoryManager()
- {
- return nraMemoryManager;
- }
-
- static bool nraDirectAlloc();
-
-#ifdef _TARGET_AMD64_
- /*
- * IGcInfoEncoderAllocator implementation (protected)
- * - required to use GcInfoEncoder
- */
-protected:
- void* Alloc(size_t size)
- {
- //GcInfoEncoder likes to allocate things of 0-size when m_NumSlots == 0
- //but nraAlloc doesn't like to allocate 0-size things.. so lets not let it
- return size ? nraAlloc(size) : NULL;
+ return block;
}
- void Free( void* ) {}
-#endif // _TARGET_AMD64_
-};
-
-#if !defined(DEBUG)
-
-inline
-void * norls_allocator::nraAlloc(size_t sz)
-{
- void * block;
+#endif // !defined(DEBUG)
- block = nraFreeNext;
- nraFreeNext += sz;
+ size_t getTotalBytesAllocated();
+ size_t getTotalBytesUsed();
- if (nraFreeNext > nraFreeLast)
- block = nraAllocNewPage(sz);
+ static bool bypassHostAllocator();
+ static size_t getDefaultPageSize();
- return block;
-}
+ static void startup();
+ static void shutdown();
-#endif
-
-/*****************************************************************************/
-/*****************************************************************************
- * If most uses of the norls_alloctor are going to be non-simultaneous,
- * we keep a single instance handy and preallocate 1 chunk of 64K
- * Then most uses won't need to call VirtualAlloc() for the first page.
- */
-
-
-#if defined(DEBUG)
-
-inline bool norls_allocator::nraDirectAlloc()
-{
- // When JitDirectAlloc is set, all JIT allocations requests are forwarded
- // directly to the OS. This allows taking advantage of pageheap and other gflag
- // knobs for ensuring that we do not have buffer overruns in the JIT.
-
- return JitConfig.JitDirectAlloc() != 0;
-}
-
-#else // RELEASE
-
-inline bool norls_allocator::nraDirectAlloc()
-{
- return false;
-}
-#endif
-
-extern size_t THE_ALLOCATOR_BASE_SIZE;
-
-void nraInitTheAllocator(); // One-time initialization
-void nraTheAllocatorDone(); // One-time completion code
-
-// returns NULL if the single instance is already in use.
-// User will need to allocate a new instance of the norls_allocator
-
-norls_allocator * nraGetTheAllocator(IEEMemoryManager* pMemoryManager);
-
-// Should be called after we are done with the current use, so that the
-// next user can reuse it, instead of allocating a new instance
-
-void nraFreeTheAllocator();
+ // Gets the pooled allocator if it is available. Returns `nullptr` if the
+ // pooled allocator is already in use.
+ static ArenaAllocator* getPooledAllocator(IEEMemoryManager* memoryManager);
+    // Gives the pooled allocator back to the pool so that it can be reused.
+ static void returnPooledAllocator(ArenaAllocator* allocator);
+};
-/*****************************************************************************/
-#endif // _ALLOC_H_
-/*****************************************************************************/
+#endif // _ALLOC_H_
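The compiler.cpp hunks below show how this is consumed in jitNativeCode: try ArenaAllocator::getPooledAllocator() first, fall back to a locally initialized allocator if the pooled one is busy, and afterwards either return the pooled instance or destroy the local one. The following is a small standalone C++ sketch of that reuse discipline, using std::atomic in place of InterlockedExchange; the names and the Arena stand-in are hypothetical, not CoreCLR code.

    #include <atomic>
    #include <cstdio>

    // Minimal stand-in for a per-compilation arena; only ownership matters here.
    struct Arena
    {
        void reset()   { /* drop back to a saved mark, keep the first page */ }
        void destroy() { /* free all pages */ }
    };

    static Arena            s_theArena;               // reused for non-simultaneous requests
    static std::atomic<int> s_isPooledArenaInUse(0);

    // Returns the pooled arena, or nullptr if another thread already holds it.
    Arena* getPooledArena()
    {
        if (s_isPooledArenaInUse.exchange(1) != 0)
        {
            return nullptr; // busy: the caller must fall back to its own arena
        }
        return &s_theArena;
    }

    // Hands the pooled arena back so the next compilation can reuse it.
    void returnPooledArena(Arena* arena)
    {
        arena->reset();                 // keep the preallocated page, free the rest
        s_isPooledArenaInUse.store(0);
    }

    int main()
    {
        Arena  localArena;
        Arena* arena  = getPooledArena();
        bool   pooled = (arena != nullptr);
        if (!pooled)
        {
            arena = &localArena;        // fall back to a locally owned arena
        }

        // ... compile the method using `arena` ...

        if (pooled)
        {
            returnPooledArena(arena);   // give the shared instance back
        }
        else
        {
            arena->destroy();           // tear down the locally owned arena
        }
        std::printf("used the %s arena\n", pooled ? "pooled" : "local");
        return 0;
    }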
diff --git a/src/jit/compiler.cpp b/src/jit/compiler.cpp
index 0af9efcd26..e055cdf0cf 100644
--- a/src/jit/compiler.cpp
+++ b/src/jit/compiler.cpp
@@ -677,12 +677,8 @@ void Compiler::compStartup()
totalNCsize = 0;
#endif // DISPLAY_SIZES
- /* Initialize the single instance of the norls_allocator (with a page
- * preallocated) which we try to reuse for all non-simulataneous
- * uses (which is always, for the standalone)
- */
-
- nraInitTheAllocator();
+ // Initialize the JIT's allocator.
+ ArenaAllocator::startup();
/* Initialize the table of tree node sizes */
@@ -720,7 +716,7 @@ void Compiler::compShutdown()
}
#endif // ALT_JIT
- nraTheAllocatorDone();
+ ArenaAllocator::shutdown();
/* Shut down the emitter */
@@ -1267,7 +1263,7 @@ void Compiler::compDisplayStaticSizes(FILE* fout)
* Constructor
*/
-void Compiler::compInit(norls_allocator * pAlloc, InlineInfo * inlineInfo)
+void Compiler::compInit(ArenaAllocator * pAlloc, InlineInfo * inlineInfo)
{
assert(pAlloc);
compAllocator = pAlloc;
@@ -1580,7 +1576,7 @@ void * Compiler::compGetMem(size_t sz, CompMemKind cmk)
genMemStats.AddAlloc(sz, cmk);
#endif
- void * ptr = compAllocator->nraAlloc(sz);
+ void * ptr = compAllocator->allocateMemory(sz);
// Verify that the current block is aligned. Only then will the next
// block allocated be on an aligned boundary.
@@ -4411,8 +4407,8 @@ void Compiler::compCompileFinish()
#if MEASURE_MEM_ALLOC
ClrEnterCriticalSection(s_memStatsLock.Val());
- genMemStats.nraTotalSizeAlloc = compGetAllocator()->nraTotalSizeAlloc();
- genMemStats.nraTotalSizeUsed = compGetAllocator()->nraTotalSizeUsed ();
+ genMemStats.nraTotalSizeAlloc = compGetAllocator()->getTotalBytesAllocated();
+ genMemStats.nraTotalSizeUsed = compGetAllocator()->getTotalBytesUsed();
s_aggMemStats.Add(genMemStats);
if (genMemStats.allocSz > s_maxCompMemStats.allocSz)
{
@@ -4440,8 +4436,8 @@ void Compiler::compCompileFinish()
#endif
#if defined(DEBUG)
- // Small methods should fit in THE_ALLOCATOR_BASE_SIZE, or else
- // we should bump up THE_ALLOCATOR_BASE_SIZE
+ // Small methods should fit in ArenaAllocator::getDefaultPageSize(), or else
+ // we should bump up ArenaAllocator::getDefaultPageSize()
if ((info.compILCodeSize <= 32) && // Is it a reasonably small method?
(info.compNativeCodeSize < 512) && // Some trivial methods generate huge native code. eg. pushing a single huge struct
@@ -4452,12 +4448,12 @@ void Compiler::compCompileFinish()
(info.compLocalsCount <= 32) &&
(!opts.MinOpts()) && // We may have too many local variables, etc
(getJitStressLevel() == 0) && // We need extra memory for stress
- !compAllocator->nraDirectAlloc() && // THE_ALLOCATOR_BASE_SIZE is artificially low for DirectAlloc
- (compAllocator->nraTotalSizeAlloc() > (2 * THE_ALLOCATOR_BASE_SIZE)) &&
+ !compAllocator->bypassHostAllocator() && // ArenaAllocator::getDefaultPageSize() is artificially low for DirectAlloc
+ (compAllocator->getTotalBytesAllocated() > (2 * ArenaAllocator::getDefaultPageSize())) &&
// Factor of 2x is because data-structures are bigger under DEBUG
#ifndef LEGACY_BACKEND
// RyuJIT backend needs memory tuning! TODO-Cleanup: remove this case when memory tuning is complete.
- (compAllocator->nraTotalSizeAlloc() > (10 * THE_ALLOCATOR_BASE_SIZE)) &&
+ (compAllocator->getTotalBytesAllocated() > (10 * ArenaAllocator::getDefaultPageSize())) &&
#endif
!verbose) // We allocate lots of memory to convert sets to strings for JitDump
{
@@ -5501,8 +5497,8 @@ int jitNativeCode ( CORINFO_METHOD_HANDLE methodHnd,
START:
int result = CORJIT_INTERNALERROR;
- norls_allocator * pAlloc = NULL;
- norls_allocator alloc;
+ ArenaAllocator * pAlloc = NULL;
+ ArenaAllocator alloc;
if (inlineInfo)
{
@@ -5514,12 +5510,12 @@ START:
IEEMemoryManager* pMemoryManager = compHnd->getMemoryManager();
// Try to reuse the pre-inited allocator ?
- pAlloc = nraGetTheAllocator(pMemoryManager);
+ pAlloc = ArenaAllocator::getPooledAllocator(pMemoryManager);
if (!pAlloc)
{
- bool res = alloc.nraInit(pMemoryManager);
- if (res)
+ bool res = alloc.initialize(pMemoryManager, false);
+ if (!res)
{
return CORJIT_OUTOFMEM;
}
@@ -5533,8 +5529,8 @@ START:
struct Param {
Compiler *pComp;
- norls_allocator * pAlloc;
- norls_allocator * alloc;
+ ArenaAllocator * pAlloc;
+ ArenaAllocator * alloc;
bool jitFallbackCompile;
CORINFO_METHOD_HANDLE methodHnd;
@@ -5571,7 +5567,7 @@ START:
// Lazily create the inlinee compiler object
if (pParam->inlineInfo->InlinerCompiler->InlineeCompiler == NULL)
{
- pParam->inlineInfo->InlinerCompiler->InlineeCompiler = (Compiler *)pParam->pAlloc->nraAlloc(roundUp(sizeof(*pParam->pComp)));
+ pParam->inlineInfo->InlinerCompiler->InlineeCompiler = (Compiler *)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp)));
}
// Use the inlinee compiler object
@@ -5583,7 +5579,7 @@ START:
else
{
// Allocate create the inliner compiler object
- pParam->pComp = (Compiler *)pParam->pAlloc->nraAlloc(roundUp(sizeof(*pParam->pComp)));
+ pParam->pComp = (Compiler *)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp)));
}
// push this compiler on the stack (TLS)
@@ -5632,11 +5628,11 @@ START:
// Now free up whichever allocator we were using
if (pParamOuter->pAlloc != pParamOuter->alloc)
{
- nraFreeTheAllocator();
+ ArenaAllocator::returnPooledAllocator(pParamOuter->pAlloc);
}
else
{
- pParamOuter->alloc->nraFree();
+ pParamOuter->alloc->destroy();
}
}
}
@@ -6702,7 +6698,7 @@ void Compiler::MemStats::Print(FILE* f)
{
fprintf(f, "count: %10u, size: %10llu, max = %10llu\n",
allocCnt, allocSz, allocSzMax);
- fprintf(f, "nraAlloc: %10llu, nraUsed: %10llu\n",
+ fprintf(f, "allocateMemory: %10llu, nraUsed: %10llu\n",
nraTotalSizeAlloc, nraTotalSizeUsed);
PrintByKind(f);
}
@@ -6730,7 +6726,7 @@ void Compiler::AggregateMemStats::Print(FILE* f)
allocSz, allocSz / nMethods);
fprintf(f, " max alloc : %12llu\n", allocSzMax);
fprintf(f, "\n");
- fprintf(f, " nraAlloc : %12llu (avg %7llu per method)\n",
+ fprintf(f, " allocateMemory : %12llu (avg %7llu per method)\n",
nraTotalSizeAlloc, nraTotalSizeAlloc / nMethods);
fprintf(f, " nraUsed : %12llu (avg %7llu per method)\n",
nraTotalSizeUsed, nraTotalSizeUsed / nMethods);
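One behavioral detail worth noting in the hunks above: the old nraInit() returned true on failure (the error trap set the result), whereas the new initialize() returns true on success, which is why the call sites flip from `if (res)` to `if (!res)` before returning CORJIT_OUTOFMEM. In outline, for a hypothetical new caller (illustrative, not a verbatim excerpt):

    // ArenaAllocator alloc;
    // if (!alloc.initialize(memoryManager, /* shouldPreallocate */ false))
    // {
    //     return CORJIT_OUTOFMEM;   // initialize() now reports failure as false
    // }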
diff --git a/src/jit/compiler.h b/src/jit/compiler.h
index 4086d667d1..bada074472 100644
--- a/src/jit/compiler.h
+++ b/src/jit/compiler.h
@@ -7909,7 +7909,7 @@ public :
static void compStartup (); // One-time initialization
static void compShutdown (); // One-time finalization
- void compInit (norls_allocator * pAlloc, InlineInfo * inlineInfo);
+ void compInit (ArenaAllocator * pAlloc, InlineInfo * inlineInfo);
void compDone ();
static void compDisplayStaticSizes(FILE* fout);
@@ -7948,7 +7948,7 @@ public :
CORJIT_FLAGS * compileFlags,
CorInfoInstantiationVerification instVerInfo);
- norls_allocator * compGetAllocator();
+ ArenaAllocator * compGetAllocator();
#if MEASURE_MEM_ALLOC
struct MemStats
@@ -8152,7 +8152,7 @@ protected :
bool skipMethod();
#endif
- norls_allocator * compAllocator;
+ ArenaAllocator * compAllocator;
public:
// This one presents an implementation of the "IAllocator" abstract class that uses "compAllocator",
diff --git a/src/jit/compiler.hpp b/src/jit/compiler.hpp
index 882cf51381..03b4f1ce16 100644
--- a/src/jit/compiler.hpp
+++ b/src/jit/compiler.hpp
@@ -4054,7 +4054,7 @@ bool Compiler::compStressCompile(compStressArea stressArea,
inline
-norls_allocator * Compiler::compGetAllocator()
+ArenaAllocator * Compiler::compGetAllocator()
{
return compAllocator;
}
@@ -4076,7 +4076,7 @@ void * Compiler::compGetMem(size_t sz, CompMemKind cmk)
genMemStats.AddAlloc(sz, cmk);
#endif
- return compAllocator->nraAlloc(sz);
+ return compAllocator->allocateMemory(sz);
}
#endif
@@ -4138,7 +4138,7 @@ void * Compiler::compGetMemA(size_t sz, CompMemKind cmk)
genMemStats.AddAlloc(allocSz, cmk);
#endif
- void * ptr = compAllocator->nraAlloc(allocSz);
+ void * ptr = compAllocator->allocateMemory(allocSz);
// Verify that the current block is aligned. Only then will the next
// block allocated be on an aligned boundary.
diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp
index db00722777..ecae1d2a3b 100644
--- a/src/jit/importer.cpp
+++ b/src/jit/importer.cpp
@@ -15758,7 +15758,7 @@ void Compiler::impCanInlineNative(int callsiteNativeEstima
if (calleeNativeSizeEstimate > threshold)
{
#ifdef DEBUG
- char * message = (char *)compAllocator->nraAlloc(128);
+ char * message = (char *)compAllocator->allocateMemory(128);
sprintf(message, "Native estimate for function size exceeds threshold %g > %g (multiplier = %g).",
calleeNativeSizeEstimate / NATIVE_CALL_SIZE_MULTIPLIER,
threshold / NATIVE_CALL_SIZE_MULTIPLIER, multiplier);
diff --git a/src/jit/lsra.h b/src/jit/lsra.h
index db28eaa748..3bfdda9e55 100644
--- a/src/jit/lsra.h
+++ b/src/jit/lsra.h
@@ -102,7 +102,7 @@ typedef regNumber * VarToRegMap;
typedef StructArrayList<Interval, /* initial element count */ 32, /* multiplicative chunk size growth factor */ 2, LinearScanMemoryAllocatorInterval> IntervalList;
typedef StructArrayList<RefPosition, /* initial element count */ 64, /* multiplicative chunk size growth factor */ 2, LinearScanMemoryAllocatorRefPosition> RefPositionList;
-// Wrapper for norls_allocator
+// Wrapper for ArenaAllocator
class LinearScanMemoryAllocatorRefPosition
{
public: