summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorVladimir Sadov <vsadov@microsoft.com>2019-05-02 22:16:31 -0700
committerJan Kotas <jkotas@microsoft.com>2019-05-02 22:16:31 -0700
commitb271aff1fa54c1385143f3b45c1bf3af01c901cd (patch)
tree69c76676a56a28979fd1c5c66db9d096afa98c6e
parentdd814e26e2206c36589f88b2c58a6f3695f7dc4e (diff)
downloadcoreclr-b271aff1fa54c1385143f3b45c1bf3af01c901cd.tar.gz
coreclr-b271aff1fa54c1385143f3b45c1bf3af01c901cd.tar.bz2
coreclr-b271aff1fa54c1385143f3b45c1bf3af01c901cd.zip
System.GC.AllocateUninitializedArray (#24096)
* Do not expand to allocation_quantum in SOH when GC_ALLOC_ZEROING_OPTIONAL * short-circuit short arrays to use `new T[size]` * Clean syncblock of large-aligned objects on ARM32 * specialize single-dimensional path AllocateSzArray * Unit tests * Some PR feedback. Made AllocateUninitializedArray not be trimmed away. * PR feedback on gchelpers - replaced use of multiple bool parameters with flags enum - merged some methods with nearly identical implementation - switched callers to use AllocateSzArray vs. AllocateArrayEx where appropriate. * PR feedback. Removed X86 specific array/string allocation helpers.
-rw-r--r--src/System.Private.CoreLib/ILLinkTrim.xml2
-rw-r--r--src/System.Private.CoreLib/src/System/GC.cs29
-rw-r--r--src/gc/gc.cpp176
-rw-r--r--src/gc/gcinterface.h28
-rw-r--r--src/gc/gcpriv.h24
-rw-r--r--src/vm/comutilnative.cpp28
-rw-r--r--src/vm/comutilnative.h4
-rw-r--r--src/vm/crossloaderallocatorhash.inl4
-rw-r--r--src/vm/customattribute.cpp4
-rw-r--r--src/vm/ecalllist.h2
-rw-r--r--src/vm/fieldmarshaler.cpp2
-rw-r--r--src/vm/gchelpers.cpp435
-rw-r--r--src/vm/gchelpers.h90
-rw-r--r--src/vm/i386/jitinterfacex86.cpp118
-rw-r--r--src/vm/ilmarshalers.cpp4
-rw-r--r--src/vm/interpreter.cpp2
-rw-r--r--src/vm/jithelpers.cpp61
-rw-r--r--src/vm/object.h6
-rw-r--r--src/vm/qcall.cpp2
-rw-r--r--src/vm/runtimehandles.cpp14
-rw-r--r--src/vm/typeparse.cpp2
-rw-r--r--tests/src/GC/API/GC/AllocateUninitializedArray.cs129
-rw-r--r--tests/src/GC/API/GC/AllocateUninitializedArray.csproj37
-rw-r--r--tests/src/JIT/Methodical/doublearray/dblarray3.cs2
24 files changed, 640 insertions, 565 deletions
diff --git a/src/System.Private.CoreLib/ILLinkTrim.xml b/src/System.Private.CoreLib/ILLinkTrim.xml
index b621468b61..d88232b42b 100644
--- a/src/System.Private.CoreLib/ILLinkTrim.xml
+++ b/src/System.Private.CoreLib/ILLinkTrim.xml
@@ -9,6 +9,8 @@
<!-- Methods are used to register and unregister frozen segments. They are private and experimental. -->
<method name="_RegisterFrozenSegment" />
<method name="_UnregisterFrozenSegment" />
+ <!-- This is an internal API for now and is not yet used outside tests. -->
+ <method name="AllocateUninitializedArray" />
</type>
<!-- Properties and methods used by a debugger. -->
<type fullname="System.Threading.Tasks.Task">
diff --git a/src/System.Private.CoreLib/src/System/GC.cs b/src/System.Private.CoreLib/src/System/GC.cs
index 65de26502f..2f88ae0138 100644
--- a/src/System.Private.CoreLib/src/System/GC.cs
+++ b/src/System.Private.CoreLib/src/System/GC.cs
@@ -18,6 +18,7 @@ using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Diagnostics;
using System.Collections.Generic;
+using Internal.Runtime.CompilerServices;
namespace System
{
@@ -81,7 +82,10 @@ namespace System
[DllImport(JitHelpers.QCall, CharSet = CharSet.Unicode)]
internal static extern int _EndNoGCRegion();
-
+
+ [MethodImplAttribute(MethodImplOptions.InternalCall)]
+ internal static extern Array AllocateNewArray(IntPtr typeHandle, int length, bool zeroingOptional);
+
[MethodImplAttribute(MethodImplOptions.InternalCall)]
private static extern int GetGenerationWR(IntPtr handle);
@@ -643,5 +647,28 @@ namespace System
// UnregisterMemoryLoadChangeNotification and InvokeMemoryLoadChangeNotifications in native.
}
}
+
+ // Skips zero-initialization of the array if possible. If T contains object references,
+ // the array is always zero-initialized.
+ internal static T[] AllocateUninitializedArray<T>(int length)
+ {
+ if (length < 0)
+ ThrowHelper.ThrowArgumentOutOfRangeException(ExceptionArgument.lengths, 0, ExceptionResource.ArgumentOutOfRange_NeedNonNegNum);
+#if DEBUG
+ // in DEBUG arrays of any length can be created uninitialized
+#else
+ // otherwise small arrays are allocated using `new[]` as that is generally faster.
+ //
+ // The threshold was derived from various simulations.
+ // As it turned out the threshold depends on overal pattern of all allocations and is typically in 200-300 byte range.
+ // The gradient around the number is shallow (there is no perf cliff) and the exact value of the threshold does not matter a lot.
+ // So it is 256 bytes including array header.
+ if (Unsafe.SizeOf<T>() * length < 256 - 3 * IntPtr.Size)
+ {
+ return new T[length];
+ }
+#endif
+ return (T[])AllocateNewArray(typeof(T[]).TypeHandle.Value, length, zeroingOptional: true);
+ }
}
}
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index 42e93fefc4..4f77e1d75d 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -11458,9 +11458,9 @@ void allocator::commit_alloc_list_changes()
}
}
-void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size,
- alloc_context* acontext, heap_segment* seg,
- int align_const, int gen_number)
+void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size, size_t size,
+ alloc_context* acontext, uint32_t flags,
+ heap_segment* seg, int align_const, int gen_number)
{
bool loh_p = (gen_number > 0);
GCSpinLock* msl = loh_p ? &more_space_lock_loh : &more_space_lock_soh;
@@ -11491,12 +11491,12 @@ void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size,
uint8_t* hole = acontext->alloc_ptr;
if (hole != 0)
{
- size_t size = (acontext->alloc_limit - acontext->alloc_ptr);
- dprintf (3, ("filling up hole [%Ix, %Ix[", (size_t)hole, (size_t)hole + size + Align (min_obj_size, align_const)));
+ size_t ac_size = (acontext->alloc_limit - acontext->alloc_ptr);
+ dprintf (3, ("filling up hole [%Ix, %Ix[", (size_t)hole, (size_t)hole + ac_size + Align (min_obj_size, align_const)));
// when we are finishing an allocation from a free list
// we know that the free area was Align(min_obj_size) larger
- acontext->alloc_bytes -= size;
- size_t free_obj_size = size + aligned_min_obj_size;
+ acontext->alloc_bytes -= ac_size;
+ size_t free_obj_size = ac_size + aligned_min_obj_size;
make_unused_array (hole, free_obj_size);
generation_free_obj_space (generation_of (gen_number)) += free_obj_size;
}
@@ -11555,32 +11555,60 @@ void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size,
assert (heap_segment_used (seg) >= old_allocated);
}
#endif //BACKGROUND_GC
- if ((seg == 0) ||
- (start - plug_skew + limit_size) <= heap_segment_used (seg))
+
+ // we are going to clear a right-edge exclusive span [clear_start, clear_limit)
+ // but will adjust for cases when object is ok to stay dirty or the space has not seen any use yet
+ // NB: the size and limit_size include syncblock, which is to the -1 of the object start
+ // that effectively shifts the allocation by `plug_skew`
+ uint8_t* clear_start = start - plug_skew;
+ uint8_t* clear_limit = start + limit_size - plug_skew;
+
+ if (flags & GC_ALLOC_ZEROING_OPTIONAL)
+ {
+ uint8_t* obj_start = acontext->alloc_ptr;
+ assert(start >= obj_start);
+ uint8_t* obj_end = obj_start + size - plug_skew;
+ assert(obj_end > clear_start);
+
+ // if clearing at the object start, clear the syncblock.
+ if(obj_start == start)
+ {
+ *(PTR_PTR)clear_start = 0;
+ }
+ // skip the rest of the object
+ clear_start = obj_end;
+ }
+
+ // check if space to clear is all dirty from prior use or only partially
+ if ((seg == 0) || (clear_limit <= heap_segment_used (seg)))
{
add_saved_spinlock_info (loh_p, me_release, mt_clr_mem);
leave_spin_lock (msl);
- dprintf (3, ("clearing memory at %Ix for %d bytes", (start - plug_skew), limit_size));
- memclr (start - plug_skew, limit_size);
+
+ if (clear_start < clear_limit)
+ {
+ dprintf(3, ("clearing memory at %Ix for %d bytes", clear_start, clear_limit - clear_start));
+ memclr(clear_start, clear_limit - clear_start);
+ }
}
else
{
+ // we only need to clear [clear_start, used) and only if clear_start < used
uint8_t* used = heap_segment_used (seg);
- heap_segment_used (seg) = start + limit_size - plug_skew;
+ heap_segment_used (seg) = clear_limit;
add_saved_spinlock_info (loh_p, me_release, mt_clr_mem);
leave_spin_lock (msl);
- if ((start - plug_skew) < used)
+ if (clear_start < used)
{
if (used != saved_used)
{
FATAL_GC_ERROR ();
}
- dprintf (2, ("clearing memory before used at %Ix for %Id bytes",
- (start - plug_skew), (plug_skew + used - start)));
- memclr (start - plug_skew, used - (start - plug_skew));
+ dprintf (2, ("clearing memory before used at %Ix for %Id bytes", clear_start, used - clear_start));
+ memclr (clear_start, used - clear_start);
}
}
@@ -11627,17 +11655,18 @@ size_t gc_heap::new_allocation_limit (size_t size, size_t physical_limit, int ge
return limit;
}
-size_t gc_heap::limit_from_size (size_t size, size_t physical_limit, int gen_number,
+size_t gc_heap::limit_from_size (size_t size, uint32_t flags, size_t physical_limit, int gen_number,
int align_const)
{
size_t padded_size = size + Align (min_obj_size, align_const);
// for LOH this is not true...we could select a physical_limit that's exactly the same
// as size.
assert ((gen_number != 0) || (physical_limit >= padded_size));
- size_t min_size_to_allocate = ((gen_number == 0) ? allocation_quantum : 0);
- // For SOH if the size asked for is very small, we want to allocate more than
- // just what's asked for if possible.
+ // For SOH if the size asked for is very small, we want to allocate more than just what's asked for if possible.
+ // Unless we were told not to clean, then we will not force it.
+ size_t min_size_to_allocate = ((gen_number == 0 && !(flags & GC_ALLOC_ZEROING_OPTIONAL)) ? allocation_quantum : 0);
+
size_t desired_size_to_allocate = max (padded_size, min_size_to_allocate);
size_t new_physical_limit = min (physical_limit, desired_size_to_allocate);
@@ -11959,6 +11988,7 @@ inline
BOOL gc_heap::a_fit_free_list_p (int gen_number,
size_t size,
alloc_context* acontext,
+ uint32_t flags,
int align_const)
{
BOOL can_fit = FALSE;
@@ -11985,7 +12015,7 @@ BOOL gc_heap::a_fit_free_list_p (int gen_number,
// We ask for more Align (min_obj_size)
// to make sure that we can insert a free object
// in adjust_limit will set the limit lower
- size_t limit = limit_from_size (size, free_list_size, gen_number, align_const);
+ size_t limit = limit_from_size (size, flags, free_list_size, gen_number, align_const);
uint8_t* remain = (free_list + limit);
size_t remain_size = (free_list_size - limit);
@@ -12002,7 +12032,7 @@ BOOL gc_heap::a_fit_free_list_p (int gen_number,
}
generation_free_list_space (gen) -= limit;
- adjust_limit_clr (free_list, limit, acontext, 0, align_const, gen_number);
+ adjust_limit_clr (free_list, limit, size, acontext, flags, 0, align_const, gen_number);
can_fit = TRUE;
goto end;
@@ -12034,6 +12064,7 @@ end:
void gc_heap::bgc_loh_alloc_clr (uint8_t* alloc_start,
size_t size,
alloc_context* acontext,
+ uint32_t flags,
int align_const,
int lock_index,
BOOL check_used_p,
@@ -12097,7 +12128,11 @@ void gc_heap::bgc_loh_alloc_clr (uint8_t* alloc_start,
dprintf (SPINLOCK_LOG, ("[%d]Lmsl to clear large obj", heap_number));
add_saved_spinlock_info (true, me_release, mt_clr_large_mem);
leave_spin_lock (&more_space_lock_loh);
- memclr (alloc_start + size_to_skip, size_to_clear);
+
+ if (!(flags & GC_ALLOC_ZEROING_OPTIONAL))
+ {
+ memclr(alloc_start + size_to_skip, size_to_clear);
+ }
bgc_alloc_lock->loh_alloc_set (alloc_start);
@@ -12111,6 +12146,7 @@ void gc_heap::bgc_loh_alloc_clr (uint8_t* alloc_start,
BOOL gc_heap::a_fit_free_list_large_p (size_t size,
alloc_context* acontext,
+ uint32_t flags,
int align_const)
{
BOOL can_fit = FALSE;
@@ -12154,7 +12190,7 @@ BOOL gc_heap::a_fit_free_list_large_p (size_t size,
loh_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE);
// Substract min obj size because limit_from_size adds it. Not needed for LOH
- size_t limit = limit_from_size (size - Align(min_obj_size, align_const), free_list_size,
+ size_t limit = limit_from_size (size - Align(min_obj_size, align_const), flags, free_list_size,
gen_number, align_const);
#ifdef FEATURE_LOH_COMPACTION
@@ -12185,12 +12221,12 @@ BOOL gc_heap::a_fit_free_list_large_p (size_t size,
#ifdef BACKGROUND_GC
if (cookie != -1)
{
- bgc_loh_alloc_clr (free_list, limit, acontext, align_const, cookie, FALSE, 0);
+ bgc_loh_alloc_clr (free_list, limit, acontext, flags, align_const, cookie, FALSE, 0);
}
else
#endif //BACKGROUND_GC
{
- adjust_limit_clr (free_list, limit, acontext, 0, align_const, gen_number);
+ adjust_limit_clr (free_list, limit, size, acontext, flags, 0, align_const, gen_number);
}
//fix the limit to compensate for adjust_limit_clr making it too short
@@ -12216,6 +12252,7 @@ BOOL gc_heap::a_fit_segment_end_p (int gen_number,
heap_segment* seg,
size_t size,
alloc_context* acontext,
+ uint32_t flags,
int align_const,
BOOL* commit_failed_p)
{
@@ -12245,6 +12282,7 @@ BOOL gc_heap::a_fit_segment_end_p (int gen_number,
if (a_size_fit_p (size, allocated, end, align_const))
{
limit = limit_from_size (size,
+ flags,
(end - allocated),
gen_number, align_const);
goto found_fit;
@@ -12255,6 +12293,7 @@ BOOL gc_heap::a_fit_segment_end_p (int gen_number,
if (a_size_fit_p (size, allocated, end, align_const))
{
limit = limit_from_size (size,
+ flags,
(end - allocated),
gen_number, align_const);
@@ -12310,12 +12349,12 @@ found_fit:
#ifdef BACKGROUND_GC
if (cookie != -1)
{
- bgc_loh_alloc_clr (old_alloc, limit, acontext, align_const, cookie, TRUE, seg);
+ bgc_loh_alloc_clr (old_alloc, limit, acontext, flags, align_const, cookie, TRUE, seg);
}
else
#endif //BACKGROUND_GC
{
- adjust_limit_clr (old_alloc, limit, acontext, seg, align_const, gen_number);
+ adjust_limit_clr (old_alloc, limit, size, acontext, flags, seg, align_const, gen_number);
}
return TRUE;
@@ -12328,6 +12367,7 @@ found_no_fit:
BOOL gc_heap::loh_a_fit_segment_end_p (int gen_number,
size_t size,
alloc_context* acontext,
+ uint32_t flags,
int align_const,
BOOL* commit_failed_p,
oom_reason* oom_r)
@@ -12347,7 +12387,7 @@ BOOL gc_heap::loh_a_fit_segment_end_p (int gen_number,
#endif //BACKGROUND_GC
{
if (a_fit_segment_end_p (gen_number, seg, (size - Align (min_obj_size, align_const)),
- acontext, align_const, commit_failed_p))
+ acontext, flags, align_const, commit_failed_p))
{
acontext->alloc_limit += Align (min_obj_size, align_const);
can_allocate_p = TRUE;
@@ -12430,6 +12470,7 @@ BOOL gc_heap::trigger_ephemeral_gc (gc_reason gr)
BOOL gc_heap::soh_try_fit (int gen_number,
size_t size,
alloc_context* acontext,
+ uint32_t flags,
int align_const,
BOOL* commit_failed_p,
BOOL* short_seg_end_p)
@@ -12440,7 +12481,7 @@ BOOL gc_heap::soh_try_fit (int gen_number,
*short_seg_end_p = FALSE;
}
- can_allocate = a_fit_free_list_p (gen_number, size, acontext, align_const);
+ can_allocate = a_fit_free_list_p (gen_number, size, acontext, flags, align_const);
if (!can_allocate)
{
if (short_seg_end_p)
@@ -12452,7 +12493,7 @@ BOOL gc_heap::soh_try_fit (int gen_number,
if (!short_seg_end_p || !(*short_seg_end_p))
{
can_allocate = a_fit_segment_end_p (gen_number, ephemeral_heap_segment, size,
- acontext, align_const, commit_failed_p);
+ acontext, flags, align_const, commit_failed_p);
}
}
@@ -12462,6 +12503,7 @@ BOOL gc_heap::soh_try_fit (int gen_number,
allocation_state gc_heap::allocate_small (int gen_number,
size_t size,
alloc_context* acontext,
+ uint32_t flags,
int align_const)
{
#if defined (BACKGROUND_GC) && !defined (MULTIPLE_HEAPS)
@@ -12515,7 +12557,7 @@ allocation_state gc_heap::allocate_small (int gen_number,
BOOL commit_failed_p = FALSE;
BOOL can_use_existing_p = FALSE;
- can_use_existing_p = soh_try_fit (gen_number, size, acontext,
+ can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags,
align_const, &commit_failed_p,
NULL);
soh_alloc_state = (can_use_existing_p ?
@@ -12531,7 +12573,7 @@ allocation_state gc_heap::allocate_small (int gen_number,
BOOL can_use_existing_p = FALSE;
BOOL short_seg_end_p = FALSE;
- can_use_existing_p = soh_try_fit (gen_number, size, acontext,
+ can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags,
align_const, &commit_failed_p,
&short_seg_end_p);
soh_alloc_state = (can_use_existing_p ?
@@ -12547,7 +12589,7 @@ allocation_state gc_heap::allocate_small (int gen_number,
BOOL can_use_existing_p = FALSE;
BOOL short_seg_end_p = FALSE;
- can_use_existing_p = soh_try_fit (gen_number, size, acontext,
+ can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags,
align_const, &commit_failed_p,
&short_seg_end_p);
@@ -12602,7 +12644,7 @@ allocation_state gc_heap::allocate_small (int gen_number,
}
else
{
- can_use_existing_p = soh_try_fit (gen_number, size, acontext,
+ can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags,
align_const, &commit_failed_p,
&short_seg_end_p);
#ifdef BACKGROUND_GC
@@ -12664,7 +12706,7 @@ allocation_state gc_heap::allocate_small (int gen_number,
}
else
{
- can_use_existing_p = soh_try_fit (gen_number, size, acontext,
+ can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags,
align_const, &commit_failed_p,
&short_seg_end_p);
if (short_seg_end_p || commit_failed_p)
@@ -12869,16 +12911,17 @@ BOOL gc_heap::check_and_wait_for_bgc (alloc_wait_reason awr,
BOOL gc_heap::loh_try_fit (int gen_number,
size_t size,
alloc_context* acontext,
+ uint32_t flags,
int align_const,
BOOL* commit_failed_p,
oom_reason* oom_r)
{
BOOL can_allocate = TRUE;
- if (!a_fit_free_list_large_p (size, acontext, align_const))
+ if (!a_fit_free_list_large_p (size, acontext, flags, align_const))
{
can_allocate = loh_a_fit_segment_end_p (gen_number, size,
- acontext, align_const,
+ acontext, flags, align_const,
commit_failed_p, oom_r);
#ifdef BACKGROUND_GC
@@ -13009,6 +13052,7 @@ bool gc_heap::should_retry_other_heap (size_t size)
allocation_state gc_heap::allocate_large (int gen_number,
size_t size,
alloc_context* acontext,
+ uint32_t flags,
int align_const)
{
#ifdef BACKGROUND_GC
@@ -13077,7 +13121,7 @@ allocation_state gc_heap::allocate_large (int gen_number,
BOOL commit_failed_p = FALSE;
BOOL can_use_existing_p = FALSE;
- can_use_existing_p = loh_try_fit (gen_number, size, acontext,
+ can_use_existing_p = loh_try_fit (gen_number, size, acontext, flags,
align_const, &commit_failed_p, &oom_r);
loh_alloc_state = (can_use_existing_p ?
a_state_can_allocate :
@@ -13092,7 +13136,7 @@ allocation_state gc_heap::allocate_large (int gen_number,
BOOL commit_failed_p = FALSE;
BOOL can_use_existing_p = FALSE;
- can_use_existing_p = loh_try_fit (gen_number, size, acontext,
+ can_use_existing_p = loh_try_fit (gen_number, size, acontext, flags,
align_const, &commit_failed_p, &oom_r);
// Even after we got a new seg it doesn't necessarily mean we can allocate,
// another LOH allocating thread could have beat us to acquire the msl so
@@ -13106,7 +13150,7 @@ allocation_state gc_heap::allocate_large (int gen_number,
BOOL commit_failed_p = FALSE;
BOOL can_use_existing_p = FALSE;
- can_use_existing_p = loh_try_fit (gen_number, size, acontext,
+ can_use_existing_p = loh_try_fit (gen_number, size, acontext, flags,
align_const, &commit_failed_p, &oom_r);
// If we failed to commit, we bail right away 'cause we already did a
// full compacting GC.
@@ -13123,7 +13167,7 @@ allocation_state gc_heap::allocate_large (int gen_number,
BOOL commit_failed_p = FALSE;
BOOL can_use_existing_p = FALSE;
- can_use_existing_p = loh_try_fit (gen_number, size, acontext,
+ can_use_existing_p = loh_try_fit (gen_number, size, acontext, flags,
align_const, &commit_failed_p, &oom_r);
loh_alloc_state = (can_use_existing_p ?
a_state_can_allocate :
@@ -13296,8 +13340,8 @@ void gc_heap::trigger_gc_for_alloc (int gen_number, gc_reason gr,
#endif //BACKGROUND_GC
}
-allocation_state gc_heap::try_allocate_more_space (alloc_context* acontext, size_t size,
- int gen_number)
+allocation_state gc_heap::try_allocate_more_space (alloc_context* acontext, size_t size,
+ uint32_t flags, int gen_number)
{
if (gc_heap::gc_started)
{
@@ -13385,8 +13429,8 @@ allocation_state gc_heap::try_allocate_more_space (alloc_context* acontext, size
}
allocation_state can_allocate = ((gen_number == 0) ?
- allocate_small (gen_number, size, acontext, align_const) :
- allocate_large (gen_number, size, acontext, align_const));
+ allocate_small (gen_number, size, acontext, flags, align_const) :
+ allocate_large (gen_number, size, acontext, flags, align_const));
if (can_allocate == a_state_can_allocate)
{
@@ -13403,13 +13447,16 @@ allocation_state gc_heap::try_allocate_more_space (alloc_context* acontext, size
FIRE_EVENT(GCAllocationTick_V1, (uint32_t)etw_allocation_running_amount[etw_allocation_index],
(gen_number == 0) ? gc_etw_alloc_soh : gc_etw_alloc_loh);
#else
+
+#if defined(FEATURE_EVENT_TRACE)
+ // We are explicitly checking whether the event is enabled here.
// Unfortunately some of the ETW macros do not check whether the ETW feature is enabled.
// The ones that do are much less efficient.
-#if defined(FEATURE_EVENT_TRACE)
if (EVENT_ENABLED(GCAllocationTick_V3))
{
fire_etw_allocation_event (etw_allocation_running_amount[etw_allocation_index], gen_number, acontext->alloc_ptr);
}
+
#endif //FEATURE_EVENT_TRACE
#endif //FEATURE_REDHAWK
etw_allocation_running_amount[etw_allocation_index] = 0;
@@ -13673,7 +13720,7 @@ try_again:
#endif //MULTIPLE_HEAPS
BOOL gc_heap::allocate_more_space(alloc_context* acontext, size_t size,
- int alloc_generation_number)
+ uint32_t flags, int alloc_generation_number)
{
allocation_state status = a_state_start;
do
@@ -13682,7 +13729,7 @@ BOOL gc_heap::allocate_more_space(alloc_context* acontext, size_t size,
if (alloc_generation_number == 0)
{
balance_heaps (acontext);
- status = acontext->get_alloc_heap()->pGenGCHeap->try_allocate_more_space (acontext, size, alloc_generation_number);
+ status = acontext->get_alloc_heap()->pGenGCHeap->try_allocate_more_space (acontext, size, flags, alloc_generation_number);
}
else
{
@@ -13700,14 +13747,14 @@ BOOL gc_heap::allocate_more_space(alloc_context* acontext, size_t size,
alloc_heap = balance_heaps_loh (acontext, size);
}
- status = alloc_heap->try_allocate_more_space (acontext, size, alloc_generation_number);
+ status = alloc_heap->try_allocate_more_space (acontext, size, flags, alloc_generation_number);
if (status == a_state_retry_allocate)
{
dprintf (3, ("LOH h%d alloc retry!", alloc_heap->heap_number));
}
}
#else
- status = try_allocate_more_space (acontext, size, alloc_generation_number);
+ status = try_allocate_more_space (acontext, size, flags, alloc_generation_number);
#endif //MULTIPLE_HEAPS
}
while (status == a_state_retry_allocate);
@@ -13716,7 +13763,7 @@ BOOL gc_heap::allocate_more_space(alloc_context* acontext, size_t size,
}
inline
-CObjectHeader* gc_heap::allocate (size_t jsize, alloc_context* acontext)
+CObjectHeader* gc_heap::allocate (size_t jsize, alloc_context* acontext, uint32_t flags)
{
size_t size = Align (jsize);
assert (size >= Align (min_obj_size));
@@ -13738,7 +13785,7 @@ CObjectHeader* gc_heap::allocate (size_t jsize, alloc_context* acontext)
#pragma inline_depth(0)
#endif //_MSC_VER
- if (! allocate_more_space (acontext, size, 0))
+ if (! allocate_more_space (acontext, size, flags, 0))
return 0;
#ifdef _MSC_VER
@@ -31142,7 +31189,7 @@ BOOL gc_heap::ephemeral_gen_fit_p (gc_tuning_point tp)
}
}
-CObjectHeader* gc_heap::allocate_large_object (size_t jsize, int64_t& alloc_bytes)
+CObjectHeader* gc_heap::allocate_large_object (size_t jsize, uint32_t flags, int64_t& alloc_bytes)
{
//create a new alloc context because gen3context is shared.
alloc_context acontext;
@@ -31175,7 +31222,7 @@ CObjectHeader* gc_heap::allocate_large_object (size_t jsize, int64_t& alloc_byte
#ifdef _MSC_VER
#pragma inline_depth(0)
#endif //_MSC_VER
- if (! allocate_more_space (&acontext, (size + pad), max_generation+1))
+ if (! allocate_more_space (&acontext, (size + pad), flags, max_generation+1))
{
return 0;
}
@@ -34781,7 +34828,7 @@ bool GCHeap::StressHeap(gc_alloc_context * context)
// update the cached type handle before allocating
SetTypeHandleOnThreadForAlloc(TypeHandle(g_pStringClass));
- str = (StringObject*) pGenGCHeap->allocate (strSize, acontext);
+ str = (StringObject*) pGenGCHeap->allocate (strSize, acontext, /*flags*/ 0);
if (str)
{
str->SetMethodTable (g_pStringClass);
@@ -34948,7 +34995,7 @@ GCHeap::AllocAlign8Common(void* _hp, alloc_context* acontext, size_t size, uint3
if ((((size_t)result & 7) == desiredAlignment) && ((result + size) <= acontext->alloc_limit))
{
// Yes, we can just go ahead and make the allocation.
- newAlloc = (Object*) hp->allocate (size, acontext);
+ newAlloc = (Object*) hp->allocate (size, acontext, flags);
ASSERT(((size_t)newAlloc & 7) == desiredAlignment);
}
else
@@ -34961,7 +35008,7 @@ GCHeap::AllocAlign8Common(void* _hp, alloc_context* acontext, size_t size, uint3
// We allocate both together then decide based on the result whether we'll format the space as
// free object + real object or real object + free object.
ASSERT((Align(min_obj_size) & 7) == 4);
- CObjectHeader *freeobj = (CObjectHeader*) hp->allocate (Align(size) + Align(min_obj_size), acontext);
+ CObjectHeader *freeobj = (CObjectHeader*) hp->allocate (Align(size) + Align(min_obj_size), acontext, flags);
if (freeobj)
{
if (((size_t)freeobj & 7) == desiredAlignment)
@@ -34977,6 +35024,11 @@ GCHeap::AllocAlign8Common(void* _hp, alloc_context* acontext, size_t size, uint3
// rest of the space should be correctly aligned for the real object.
newAlloc = (Object*)((uint8_t*)freeobj + Align(min_obj_size));
ASSERT(((size_t)newAlloc & 7) == desiredAlignment);
+ if (flags & GC_ALLOC_ZEROING_OPTIONAL)
+ {
+ // clean the syncblock of the aligned object.
+ *(((PTR_PTR)newAlloc)-1) = 0;
+ }
}
freeobj->SetFree(min_obj_size);
}
@@ -34993,7 +35045,7 @@ GCHeap::AllocAlign8Common(void* _hp, alloc_context* acontext, size_t size, uint3
alloc_context* acontext = generation_alloc_context (hp->generation_of (max_generation+1));
- newAlloc = (Object*) hp->allocate_large_object (size, acontext->alloc_bytes_loh);
+ newAlloc = (Object*) hp->allocate_large_object (size, flags, acontext->alloc_bytes_loh);
ASSERT(((size_t)newAlloc & 7) == 0);
}
@@ -35055,7 +35107,7 @@ GCHeap::AllocLHeap( size_t size, uint32_t flags REQD_ALIGN_DCL)
alloc_context* acontext = generation_alloc_context (hp->generation_of (max_generation+1));
- newAlloc = (Object*) hp->allocate_large_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), acontext->alloc_bytes_loh);
+ newAlloc = (Object*) hp->allocate_large_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), flags, acontext->alloc_bytes_loh);
#ifdef FEATURE_STRUCTALIGN
newAlloc = (Object*) hp->pad_for_alignment_large ((uint8_t*) newAlloc, requiredAlignment, size);
#endif // FEATURE_STRUCTALIGN
@@ -35118,7 +35170,7 @@ GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_
#ifdef TRACE_GC
AllocSmallCount++;
#endif //TRACE_GC
- newAlloc = (Object*) hp->allocate (size + ComputeMaxStructAlignPad(requiredAlignment), acontext);
+ newAlloc = (Object*) hp->allocate (size + ComputeMaxStructAlignPad(requiredAlignment), acontext, flags);
#ifdef FEATURE_STRUCTALIGN
newAlloc = (Object*) hp->pad_for_alignment ((uint8_t*) newAlloc, requiredAlignment, size, acontext);
#endif // FEATURE_STRUCTALIGN
@@ -35126,7 +35178,7 @@ GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_
}
else
{
- newAlloc = (Object*) hp->allocate_large_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), acontext->alloc_bytes_loh);
+ newAlloc = (Object*) hp->allocate_large_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), flags, acontext->alloc_bytes_loh);
#ifdef FEATURE_STRUCTALIGN
newAlloc = (Object*) hp->pad_for_alignment_large ((uint8_t*) newAlloc, requiredAlignment, size);
#endif // FEATURE_STRUCTALIGN
diff --git a/src/gc/gcinterface.h b/src/gc/gcinterface.h
index e6c2ccb862..c710d6d321 100644
--- a/src/gc/gcinterface.h
+++ b/src/gc/gcinterface.h
@@ -910,10 +910,30 @@ void updateGCShadow(Object** ptr, Object* val);
#define GC_CALL_CHECK_APP_DOMAIN 0x4
//flags for IGCHeapAlloc(...)
-#define GC_ALLOC_FINALIZE 0x1
-#define GC_ALLOC_CONTAINS_REF 0x2
-#define GC_ALLOC_ALIGN8_BIAS 0x4
-#define GC_ALLOC_ALIGN8 0x8
+enum GC_ALLOC_FLAGS
+{
+ GC_ALLOC_NO_FLAGS = 0,
+ GC_ALLOC_FINALIZE = 1,
+ GC_ALLOC_CONTAINS_REF = 2,
+ GC_ALLOC_ALIGN8_BIAS = 4,
+ GC_ALLOC_ALIGN8 = 8,
+ GC_ALLOC_ZEROING_OPTIONAL = 16,
+};
+
+inline GC_ALLOC_FLAGS operator|(GC_ALLOC_FLAGS a, GC_ALLOC_FLAGS b)
+{return (GC_ALLOC_FLAGS)((int)a | (int)b);}
+
+inline GC_ALLOC_FLAGS operator&(GC_ALLOC_FLAGS a, GC_ALLOC_FLAGS b)
+{return (GC_ALLOC_FLAGS)((int)a & (int)b);}
+
+inline GC_ALLOC_FLAGS operator~(GC_ALLOC_FLAGS a)
+{return (GC_ALLOC_FLAGS)(~(int)a);}
+
+inline GC_ALLOC_FLAGS& operator|=(GC_ALLOC_FLAGS& a, GC_ALLOC_FLAGS b)
+{return (GC_ALLOC_FLAGS&)((int&)a |= (int)b);}
+
+inline GC_ALLOC_FLAGS& operator&=(GC_ALLOC_FLAGS& a, GC_ALLOC_FLAGS b)
+{return (GC_ALLOC_FLAGS&)((int&)a &= (int)b);}
#if defined(USE_CHECKED_OBJECTREFS) && !defined(_NOVM)
#define OBJECTREF_TO_UNCHECKED_OBJECTREF(objref) (*((_UNCHECKED_OBJECTREF*)&(objref)))
diff --git a/src/gc/gcpriv.h b/src/gc/gcpriv.h
index c1d7b7f4d2..2288ffee25 100644
--- a/src/gc/gcpriv.h
+++ b/src/gc/gcpriv.h
@@ -1219,7 +1219,8 @@ public:
PER_HEAP
CObjectHeader* allocate (size_t jsize,
- alloc_context* acontext);
+ alloc_context* acontext,
+ uint32_t flags);
#ifdef MULTIPLE_HEAPS
static
@@ -1241,7 +1242,7 @@ public:
// Note: This is an instance method, but the heap instance is only used for
// lowest_address and highest_address, which are currently the same accross all heaps.
PER_HEAP
- CObjectHeader* allocate_large_object (size_t size, int64_t& alloc_bytes);
+ CObjectHeader* allocate_large_object (size_t size, uint32_t flags, int64_t& alloc_bytes);
#ifdef FEATURE_STRUCTALIGN
PER_HEAP
@@ -1449,13 +1450,13 @@ protected:
void fire_etw_pin_object_event (uint8_t* object, uint8_t** ppObject);
PER_HEAP
- size_t limit_from_size (size_t size, size_t room, int gen_number,
+ size_t limit_from_size (size_t size, uint32_t flags, size_t room, int gen_number,
int align_const);
PER_HEAP
- allocation_state try_allocate_more_space (alloc_context* acontext, size_t jsize,
+ allocation_state try_allocate_more_space (alloc_context* acontext, size_t jsize, uint32_t flags,
int alloc_generation_number);
PER_HEAP_ISOLATED
- BOOL allocate_more_space (alloc_context* acontext, size_t jsize,
+ BOOL allocate_more_space (alloc_context* acontext, size_t jsize, uint32_t flags,
int alloc_generation_number);
PER_HEAP
@@ -1470,6 +1471,7 @@ protected:
BOOL a_fit_free_list_p (int gen_number,
size_t size,
alloc_context* acontext,
+ uint32_t flags,
int align_const);
#ifdef BACKGROUND_GC
@@ -1483,6 +1485,7 @@ protected:
void bgc_loh_alloc_clr (uint8_t* alloc_start,
size_t size,
alloc_context* acontext,
+ uint32_t flags,
int align_const,
int lock_index,
BOOL check_used_p,
@@ -1524,6 +1527,7 @@ protected:
PER_HEAP
BOOL a_fit_free_list_large_p (size_t size,
alloc_context* acontext,
+ uint32_t flags,
int align_const);
PER_HEAP
@@ -1531,12 +1535,14 @@ protected:
heap_segment* seg,
size_t size,
alloc_context* acontext,
+ uint32_t flags,
int align_const,
BOOL* commit_failed_p);
PER_HEAP
BOOL loh_a_fit_segment_end_p (int gen_number,
size_t size,
alloc_context* acontext,
+ uint32_t flags,
int align_const,
BOOL* commit_failed_p,
oom_reason* oom_r);
@@ -1570,6 +1576,7 @@ protected:
BOOL soh_try_fit (int gen_number,
size_t size,
alloc_context* acontext,
+ uint32_t flags,
int align_const,
BOOL* commit_failed_p,
BOOL* short_seg_end_p);
@@ -1577,6 +1584,7 @@ protected:
BOOL loh_try_fit (int gen_number,
size_t size,
alloc_context* acontext,
+ uint32_t flags,
int align_const,
BOOL* commit_failed_p,
oom_reason* oom_r);
@@ -1585,6 +1593,7 @@ protected:
allocation_state allocate_small (int gen_number,
size_t size,
alloc_context* acontext,
+ uint32_t flags,
int align_const);
#ifdef RECORD_LOH_STATE
@@ -1607,6 +1616,7 @@ protected:
allocation_state allocate_large (int gen_number,
size_t size,
alloc_context* acontext,
+ uint32_t flags,
int align_const);
PER_HEAP_ISOLATED
@@ -1853,8 +1863,8 @@ protected:
void adjust_limit (uint8_t* start, size_t limit_size, generation* gen,
int gen_number);
PER_HEAP
- void adjust_limit_clr (uint8_t* start, size_t limit_size,
- alloc_context* acontext, heap_segment* seg,
+ void adjust_limit_clr (uint8_t* start, size_t limit_size, size_t size,
+ alloc_context* acontext, uint32_t flags, heap_segment* seg,
int align_const, int gen_number);
PER_HEAP
void leave_allocation_segment (generation* gen);
diff --git a/src/vm/comutilnative.cpp b/src/vm/comutilnative.cpp
index d8c73fcb73..46cdbad829 100644
--- a/src/vm/comutilnative.cpp
+++ b/src/vm/comutilnative.cpp
@@ -1260,6 +1260,34 @@ FCIMPL0(INT64, GCInterface::GetAllocatedBytesForCurrentThread)
}
FCIMPLEND
+/*===============================AllocateNewArray===============================
+**Action: Allocates a new array object. Allows passing extra flags
+**Returns: The allocated array.
+**Arguments: arrayTypeHandle -> type of the array to allocate,
+** length -> number of elements,
+** zeroingOptional -> whether caller prefers to skip clearing the content of the array, if possible.
+**Exceptions: IDS_EE_ARRAY_DIMENSIONS_EXCEEDED when size is too large. OOM if can't allocate.
+==============================================================================*/
+FCIMPL3(Object*, GCInterface::AllocateNewArray, void* arrayTypeHandle, INT32 length, CLR_BOOL zeroingOptional)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(length >= 0);
+ } CONTRACTL_END;
+
+ OBJECTREF pRet = NULL;
+ TypeHandle arrayType = TypeHandle::FromPtr(arrayTypeHandle);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ pRet = AllocateSzArray(arrayType, length, zeroingOptional ? GC_ALLOC_ZEROING_OPTIONAL : GC_ALLOC_NO_FLAGS);
+
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(pRet);
+}
+FCIMPLEND
+
#ifdef FEATURE_BASICFREEZE
/*===============================RegisterFrozenSegment===============================
diff --git a/src/vm/comutilnative.h b/src/vm/comutilnative.h
index 67c111bbb4..24cb85e323 100644
--- a/src/vm/comutilnative.h
+++ b/src/vm/comutilnative.h
@@ -138,7 +138,9 @@ public:
static FCDECL1(void, ReRegisterForFinalize, Object *obj);
static FCDECL2(int, CollectionCount, INT32 generation, INT32 getSpecialGCCount);
- static FCDECL0(INT64, GetAllocatedBytesForCurrentThread);
+ static FCDECL0(INT64, GetAllocatedBytesForCurrentThread);
+
+ static FCDECL3(Object*, AllocateNewArray, void* elementTypeHandle, INT32 length, CLR_BOOL zeroingOptional);
#ifdef FEATURE_BASICFREEZE
static
diff --git a/src/vm/crossloaderallocatorhash.inl b/src/vm/crossloaderallocatorhash.inl
index de7f31a0ca..04a98a858b 100644
--- a/src/vm/crossloaderallocatorhash.inl
+++ b/src/vm/crossloaderallocatorhash.inl
@@ -80,7 +80,7 @@ template <class TKey_, class TValue_>
if (*pKeyValueStore == NULL)
{
- *pKeyValueStore = AllocatePrimitiveArray(ELEMENT_TYPE_I1, IsNull(value) ? sizeof(TKey) : sizeof(TKey) + sizeof(TValue), FALSE);
+ *pKeyValueStore = AllocatePrimitiveArray(ELEMENT_TYPE_I1, IsNull(value) ? sizeof(TKey) : sizeof(TKey) + sizeof(TValue));
updatedKeyValueStore = true;
TKey* pKeyLoc = (TKey*)((I1ARRAYREF)*pKeyValueStore)->GetDirectPointerToNonObjectElements();
*pKeyLoc = key;
@@ -108,7 +108,7 @@ template <class TKey_, class TValue_>
COMPlusThrow(kOverflowException);
// Allocate the new array.
- I1ARRAYREF newKeyValueStore = (I1ARRAYREF)AllocatePrimitiveArray(ELEMENT_TYPE_I1, newSize*sizeof(TValue) + sizeof(TKey), FALSE);
+ I1ARRAYREF newKeyValueStore = (I1ARRAYREF)AllocatePrimitiveArray(ELEMENT_TYPE_I1, newSize*sizeof(TValue) + sizeof(TKey));
// Since, AllocatePrimitiveArray may have triggered a GC, recapture all data pointers from GC objects
void* pStartOfNewArray = newKeyValueStore->GetDirectPointerToNonObjectElements();
diff --git a/src/vm/customattribute.cpp b/src/vm/customattribute.cpp
index 2a8d5b0236..6edda673ea 100644
--- a/src/vm/customattribute.cpp
+++ b/src/vm/customattribute.cpp
@@ -170,7 +170,7 @@ CustomAttributeManagedValues Attribute::GetManagedCaValue(CaValue* pCaVal)
if (length != (ULONG)-1)
{
- gc.array = (CaValueArrayREF)AllocateValueSzArray(MscorlibBinder::GetClass(CLASS__CUSTOM_ATTRIBUTE_ENCODED_ARGUMENT), length);
+ gc.array = (CaValueArrayREF)AllocateSzArray(TypeHandle(MscorlibBinder::GetClass(CLASS__CUSTOM_ATTRIBUTE_ENCODED_ARGUMENT)).MakeSZArray(), length);
CustomAttributeValue* pValues = gc.array->GetDirectPointerToNonObjectElements();
for (COUNT_T i = 0; i < length; i ++)
@@ -1310,7 +1310,7 @@ void COMCustomAttribute::ReadArray(Assembly *pCtorAssembly,
TypeHandle arrayHandle = ClassLoader::LoadArrayTypeThrowing(th);
if (arrayHandle.IsNull())
goto badBlob;
- *pArray = (BASEARRAYREF)AllocateArrayEx(arrayHandle, &bounds, 1);
+ *pArray = (BASEARRAYREF)AllocateSzArray(arrayHandle, bounds);
BOOL fSuccess;
switch (elementSize)
{
diff --git a/src/vm/ecalllist.h b/src/vm/ecalllist.h
index 34aead9373..4c7fffc74f 100644
--- a/src/vm/ecalllist.h
+++ b/src/vm/ecalllist.h
@@ -782,6 +782,8 @@ FCFuncStart(gGCInterfaceFuncs)
FCFuncElement("_GetAllocatedBytesForCurrentThread", GCInterface::GetAllocatedBytesForCurrentThread)
+ FCFuncElement("AllocateNewArray", GCInterface::AllocateNewArray)
+
#ifdef FEATURE_BASICFREEZE
QCFuncElement("_RegisterFrozenSegment", GCInterface::RegisterFrozenSegment)
QCFuncElement("_UnregisterFrozenSegment", GCInterface::UnregisterFrozenSegment)
diff --git a/src/vm/fieldmarshaler.cpp b/src/vm/fieldmarshaler.cpp
index a8676d65bd..0629525b02 100644
--- a/src/vm/fieldmarshaler.cpp
+++ b/src/vm/fieldmarshaler.cpp
@@ -2826,7 +2826,7 @@ VOID FieldMarshaler_FixedArray::UpdateCLRImpl(const VOID *pNativeValue, OBJECTRE
CONTRACTL_END;
// Allocate the value class array.
- *ppProtectedCLRValue = AllocateArrayEx(m_arrayType.GetValue(), (INT32*)&m_numElems, 1);
+ *ppProtectedCLRValue = AllocateSzArray(m_arrayType.GetValue(), (INT32)m_numElems);
// Marshal the contents from the native array to the managed array.
const OleVariant::Marshaler *pMarshaler = OleVariant::GetMarshalerForVarType(m_vt, TRUE);
diff --git a/src/vm/gchelpers.cpp b/src/vm/gchelpers.cpp
index fbc2cf1073..8cb4fee9b0 100644
--- a/src/vm/gchelpers.cpp
+++ b/src/vm/gchelpers.cpp
@@ -205,11 +205,11 @@ inline void CheckObjectSize(size_t alloc_size)
// While this is a choke point into allocating an object, it is primitive (it does not want to know about
// MethodTable and thus does not initialize that pointer. It also does not know if the object is finalizable
// or contains pointers. Thus we quickly wrap this function in more user-friendly ones that know about
-// MethodTables etc. (see code:FastAllocatePrimitiveArray code:AllocateArrayEx code:AllocateObject)
+// MethodTables etc. (see code:AllocateSzArray code:AllocateArrayEx code:AllocateObject)
//
// You can get an exhaustive list of code sites that allocate GC objects by finding all calls to
// code:ProfilerObjectAllocatedCallback (since the profiler has to hook them all).
-inline Object* Alloc(size_t size, BOOL bFinalize, BOOL bContainsPointers )
+inline Object* Alloc(size_t size, GC_ALLOC_FLAGS flags)
{
CONTRACTL {
THROWS;
@@ -227,8 +227,8 @@ inline Object* Alloc(size_t size, BOOL bFinalize, BOOL bContainsPointers )
}
#endif
- DWORD flags = ((bContainsPointers ? GC_ALLOC_CONTAINS_REF : 0) |
- (bFinalize ? GC_ALLOC_FINALIZE : 0));
+ if (flags & GC_ALLOC_CONTAINS_REF)
+ flags &= ~GC_ALLOC_ZEROING_OPTIONAL;
Object *retVal = NULL;
CheckObjectSize(size);
@@ -259,7 +259,7 @@ inline Object* Alloc(size_t size, BOOL bFinalize, BOOL bContainsPointers )
#ifdef FEATURE_64BIT_ALIGNMENT
// Helper for allocating 8-byte aligned objects (on platforms where this doesn't happen naturally, e.g. 32-bit
// platforms).
-inline Object* AllocAlign8(size_t size, BOOL bFinalize, BOOL bContainsPointers, BOOL bAlignBias)
+inline Object* AllocAlign8(size_t size, GC_ALLOC_FLAGS flags)
{
CONTRACTL {
THROWS;
@@ -267,9 +267,8 @@ inline Object* AllocAlign8(size_t size, BOOL bFinalize, BOOL bContainsPointers,
MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
} CONTRACTL_END;
- DWORD flags = ((bContainsPointers ? GC_ALLOC_CONTAINS_REF : 0) |
- (bFinalize ? GC_ALLOC_FINALIZE : 0) |
- (bAlignBias ? GC_ALLOC_ALIGN8_BIAS : 0));
+ if (flags & GC_ALLOC_CONTAINS_REF)
+ flags &= ~ GC_ALLOC_ZEROING_OPTIONAL;
Object *retVal = NULL;
CheckObjectSize(size);
@@ -303,7 +302,7 @@ inline Object* AllocAlign8(size_t size, BOOL bFinalize, BOOL bContainsPointers,
//
// One (and only?) example of where this is needed is 8 byte aligning of arrays of doubles. See
// code:EEConfig.GetDoubleArrayToLargeObjectHeapThreshold and code:CORINFO_HELP_NEWARR_1_ALIGN8 for more.
-inline Object* AllocLHeap(size_t size, BOOL bFinalize, BOOL bContainsPointers )
+inline Object* AllocLHeap(size_t size, GC_ALLOC_FLAGS flags)
{
CONTRACTL {
THROWS;
@@ -322,8 +321,8 @@ inline Object* AllocLHeap(size_t size, BOOL bFinalize, BOOL bContainsPointers )
}
#endif
- DWORD flags = ((bContainsPointers ? GC_ALLOC_CONTAINS_REF : 0) |
- (bFinalize ? GC_ALLOC_FINALIZE : 0));
+ if (flags & GC_ALLOC_CONTAINS_REF)
+ flags &= ~GC_ALLOC_ZEROING_OPTIONAL;
Object *retVal = NULL;
CheckObjectSize(size);
@@ -408,15 +407,179 @@ inline SIZE_T MaxArrayLength(SIZE_T componentSize)
return (componentSize == 1) ? 0X7FFFFFC7 : 0X7FEFFFFF;
}
-OBJECTREF AllocateValueSzArray(TypeHandle elementType, INT32 length)
+OBJECTREF AllocateSzArray(TypeHandle arrayType, INT32 cElements, GC_ALLOC_FLAGS flags, BOOL bAllocateInLargeHeap)
{
- CONTRACTL {
+ CONTRACTL{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
} CONTRACTL_END;
- return AllocateArrayEx(elementType.MakeSZArray(), &length, 1);
+ ArrayTypeDesc* arrayDesc = arrayType.AsArray();
+ MethodTable* pArrayMT = arrayDesc->GetMethodTable();
+
+ return AllocateSzArray(pArrayMT, cElements, flags, bAllocateInLargeHeap);
+}
+
+OBJECTREF AllocateSzArray(MethodTable* pArrayMT, INT32 cElements, GC_ALLOC_FLAGS flags, BOOL bAllocateInLargeHeap)
+{
+ CONTRACTL{
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
+ } CONTRACTL_END;
+
+ SetTypeHandleOnThreadForAlloc(TypeHandle(pArrayMT));
+
+ _ASSERTE(pArrayMT->CheckInstanceActivated());
+ _ASSERTE(pArrayMT->GetInternalCorElementType() == ELEMENT_TYPE_SZARRAY);
+
+ CorElementType elemType = pArrayMT->GetArrayElementType();
+
+ // Disallow the creation of void[] (an array of System.Void)
+ if (elemType == ELEMENT_TYPE_VOID)
+ COMPlusThrow(kArgumentException);
+
+ // IBC Log MethodTable access
+ g_IBCLogger.LogMethodTableAccess(pArrayMT);
+
+ if (cElements < 0)
+ COMPlusThrow(kOverflowException);
+
+ SIZE_T componentSize = pArrayMT->GetComponentSize();
+ if ((SIZE_T)cElements > MaxArrayLength(componentSize))
+ ThrowOutOfMemoryDimensionsExceeded();
+
+ // Allocate the space from the GC heap
+#ifdef _TARGET_64BIT_
+ // POSITIVE_INT32 * UINT16 + SMALL_CONST
+ // this cannot overflow on 64bit
+ size_t totalSize = cElements * componentSize + pArrayMT->GetBaseSize();
+
+#else
+ S_SIZE_T safeTotalSize = S_SIZE_T((DWORD)cElements) * S_SIZE_T((DWORD)componentSize) + S_SIZE_T((DWORD)pArrayMT->GetBaseSize());
+ if (safeTotalSize.IsOverflow())
+ ThrowOutOfMemoryDimensionsExceeded();
+
+ size_t totalSize = safeTotalSize.Value();
+#endif
+
+#ifdef FEATURE_DOUBLE_ALIGNMENT_HINT
+ if ((elemType == ELEMENT_TYPE_R8) &&
+ ((DWORD)cElements >= g_pConfig->GetDoubleArrayToLargeObjectHeapThreshold()))
+ {
+ STRESS_LOG2(LF_GC, LL_INFO10, "Allocating double MD array of size %d and length %d to large object heap\n", totalSize, cElements);
+ bAllocateInLargeHeap = TRUE;
+ }
+#endif
+
+ flags |= (pArrayMT->ContainsPointers() ? GC_ALLOC_CONTAINS_REF : GC_ALLOC_NO_FLAGS);
+
+ ArrayBase* orArray = NULL;
+ if (bAllocateInLargeHeap)
+ {
+ orArray = (ArrayBase*)AllocLHeap(totalSize, flags);
+ orArray->SetArrayMethodTableForLargeObject(pArrayMT);
+ }
+ else
+ {
+ if ((DATA_ALIGNMENT < sizeof(double)) && (elemType == ELEMENT_TYPE_R8))
+ {
+ // Creation of an array of doubles, not in the large object heap.
+ // We want to align the doubles to 8 byte boundaries, but the GC gives us pointers aligned
+ // to 4 bytes only (on 32 bit platforms). To align, we ask for 12 bytes more to fill with a
+ // dummy object.
+ // If the GC gives us a 8 byte aligned address, we use it for the array and place the dummy
+ // object after the array, otherwise we put the dummy object first, shifting the base of
+ // the array to an 8 byte aligned address.
+ //
+ // Note: on 64 bit platforms, the GC always returns 8 byte aligned addresses, and we don't
+ // execute this code because DATA_ALIGNMENT < sizeof(double) is false.
+
+ _ASSERTE(DATA_ALIGNMENT == sizeof(double) / 2);
+ _ASSERTE((MIN_OBJECT_SIZE % sizeof(double)) == DATA_ALIGNMENT); // used to change alignment
+ _ASSERTE(pArrayMT->GetComponentSize() == sizeof(double));
+ _ASSERTE(g_pObjectClass->GetBaseSize() == MIN_OBJECT_SIZE);
+ _ASSERTE(totalSize < totalSize + MIN_OBJECT_SIZE);
+ orArray = (ArrayBase*)Alloc(totalSize + MIN_OBJECT_SIZE, flags);
+
+ Object* orDummyObject;
+ if ((size_t)orArray % sizeof(double))
+ {
+ orDummyObject = orArray;
+ orArray = (ArrayBase*)((size_t)orArray + MIN_OBJECT_SIZE);
+ }
+ else
+ {
+ orDummyObject = (Object*)((size_t)orArray + totalSize);
+ }
+ _ASSERTE(((size_t)orArray % sizeof(double)) == 0);
+ orDummyObject->SetMethodTable(g_pObjectClass);
+ }
+ else
+ {
+#ifdef FEATURE_64BIT_ALIGNMENT
+ MethodTable* pElementMT = pArrayMT->GetApproxArrayElementTypeHandle().GetMethodTable();
+ if (pElementMT->RequiresAlign8() && pElementMT->IsValueType())
+ {
+ // This platform requires that certain fields are 8-byte aligned (and the runtime doesn't provide
+ // this guarantee implicitly, e.g. on 32-bit platforms). Since it's the array payload, not the
+ // header that requires alignment we need to be careful. However it just so happens that all the
+ // cases we care about (single and multi-dim arrays of value types) have an even number of DWORDs
+ // in their headers so the alignment requirements for the header and the payload are the same.
+ _ASSERTE(((pArrayMT->GetBaseSize() - SIZEOF_OBJHEADER) & 7) == 0);
+ orArray = (ArrayBase*)AllocAlign8(totalSize, flags);
+ }
+ else
+#endif
+ {
+ orArray = (ArrayBase*)Alloc(totalSize, flags);
+ }
+ }
+ orArray->SetArrayMethodTable(pArrayMT);
+ }
+
+ // Initialize Object
+ orArray->m_NumComponents = cElements;
+
+ if (bAllocateInLargeHeap ||
+ (totalSize >= g_pConfig->GetGCLOHThreshold()))
+ {
+ GCHeapUtilities::GetGCHeap()->PublishObject((BYTE*)orArray);
+ }
+
+#ifdef _LOGALLOC
+ LogAlloc(totalSize, pArrayMT, orArray);
+#endif // _LOGALLOC
+
+#ifdef _DEBUG
+ // Ensure the typehandle has been interned prior to allocation.
+ // This is important for OOM reliability.
+ OBJECTREF objref = ObjectToOBJECTREF((Object *) orArray);
+ GCPROTECT_BEGIN(objref);
+
+ orArray->GetTypeHandle();
+
+ GCPROTECT_END();
+ orArray = (ArrayBase *) OBJECTREFToObject(objref);
+#endif
+
+ // Notify the profiler of the allocation
+ // do this after initializing bounds so callback has size information
+ if (TrackAllocations())
+ {
+ ProfileTrackArrayAlloc(orArray);
+ }
+
+#ifdef FEATURE_EVENT_TRACE
+ // Send ETW event for allocation
+ if(ETW::TypeSystemLog::IsHeapAllocEventEnabled())
+ {
+ ETW::TypeSystemLog::SendObjectAllocatedEvent(orArray);
+ }
+#endif // FEATURE_EVENT_TRACE
+
+ return ObjectToOBJECTREF((Object *) orArray);
}
void ThrowOutOfMemoryDimensionsExceeded()
@@ -437,7 +600,7 @@ void ThrowOutOfMemoryDimensionsExceeded()
//
// This is wrapper overload to handle TypeHandle arrayType
//
-OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, BOOL bAllocateInLargeHeap)
+OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, GC_ALLOC_FLAGS flags, BOOL bAllocateInLargeHeap)
{
CONTRACTL
{
@@ -447,7 +610,7 @@ OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, B
ArrayTypeDesc* arrayDesc = arrayType.AsArray();
MethodTable* pArrayMT = arrayDesc->GetMethodTable();
- return AllocateArrayEx(pArrayMT, pArgs, dwNumArgs, bAllocateInLargeHeap);
+ return AllocateArrayEx(pArrayMT, pArgs, dwNumArgs, flags, bAllocateInLargeHeap);
}
//
@@ -457,7 +620,7 @@ OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, B
// allocate sub-arrays and fill them in.
//
// For arrays with lower bounds, pBounds is <lower bound 1>, <count 1>, <lower bound 2>, ...
-OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs, BOOL bAllocateInLargeHeap)
+OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs, GC_ALLOC_FLAGS flags, BOOL bAllocateInLargeHeap)
{
CONTRACTL {
THROWS;
@@ -505,12 +668,9 @@ OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs,
// Morph a ARRAY rank 1 with 0 lower bound into an SZARRAY
if (rank == 1 && (dwNumArgs == 1 || pArgs[0] == 0))
- { // lower bound is zero
-
- // This recursive call doesn't go any farther, because the dwNumArgs will be 1,
- // so don't bother with stack probe.
+ {
TypeHandle szArrayType = ClassLoader::LoadArrayTypeThrowing(pArrayMT->GetApproxArrayElementTypeHandle(), ELEMENT_TYPE_SZARRAY, 1);
- return AllocateArrayEx(szArrayType, &pArgs[dwNumArgs - 1], 1, bAllocateInLargeHeap);
+ return AllocateSzArray(szArrayType, pArgs[dwNumArgs - 1], flags, bAllocateInLargeHeap);
}
providedLowerBounds = (dwNumArgs == 2*rank);
@@ -554,11 +714,18 @@ OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs,
ThrowOutOfMemoryDimensionsExceeded();
// Allocate the space from the GC heap
- S_SIZE_T safeTotalSize = S_SIZE_T(cElements) * S_SIZE_T(componentSize) + S_SIZE_T(pArrayMT->GetBaseSize());
+#ifdef _TARGET_64BIT_
+ // POSITIVE_INT32 * UINT16 + SMALL_CONST
+ // this cannot overflow on 64bit
+ size_t totalSize = cElements * componentSize + pArrayMT->GetBaseSize();
+
+#else
+ S_SIZE_T safeTotalSize = S_SIZE_T((DWORD)cElements) * S_SIZE_T((DWORD)componentSize) + S_SIZE_T((DWORD)pArrayMT->GetBaseSize());
if (safeTotalSize.IsOverflow())
ThrowOutOfMemoryDimensionsExceeded();
size_t totalSize = safeTotalSize.Value();
+#endif
#ifdef FEATURE_DOUBLE_ALIGNMENT_HINT
if ((elemType == ELEMENT_TYPE_R8) &&
@@ -569,9 +736,11 @@ OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs,
}
#endif
+ flags |= (pArrayMT->ContainsPointers() ? GC_ALLOC_CONTAINS_REF : GC_ALLOC_NO_FLAGS);
+
if (bAllocateInLargeHeap)
{
- orArray = (ArrayBase *) AllocLHeap(totalSize, FALSE, pArrayMT->ContainsPointers());
+ orArray = (ArrayBase *) AllocLHeap(totalSize, flags);
orArray->SetArrayMethodTableForLargeObject(pArrayMT);
}
else
@@ -586,12 +755,12 @@ OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs,
// cases we care about (single and multi-dim arrays of value types) have an even number of DWORDs
// in their headers so the alignment requirements for the header and the payload are the same.
_ASSERTE(((pArrayMT->GetBaseSize() - SIZEOF_OBJHEADER) & 7) == 0);
- orArray = (ArrayBase *) AllocAlign8(totalSize, FALSE, pArrayMT->ContainsPointers(), FALSE);
+ orArray = (ArrayBase *) AllocAlign8(totalSize, flags);
}
else
#endif
{
- orArray = (ArrayBase *) Alloc(totalSize, FALSE, pArrayMT->ContainsPointers());
+ orArray = (ArrayBase *) Alloc(totalSize, flags);
}
orArray->SetArrayMethodTable(pArrayMT);
}
@@ -670,7 +839,7 @@ OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs,
TypeHandle subArrayType = pArrayMT->GetApproxArrayElementTypeHandle();
for (UINT32 i = 0; i < cElements; i++)
{
- OBJECTREF obj = AllocateArrayEx(subArrayType, &pArgs[1], dwNumArgs-1, bAllocateInLargeHeap);
+ OBJECTREF obj = AllocateArrayEx(subArrayType, &pArgs[1], dwNumArgs-1, flags, bAllocateInLargeHeap);
outerArray->SetAt(i, obj);
}
@@ -690,7 +859,7 @@ OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs,
/*
* Allocates a single dimensional array of primitive types.
*/
-OBJECTREF AllocatePrimitiveArray(CorElementType type, DWORD cElements, BOOL bAllocateInLargeHeap)
+OBJECTREF AllocatePrimitiveArray(CorElementType type, DWORD cElements)
{
CONTRACTL
{
@@ -701,7 +870,6 @@ OBJECTREF AllocatePrimitiveArray(CorElementType type, DWORD cElements, BOOL bA
}
CONTRACTL_END
-
// Allocating simple primite arrays is done in various places as internal storage.
// Because this is unlikely to result in any bad recursions, we will override the type limit
// here rather forever chase down all the callers.
@@ -716,139 +884,13 @@ OBJECTREF AllocatePrimitiveArray(CorElementType type, DWORD cElements, BOOL bA
TypeHandle typHnd = ClassLoader::LoadArrayTypeThrowing(elemType, ELEMENT_TYPE_SZARRAY, 0);
g_pPredefinedArrayTypes[type] = typHnd.AsArray();
}
- return FastAllocatePrimitiveArray(g_pPredefinedArrayTypes[type]->GetMethodTable(), cElements, bAllocateInLargeHeap);
-}
-
-/*
- * Allocates a single dimensional array of primitive types.
- */
-
-OBJECTREF FastAllocatePrimitiveArray(MethodTable* pMT, DWORD cElements, BOOL bAllocateInLargeHeap)
-{
- CONTRACTL {
- THROWS;
- GC_TRIGGERS;
- MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
- PRECONDITION(pMT->CheckInstanceActivated());
- } CONTRACTL_END;
-
-#ifdef _DEBUG
- if (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP))
- {
- char *a = new char;
- delete a;
- }
-#endif
-
- _ASSERTE(pMT && pMT->IsArray());
- _ASSERTE(pMT->IsRestored_NoLogging());
- _ASSERTE(CorTypeInfo::IsPrimitiveType(pMT->GetArrayElementType()) &&
- g_pPredefinedArrayTypes[pMT->GetArrayElementType()] != NULL);
-
- g_IBCLogger.LogMethodTableAccess(pMT);
- SetTypeHandleOnThreadForAlloc(TypeHandle(pMT));
-
- SIZE_T componentSize = pMT->GetComponentSize();
- if (cElements > MaxArrayLength(componentSize))
- ThrowOutOfMemory();
-
- S_SIZE_T safeTotalSize = S_SIZE_T(cElements) * S_SIZE_T(componentSize) + S_SIZE_T(pMT->GetBaseSize());
- if (safeTotalSize.IsOverflow())
- ThrowOutOfMemory();
-
- size_t totalSize = safeTotalSize.Value();
-
- BOOL bPublish = bAllocateInLargeHeap;
-
- ArrayBase* orObject;
- if (bAllocateInLargeHeap)
- {
- orObject = (ArrayBase*) AllocLHeap(totalSize, FALSE, FALSE);
- }
- else
- {
- ArrayTypeDesc *pArrayR8TypeDesc = g_pPredefinedArrayTypes[ELEMENT_TYPE_R8];
- if (DATA_ALIGNMENT < sizeof(double) && pArrayR8TypeDesc != NULL && pMT == pArrayR8TypeDesc->GetMethodTable() &&
- (totalSize < g_pConfig->GetGCLOHThreshold() - MIN_OBJECT_SIZE))
- {
- // Creation of an array of doubles, not in the large object heap.
- // We want to align the doubles to 8 byte boundaries, but the GC gives us pointers aligned
- // to 4 bytes only (on 32 bit platforms). To align, we ask for 12 bytes more to fill with a
- // dummy object.
- // If the GC gives us a 8 byte aligned address, we use it for the array and place the dummy
- // object after the array, otherwise we put the dummy object first, shifting the base of
- // the array to an 8 byte aligned address.
- // Note: on 64 bit platforms, the GC always returns 8 byte aligned addresses, and we don't
- // execute this code because DATA_ALIGNMENT < sizeof(double) is false.
-
- _ASSERTE(DATA_ALIGNMENT == sizeof(double)/2);
- _ASSERTE((MIN_OBJECT_SIZE % sizeof(double)) == DATA_ALIGNMENT); // used to change alignment
- _ASSERTE(pMT->GetComponentSize() == sizeof(double));
- _ASSERTE(g_pObjectClass->GetBaseSize() == MIN_OBJECT_SIZE);
- _ASSERTE(totalSize < totalSize + MIN_OBJECT_SIZE);
- orObject = (ArrayBase*) Alloc(totalSize + MIN_OBJECT_SIZE, FALSE, FALSE);
-
- Object *orDummyObject;
- if((size_t)orObject % sizeof(double))
- {
- orDummyObject = orObject;
- orObject = (ArrayBase*) ((size_t)orObject + MIN_OBJECT_SIZE);
- }
- else
- {
- orDummyObject = (Object*) ((size_t)orObject + totalSize);
- }
- _ASSERTE(((size_t)orObject % sizeof(double)) == 0);
- orDummyObject->SetMethodTable(g_pObjectClass);
- }
- else
- {
- orObject = (ArrayBase*) Alloc(totalSize, FALSE, FALSE);
- bPublish = (totalSize >= g_pConfig->GetGCLOHThreshold());
- }
- }
-
- // Initialize Object
- orObject->SetArrayMethodTable( pMT );
- _ASSERTE(orObject->GetMethodTable() != NULL);
- orObject->m_NumComponents = cElements;
-
- if (bPublish)
- {
- GCHeapUtilities::GetGCHeap()->PublishObject((BYTE*)orObject);
- }
-
- // Notify the profiler of the allocation
- if (TrackAllocations())
- {
- OBJECTREF objref = ObjectToOBJECTREF((Object*)orObject);
- GCPROTECT_BEGIN(objref);
- ProfilerObjectAllocatedCallback(objref, (ClassID) orObject->GetTypeHandle().AsPtr());
- GCPROTECT_END();
-
- orObject = (ArrayBase *) OBJECTREFToObject(objref);
- }
-
-#ifdef FEATURE_EVENT_TRACE
- // Send ETW event for allocation
- if(ETW::TypeSystemLog::IsHeapAllocEventEnabled())
- {
- ETW::TypeSystemLog::SendObjectAllocatedEvent(orObject);
- }
-#endif // FEATURE_EVENT_TRACE
-
- // IBC Log MethodTable access
- g_IBCLogger.LogMethodTableAccess(pMT);
-
- LogAlloc(totalSize, pMT, orObject);
-
- return( ObjectToOBJECTREF((Object*)orObject) );
+ return AllocateSzArray(g_pPredefinedArrayTypes[type]->GetMethodTable(), cElements);
}
//
// Allocate an array which is the same size as pRef. However, do not zero out the array.
//
-OBJECTREF DupArrayForCloning(BASEARRAYREF pRef, BOOL bAllocateInLargeHeap)
+OBJECTREF DupArrayForCloning(BASEARRAYREF pRef)
{
CONTRACTL {
THROWS;
@@ -877,61 +919,14 @@ OBJECTREF DupArrayForCloning(BASEARRAYREF pRef, BOOL bAllocateInLargeHeap)
numArgs = 1;
args[0] = pRef->GetNumComponents();
}
- return AllocateArrayEx(TypeHandle(&arrayType), args, numArgs, bAllocateInLargeHeap);
-}
-
-#if defined(_TARGET_X86_)
-
-// The fast version always allocates in the normal heap
-OBJECTREF AllocatePrimitiveArray(CorElementType type, DWORD cElements)
-{
- CONTRACTL {
- THROWS;
- GC_TRIGGERS;
- MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
- } CONTRACTL_END;
-
- return OBJECTREF( HCCALL2(fastPrimitiveArrayAllocator, type, cElements) );
+ return AllocateArrayEx(TypeHandle(&arrayType), args, numArgs, GC_ALLOC_ZEROING_OPTIONAL);
}
-// The fast version always allocates in the normal heap
-OBJECTREF AllocateObjectArray(DWORD cElements, TypeHandle ElementType)
-{
- CONTRACTL {
- THROWS;
- GC_TRIGGERS;
- MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
- } CONTRACTL_END;
-
-
- OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
-
- // We must call this here to ensure the typehandle for this object is
- // interned before the object is allocated. As soon as the object is allocated,
- // the profiler could do a heapwalk and it expects to find an interned
- // typehandle for every object in the heap.
- TypeHandle ArrayType = ClassLoader::LoadArrayTypeThrowing(ElementType);
-
- return OBJECTREF( HCCALL2(fastObjectArrayAllocator, ArrayType.AsArray()->GetTemplateMethodTable(), cElements));
-}
-
-STRINGREF AllocateString( DWORD cchStringLength )
-{
- CONTRACTL {
- THROWS;
- GC_TRIGGERS;
- MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
- } CONTRACTL_END;
-
- return STRINGREF(HCCALL1(fastStringAllocator, cchStringLength));
-}
-
-#endif
//
// Helper for parts of the EE which are allocating arrays
//
-OBJECTREF AllocateObjectArray(DWORD cElements, TypeHandle elementType, BOOL bAllocateInLargeHeap)
+OBJECTREF AllocateObjectArray(DWORD cElements, TypeHandle elementType, BOOL bAllocateInLargeHeap)
{
CONTRACTL {
THROWS;
@@ -950,14 +945,10 @@ OBJECTREF AllocateObjectArray(DWORD cElements, TypeHandle elementType, BOOL bA
_ASSERTE(arrayType.GetInternalCorElementType() == ELEMENT_TYPE_SZARRAY);
#endif //_DEBUG
- return AllocateArrayEx(ClassLoader::LoadArrayTypeThrowing(elementType),
- (INT32 *)(&cElements),
- 1,
- bAllocateInLargeHeap);
+ return AllocateSzArray(ClassLoader::LoadArrayTypeThrowing(elementType), (INT32) cElements, GC_ALLOC_NO_FLAGS, bAllocateInLargeHeap);
}
-
-STRINGREF SlowAllocateString( DWORD cchStringLength )
+STRINGREF AllocateString( DWORD cchStringLength )
{
CONTRACTL {
THROWS;
@@ -978,7 +969,7 @@ STRINGREF SlowAllocateString( DWORD cchStringLength )
// Limit the maximum string size to <2GB to mitigate risk of security issues caused by 32-bit integer
// overflows in buffer size calculations.
//
- // If the value below is changed, also change SlowAllocateUtf8String.
+ // If the value below is changed, also change AllocateUtf8String.
if (cchStringLength > 0x3FFFFFDF)
ThrowOutOfMemory();
@@ -987,7 +978,7 @@ STRINGREF SlowAllocateString( DWORD cchStringLength )
SetTypeHandleOnThreadForAlloc(TypeHandle(g_pStringClass));
- orObject = (StringObject *)Alloc( ObjectSize, FALSE, FALSE );
+ orObject = (StringObject *)Alloc( ObjectSize, GC_ALLOC_NO_FLAGS);
// Object is zero-init already
_ASSERTE( orObject->HasEmptySyncBlockInfo() );
@@ -1027,7 +1018,7 @@ STRINGREF SlowAllocateString( DWORD cchStringLength )
}
#ifdef FEATURE_UTF8STRING
-UTF8STRINGREF SlowAllocateUtf8String(DWORD cchStringLength)
+UTF8STRINGREF AllocateUtf8String(DWORD cchStringLength)
{
CONTRACTL{
THROWS;
@@ -1062,7 +1053,7 @@ UTF8STRINGREF SlowAllocateUtf8String(DWORD cchStringLength)
SetTypeHandleOnThreadForAlloc(TypeHandle(g_pUtf8StringClass));
- orObject = (Utf8StringObject *)Alloc(ObjectSize, FALSE, FALSE);
+ orObject = (Utf8StringObject *)Alloc(ObjectSize, GC_ALLOC_NO_FLAGS);
// Object is zero-init already
_ASSERTE(orObject->HasEmptySyncBlockInfo());
@@ -1176,6 +1167,8 @@ OBJECTREF AllocateObject(MethodTable *pMT
#endif // FEATURE_COMINTEROP
{
DWORD baseSize = pMT->GetBaseSize();
+ GC_ALLOC_FLAGS flags = ((pMT->ContainsPointers() ? GC_ALLOC_CONTAINS_REF : GC_ALLOC_NO_FLAGS) |
+ (pMT->HasFinalizer() ? GC_ALLOC_FINALIZE : GC_ALLOC_NO_FLAGS));
#ifdef FEATURE_64BIT_ALIGNMENT
if (pMT->RequiresAlign8())
@@ -1187,17 +1180,13 @@ OBJECTREF AllocateObject(MethodTable *pMT
// first field is aligned relative to the header) and true for boxed value types (where we can't
// do the same padding without introducing more complexity in type layout and unboxing stubs).
_ASSERTE(sizeof(Object) == 4);
- orObject = (Object *) AllocAlign8(baseSize,
- pMT->HasFinalizer(),
- pMT->ContainsPointers(),
- pMT->IsValueType());
+ flags |= pMT->IsValueType() ? GC_ALLOC_ALIGN8_BIAS : GC_ALLOC_NO_FLAGS;
+ orObject = (Object *) AllocAlign8(baseSize, flags);
}
else
#endif // FEATURE_64BIT_ALIGNMENT
{
- orObject = (Object *) Alloc(baseSize,
- pMT->HasFinalizer(),
- pMT->ContainsPointers());
+ orObject = (Object*)Alloc(baseSize, flags);
}
// verify zero'd memory (at least for sync block)
diff --git a/src/vm/gchelpers.h b/src/vm/gchelpers.h
index 33a94270fc..5fe51ce6c7 100644
--- a/src/vm/gchelpers.h
+++ b/src/vm/gchelpers.h
@@ -20,94 +20,28 @@
//
//========================================================================
-OBJECTREF AllocateValueSzArray(TypeHandle elementType, INT32 length);
- // The main Array allocation routine, can do multi-dimensional
-OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs, BOOL bAllocateInLargeHeap = FALSE);
-OBJECTREF AllocateArrayEx(TypeHandle arrayClass, INT32 *pArgs, DWORD dwNumArgs, BOOL bAllocateInLargeHeap = FALSE);
- // Optimized verion of above
-OBJECTREF FastAllocatePrimitiveArray(MethodTable* arrayType, DWORD cElements, BOOL bAllocateInLargeHeap = FALSE);
+// Allocate single-dimensional array given array type
+OBJECTREF AllocateSzArray(MethodTable *pArrayMT, INT32 length, GC_ALLOC_FLAGS flags = GC_ALLOC_NO_FLAGS, BOOL bAllocateInLargeHeap = FALSE);
+OBJECTREF AllocateSzArray(TypeHandle arrayType, INT32 length, GC_ALLOC_FLAGS flags = GC_ALLOC_NO_FLAGS, BOOL bAllocateInLargeHeap = FALSE);
+// The main Array allocation routine, can do multi-dimensional
+OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs, GC_ALLOC_FLAGS flags = GC_ALLOC_NO_FLAGS, BOOL bAllocateInLargeHeap = FALSE);
+OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, GC_ALLOC_FLAGS flags = GC_ALLOC_NO_FLAGS, BOOL bAllocateInLargeHeap = FALSE);
-#if defined(_TARGET_X86_)
-
- // for x86, we generate efficient allocators for some special cases
- // these are called via inline wrappers that call the generated allocators
- // via function pointers.
-
-
- // Create a SD array of primitive types
-typedef HCCALL2_PTR(Object*, FastPrimitiveArrayAllocatorFuncPtr, CorElementType type, DWORD cElements);
-
-extern FastPrimitiveArrayAllocatorFuncPtr fastPrimitiveArrayAllocator;
-
- // The fast version always allocates in the normal heap
+// Create a SD array of primitive types given an element type
OBJECTREF AllocatePrimitiveArray(CorElementType type, DWORD cElements);
- // The slow version is distinguished via overloading by an additional parameter
-OBJECTREF AllocatePrimitiveArray(CorElementType type, DWORD cElements, BOOL bAllocateInLargeHeap);
-
-
-// Allocate SD array of object pointers.
-typedef HCCALL2_PTR(Object*, FastObjectArrayAllocatorFuncPtr, MethodTable *pArrayMT, DWORD cElements);
-
-extern FastObjectArrayAllocatorFuncPtr fastObjectArrayAllocator;
-
- // The fast version always allocates in the normal heap
-OBJECTREF AllocateObjectArray(DWORD cElements, TypeHandle ElementType);
-
- // The slow version is distinguished via overloading by an additional parameter
-OBJECTREF AllocateObjectArray(DWORD cElements, TypeHandle ElementType, BOOL bAllocateInLargeHeap);
-
-
- // Allocate string
-typedef HCCALL1_PTR(StringObject*, FastStringAllocatorFuncPtr, DWORD cchArrayLength);
-
-extern FastStringAllocatorFuncPtr fastStringAllocator;
-
-STRINGREF AllocateString( DWORD cchStringLength );
-
- // The slow version, implemented in gcscan.cpp
-STRINGREF SlowAllocateString( DWORD cchStringLength );
-
-#ifdef FEATURE_UTF8STRING
-UTF8STRINGREF SlowAllocateUtf8String( DWORD cchStringLength );
-#endif // FEATURE_UTF8STRING
-
-#else
-
-// On other platforms, go to the (somewhat less efficient) implementations in gcscan.cpp
-
- // Create a SD array of primitive types
-OBJECTREF AllocatePrimitiveArray(CorElementType type, DWORD cElements, BOOL bAllocateInLargeHeap = FALSE);
-
- // Allocate SD array of object pointers
+// Allocate SD array of object types given an element type
OBJECTREF AllocateObjectArray(DWORD cElements, TypeHandle ElementType, BOOL bAllocateInLargeHeap = FALSE);
-STRINGREF SlowAllocateString( DWORD cchStringLength );
-
-#ifdef FEATURE_UTF8STRING
-UTF8STRINGREF SlowAllocateUtf8String( DWORD cchStringLength );
-#endif // FEATURE_UTF8STRING
-
-inline STRINGREF AllocateString( DWORD cchStringLength )
-{
- WRAPPER_NO_CONTRACT;
-
- return SlowAllocateString( cchStringLength );
-}
-
-#endif
+// Allocate a string
+STRINGREF AllocateString( DWORD cchStringLength );
#ifdef FEATURE_UTF8STRING
-inline UTF8STRINGREF AllocateUtf8String(DWORD cchStringLength)
-{
- WRAPPER_NO_CONTRACT;
-
- return SlowAllocateUtf8String(cchStringLength);
-}
+UTF8STRINGREF AllocateUtf8String( DWORD cchStringLength );
#endif // FEATURE_UTF8STRING
-OBJECTREF DupArrayForCloning(BASEARRAYREF pRef, BOOL bAllocateInLargeHeap = FALSE);
+OBJECTREF DupArrayForCloning(BASEARRAYREF pRef);
// The JIT requests the EE to specify an allocation helper to use at each new-site.
// The EE makes this choice based on whether context boundaries may be involved,
diff --git a/src/vm/i386/jitinterfacex86.cpp b/src/vm/i386/jitinterfacex86.cpp
index 68cf72d8d8..2444a7bf9f 100644
--- a/src/vm/i386/jitinterfacex86.cpp
+++ b/src/vm/i386/jitinterfacex86.cpp
@@ -45,7 +45,6 @@ public:
OBJ_ARRAY = 0x4,
ALIGN8 = 0x8, // insert a dummy object to insure 8 byte alignment (until the next GC)
ALIGN8OBJ = 0x10,
- NO_FRAME = 0x20, // call is from unmanaged code - don't try to put up a frame
};
static void *GenAllocSFast(Flags flags);
@@ -767,47 +766,6 @@ void *JIT_TrialAlloc::GenBox(Flags flags)
return (void *)pStub->GetEntryPoint();
}
-
-HCIMPL2_RAW(Object*, UnframedAllocateObjectArray, MethodTable *pArrayMT, DWORD cElements)
-{
- // This isn't _really_ an FCALL and therefore shouldn't have the
- // SO_TOLERANT part of the FCALL_CONTRACT b/c it is not entered
- // from managed code.
- CONTRACTL {
- THROWS;
- GC_TRIGGERS;
- MODE_COOPERATIVE;
- } CONTRACTL_END;
-
- return OBJECTREFToObject(AllocateArrayEx(pArrayMT,
- (INT32 *)(&cElements),
- 1,
- FALSE));
-}
-HCIMPLEND_RAW
-
-
-HCIMPL2_RAW(Object*, UnframedAllocatePrimitiveArray, CorElementType type, DWORD cElements)
-{
- // This isn't _really_ an FCALL and therefore shouldn't have the
- // SO_TOLERANT part of the FCALL_CONTRACT b/c it is not entered
- // from managed code.
- CONTRACTL {
- THROWS;
- GC_TRIGGERS;
- MODE_COOPERATIVE;
- } CONTRACTL_END;
-
- return OBJECTREFToObject( AllocatePrimitiveArray(type, cElements, FALSE) );
-}
-HCIMPLEND_RAW
-
-HCIMPL1_RAW(PTR_MethodTable, UnframedGetTemplateMethodTable, ArrayTypeDesc *arrayDesc)
-{
- return arrayDesc->GetTemplateMethodTable();
-}
-HCIMPLEND_RAW
-
void *JIT_TrialAlloc::GenAllocArray(Flags flags)
{
STANDARD_VM_CONTRACT;
@@ -832,29 +790,6 @@ void *JIT_TrialAlloc::GenAllocArray(Flags flags)
// push edx
sl.X86EmitPushReg(kEDX);
- if (flags & NO_FRAME)
- {
- if ((flags & OBJ_ARRAY) == 0)
- {
- // mov ecx,[g_pPredefinedArrayTypes+ecx*4]
- sl.Emit8(0x8b);
- sl.Emit16(0x8d0c);
- sl.Emit32((int)(size_t)&g_pPredefinedArrayTypes);
-
- // test ecx,ecx
- sl.Emit16(0xc985);
-
- // je noLock
- sl.X86EmitCondJump(noLock, X86CondCode::kJZ);
-
- sl.X86EmitPushReg(kEDX);
- sl.X86EmitCall(sl.NewExternalCodeLabel((LPVOID)UnframedGetTemplateMethodTable), 0);
- sl.X86EmitPopReg(kEDX);
-
- sl.X86EmitMovRegReg(kECX, kEAX);
- }
- }
-
// Do a conservative check here. This is to avoid doing overflow checks within this function. We'll
// still have to do a size check before running through the body of EmitCore. The way we do the check
// against the allocation quantum there requires that we not overflow when adding the size to the
@@ -979,28 +914,9 @@ void *JIT_TrialAlloc::GenAllocArray(Flags flags)
// pop ecx - array method table
sl.X86EmitPopReg(kECX);
- CodeLabel * target;
- if (flags & NO_FRAME)
- {
- if (flags & OBJ_ARRAY)
- {
- // Jump to the unframed helper
- target = sl.NewExternalCodeLabel((LPVOID)UnframedAllocateObjectArray);
- _ASSERTE(target->e.m_pExternalAddress);
- }
- else
- {
- // Jump to the unframed helper
- target = sl.NewExternalCodeLabel((LPVOID)UnframedAllocatePrimitiveArray);
- _ASSERTE(target->e.m_pExternalAddress);
- }
- }
- else
- {
- // Jump to the framed helper
- target = sl.NewExternalCodeLabel((LPVOID)JIT_NewArr1);
- _ASSERTE(target->e.m_pExternalAddress);
- }
+ // Jump to the framed helper
+ CodeLabel * target = sl.NewExternalCodeLabel((LPVOID)JIT_NewArr1);
+ _ASSERTE(target->e.m_pExternalAddress);
sl.X86EmitNearJump(target);
Stub *pStub = sl.Link(SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap());
@@ -1087,31 +1003,14 @@ void *JIT_TrialAlloc::GenAllocString(Flags flags)
// pop ecx - element count
sl.X86EmitPopReg(kECX);
- CodeLabel * target;
- if (flags & NO_FRAME)
- {
- // Jump to the unframed helper
- target = sl.NewExternalCodeLabel((LPVOID)UnframedAllocateString);
- }
- else
- {
- // Jump to the framed helper
- target = sl.NewExternalCodeLabel((LPVOID)FramedAllocateString);
- }
+ // Jump to the framed helper
+ CodeLabel * target = sl.NewExternalCodeLabel((LPVOID)FramedAllocateString);
sl.X86EmitNearJump(target);
Stub *pStub = sl.Link(SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap());
return (void *)pStub->GetEntryPoint();
}
-
-
-FastStringAllocatorFuncPtr fastStringAllocator = UnframedAllocateString;
-
-FastObjectArrayAllocatorFuncPtr fastObjectArrayAllocator = UnframedAllocateObjectArray;
-
-FastPrimitiveArrayAllocatorFuncPtr fastPrimitiveArrayAllocator = UnframedAllocatePrimitiveArray;
-
// For this helper,
// If bCCtorCheck == true
// ECX contains the domain neutral module ID
@@ -1350,16 +1249,9 @@ void InitJITHelpers1()
pMethodAddresses[5] = JIT_TrialAlloc::GenAllocArray((JIT_TrialAlloc::Flags)(flags|JIT_TrialAlloc::ALIGN8));
SetJitHelperFunction(CORINFO_HELP_NEWARR_1_ALIGN8, pMethodAddresses[5]);
- fastObjectArrayAllocator = (FastObjectArrayAllocatorFuncPtr)JIT_TrialAlloc::GenAllocArray((JIT_TrialAlloc::Flags)(flags|JIT_TrialAlloc::NO_FRAME|JIT_TrialAlloc::OBJ_ARRAY));
- fastPrimitiveArrayAllocator = (FastPrimitiveArrayAllocatorFuncPtr)JIT_TrialAlloc::GenAllocArray((JIT_TrialAlloc::Flags)(flags|JIT_TrialAlloc::NO_FRAME));
-
// If allocation logging is on, then we divert calls to FastAllocateString to an Ecall method, not this
// generated method. Find this workaround in Ecall::Init() in ecall.cpp.
ECall::DynamicallyAssignFCallImpl((PCODE) JIT_TrialAlloc::GenAllocString(flags), ECall::FastAllocateString);
-
- // generate another allocator for use from unmanaged code (won't need a frame)
- fastStringAllocator = (FastStringAllocatorFuncPtr) JIT_TrialAlloc::GenAllocString((JIT_TrialAlloc::Flags)(flags|JIT_TrialAlloc::NO_FRAME));
- //UnframedAllocateString;
}
// Replace static helpers with faster assembly versions
diff --git a/src/vm/ilmarshalers.cpp b/src/vm/ilmarshalers.cpp
index 0a4a1d900a..19db7dff73 100644
--- a/src/vm/ilmarshalers.cpp
+++ b/src/vm/ilmarshalers.cpp
@@ -4359,7 +4359,7 @@ FCIMPL4(void, MngdNativeArrayMarshaler::ConvertSpaceToManaged, MngdNativeArrayMa
//
// Allocate array
//
- SetObjectReference(pManagedHome, AllocateArrayEx(pThis->m_Array, &cElements, 1));
+ SetObjectReference(pManagedHome, AllocateSzArray(pThis->m_Array, cElements));
}
HELPER_METHOD_FRAME_END();
}
@@ -5392,7 +5392,7 @@ FCIMPL4(void, MngdHiddenLengthArrayMarshaler::ConvertSpaceToManaged, MngdHiddenL
{
TypeHandle elementType(pThis->m_pElementMT);
TypeHandle arrayType = ClassLoader::LoadArrayTypeThrowing(elementType);
- SetObjectReference(pManagedHome, AllocateArrayEx(arrayType, &cElements, 1));
+ SetObjectReference(pManagedHome, AllocateSzArray(arrayType, cElements));
}
HELPER_METHOD_FRAME_END();
diff --git a/src/vm/interpreter.cpp b/src/vm/interpreter.cpp
index 0670dc92b9..37eba9008e 100644
--- a/src/vm/interpreter.cpp
+++ b/src/vm/interpreter.cpp
@@ -5995,7 +5995,7 @@ void Interpreter::NewArr()
pArrayMT->CheckRunClassInitThrowing();
INT32 size32 = (INT32)sz;
- Object* newarray = OBJECTREFToObject(AllocateArrayEx(pArrayMT, &size32, 1));
+ Object* newarray = OBJECTREFToObject(AllocateSzArray(pArrayMT, size32));
GCX_FORBID();
OpStackTypeSet(stkInd, InterpreterType(CORINFO_TYPE_CLASS));
diff --git a/src/vm/jithelpers.cpp b/src/vm/jithelpers.cpp
index 11d07e6296..261b9acb61 100644
--- a/src/vm/jithelpers.cpp
+++ b/src/vm/jithelpers.cpp
@@ -2868,7 +2868,7 @@ HCIMPL1_RAW(StringObject*, UnframedAllocateString, DWORD stringLength)
} CONTRACTL_END;
STRINGREF result;
- result = SlowAllocateString(stringLength);
+ result = AllocateString(stringLength);
return((StringObject*) OBJECTREFToObject(result));
}
@@ -2881,7 +2881,7 @@ HCIMPL1(StringObject*, FramedAllocateString, DWORD stringLength)
STRINGREF result = NULL;
HELPER_METHOD_FRAME_BEGIN_RET_0(); // Set up a frame
- result = SlowAllocateString(stringLength);
+ result = AllocateString(stringLength);
HELPER_METHOD_FRAME_END();
return((StringObject*) OBJECTREFToObject(result));
@@ -2896,7 +2896,7 @@ HCIMPL1(Utf8StringObject*, FramedAllocateUtf8String, DWORD stringLength)
UTF8STRINGREF result = NULL;
HELPER_METHOD_FRAME_BEGIN_RET_0(); // Set up a frame
- result = SlowAllocateUtf8String(stringLength);
+ result = AllocateUtf8String(stringLength);
HELPER_METHOD_FRAME_END();
return((Utf8StringObject*) OBJECTREFToObject(result));
@@ -3136,62 +3136,13 @@ HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size)
EX_THROW(EEMessageException, (kOverflowException, IDS_EE_ARRAY_DIMENSIONS_EXCEEDED));
#endif
- //
- // is this a primitive type?
- //
-
- CorElementType elemType = pArrayMT->GetArrayElementType();
-
- if (CorTypeInfo::IsPrimitiveType(elemType)
-#ifdef FEATURE_64BIT_ALIGNMENT
- // On platforms where 64-bit types require 64-bit alignment and don't obtain it naturally force us
- // through the slow path where this will be handled.
- && (elemType != ELEMENT_TYPE_I8)
- && (elemType != ELEMENT_TYPE_U8)
- && (elemType != ELEMENT_TYPE_R8)
-#endif
- )
- {
#ifdef _DEBUG
- if (g_pConfig->FastGCStressLevel()) {
- GetThread()->DisableStressHeap();
- }
-#endif // _DEBUG
-
- // Disallow the creation of void[] (an array of System.Void)
- if (elemType == ELEMENT_TYPE_VOID)
- COMPlusThrow(kArgumentException);
-
- BOOL bAllocateInLargeHeap = FALSE;
-#ifdef FEATURE_DOUBLE_ALIGNMENT_HINT
- if ((elemType == ELEMENT_TYPE_R8) &&
- (static_cast<DWORD>(size) >= g_pConfig->GetDoubleArrayToLargeObjectHeapThreshold()))
- {
- STRESS_LOG1(LF_GC, LL_INFO10, "Allocating double array of size %d to large object heap\n", size);
- bAllocateInLargeHeap = TRUE;
- }
-#endif
-
- if (g_pPredefinedArrayTypes[elemType] == NULL)
- {
- TypeHandle elemTypeHnd = TypeHandle(MscorlibBinder::GetElementType(elemType));
-
- g_pPredefinedArrayTypes[elemType] = ClassLoader::LoadArrayTypeThrowing(elemTypeHnd, ELEMENT_TYPE_SZARRAY, 0).AsArray();
- }
-
- newArray = FastAllocatePrimitiveArray(pArrayMT, static_cast<DWORD>(size), bAllocateInLargeHeap);
+ if (g_pConfig->FastGCStressLevel()) {
+ GetThread()->DisableStressHeap();
}
- else
- {
-#ifdef _DEBUG
- if (g_pConfig->FastGCStressLevel()) {
- GetThread()->DisableStressHeap();
- }
#endif // _DEBUG
- INT32 size32 = (INT32)size;
- newArray = AllocateArrayEx(pArrayMT, &size32, 1);
- }
+ newArray = AllocateSzArray(pArrayMT, (INT32)size);
HELPER_METHOD_FRAME_END();
return(OBJECTREFToObject(newArray));
diff --git a/src/vm/object.h b/src/vm/object.h
index 173036d249..4842a95b33 100644
--- a/src/vm/object.h
+++ b/src/vm/object.h
@@ -553,8 +553,8 @@ class ArrayBase : public Object
friend class GCHeap;
friend class CObjectHeader;
friend class Object;
- friend OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs, BOOL bAllocateInLargeHeap);
- friend OBJECTREF FastAllocatePrimitiveArray(MethodTable* arrayType, DWORD cElements, BOOL bAllocateInLargeHeap);
+ friend OBJECTREF AllocateSzArray(MethodTable *pArrayMT, INT32 length, GC_ALLOC_FLAGS flags, BOOL bAllocateInLargeHeap);
+ friend OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs, GC_ALLOC_FLAGS flags, BOOL bAllocateInLargeHeap);
friend FCDECL2(Object*, JIT_NewArr1VC_MP_FastPortable, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size);
friend FCDECL2(Object*, JIT_NewArr1OBJ_MP_FastPortable, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size);
friend class JIT_TrialAlloc;
@@ -721,7 +721,7 @@ class PtrArray : public ArrayBase
{
friend class GCHeap;
friend class ClrDataAccess;
- friend OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs, BOOL bAllocateInLargeHeap);
+ friend OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs, GC_ALLOC_FLAGS flags, BOOL bAllocateInLargeHeap);
friend class JIT_TrialAlloc;
friend class CheckAsmOffsets;
diff --git a/src/vm/qcall.cpp b/src/vm/qcall.cpp
index 1b7bbda3b7..bcad868343 100644
--- a/src/vm/qcall.cpp
+++ b/src/vm/qcall.cpp
@@ -72,7 +72,7 @@ void QCall::ObjectHandleOnStack::SetGuidArray(const GUID * p, COUNT_T length)
GCX_COOP();
TypeHandle typeHandle = MscorlibBinder::GetClass(CLASS__GUID);
- BASEARRAYREF arr = (BASEARRAYREF) AllocateValueSzArray(typeHandle, length);
+ BASEARRAYREF arr = (BASEARRAYREF) AllocateSzArray(typeHandle.MakeSZArray(), length);
memcpyNoGCRefs(arr->GetDataPtr(), p, length * sizeof(GUID));
Set(arr);
}
diff --git a/src/vm/runtimehandles.cpp b/src/vm/runtimehandles.cpp
index 911dcab150..1880d41773 100644
--- a/src/vm/runtimehandles.cpp
+++ b/src/vm/runtimehandles.cpp
@@ -764,7 +764,7 @@ PTRARRAYREF CopyRuntimeTypeHandles(TypeHandle * prgTH, FixupPointer<TypeHandle>
GCPROTECT_BEGIN(refArray);
TypeHandle thRuntimeType = TypeHandle(MscorlibBinder::GetClass(arrayElemType));
TypeHandle arrayHandle = ClassLoader::LoadArrayTypeThrowing(thRuntimeType, ELEMENT_TYPE_SZARRAY);
- refArray = (PTRARRAYREF)AllocateArrayEx(arrayHandle, &numTypeHandles, 1);
+ refArray = (PTRARRAYREF)AllocateSzArray(arrayHandle, numTypeHandles);
for (INT32 i = 0; i < numTypeHandles; i++)
{
@@ -852,7 +852,7 @@ FCIMPL1(PtrArray*, RuntimeTypeHandle::GetInterfaces, ReflectClassBaseObject *pTy
if (ifaceCount > 0)
{
TypeHandle arrayHandle = ClassLoader::LoadArrayTypeThrowing(TypeHandle(g_pRuntimeTypeClass), ELEMENT_TYPE_SZARRAY);
- refRetVal = (PTRARRAYREF)AllocateArrayEx(arrayHandle, &ifaceCount, 1);
+ refRetVal = (PTRARRAYREF)AllocateSzArray(arrayHandle, ifaceCount);
// populate type array
UINT i = 0;
@@ -1957,7 +1957,7 @@ FCIMPL3(Object *, SignatureNative::GetCustomModifiers, SignatureNative* pSignatu
MethodTable *pMT = MscorlibBinder::GetClass(CLASS__TYPE);
TypeHandle arrayHandle = ClassLoader::LoadArrayTypeThrowing(TypeHandle(pMT), ELEMENT_TYPE_SZARRAY);
- gc.retVal = (PTRARRAYREF) AllocateArrayEx(arrayHandle, &cMods, 1);
+ gc.retVal = (PTRARRAYREF) AllocateSzArray(arrayHandle, cMods);
while(cMods != 0)
{
@@ -2107,7 +2107,7 @@ FCIMPL6(void, SignatureNative::GetSignature,
INT32 nArgs = msig.NumFixedArgs();
TypeHandle arrayHandle = ClassLoader::LoadArrayTypeThrowing(TypeHandle(g_pRuntimeTypeClass), ELEMENT_TYPE_SZARRAY);
- PTRARRAYREF ptrArrayarguments = (PTRARRAYREF) AllocateArrayEx(arrayHandle, &nArgs, 1);
+ PTRARRAYREF ptrArrayarguments = (PTRARRAYREF) AllocateSzArray(arrayHandle, nArgs);
gc.pSig->SetArgumentArray(ptrArrayarguments);
for (INT32 i = 0; i < nArgs; i++)
@@ -2509,7 +2509,7 @@ FCIMPL2(RuntimeMethodBody *, RuntimeMethodHandle::GetMethodBody, ReflectMethodOb
// Allocate the array of exception clauses.
INT32 cEh = (INT32)header.EHCount();
const COR_ILMETHOD_SECT_EH* ehInfo = header.EH;
- gc.TempArray = (BASEARRAYREF) AllocateArrayEx(thEHClauseArray, &cEh, 1);
+ gc.TempArray = (BASEARRAYREF) AllocateSzArray(thEHClauseArray, cEh);
SetObjectReference((OBJECTREF*)&gc.MethodBodyObj->_exceptionClauses, gc.TempArray);
@@ -2545,7 +2545,7 @@ FCIMPL2(RuntimeMethodBody *, RuntimeMethodHandle::GetMethodBody, ReflectMethodOb
&sigTypeContext,
MetaSig::sigLocalVars);
INT32 cLocals = metaSig.NumFixedArgs();
- gc.TempArray = (BASEARRAYREF) AllocateArrayEx(thLocalVariableArray, &cLocals, 1);
+ gc.TempArray = (BASEARRAYREF) AllocateSzArray(thLocalVariableArray, cLocals);
SetObjectReference((OBJECTREF*)&gc.MethodBodyObj->_localVariables, gc.TempArray);
for (INT32 i = 0; i < cLocals; i ++)
@@ -2570,7 +2570,7 @@ FCIMPL2(RuntimeMethodBody *, RuntimeMethodHandle::GetMethodBody, ReflectMethodOb
else
{
INT32 cLocals = 0;
- gc.TempArray = (BASEARRAYREF) AllocateArrayEx(thLocalVariableArray, &cLocals, 1);
+ gc.TempArray = (BASEARRAYREF) AllocateSzArray(thLocalVariableArray, cLocals);
SetObjectReference((OBJECTREF*)&gc.MethodBodyObj->_localVariables, gc.TempArray);
}
}
diff --git a/src/vm/typeparse.cpp b/src/vm/typeparse.cpp
index 0794454be2..6d621bebab 100644
--- a/src/vm/typeparse.cpp
+++ b/src/vm/typeparse.cpp
@@ -1267,7 +1267,7 @@ TypeHandle TypeName::GetTypeFromAsm()
if (cGenericArgs > 0)
{
TypeHandle arrayHandle = ClassLoader::LoadArrayTypeThrowing(TypeHandle(g_pRuntimeTypeClass), ELEMENT_TYPE_SZARRAY);
- gc.refGenericArguments = (PTRARRAYREF)AllocateArrayEx(arrayHandle, &cGenericArgs, 1);
+ gc.refGenericArguments = (PTRARRAYREF)AllocateSzArray(arrayHandle, cGenericArgs);
}
// Instantiate generic arguments
for (INT32 i = 0; i < cGenericArgs; i++)
diff --git a/tests/src/GC/API/GC/AllocateUninitializedArray.cs b/tests/src/GC/API/GC/AllocateUninitializedArray.cs
new file mode 100644
index 0000000000..450fb6d8a1
--- /dev/null
+++ b/tests/src/GC/API/GC/AllocateUninitializedArray.cs
@@ -0,0 +1,129 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+// Tests GC.AllocateUninitializedArray<T>()
+
+using System;
+
+public class Test {
+
+
+ public static int Main() {
+ // allocate a bunch of SOH byte arrays and touch them.
+ var r = new Random(1234);
+ for (int i = 0; i < 10000; i++)
+ {
+ int size = r.Next(10000);
+ var arr = AllocUninitialized<byte>.Call(size);
+
+ if (size > 1)
+ {
+ arr[0] = 5;
+ arr[size - 1] = 17;
+ if (arr[0] != 5 || arr[size - 1] != 17)
+ {
+ Console.WriteLine("Scenario 1 for GC.AllocUninitialized() failed!");
+ return 1;
+ }
+ }
+ }
+
+ // allocate a bunch of LOH int arrays and touch them.
+ for (int i = 0; i < 1000; i++)
+ {
+ int size = r.Next(100000, 1000000);
+ var arr = AllocUninitialized<int>.Call(size);
+
+ arr[0] = 5;
+ arr[size - 1] = 17;
+ if (arr[0] != 5 || arr[size - 1] != 17)
+ {
+ Console.WriteLine("Scenario 2 for GC.AllocUninitialized() failed!");
+ return 1;
+ }
+ }
+
+ // allocate a string array
+ {
+ int i = 100;
+ var arr = AllocUninitialized<string>.Call(i);
+
+ arr[0] = "5";
+ arr[i - 1] = "17";
+ if (arr[0] != "5" || arr[i - 1] != "17")
+ {
+ Console.WriteLine("Scenario 3 for GC.AllocUninitialized() failed!");
+ return 1;
+ }
+ }
+
+ // allocate max size byte array
+ {
+ if (IntPtr.Size == 8)
+ {
+ int i = 0x7FFFFFC7;
+ var arr = AllocUninitialized<byte>.Call(i);
+
+ arr[0] = 5;
+ arr[i - 1] = 17;
+ if (arr[0] != 5 || arr[i - 1] != 17)
+ {
+ Console.WriteLine("Scenario 4 for GC.AllocUninitialized() failed!");
+ return 1;
+ }
+ }
+ }
+
+ // negative size
+ {
+ try
+ {
+ var arr = AllocUninitialized<byte>.Call(-1);
+
+ Console.WriteLine("Scenario 5 Expected exception!");
+ return 1;
+ }
+ catch (ArgumentOutOfRangeException)
+ {
+ }
+ }
+
+ // too large
+ {
+ try
+ {
+ var arr = AllocUninitialized<double>.Call(int.MaxValue);
+
+ Console.WriteLine("Scenario 6 Expected exception!");
+ return 1;
+ }
+ catch (OutOfMemoryException)
+ {
+ }
+ }
+
+
+        Console.WriteLine("Test for GC.AllocateUninitializedArray() passed!");
+ return 100;
+ }
+
+ //TODO: This should be removed once the API is public.
+ static class AllocUninitialized<T>
+ {
+ public static Func<int, T[]> Call = (i) =>
+ {
+ // replace the stub with actual impl.
+ Call = (Func<int, T[]>)typeof(System.GC).
+ GetMethod("AllocateUninitializedArray",
+ bindingAttr: System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Static,
+ binder: null,
+ new Type[] { typeof(int) },
+ modifiers: new System.Reflection.ParameterModifier[0]).
+ MakeGenericMethod(new Type[] { typeof(T) }).
+ CreateDelegate(typeof(Func<int, T[]>));
+
+ // call the impl.
+ return Call(i);
+ };
+ }
+}
diff --git a/tests/src/GC/API/GC/AllocateUninitializedArray.csproj b/tests/src/GC/API/GC/AllocateUninitializedArray.csproj
new file mode 100644
index 0000000000..930f4e4f07
--- /dev/null
+++ b/tests/src/GC/API/GC/AllocateUninitializedArray.csproj
@@ -0,0 +1,37 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="12.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <Import Project="$([MSBuild]::GetDirectoryNameOfFileAbove($(MSBuildThisFileDirectory), dir.props))\dir.props" />
+ <PropertyGroup>
+ <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
+ <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
+ <AssemblyName>$(MSBuildProjectName)</AssemblyName>
+ <SchemaVersion>2.0</SchemaVersion>
+ <ProjectGuid>{95DFC527-4DC1-495E-97D7-E94EE1F7140D}</ProjectGuid>
+ <OutputType>Exe</OutputType>
+ <ProjectTypeGuids>{786C830F-07A1-408B-BD7F-6EE04809D6DB};{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}</ProjectTypeGuids>
+ <SolutionDir Condition="$(SolutionDir) == '' Or $(SolutionDir) == '*Undefined*'">..\..\</SolutionDir>
+ <CLRTestPriority>0</CLRTestPriority>
+ </PropertyGroup>
+ <!-- Default configurations to help VS understand the configurations -->
+ <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' "></PropertyGroup>
+ <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' "></PropertyGroup>
+ <ItemGroup>
+ <CodeAnalysisDependentAssemblyPaths Condition=" '$(VS100COMNTOOLS)' != '' " Include="$(VS100COMNTOOLS)..\IDE\PrivateAssemblies">
+ <Visible>False</Visible>
+ </CodeAnalysisDependentAssemblyPaths>
+ </ItemGroup>
+ <PropertyGroup>
+ <!-- Set to 'Full' if the Debug? column is marked in the spreadsheet. Leave blank otherwise. -->
+ <DebugType>PdbOnly</DebugType>
+ <NoLogo>True</NoLogo>
+ <DefineConstants>$(DefineConstants);DESKTOP</DefineConstants>
+ </PropertyGroup>
+ <ItemGroup>
+ <Compile Include="AllocateUninitializedArray.cs" />
+ </ItemGroup>
+ <ItemGroup>
+ <Service Include="{82A7F48D-3B50-4B1E-B82E-3ADA8210C358}" />
+ </ItemGroup>
+ <Import Project="$([MSBuild]::GetDirectoryNameOfFileAbove($(MSBuildThisFileDirectory), dir.targets))\dir.targets" />
+ <PropertyGroup Condition=" '$(MsBuildProjectDirOverride)' != '' "></PropertyGroup>
+</Project>
diff --git a/tests/src/JIT/Methodical/doublearray/dblarray3.cs b/tests/src/JIT/Methodical/doublearray/dblarray3.cs
index 9a2667e693..c9f7f9b3bc 100644
--- a/tests/src/JIT/Methodical/doublearray/dblarray3.cs
+++ b/tests/src/JIT/Methodical/doublearray/dblarray3.cs
@@ -99,7 +99,7 @@ internal class DblArray3
public static void f3()
{
Array arr = Array.CreateInstance(typeof(double), 1000);
- if (GC.GetGeneration(arr) != 0)
+ if (GC.GetGeneration(arr) != s_LOH_GEN)
{
Console.WriteLine("Generation {0}", GC.GetGeneration(arr));
throw new Exception();