author    Vladimir Sadov <vsadov@microsoft.com>    2019-05-28 14:28:56 -0700
committer GitHub <noreply@github.com>              2019-05-28 14:28:56 -0700
commit    4ca032d49b0718eb40f2105f94db2b243cf94957 (patch)
tree      ef089aa06f4a7bab2ecce888b9b191613caf45d4
parent    2b08a111cc77d77221889700d169c9c636153355 (diff)
Using AllocateUninitializedArray in array pool (#24504)
* Just use `new T[]` when elements are not pointer-free.
* Reduce zeroing out when not necessary.
* Use AllocateUninitializedArray in ArrayPool.
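The allocation pattern the pool buckets switch to is sketched below. This is a minimal standalone model of the behavior the commit message describes, not the runtime code itself; in this commit the helper is still internal to System.Private.CoreLib, and the call shown assumes the public `GC.AllocateUninitializedArray<T>` shape that later runtimes expose.

using System;
using System.Runtime.CompilerServices;

static class AllocationSketch
{
    // Models the decision described by the commit message: skip zero-initialization
    // only when the element type carries no GC references.
    public static T[] Allocate<T>(int length)
    {
        if (RuntimeHelpers.IsReferenceOrContainsReferences<T>())
        {
            // Reference fields must start out as valid null references for the GC,
            // so fall back to the ordinary zeroed allocation ("just use new T[]").
            return new T[length];
        }

        // Pointer-free elements (byte, int, reference-free structs) may start with
        // arbitrary bytes; callers must treat the contents as unspecified.
        // Assumption: the public GC.AllocateUninitializedArray<T> of later runtimes;
        // in this commit the equivalent helper is internal to System.Private.CoreLib.
        return GC.AllocateUninitializedArray<T>(length);
    }
}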
-rw-r--r--  src/System.Private.CoreLib/shared/System/Buffers/ConfigurableArrayPool.cs               |  6
-rw-r--r--  src/System.Private.CoreLib/shared/System/Buffers/TlsOverPerCoreLockedStacksArrayPool.cs |  4
-rw-r--r--  src/System.Private.CoreLib/src/System/GC.cs                                             |  5
-rw-r--r--  src/gc/gc.cpp                                                                           | 42
4 files changed, 38 insertions(+), 19 deletions(-)
diff --git a/src/System.Private.CoreLib/shared/System/Buffers/ConfigurableArrayPool.cs b/src/System.Private.CoreLib/shared/System/Buffers/ConfigurableArrayPool.cs
index 6dd3063258..054a38fc60 100644
--- a/src/System.Private.CoreLib/shared/System/Buffers/ConfigurableArrayPool.cs
+++ b/src/System.Private.CoreLib/shared/System/Buffers/ConfigurableArrayPool.cs
@@ -100,13 +100,13 @@ namespace System.Buffers
// The pool was exhausted for this buffer size. Allocate a new buffer with a size corresponding
// to the appropriate bucket.
- buffer = new T[_buckets[index]._bufferLength];
+ buffer = GC.AllocateUninitializedArray<T>(_buckets[index]._bufferLength);
}
else
{
// The request was for a size too large for the pool. Allocate an array of exactly the requested length.
// When it's returned to the pool, we'll simply throw it away.
- buffer = new T[minimumLength];
+ buffer = GC.AllocateUninitializedArray<T>(minimumLength);
}
if (log.IsEnabled())
@@ -215,7 +215,7 @@ namespace System.Buffers
// for that slot, in which case we should do so now.
if (allocateBuffer)
{
- buffer = new T[_bufferLength];
+ buffer = GC.AllocateUninitializedArray<T>(_bufferLength);
var log = ArrayPoolEventSource.Log;
if (log.IsEnabled())
diff --git a/src/System.Private.CoreLib/shared/System/Buffers/TlsOverPerCoreLockedStacksArrayPool.cs b/src/System.Private.CoreLib/shared/System/Buffers/TlsOverPerCoreLockedStacksArrayPool.cs
index 47470715ec..7369ed8ca6 100644
--- a/src/System.Private.CoreLib/shared/System/Buffers/TlsOverPerCoreLockedStacksArrayPool.cs
+++ b/src/System.Private.CoreLib/shared/System/Buffers/TlsOverPerCoreLockedStacksArrayPool.cs
@@ -136,13 +136,13 @@ namespace System.Buffers
}
// No buffer available. Allocate a new buffer with a size corresponding to the appropriate bucket.
- buffer = new T[_bucketArraySizes[bucketIndex]];
+ buffer = GC.AllocateUninitializedArray<T>(_bucketArraySizes[bucketIndex]);
}
else
{
// The request was for a size too large for the pool. Allocate an array of exactly the requested length.
// When it's returned to the pool, we'll simply throw it away.
- buffer = new T[minimumLength];
+ buffer = GC.AllocateUninitializedArray<T>(minimumLength);
}
if (log.IsEnabled())
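Both pool implementations keep the existing ArrayPool contract: the contents of a rented buffer are unspecified, and with this change even a freshly allocated pooled array of a pointer-free element type may arrive uninitialized. A short usage sketch of how a caller that needs zeroed memory should react; only the public ArrayPool<T> and Array.Clear APIs are assumed.

using System;
using System.Buffers;

class RentUsage
{
    static void Main()
    {
        // Rent never promised zeroed contents; after this change even a freshly
        // allocated pooled array of a pointer-free element type may be uninitialized.
        byte[] buffer = ArrayPool<byte>.Shared.Rent(4096);
        try
        {
            // Clear explicitly only if the algorithm depends on zeroed memory.
            Array.Clear(buffer, 0, buffer.Length);

            // ... fill and use the buffer ...
        }
        finally
        {
            // clearArray defaults to false, so the next renter must not rely on the contents either.
            ArrayPool<byte>.Shared.Return(buffer);
        }
    }
}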
diff --git a/src/System.Private.CoreLib/src/System/GC.cs b/src/System.Private.CoreLib/src/System/GC.cs
index a5ea093b8f..4e7e1fbf92 100644
--- a/src/System.Private.CoreLib/src/System/GC.cs
+++ b/src/System.Private.CoreLib/src/System/GC.cs
@@ -660,6 +660,11 @@ namespace System
// the array is always zero-initialized.
internal static T[] AllocateUninitializedArray<T>(int length)
{
+ if (RuntimeHelpers.IsReferenceOrContainsReferences<T>())
+ {
+ return new T[length];
+ }
+
if (length < 0)
ThrowHelper.ThrowArgumentOutOfRangeException(ExceptionArgument.lengths, 0, ExceptionResource.ArgumentOutOfRange_NeedNonNegNum);
#if DEBUG
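The new guard in GC.cs sends any element type the GC must track back through the zero-initialized `new T[]` path, since an uninitialized object reference would look like a dangling pointer to the collector. A small illustration of which types the check catches follows; the struct names are hypothetical, while `RuntimeHelpers.IsReferenceOrContainsReferences<T>()` is the real public API used in the hunk above.

using System;
using System.Runtime.CompilerServices;

// Hypothetical element types used only for illustration.
struct PlainPoint { public int X, Y; }                  // no references: eligible for the uninitialized path
struct Labeled { public string Name; public int Id; }   // contains a reference: must stay zero-initialized

class ReferenceCheckDemo
{
    static void Main()
    {
        Console.WriteLine(RuntimeHelpers.IsReferenceOrContainsReferences<byte>());       // False
        Console.WriteLine(RuntimeHelpers.IsReferenceOrContainsReferences<PlainPoint>()); // False
        Console.WriteLine(RuntimeHelpers.IsReferenceOrContainsReferences<string>());     // True
        Console.WriteLine(RuntimeHelpers.IsReferenceOrContainsReferences<Labeled>());    // True
    }
}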
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index 3c8345922f..100e2c222f 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -11517,9 +11517,9 @@ void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size, size_t size,
if (gen_number == 0)
{
size_t pad_size = Align (min_obj_size, align_const);
- make_unused_array (acontext->alloc_ptr, pad_size);
dprintf (3, ("contigous ac: making min obj gap %Ix->%Ix(%Id)",
acontext->alloc_ptr, (acontext->alloc_ptr + pad_size), pad_size));
+ make_unused_array (acontext->alloc_ptr, pad_size);
acontext->alloc_ptr += pad_size;
}
}
@@ -11581,6 +11581,7 @@ void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size, size_t size,
*(PTR_PTR)clear_start = 0;
}
// skip the rest of the object
+ dprintf(3, ("zeroing optional: skipping object at %Ix->%Ix(%Id)", clear_start, obj_end, obj_end - clear_start));
clear_start = obj_end;
}
@@ -11643,7 +11644,10 @@ void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size, size_t size,
}
// verifying the memory is completely cleared.
- //verify_mem_cleared (start - plug_skew, limit_size);
+ //if (!(flags & GC_ALLOC_ZEROING_OPTIONAL))
+ //{
+ // verify_mem_cleared(start - plug_skew, limit_size);
+ //}
}
size_t gc_heap::new_allocation_limit (size_t size, size_t physical_limit, int gen_number)
@@ -12129,6 +12133,7 @@ void gc_heap::bgc_loh_alloc_clr (uint8_t* alloc_start,
add_saved_spinlock_info (true, me_release, mt_clr_large_mem);
leave_spin_lock (&more_space_lock_loh);
+ ((void**) alloc_start)[-1] = 0; //clear the sync block
if (!(flags & GC_ALLOC_ZEROING_OPTIONAL))
{
memclr(alloc_start + size_to_skip, size_to_clear);
@@ -12264,8 +12269,8 @@ BOOL gc_heap::a_fit_segment_end_p (int gen_number,
#endif //BACKGROUND_GC
uint8_t*& allocated = ((gen_number == 0) ?
- alloc_allocated :
- heap_segment_allocated(seg));
+ alloc_allocated :
+ heap_segment_allocated(seg));
size_t pad = Align (min_obj_size, align_const);
@@ -12327,33 +12332,42 @@ found_fit:
}
#endif //BACKGROUND_GC
- uint8_t* old_alloc;
- old_alloc = allocated;
#ifdef FEATURE_LOH_COMPACTION
if (gen_number == (max_generation + 1))
{
- make_unused_array (old_alloc, loh_pad);
- old_alloc += loh_pad;
+ make_unused_array (allocated, loh_pad);
allocated += loh_pad;
limit -= loh_pad;
}
#endif //FEATURE_LOH_COMPACTION
-#if defined (VERIFY_HEAP) && defined (_DEBUG)
- ((void**) allocated)[-1] = 0; //clear the sync block
-#endif //VERIFY_HEAP && _DEBUG
- allocated += limit;
-
dprintf (3, ("found fit at end of seg: %Ix", old_alloc));
+ uint8_t* old_alloc;
+ old_alloc = allocated;
+
#ifdef BACKGROUND_GC
if (cookie != -1)
{
+ allocated += limit;
bgc_loh_alloc_clr (old_alloc, limit, acontext, flags, align_const, cookie, TRUE, seg);
}
else
#endif //BACKGROUND_GC
- {
+ {
+ // In a contiguous AC case with GC_ALLOC_ZEROING_OPTIONAL, deduct unspent space from the limit to clear only what is necessary.
+ if ((flags & GC_ALLOC_ZEROING_OPTIONAL) &&
+ ((allocated == acontext->alloc_limit) || (allocated == (acontext->alloc_limit + Align (min_obj_size, align_const)))))
+ {
+ assert(gen_number == 0);
+ assert(allocated > acontext->alloc_ptr);
+
+ limit -= (allocated - acontext->alloc_ptr);
+ // add space for an AC continuity divider
+ limit += Align(min_obj_size, align_const);
+ }
+
+ allocated += limit;
adjust_limit_clr (old_alloc, limit, size, acontext, flags, seg, align_const, gen_number);
}