author     Vladimir Sadov <vsadov@microsoft.com>      2019-05-02 22:16:31 -0700
committer  Jan Kotas <jkotas@microsoft.com>           2019-05-02 22:16:31 -0700
commit     b271aff1fa54c1385143f3b45c1bf3af01c901cd (patch)
tree       69c76676a56a28979fd1c5c66db9d096afa98c6e /src/gc
parent     dd814e26e2206c36589f88b2c58a6f3695f7dc4e (diff)
System.GC.AllocateUninitializedArray (#24096)
* Do not expand to allocation_quantum in SOH when GC_ALLOC_ZEROING_OPTIONAL (sketched in the examples below)
* short-circuit short arrays to use `new T[size]`
* Clean syncblock of large-aligned objects on ARM32
* specialize single-dimensional path AllocateSzArray
* Unit tests
* Some PR feedback. Made AllocateUninitializedArray not be trimmed away.
* PR feedback on gchelpers
- replaced use of multiple bool parameters with flags enum
- merged some methods with nearly identical implementation
- switched callers to use AllocateSzArray vs. AllocateArrayEx where appropriate.
* PR feedback. Removed X86 specific array/string allocation helpers.
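The zeroing-optional idea behind the first and third bullets can be pictured with a small standalone model (all names here are illustrative, not the coreclr implementation): when the caller promises to overwrite the payload, the allocator may skip bulk clearing, but the header word that holds runtime metadata (the syncblock, in CLR terms) must still be zeroed so stale bits are never observed.

```cpp
// Standalone sketch with hypothetical names -- not the coreclr allocator.
// A bump allocator that skips bulk zeroing when the caller opts out, but
// always clears the header word (the syncblock analogue).
#include <cstdint>
#include <cstring>
#include <cstdio>

enum AllocFlags : uint32_t
{
    ALLOC_NO_FLAGS         = 0,
    ALLOC_ZEROING_OPTIONAL = 16, // mirrors GC_ALLOC_ZEROING_OPTIONAL
};

struct BumpRegion
{
    uint8_t* ptr;
    uint8_t* limit;

    void* allocate(size_t size, uint32_t flags)
    {
        if (ptr + size > limit)
            return nullptr;                // out of space; real code would grow

        uint8_t* obj = ptr;
        ptr += size;

        if (flags & ALLOC_ZEROING_OPTIONAL)
            memset(obj, 0, sizeof(void*)); // clear only the header word
        else
            memset(obj, 0, size);          // default: hand out zeroed memory
        return obj;
    }
};

int main()
{
    static uint8_t backing[1024];
    BumpRegion r{backing, backing + sizeof(backing)};
    void* a = r.allocate(128, ALLOC_NO_FLAGS);         // fully zeroed
    void* b = r.allocate(128, ALLOC_ZEROING_OPTIONAL); // payload left dirty
    printf("%p %p\n", a, b);
}
```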
Diffstat (limited to 'src/gc')

| mode       | file                 | changes |
|------------|----------------------|---------|
| -rw-r--r-- | src/gc/gc.cpp        | 176     |
| -rw-r--r-- | src/gc/gcinterface.h | 28      |
| -rw-r--r-- | src/gc/gcpriv.h      | 24      |

3 files changed, 155 insertions(+), 73 deletions(-)
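The first commit bullet shows up in the `limit_from_size` hunk below: gen0 requests are normally rounded up to the allocation quantum so that the cost of clearing is amortized, but when zeroing is optional there is nothing to amortize. A minimal sketch of that decision, with an assumed quantum value:

```cpp
// Sketch of the limit_from_size policy change (mock constants; the real
// allocation_quantum is a gc_heap member, not a fixed value).
#include <algorithm>
#include <cstddef>
#include <cstdio>

static const size_t allocation_quantum = 8 * 1024; // illustrative value
static const unsigned ZEROING_OPTIONAL = 16;

size_t desired_allocation(size_t padded_size, int gen_number, unsigned flags)
{
    // round small gen0 requests up to the quantum, unless the caller
    // opted out of zeroing -- then take only what was asked for
    size_t min_size = (gen_number == 0 && !(flags & ZEROING_OPTIONAL))
                          ? allocation_quantum : 0;
    return std::max(padded_size, min_size);
}

int main()
{
    printf("%zu\n", desired_allocation(256, 0, 0));                // 8192
    printf("%zu\n", desired_allocation(256, 0, ZEROING_OPTIONAL)); // 256
}
```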
```diff
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index 42e93fefc4..4f77e1d75d 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -11458,9 +11458,9 @@ void allocator::commit_alloc_list_changes()
     }
 }
 
-void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size,
-                                alloc_context* acontext, heap_segment* seg,
-                                int align_const, int gen_number)
+void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size, size_t size,
+                                alloc_context* acontext, uint32_t flags,
+                                heap_segment* seg, int align_const, int gen_number)
 {
     bool loh_p = (gen_number > 0);
     GCSpinLock* msl = loh_p ? &more_space_lock_loh : &more_space_lock_soh;
@@ -11491,12 +11491,12 @@ void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size,
     uint8_t* hole = acontext->alloc_ptr;
     if (hole != 0)
     {
-        size_t size = (acontext->alloc_limit - acontext->alloc_ptr);
-        dprintf (3, ("filling up hole [%Ix, %Ix[", (size_t)hole, (size_t)hole + size + Align (min_obj_size, align_const)));
+        size_t ac_size = (acontext->alloc_limit - acontext->alloc_ptr);
+        dprintf (3, ("filling up hole [%Ix, %Ix[", (size_t)hole, (size_t)hole + ac_size + Align (min_obj_size, align_const)));
         // when we are finishing an allocation from a free list
         // we know that the free area was Align(min_obj_size) larger
-        acontext->alloc_bytes -= size;
-        size_t free_obj_size = size + aligned_min_obj_size;
+        acontext->alloc_bytes -= ac_size;
+        size_t free_obj_size = ac_size + aligned_min_obj_size;
         make_unused_array (hole, free_obj_size);
         generation_free_obj_space (generation_of (gen_number)) += free_obj_size;
     }
@@ -11555,32 +11555,60 @@ void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size,
         assert (heap_segment_used (seg) >= old_allocated);
     }
 #endif //BACKGROUND_GC
-    if ((seg == 0) ||
-        (start - plug_skew + limit_size) <= heap_segment_used (seg))
+
+    // we are going to clear a right-edge exclusive span [clear_start, clear_limit)
+    // but will adjust for cases when object is ok to stay dirty or the space has not seen any use yet
+    // NB: the size and limit_size include syncblock, which is to the -1 of the object start
+    //     that effectively shifts the allocation by `plug_skew`
+    uint8_t* clear_start = start - plug_skew;
+    uint8_t* clear_limit = start + limit_size - plug_skew;
+
+    if (flags & GC_ALLOC_ZEROING_OPTIONAL)
+    {
+        uint8_t* obj_start = acontext->alloc_ptr;
+        assert(start >= obj_start);
+        uint8_t* obj_end = obj_start + size - plug_skew;
+        assert(obj_end > clear_start);
+
+        // if clearing at the object start, clear the syncblock.
+        if(obj_start == start)
+        {
+            *(PTR_PTR)clear_start = 0;
+        }
+        // skip the rest of the object
+        clear_start = obj_end;
+    }
+
+    // check if space to clear is all dirty from prior use or only partially
+    if ((seg == 0) || (clear_limit <= heap_segment_used (seg)))
     {
         add_saved_spinlock_info (loh_p, me_release, mt_clr_mem);
         leave_spin_lock (msl);
-        dprintf (3, ("clearing memory at %Ix for %d bytes", (start - plug_skew), limit_size));
-        memclr (start - plug_skew, limit_size);
+
+        if (clear_start < clear_limit)
+        {
+            dprintf(3, ("clearing memory at %Ix for %d bytes", clear_start, clear_limit - clear_start));
+            memclr(clear_start, clear_limit - clear_start);
+        }
     }
     else
     {
+        // we only need to clear [clear_start, used) and only if clear_start < used
         uint8_t* used = heap_segment_used (seg);
-        heap_segment_used (seg) = start + limit_size - plug_skew;
+        heap_segment_used (seg) = clear_limit;
 
         add_saved_spinlock_info (loh_p, me_release, mt_clr_mem);
         leave_spin_lock (msl);
 
-        if ((start - plug_skew) < used)
+        if (clear_start < used)
         {
             if (used != saved_used)
             {
                 FATAL_GC_ERROR ();
             }
-            dprintf (2, ("clearing memory before used at %Ix for %Id bytes",
-                (start - plug_skew), (plug_skew + used - start)));
-            memclr (start - plug_skew, used - (start - plug_skew));
+            dprintf (2, ("clearing memory before used at %Ix for %Id bytes", clear_start, used - clear_start));
+            memclr (clear_start, used - clear_start);
         }
     }
@@ -11627,17 +11655,18 @@ size_t gc_heap::new_allocation_limit (size_t size, size_t physical_limit, int ge
     return limit;
 }
 
-size_t gc_heap::limit_from_size (size_t size, size_t physical_limit, int gen_number,
+size_t gc_heap::limit_from_size (size_t size, uint32_t flags, size_t physical_limit, int gen_number,
                                  int align_const)
 {
     size_t padded_size = size + Align (min_obj_size, align_const);
     // for LOH this is not true...we could select a physical_limit that's exactly the same
     // as size.
     assert ((gen_number != 0) || (physical_limit >= padded_size));
-    size_t min_size_to_allocate = ((gen_number == 0) ? allocation_quantum : 0);
 
-    // For SOH if the size asked for is very small, we want to allocate more than
-    // just what's asked for if possible.
+    // For SOH if the size asked for is very small, we want to allocate more than just what's asked for if possible.
+    // Unless we were told not to clean, then we will not force it.
+    size_t min_size_to_allocate = ((gen_number == 0 && !(flags & GC_ALLOC_ZEROING_OPTIONAL)) ? allocation_quantum : 0);
+
     size_t desired_size_to_allocate = max (padded_size, min_size_to_allocate);
     size_t new_physical_limit = min (physical_limit, desired_size_to_allocate);
@@ -11959,6 +11988,7 @@ inline
 BOOL gc_heap::a_fit_free_list_p (int gen_number,
                                  size_t size,
                                  alloc_context* acontext,
+                                 uint32_t flags,
                                  int align_const)
 {
     BOOL can_fit = FALSE;
@@ -11985,7 +12015,7 @@ BOOL gc_heap::a_fit_free_list_p (int gen_number,
             // We ask for more Align (min_obj_size)
             // to make sure that we can insert a free object
             // in adjust_limit will set the limit lower
-            size_t limit = limit_from_size (size, free_list_size, gen_number, align_const);
+            size_t limit = limit_from_size (size, flags, free_list_size, gen_number, align_const);
 
             uint8_t* remain = (free_list + limit);
             size_t remain_size = (free_list_size - limit);
@@ -12002,7 +12032,7 @@ BOOL gc_heap::a_fit_free_list_p (int gen_number,
             }
             generation_free_list_space (gen) -= limit;
 
-            adjust_limit_clr (free_list, limit, acontext, 0, align_const, gen_number);
+            adjust_limit_clr (free_list, limit, size, acontext, flags, 0, align_const, gen_number);
 
             can_fit = TRUE;
             goto end;
@@ -12034,6 +12064,7 @@ end:
 void gc_heap::bgc_loh_alloc_clr (uint8_t* alloc_start,
                                  size_t size,
                                  alloc_context* acontext,
+                                 uint32_t flags,
                                  int align_const,
                                  int lock_index,
                                  BOOL check_used_p,
@@ -12097,7 +12128,11 @@ void gc_heap::bgc_loh_alloc_clr (uint8_t* alloc_start,
     dprintf (SPINLOCK_LOG, ("[%d]Lmsl to clear large obj", heap_number));
     add_saved_spinlock_info (true, me_release, mt_clr_large_mem);
     leave_spin_lock (&more_space_lock_loh);
-    memclr (alloc_start + size_to_skip, size_to_clear);
+
+    if (!(flags & GC_ALLOC_ZEROING_OPTIONAL))
+    {
+        memclr(alloc_start + size_to_skip, size_to_clear);
+    }
 
     bgc_alloc_lock->loh_alloc_set (alloc_start);
@@ -12111,6 +12146,7 @@ void gc_heap::bgc_loh_alloc_clr (uint8_t* alloc_start,
 BOOL gc_heap::a_fit_free_list_large_p (size_t size,
                                        alloc_context* acontext,
+                                       uint32_t flags,
                                        int align_const)
 {
     BOOL can_fit = FALSE;
@@ -12154,7 +12190,7 @@ BOOL gc_heap::a_fit_free_list_large_p (size_t size,
                 loh_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE);
 
                 // Substract min obj size because limit_from_size adds it. Not needed for LOH
-                size_t limit = limit_from_size (size - Align(min_obj_size, align_const), free_list_size,
+                size_t limit = limit_from_size (size - Align(min_obj_size, align_const), flags, free_list_size,
                                                 gen_number, align_const);
 
 #ifdef FEATURE_LOH_COMPACTION
@@ -12185,12 +12221,12 @@ BOOL gc_heap::a_fit_free_list_large_p (size_t size,
 #ifdef BACKGROUND_GC
                 if (cookie != -1)
                 {
-                    bgc_loh_alloc_clr (free_list, limit, acontext, align_const, cookie, FALSE, 0);
+                    bgc_loh_alloc_clr (free_list, limit, acontext, flags, align_const, cookie, FALSE, 0);
                 }
                 else
 #endif //BACKGROUND_GC
                 {
-                    adjust_limit_clr (free_list, limit, acontext, 0, align_const, gen_number);
+                    adjust_limit_clr (free_list, limit, size, acontext, flags, 0, align_const, gen_number);
                 }
 
                 //fix the limit to compensate for adjust_limit_clr making it too short
@@ -12216,6 +12252,7 @@ BOOL gc_heap::a_fit_segment_end_p (int gen_number,
                                    heap_segment* seg,
                                    size_t size,
                                    alloc_context* acontext,
+                                   uint32_t flags,
                                    int align_const,
                                    BOOL* commit_failed_p)
 {
@@ -12245,6 +12282,7 @@ BOOL gc_heap::a_fit_segment_end_p (int gen_number,
     if (a_size_fit_p (size, allocated, end, align_const))
     {
         limit = limit_from_size (size,
+                                 flags,
                                  (end - allocated),
                                  gen_number, align_const);
         goto found_fit;
@@ -12255,6 +12293,7 @@
         if (a_size_fit_p (size, allocated, end, align_const))
         {
             limit = limit_from_size (size,
+                                     flags,
                                      (end - allocated),
                                      gen_number, align_const);
@@ -12310,12 +12349,12 @@ found_fit:
 #ifdef BACKGROUND_GC
     if (cookie != -1)
     {
-        bgc_loh_alloc_clr (old_alloc, limit, acontext, align_const, cookie, TRUE, seg);
+        bgc_loh_alloc_clr (old_alloc, limit, acontext, flags, align_const, cookie, TRUE, seg);
     }
     else
 #endif //BACKGROUND_GC
     {
-        adjust_limit_clr (old_alloc, limit, acontext, seg, align_const, gen_number);
+        adjust_limit_clr (old_alloc, limit, size, acontext, flags, seg, align_const, gen_number);
     }
 
     return TRUE;
@@ -12328,6 +12367,7 @@ found_no_fit:
 BOOL gc_heap::loh_a_fit_segment_end_p (int gen_number,
                                        size_t size,
                                        alloc_context* acontext,
+                                       uint32_t flags,
                                        int align_const,
                                        BOOL* commit_failed_p,
                                        oom_reason* oom_r)
@@ -12347,7 +12387,7 @@ BOOL gc_heap::loh_a_fit_segment_end_p (int gen_number,
 #endif //BACKGROUND_GC
         {
             if (a_fit_segment_end_p (gen_number, seg, (size - Align (min_obj_size, align_const)),
-                                     acontext, align_const, commit_failed_p))
+                                     acontext, flags, align_const, commit_failed_p))
             {
                 acontext->alloc_limit += Align (min_obj_size, align_const);
                 can_allocate_p = TRUE;
@@ -12430,6 +12470,7 @@ BOOL gc_heap::trigger_ephemeral_gc (gc_reason gr)
 BOOL gc_heap::soh_try_fit (int gen_number,
                            size_t size,
                            alloc_context* acontext,
+                           uint32_t flags,
                            int align_const,
                            BOOL* commit_failed_p,
                            BOOL* short_seg_end_p)
@@ -12440,7 +12481,7 @@ BOOL gc_heap::soh_try_fit (int gen_number,
         *short_seg_end_p = FALSE;
     }
 
-    can_allocate = a_fit_free_list_p (gen_number, size, acontext, align_const);
+    can_allocate = a_fit_free_list_p (gen_number, size, acontext, flags, align_const);
     if (!can_allocate)
     {
         if (short_seg_end_p)
@@ -12452,7 +12493,7 @@
        if (!short_seg_end_p || !(*short_seg_end_p))
        {
            can_allocate = a_fit_segment_end_p (gen_number, ephemeral_heap_segment, size,
-                                               acontext, align_const, commit_failed_p);
+                                               acontext, flags, align_const, commit_failed_p);
        }
    }
@@ -12462,6 +12503,7 @@
 allocation_state gc_heap::allocate_small (int gen_number,
                                           size_t size,
                                           alloc_context* acontext,
+                                          uint32_t flags,
                                           int align_const)
 {
 #if defined (BACKGROUND_GC) && !defined (MULTIPLE_HEAPS)
@@ -12515,7 +12557,7 @@ allocation_state gc_heap::allocate_small (int gen_number,
                 BOOL commit_failed_p = FALSE;
                 BOOL can_use_existing_p = FALSE;
 
-                can_use_existing_p = soh_try_fit (gen_number, size, acontext,
+                can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags,
                                                   align_const, &commit_failed_p,
                                                   NULL);
                 soh_alloc_state = (can_use_existing_p ?
@@ -12531,7 +12573,7 @@
                 BOOL can_use_existing_p = FALSE;
                 BOOL short_seg_end_p = FALSE;
 
-                can_use_existing_p = soh_try_fit (gen_number, size, acontext,
+                can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags,
                                                   align_const, &commit_failed_p,
                                                   &short_seg_end_p);
                 soh_alloc_state = (can_use_existing_p ?
@@ -12547,7 +12589,7 @@
                 BOOL can_use_existing_p = FALSE;
                 BOOL short_seg_end_p = FALSE;
 
-                can_use_existing_p = soh_try_fit (gen_number, size, acontext,
+                can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags,
                                                   align_const, &commit_failed_p,
                                                   &short_seg_end_p);
@@ -12602,7 +12644,7 @@
                 }
                 else
                 {
-                    can_use_existing_p = soh_try_fit (gen_number, size, acontext,
+                    can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags,
                                                       align_const, &commit_failed_p,
                                                       &short_seg_end_p);
 #ifdef BACKGROUND_GC
@@ -12664,7 +12706,7 @@
                 }
                 else
                 {
-                    can_use_existing_p = soh_try_fit (gen_number, size, acontext,
+                    can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags,
                                                       align_const, &commit_failed_p,
                                                       &short_seg_end_p);
                     if (short_seg_end_p || commit_failed_p)
@@ -12869,16 +12911,17 @@ BOOL gc_heap::check_and_wait_for_bgc (alloc_wait_reason awr,
 BOOL gc_heap::loh_try_fit (int gen_number,
                            size_t size,
                            alloc_context* acontext,
+                           uint32_t flags,
                            int align_const,
                            BOOL* commit_failed_p,
                            oom_reason* oom_r)
 {
     BOOL can_allocate = TRUE;
 
-    if (!a_fit_free_list_large_p (size, acontext, align_const))
+    if (!a_fit_free_list_large_p (size, acontext, flags, align_const))
     {
         can_allocate = loh_a_fit_segment_end_p (gen_number, size,
-                                                acontext, align_const,
+                                                acontext, flags, align_const,
                                                 commit_failed_p, oom_r);
 
 #ifdef BACKGROUND_GC
@@ -13009,6 +13052,7 @@ bool gc_heap::should_retry_other_heap (size_t size)
 allocation_state gc_heap::allocate_large (int gen_number,
                                           size_t size,
                                           alloc_context* acontext,
+                                          uint32_t flags,
                                           int align_const)
 {
 #ifdef BACKGROUND_GC
@@ -13077,7 +13121,7 @@ allocation_state gc_heap::allocate_large (int gen_number,
                 BOOL commit_failed_p = FALSE;
                 BOOL can_use_existing_p = FALSE;
 
-                can_use_existing_p = loh_try_fit (gen_number, size, acontext,
+                can_use_existing_p = loh_try_fit (gen_number, size, acontext, flags,
                                                   align_const, &commit_failed_p, &oom_r);
                 loh_alloc_state = (can_use_existing_p ?
                                         a_state_can_allocate :
@@ -13092,7 +13136,7 @@
                 BOOL commit_failed_p = FALSE;
                 BOOL can_use_existing_p = FALSE;
 
-                can_use_existing_p = loh_try_fit (gen_number, size, acontext,
+                can_use_existing_p = loh_try_fit (gen_number, size, acontext, flags,
                                                   align_const, &commit_failed_p, &oom_r);
                 // Even after we got a new seg it doesn't necessarily mean we can allocate,
                 // another LOH allocating thread could have beat us to acquire the msl so
@@ -13106,7 +13150,7 @@
                 BOOL commit_failed_p = FALSE;
                 BOOL can_use_existing_p = FALSE;
 
-                can_use_existing_p = loh_try_fit (gen_number, size, acontext,
+                can_use_existing_p = loh_try_fit (gen_number, size, acontext, flags,
                                                   align_const, &commit_failed_p, &oom_r);
                 // If we failed to commit, we bail right away 'cause we already did a
                 // full compacting GC.
@@ -13123,7 +13167,7 @@
                 BOOL commit_failed_p = FALSE;
                 BOOL can_use_existing_p = FALSE;
 
-                can_use_existing_p = loh_try_fit (gen_number, size, acontext,
+                can_use_existing_p = loh_try_fit (gen_number, size, acontext, flags,
                                                   align_const, &commit_failed_p, &oom_r);
                 loh_alloc_state = (can_use_existing_p ?
                                         a_state_can_allocate :
@@ -13296,8 +13340,8 @@ void gc_heap::trigger_gc_for_alloc (int gen_number, gc_reason gr,
 #endif //BACKGROUND_GC
 }
 
-allocation_state gc_heap::try_allocate_more_space (alloc_context* acontext, size_t size,
-                                                   int gen_number)
+allocation_state gc_heap::try_allocate_more_space (alloc_context* acontext, size_t size,
+                                                   uint32_t flags, int gen_number)
 {
     if (gc_heap::gc_started)
     {
@@ -13385,8 +13429,8 @@ allocation_state gc_heap::try_allocate_more_space (alloc_context* acontext, size
     }
 
     allocation_state can_allocate = ((gen_number == 0) ?
-        allocate_small (gen_number, size, acontext, align_const) :
-        allocate_large (gen_number, size, acontext, align_const));
+        allocate_small (gen_number, size, acontext, flags, align_const) :
+        allocate_large (gen_number, size, acontext, flags, align_const));
 
     if (can_allocate == a_state_can_allocate)
     {
@@ -13403,13 +13447,16 @@ allocation_state gc_heap::try_allocate_more_space (alloc_context* acontext, size
         FIRE_EVENT(GCAllocationTick_V1, (uint32_t)etw_allocation_running_amount[etw_allocation_index],
                    (gen_number == 0) ? gc_etw_alloc_soh : gc_etw_alloc_loh);
 #else
+
+#if defined(FEATURE_EVENT_TRACE)
+        // We are explicitly checking whether the event is enabled here.
         // Unfortunately some of the ETW macros do not check whether the ETW feature is enabled.
         // The ones that do are much less efficient.
-#if defined(FEATURE_EVENT_TRACE)
         if (EVENT_ENABLED(GCAllocationTick_V3))
         {
             fire_etw_allocation_event (etw_allocation_running_amount[etw_allocation_index], gen_number, acontext->alloc_ptr);
         }
+
 #endif //FEATURE_EVENT_TRACE
 #endif //FEATURE_REDHAWK
         etw_allocation_running_amount[etw_allocation_index] = 0;
@@ -13673,7 +13720,7 @@ try_again:
 #endif //MULTIPLE_HEAPS
 
 BOOL gc_heap::allocate_more_space(alloc_context* acontext, size_t size,
-                                  int alloc_generation_number)
+                                  uint32_t flags, int alloc_generation_number)
 {
     allocation_state status = a_state_start;
     do
@@ -13682,7 +13729,7 @@ BOOL gc_heap::allocate_more_space(alloc_context* acontext, size_t size,
         if (alloc_generation_number == 0)
         {
             balance_heaps (acontext);
-            status = acontext->get_alloc_heap()->pGenGCHeap->try_allocate_more_space (acontext, size, alloc_generation_number);
+            status = acontext->get_alloc_heap()->pGenGCHeap->try_allocate_more_space (acontext, size, flags, alloc_generation_number);
         }
         else
         {
@@ -13700,14 +13747,14 @@
                 alloc_heap = balance_heaps_loh (acontext, size);
             }
 
-            status = alloc_heap->try_allocate_more_space (acontext, size, alloc_generation_number);
+            status = alloc_heap->try_allocate_more_space (acontext, size, flags, alloc_generation_number);
             if (status == a_state_retry_allocate)
            {
                 dprintf (3, ("LOH h%d alloc retry!", alloc_heap->heap_number));
            }
        }
 #else
-        status = try_allocate_more_space (acontext, size, alloc_generation_number);
+        status = try_allocate_more_space (acontext, size, flags, alloc_generation_number);
 #endif //MULTIPLE_HEAPS
     }
     while (status == a_state_retry_allocate);
@@ -13716,7 +13763,7 @@
 }
 
 inline
-CObjectHeader* gc_heap::allocate (size_t jsize, alloc_context* acontext)
+CObjectHeader* gc_heap::allocate (size_t jsize, alloc_context* acontext, uint32_t flags)
 {
     size_t size = Align (jsize);
     assert (size >= Align (min_obj_size));
@@ -13738,7 +13785,7 @@ CObjectHeader* gc_heap::allocate (size_t jsize, alloc_context* acontext)
 #pragma inline_depth(0)
 #endif //_MSC_VER
 
-        if (! allocate_more_space (acontext, size, 0))
+        if (! allocate_more_space (acontext, size, flags, 0))
             return 0;
 
 #ifdef _MSC_VER
@@ -31142,7 +31189,7 @@ BOOL gc_heap::ephemeral_gen_fit_p (gc_tuning_point tp)
     }
 }
 
-CObjectHeader* gc_heap::allocate_large_object (size_t jsize, int64_t& alloc_bytes)
+CObjectHeader* gc_heap::allocate_large_object (size_t jsize, uint32_t flags, int64_t& alloc_bytes)
 {
     //create a new alloc context because gen3context is shared.
     alloc_context acontext;
@@ -31175,7 +31222,7 @@ CObjectHeader* gc_heap::allocate_large_object (size_t jsize, int64_t& alloc_byte
 #ifdef _MSC_VER
 #pragma inline_depth(0)
 #endif //_MSC_VER
-    if (! allocate_more_space (&acontext, (size + pad), max_generation+1))
+    if (! allocate_more_space (&acontext, (size + pad), flags, max_generation+1))
     {
         return 0;
     }
@@ -34781,7 +34828,7 @@ bool GCHeap::StressHeap(gc_alloc_context * context)
         // update the cached type handle before allocating
         SetTypeHandleOnThreadForAlloc(TypeHandle(g_pStringClass));
-        str = (StringObject*) pGenGCHeap->allocate (strSize, acontext);
+        str = (StringObject*) pGenGCHeap->allocate (strSize, acontext, /*flags*/ 0);
         if (str)
         {
             str->SetMethodTable (g_pStringClass);
@@ -34948,7 +34995,7 @@ GCHeap::AllocAlign8Common(void* _hp, alloc_context* acontext, size_t size, uint3
         if ((((size_t)result & 7) == desiredAlignment) && ((result + size) <= acontext->alloc_limit))
        {
             // Yes, we can just go ahead and make the allocation.
-            newAlloc = (Object*) hp->allocate (size, acontext);
+            newAlloc = (Object*) hp->allocate (size, acontext, flags);
             ASSERT(((size_t)newAlloc & 7) == desiredAlignment);
         }
         else
@@ -34961,7 +35008,7 @@
             // We allocate both together then decide based on the result whether we'll format the space as
             // free object + real object or real object + free object.
             ASSERT((Align(min_obj_size) & 7) == 4);
-            CObjectHeader *freeobj = (CObjectHeader*) hp->allocate (Align(size) + Align(min_obj_size), acontext);
+            CObjectHeader *freeobj = (CObjectHeader*) hp->allocate (Align(size) + Align(min_obj_size), acontext, flags);
             if (freeobj)
             {
                 if (((size_t)freeobj & 7) == desiredAlignment)
@@ -34977,6 +35024,11 @@
                     // rest of the space should be correctly aligned for the real object.
                     newAlloc = (Object*)((uint8_t*)freeobj + Align(min_obj_size));
                     ASSERT(((size_t)newAlloc & 7) == desiredAlignment);
+                    if (flags & GC_ALLOC_ZEROING_OPTIONAL)
+                    {
+                        // clean the syncblock of the aligned object.
+                        *(((PTR_PTR)newAlloc)-1) = 0;
+                    }
                 }
                 freeobj->SetFree(min_obj_size);
             }
@@ -34993,7 +35045,7 @@
         alloc_context* acontext = generation_alloc_context (hp->generation_of (max_generation+1));
 
-        newAlloc = (Object*) hp->allocate_large_object (size, acontext->alloc_bytes_loh);
+        newAlloc = (Object*) hp->allocate_large_object (size, flags, acontext->alloc_bytes_loh);
         ASSERT(((size_t)newAlloc & 7) == 0);
     }
@@ -35055,7 +35107,7 @@ GCHeap::AllocLHeap( size_t size, uint32_t flags REQD_ALIGN_DCL)
     alloc_context* acontext = generation_alloc_context (hp->generation_of (max_generation+1));
 
-    newAlloc = (Object*) hp->allocate_large_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), acontext->alloc_bytes_loh);
+    newAlloc = (Object*) hp->allocate_large_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), flags, acontext->alloc_bytes_loh);
 #ifdef FEATURE_STRUCTALIGN
     newAlloc = (Object*) hp->pad_for_alignment_large ((uint8_t*) newAlloc, requiredAlignment, size);
 #endif // FEATURE_STRUCTALIGN
@@ -35118,7 +35170,7 @@ GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_
 #ifdef TRACE_GC
         AllocSmallCount++;
 #endif //TRACE_GC
-        newAlloc = (Object*) hp->allocate (size + ComputeMaxStructAlignPad(requiredAlignment), acontext);
+        newAlloc = (Object*) hp->allocate (size + ComputeMaxStructAlignPad(requiredAlignment), acontext, flags);
 #ifdef FEATURE_STRUCTALIGN
         newAlloc = (Object*) hp->pad_for_alignment ((uint8_t*) newAlloc, requiredAlignment, size, acontext);
 #endif // FEATURE_STRUCTALIGN
@@ -35126,7 +35178,7 @@
     }
     else
     {
-        newAlloc = (Object*) hp->allocate_large_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), acontext->alloc_bytes_loh);
+        newAlloc = (Object*) hp->allocate_large_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), flags, acontext->alloc_bytes_loh);
 #ifdef FEATURE_STRUCTALIGN
         newAlloc = (Object*) hp->pad_for_alignment_large ((uint8_t*) newAlloc, requiredAlignment, size);
 #endif // FEATURE_STRUCTALIGN
```
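The heart of the gc.cpp change is the span arithmetic in `adjust_limit_clr`: compute the right-exclusive range `[clear_start, clear_limit)`, and when `GC_ALLOC_ZEROING_OPTIONAL` is set, advance `clear_start` past the object so only the tail of the allocation limit gets cleared (the syncblock slot is zeroed separately). A self-contained sketch of just that computation, with mock inputs (all names illustrative; `plug_skew` plays the role of the syncblock shift):

```cpp
// Extracted model of the clear-span computation from the hunk above.
#include <cassert>
#include <cstdint>
#include <cstdio>

struct ClearSpan { uint8_t* start; uint8_t* limit; };

ClearSpan compute_clear_span(uint8_t* start, size_t limit_size, size_t size,
                             uint8_t* alloc_ptr, size_t plug_skew,
                             bool zeroing_optional)
{
    // right-edge exclusive span [clear_start, clear_limit)
    uint8_t* clear_start = start - plug_skew;
    uint8_t* clear_limit = start + limit_size - plug_skew;

    if (zeroing_optional)
    {
        uint8_t* obj_end = alloc_ptr + size - plug_skew;
        assert(obj_end > clear_start);
        // the object itself may stay dirty; only clear what lies past it
        clear_start = obj_end;
    }
    return {clear_start, clear_limit};
}

int main()
{
    uint8_t block[256];
    // a 64-byte object at the start of a 128-byte limit, with an 8-byte skew
    ClearSpan s = compute_clear_span(block + 8, 128, 64, block + 8, 8, true);
    printf("bytes to clear: %td\n", s.limit - s.start); // 128 - 64 = 64
}
```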
```diff
diff --git a/src/gc/gcinterface.h b/src/gc/gcinterface.h
index e6c2ccb862..c710d6d321 100644
--- a/src/gc/gcinterface.h
+++ b/src/gc/gcinterface.h
@@ -910,10 +910,30 @@ void updateGCShadow(Object** ptr, Object* val);
 #define GC_CALL_CHECK_APP_DOMAIN    0x4
 
 //flags for IGCHeapAlloc(...)
-#define GC_ALLOC_FINALIZE 0x1
-#define GC_ALLOC_CONTAINS_REF 0x2
-#define GC_ALLOC_ALIGN8_BIAS 0x4
-#define GC_ALLOC_ALIGN8 0x8
+enum GC_ALLOC_FLAGS
+{
+    GC_ALLOC_NO_FLAGS           = 0,
+    GC_ALLOC_FINALIZE           = 1,
+    GC_ALLOC_CONTAINS_REF       = 2,
+    GC_ALLOC_ALIGN8_BIAS        = 4,
+    GC_ALLOC_ALIGN8             = 8,
+    GC_ALLOC_ZEROING_OPTIONAL   = 16,
+};
+
+inline GC_ALLOC_FLAGS operator|(GC_ALLOC_FLAGS a, GC_ALLOC_FLAGS b)
+{return (GC_ALLOC_FLAGS)((int)a | (int)b);}
+
+inline GC_ALLOC_FLAGS operator&(GC_ALLOC_FLAGS a, GC_ALLOC_FLAGS b)
+{return (GC_ALLOC_FLAGS)((int)a & (int)b);}
+
+inline GC_ALLOC_FLAGS operator~(GC_ALLOC_FLAGS a)
+{return (GC_ALLOC_FLAGS)(~(int)a);}
+
+inline GC_ALLOC_FLAGS& operator|=(GC_ALLOC_FLAGS& a, GC_ALLOC_FLAGS b)
+{return (GC_ALLOC_FLAGS&)((int&)a |= (int)b);}
+
+inline GC_ALLOC_FLAGS& operator&=(GC_ALLOC_FLAGS& a, GC_ALLOC_FLAGS b)
+{return (GC_ALLOC_FLAGS&)((int&)a &= (int)b);}
 
 #if defined(USE_CHECKED_OBJECTREFS) && !defined(_NOVM)
 #define OBJECTREF_TO_UNCHECKED_OBJECTREF(objref)    (*((_UNCHECKED_OBJECTREF*)&(objref)))
```
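The operator overloads above are what make the new enum usable as a drop-in for the old `#define` bitmasks: for an unscoped C++ enum, `a | b` yields `int`, which cannot be passed back where a `GC_ALLOC_FLAGS` is expected without a cast. A minimal standalone demonstration (re-declaring a cut-down copy of the enum purely for illustration):

```cpp
// Why the operator| overload exists: it keeps the combined value typed
// as GC_ALLOC_FLAGS instead of decaying to int.
#include <cstdio>

enum GC_ALLOC_FLAGS
{
    GC_ALLOC_NO_FLAGS         = 0,
    GC_ALLOC_FINALIZE         = 1,
    GC_ALLOC_ZEROING_OPTIONAL = 16,
};

inline GC_ALLOC_FLAGS operator|(GC_ALLOC_FLAGS a, GC_ALLOC_FLAGS b)
{ return (GC_ALLOC_FLAGS)((int)a | (int)b); }

void alloc_with(GC_ALLOC_FLAGS flags)
{
    printf("flags = %d\n", (int)flags);
}

int main()
{
    // compiles because operator| returns GC_ALLOC_FLAGS, not int
    alloc_with(GC_ALLOC_FINALIZE | GC_ALLOC_ZEROING_OPTIONAL);
}
```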
```diff
diff --git a/src/gc/gcpriv.h b/src/gc/gcpriv.h
index c1d7b7f4d2..2288ffee25 100644
--- a/src/gc/gcpriv.h
+++ b/src/gc/gcpriv.h
@@ -1219,7 +1219,8 @@ public:
     PER_HEAP
     CObjectHeader* allocate (size_t jsize,
-                             alloc_context* acontext);
+                             alloc_context* acontext,
+                             uint32_t flags);
 
 #ifdef MULTIPLE_HEAPS
     static
@@ -1241,7 +1242,7 @@ public:
     // Note: This is an instance method, but the heap instance is only used for
     // lowest_address and highest_address, which are currently the same accross all heaps.
     PER_HEAP
-    CObjectHeader* allocate_large_object (size_t size, int64_t& alloc_bytes);
+    CObjectHeader* allocate_large_object (size_t size, uint32_t flags, int64_t& alloc_bytes);
 
 #ifdef FEATURE_STRUCTALIGN
     PER_HEAP
@@ -1449,13 +1450,13 @@ protected:
     void fire_etw_pin_object_event (uint8_t* object, uint8_t** ppObject);
 
     PER_HEAP
-    size_t limit_from_size (size_t size, size_t room, int gen_number,
+    size_t limit_from_size (size_t size, uint32_t flags, size_t room, int gen_number,
                             int align_const);
     PER_HEAP
-    allocation_state try_allocate_more_space (alloc_context* acontext, size_t jsize,
+    allocation_state try_allocate_more_space (alloc_context* acontext, size_t jsize, uint32_t flags,
                                               int alloc_generation_number);
     PER_HEAP_ISOLATED
-    BOOL allocate_more_space (alloc_context* acontext, size_t jsize,
+    BOOL allocate_more_space (alloc_context* acontext, size_t jsize, uint32_t flags,
                               int alloc_generation_number);
     PER_HEAP
@@ -1470,6 +1471,7 @@ protected:
     BOOL a_fit_free_list_p (int gen_number,
                             size_t size,
                             alloc_context* acontext,
+                            uint32_t flags,
                             int align_const);
 
 #ifdef BACKGROUND_GC
@@ -1483,6 +1485,7 @@ protected:
     void bgc_loh_alloc_clr (uint8_t* alloc_start,
                             size_t size,
                             alloc_context* acontext,
+                            uint32_t flags,
                             int align_const,
                             int lock_index,
                             BOOL check_used_p,
@@ -1524,6 +1527,7 @@ protected:
     PER_HEAP
     BOOL a_fit_free_list_large_p (size_t size,
                                   alloc_context* acontext,
+                                  uint32_t flags,
                                   int align_const);
 
     PER_HEAP
@@ -1531,12 +1535,14 @@ protected:
                               heap_segment* seg,
                               size_t size,
                               alloc_context* acontext,
+                              uint32_t flags,
                               int align_const,
                               BOOL* commit_failed_p);
     PER_HEAP
     BOOL loh_a_fit_segment_end_p (int gen_number,
                                   size_t size,
                                   alloc_context* acontext,
+                                  uint32_t flags,
                                   int align_const,
                                   BOOL* commit_failed_p,
                                   oom_reason* oom_r);
@@ -1570,6 +1576,7 @@ protected:
     BOOL soh_try_fit (int gen_number,
                       size_t size,
                       alloc_context* acontext,
+                      uint32_t flags,
                       int align_const,
                       BOOL* commit_failed_p,
                       BOOL* short_seg_end_p);
@@ -1577,6 +1584,7 @@ protected:
     BOOL loh_try_fit (int gen_number,
                       size_t size,
                       alloc_context* acontext,
+                      uint32_t flags,
                       int align_const,
                       BOOL* commit_failed_p,
                       oom_reason* oom_r);
@@ -1585,6 +1593,7 @@ protected:
     allocation_state allocate_small (int gen_number,
                                      size_t size,
                                      alloc_context* acontext,
+                                     uint32_t flags,
                                      int align_const);
 
 #ifdef RECORD_LOH_STATE
@@ -1607,6 +1616,7 @@ protected:
     allocation_state allocate_large (int gen_number,
                                      size_t size,
                                      alloc_context* acontext,
+                                     uint32_t flags,
                                      int align_const);
 
     PER_HEAP_ISOLATED
@@ -1853,8 +1863,8 @@ protected:
     void adjust_limit (uint8_t* start, size_t limit_size, generation* gen,
                        int gen_number);
     PER_HEAP
-    void adjust_limit_clr (uint8_t* start, size_t limit_size,
-                           alloc_context* acontext, heap_segment* seg,
+    void adjust_limit_clr (uint8_t* start, size_t limit_size, size_t size,
+                           alloc_context* acontext, uint32_t flags, heap_segment* seg,
                            int align_const, int gen_number);
     PER_HEAP
     void leave_allocation_segment (generation* gen);
```
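Taken together, the gcpriv.h declarations show the plumbing pattern of this change: every layer of the allocation call chain gains a `uint32_t flags` parameter that is passed through untouched until the innermost fit/clear routine consumes it. A toy model of that pattern (hypothetical names, behavior reduced to a trace):

```cpp
// Sketch only: mirrors the shape of the call chain, not the real logic.
#include <cstdint>
#include <cstdio>

static const uint32_t ZEROING_OPTIONAL = 16;

static bool fit_existing_space(size_t size, uint32_t flags)
{
    // the innermost layer finally acts on the flag
    printf("fit %zu bytes, %s\n", size,
           (flags & ZEROING_OPTIONAL) ? "skip clearing" : "clear memory");
    return true;
}

static bool try_allocate_more_space(size_t size, uint32_t flags)
{
    return fit_existing_space(size, flags); // pass-through layer
}

static bool allocate_more_space(size_t size, uint32_t flags)
{
    return try_allocate_more_space(size, flags); // pass-through layer
}

int main()
{
    allocate_more_space(64, 0);                     // default zeroed allocation
    allocate_more_space(1 << 20, ZEROING_OPTIONAL); // uninitialized payload
}
```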