author    Peter Sollich <petersol@microsoft.com>  2019-06-25 15:55:05 +0200
committer GitHub <noreply@github.com>             2019-06-25 15:55:05 +0200
commit    899f7faa6e2b0b2a9c9a2aaa5a78909a3ae79fe3 (patch)
tree      67e2eabc1319471b6b54711c001109976065a4fd
parent    3eac718c85cde9121fb5f03da58bb949914b51e1 (diff)
Brick table (#25349)
Fix brick table logic to fix a perf issue in several ASP.NET tests, and remove #ifdef FFIND_OBJECT.

What I observed was that some GCs spent a lot of time in find_first_object called from find_object, which is called during stack scanning to find the containing object for interior pointers. A substantial fraction of generation 0 was being scanned, indicating that the brick table logic didn't work properly in these cases.

The root cause was that the brick table entries were not being set in adjust_limit_clr if the allocation was satisfied from the free list in gen0 instead of from newly allocated space. This is the case if there are pinned objects in gen0 as well.

The main fix is in adjust_limit_clr - if the allocation is satisfied from the free list, seg is nullptr; the change is to set the bricks in this case as well, if we are allocating in gen0 and the allocated piece is above a reasonable size threshold.

The bricks are not always set during allocation - instead, when we detect an interior pointer during GC, we make the allocator set the bricks during the next GC cycles by setting gen0_must_clear_bricks. I changed the way this is handled for server GC (multiple heaps). We used to multiply the decay time by the number of heaps (gc_heap::n_heaps), but only applied it to the single heap where an interior pointer was found. Instead, I think it's better to set gen0_must_clear_bricks for all heaps, but leave the decay time unchanged compared to workstation GC.

Maoni suggested removing the #ifdef FFIND_OBJECT - interior pointers are not going away, so the #ifdefs are unnecessary clutter.

Addressed code review feedback:
- add parentheses as per GC coding conventions
- use max instead of an if-statement
- merge the body of the for-loop over all heaps into the existing for-loop
-rw-r--r--  src/gc/gc.cpp    | 32
-rw-r--r--  src/gc/gcpriv.h  |  3
2 files changed, 17 insertions, 18 deletions
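For readers unfamiliar with the brick table this change relies on, here is a minimal, self-contained sketch of the lookup idea behind find_object: each brick covers a fixed-size slice of the ephemeral segment, a positive entry records an object start inside the brick, and a non-positive entry tells the search how far to walk back before scanning forward. The brick size, the entry encoding, and the object list used for the forward scan are simplifications for illustration, not the actual gc.cpp implementation.

```cpp
// Simplified model of a brick-table lookup, for illustration only; the
// constants and helpers here are assumptions, not CoreCLR's actual code.
#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

constexpr size_t brick_size = 4096;       // assumed brick granularity
static uint8_t heap[16 * brick_size];     // toy stand-in for generation 0
static int16_t brick_table[16];           // one entry per brick

static size_t brick_of(uint8_t* p) { return (size_t)(p - heap) / brick_size; }

// Positive entry: offset of an object start within the brick, plus 1 so that
// 0 can mean "unset". Non-positive entry: "walk back that many bricks".
static void set_brick(uint8_t* obj)
{
    size_t b = brick_of(obj);
    brick_table[b] = (int16_t)((obj - (heap + b * brick_size)) + 1);
}

// Find the object containing an interior pointer: walk bricks backwards until
// one records an object start, then scan forward over the known objects.
static uint8_t* find_object(uint8_t* interior,
                            const std::vector<std::pair<uint8_t*, size_t>>& objs)
{
    ptrdiff_t b = (ptrdiff_t)brick_of(interior);
    while (b >= 0 && brick_table[b] <= 0)
        b += (brick_table[b] < 0) ? brick_table[b] : -1;  // follow back-links
    if (b < 0)
        return nullptr;
    uint8_t* scan_start = heap + b * brick_size + (brick_table[b] - 1);
    for (const auto& o : objs)                            // forward scan
        if (o.first >= scan_start && interior >= o.first && interior < o.first + o.second)
            return o.first;
    return nullptr;
}

int main()
{
    // Two toy objects; the first spans several bricks.
    std::vector<std::pair<uint8_t*, size_t>> objs = {
        { heap + 100, 9000 }, { heap + 9100, 500 }
    };
    for (int16_t& e : brick_table) e = -1;   // unknown: go back one brick
    for (const auto& o : objs) set_brick(o.first);

    uint8_t* interior = heap + 6000;         // interior pointer into the first object
    printf("containing object starts at offset %td\n", find_object(interior, objs) - heap);
}
```

If the bricks are never set for free-list allocations (the bug this commit fixes), the backward walk degenerates into scanning from much earlier in gen0, which matches the large generation 0 scans described above.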
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index 37057669b8..0917527ab4 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -2925,9 +2925,7 @@ heap_segment* gc_heap::saved_loh_segment_no_gc = 0;
BOOL gc_heap::gen0_bricks_cleared = FALSE;
-#ifdef FFIND_OBJECT
int gc_heap::gen0_must_clear_bricks = 0;
-#endif //FFIND_OBJECT
#ifdef FEATURE_PREMORTEM_FINALIZATION
CFinalize* gc_heap::finalize_queue = 0;
@@ -11621,9 +11619,9 @@ void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size, size_t size,
}
//this portion can be done after we release the lock
- if (seg == ephemeral_heap_segment)
+ if (seg == ephemeral_heap_segment ||
+ ((seg == nullptr) && (gen_number == 0) && (limit_size >= CLR_SIZE / 2)))
{
-#ifdef FFIND_OBJECT
if (gen0_must_clear_bricks > 0)
{
//set the brick table to speed up find_object
@@ -11639,7 +11637,6 @@ void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size, size_t size,
*x = -1;
}
else
-#endif //FFIND_OBJECT
{
gen0_bricks_cleared = FALSE;
}
@@ -16161,6 +16158,7 @@ void gc_heap::gc1()
#ifdef FEATURE_LOH_COMPACTION
BOOL all_heaps_compacted_p = TRUE;
#endif //FEATURE_LOH_COMPACTION
+ int max_gen0_must_clear_bricks = 0;
for (int i = 0; i < gc_heap::n_heaps; i++)
{
gc_heap* hp = gc_heap::g_heaps[i];
@@ -16169,12 +16167,26 @@ void gc_heap::gc1()
#ifdef FEATURE_LOH_COMPACTION
all_heaps_compacted_p &= hp->loh_compacted_p;
#endif //FEATURE_LOH_COMPACTION
+ // compute max of gen0_must_clear_bricks over all heaps
+ max_gen0_must_clear_bricks = max(max_gen0_must_clear_bricks, hp->gen0_must_clear_bricks);
}
#ifdef FEATURE_LOH_COMPACTION
check_loh_compact_mode (all_heaps_compacted_p);
#endif //FEATURE_LOH_COMPACTION
+ // if max_gen0_must_clear_bricks > 0, distribute to all heaps -
+ // if one heap encountered an interior pointer during this GC,
+ // the next GC might see one on another heap
+ if (max_gen0_must_clear_bricks > 0)
+ {
+ for (int i = 0; i < gc_heap::n_heaps; i++)
+ {
+ gc_heap* hp = gc_heap::g_heaps[i];
+ hp->gen0_must_clear_bricks = max_gen0_must_clear_bricks;
+ }
+ }
+
fire_pevents();
pm_full_gc_init_or_clear();
@@ -17531,14 +17543,8 @@ uint8_t* gc_heap::find_object (uint8_t* interior, uint8_t* low)
set_brick (b, -1);
}
}
-#ifdef FFIND_OBJECT
//indicate that in the future this needs to be done during allocation
-#ifdef MULTIPLE_HEAPS
- gen0_must_clear_bricks = FFIND_DECAY*gc_heap::n_heaps;
-#else
gen0_must_clear_bricks = FFIND_DECAY;
-#endif //MULTIPLE_HEAPS
-#endif //FFIND_OBJECT
int brick_entry = get_brick_entry(brick_of (interior));
if (brick_entry == 0)
@@ -19991,10 +19997,8 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
#endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
}
-#ifdef FFIND_OBJECT
if (gen0_must_clear_bricks > 0)
gen0_must_clear_bricks--;
-#endif //FFIND_OBJECT
size_t last_promoted_bytes = 0;
@@ -26128,10 +26132,8 @@ void gc_heap::background_mark_phase ()
start = GetCycleCount32();
#endif //TIME_GC
-#ifdef FFIND_OBJECT
if (gen0_must_clear_bricks > 0)
gen0_must_clear_bricks--;
-#endif //FFIND_OBJECT
background_soh_alloc_count = 0;
background_loh_alloc_count = 0;
diff --git a/src/gc/gcpriv.h b/src/gc/gcpriv.h
index ef59a3d925..7c8286babf 100644
--- a/src/gc/gcpriv.h
+++ b/src/gc/gcpriv.h
@@ -127,7 +127,6 @@ inline void FATAL_GC_ERROR()
//#define SHORT_PLUGS //keep plug short
-#define FFIND_OBJECT //faster find_object, slower allocation
#define FFIND_DECAY 7 //Number of GC for which fast find will be active
#ifndef MAX_LONGPATH
@@ -3676,10 +3675,8 @@ protected:
PER_HEAP
BOOL gen0_bricks_cleared;
-#ifdef FFIND_OBJECT
PER_HEAP
int gen0_must_clear_bricks;
-#endif //FFIND_OBJECT
PER_HEAP_ISOLATED
bool maxgen_size_inc_p;
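The other half of the change is how gen0_must_clear_bricks decays under server GC. Below is a toy simulation of the scheme after this commit; apart from FFIND_DECAY and gen0_must_clear_bricks, the names are made up for the sketch. One heap re-arms the counter when find_object sees an interior pointer, gc1() distributes the maximum over all heaps, and each mark phase decrements it, so every heap keeps setting bricks for roughly FFIND_DECAY GCs.

```cpp
#include <algorithm>
#include <cstdio>

constexpr int FFIND_DECAY = 7;           // number of GCs fast find stays armed
constexpr int n_heaps = 4;               // pretend server GC with 4 heaps
int gen0_must_clear_bricks[n_heaps] = {};

// One simulated GC: decay first (mark phase), then possibly re-arm on the heap
// that saw an interior pointer (find_object), then distribute the max (gc1).
void simulate_gc(bool interior_pointer_on_heap0)
{
    for (int& c : gen0_must_clear_bricks)
        if (c > 0) c--;
    if (interior_pointer_on_heap0)
        gen0_must_clear_bricks[0] = FFIND_DECAY;
    int max_clear = 0;
    for (int c : gen0_must_clear_bricks)
        max_clear = std::max(max_clear, c);
    if (max_clear > 0)
        for (int& c : gen0_must_clear_bricks)
            c = max_clear;
}

int main()
{
    simulate_gc(true);                   // GC 1 finds an interior pointer on heap 0
    for (int gc = 2; gc <= 10; gc++)
    {
        simulate_gc(false);
        printf("GC %2d: heap 3 sets gen0 bricks during allocation: %s\n",
               gc, (gen0_must_clear_bricks[3] > 0) ? "yes" : "no");
    }
}
```

Before this change, only the heap that saw the interior pointer would have been armed (with the decay multiplied by n_heaps), so allocations on the other heaps would still skip setting bricks.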