author     Maoni0 <maonis@microsoft.com>    2016-07-12 14:39:07 -0700
committer  Maoni0 <maonis@microsoft.com>    2016-07-12 14:39:07 -0700
commit     43c102efb157c6b6c39457284a17473388822725 (patch)
tree       b2ce5bc79a8cf002a4a9f515857529ab87075db2 /src/gc
parent     adb2188ed70751f70a1b9e06aa497bc027cdf8a8 (diff)
download   coreclr-43c102efb157c6b6c39457284a17473388822725.tar.gz
           coreclr-43c102efb157c6b6c39457284a17473388822725.tar.bz2
           coreclr-43c102efb157c6b6c39457284a17473388822725.zip
Fixed race condition in setting dd_new_allocation
Sequence:
+ At the end of a BGC we call compute_new_dynamic_data, then switch to preemptive mode; at this point dd_new_allocation isn't set yet.
+ An FGC happens and saves new_allocation in gc_new_allocation.
+ The FGC finishes, the BGC keeps running and saves gc_new_allocation in new_allocation.

The fix is to finish filling in the values for new_allocation before we allow an FGC to happen. We should really get rid of gc_new_allocation in the future. I am making this fix to unblock my work on the fragmentation stuff.
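For context, the sketch below shows the shape of the interleaving and of the fix in simplified form. Only the two field names are taken from dynamic_data in gc.cpp; the Budget struct, the mutex (standing in for "an FGC cannot run yet"), and the bgc_old/bgc_fixed functions are hypothetical scaffolding for illustration, not the real GC code.

    // Minimal, self-contained sketch of the ordering problem and the fix.
    // Field names mirror gc.cpp; everything else is invented for illustration.
    #include <mutex>
    #include <cstdio>

    struct Budget
    {
        long gc_new_allocation = 0;  // staging value written during a GC
        long new_allocation    = 0;  // value the allocator actually consumes
    };

    Budget dd;
    std::mutex gc_lock;              // stands in for "an FGC cannot run yet"

    // Old scheme (simplified): the BGC computes the staging value while an
    // FGC is still blocked, but defers the copy into new_allocation until
    // after it has switched to preemptive mode.
    void bgc_old (long bgc_budget)
    {
        {
            std::lock_guard<std::mutex> hold (gc_lock);
            dd.gc_new_allocation = bgc_budget;       // compute_new_dynamic_data
        }                                            // <-- an FGC can run in this window
        std::lock_guard<std::mutex> hold (gc_lock);
        dd.new_allocation = dd.gc_new_allocation;    // late copy in gc1(): may
                                                     // publish whatever the FGC left behind
    }

    // Fixed scheme: both fields are filled in before an FGC is allowed,
    // so the pair is always consistent.
    void bgc_fixed (long bgc_budget)
    {
        std::lock_guard<std::mutex> hold (gc_lock);
        dd.gc_new_allocation = bgc_budget;
        dd.new_allocation    = dd.gc_new_allocation;
    }

    int main ()
    {
        bgc_fixed (1024);
        std::printf ("new_allocation = %ld\n", dd.new_allocation);
        return 0;
    }

The fixed version mirrors what the hunks below do: dd_new_allocation is assigned right where dd_gc_new_allocation is computed, instead of in a separate loop in gc1() that runs after the BGC has already switched to preemptive mode.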
Diffstat (limited to 'src/gc')
-rw-r--r--  src/gc/gc.cpp  19
1 file changed, 4 insertions(+), 15 deletions(-)
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index ae4c55761d..a3792e1516 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -15342,21 +15342,6 @@ void gc_heap::gc1()
assert (ephemeral_high == heap_segment_reserved (ephemeral_heap_segment));
#endif //BACKGROUND_GC
- int bottom_gen = 0;
-#ifdef BACKGROUND_GC
- if (settings.concurrent)
- {
- bottom_gen = max_generation;
- }
-#endif //BACKGROUND_GC
- {
- for (int gen_number = bottom_gen; gen_number <= max_generation+1; gen_number++)
- {
- dynamic_data* dd = dynamic_data_of (gen_number);
- dd_new_allocation(dd) = dd_gc_new_allocation (dd);
- }
- }
-
if (fgn_maxgen_percent)
{
if (settings.condemned_generation == (max_generation - 1))
@@ -29848,6 +29833,7 @@ size_t gc_heap::compute_in (int gen_number)
}
dd_gc_new_allocation (dd) -= in;
+ dd_new_allocation (dd) = dd_gc_new_allocation (dd);
gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
gc_generation_data* gen_data = &(current_gc_data_per_heap->gen_data[gen_number]);
@@ -30020,6 +30006,8 @@ void gc_heap::compute_new_dynamic_data (int gen_number)
gen_data->npinned_surv = dd_survived_size (dd) - dd_pinned_survived_size (dd);
dd_gc_new_allocation (dd) = dd_desired_allocation (dd);
+ dd_new_allocation (dd) = dd_gc_new_allocation (dd);
+
//update counter
dd_promoted_size (dd) = out;
if (gen_number == max_generation)
@@ -30035,6 +30023,7 @@ void gc_heap::compute_new_dynamic_data (int gen_number)
dd_desired_allocation (dd) = desired_new_allocation (dd, out, max_generation+1, 0);
dd_gc_new_allocation (dd) = Align (dd_desired_allocation (dd),
get_alignment_constant (FALSE));
+ dd_new_allocation (dd) = dd_gc_new_allocation (dd);
gen_data = &(current_gc_data_per_heap->gen_data[max_generation+1]);
gen_data->size_after = total_gen_size;