Diffstat (limited to 'src/gc/gcpriv.h')
-rw-r--r--  src/gc/gcpriv.h | 149
 1 file changed, 128 insertions(+), 21 deletions(-)
diff --git a/src/gc/gcpriv.h b/src/gc/gcpriv.h
index ded7a6bee7..18785d5185 100644
--- a/src/gc/gcpriv.h
+++ b/src/gc/gcpriv.h
@@ -402,11 +402,12 @@ enum gc_latency_level
enum gc_tuning_point
{
- tuning_deciding_condemned_gen,
- tuning_deciding_full_gc,
- tuning_deciding_compaction,
- tuning_deciding_expansion,
- tuning_deciding_promote_ephemeral
+ tuning_deciding_condemned_gen = 0,
+ tuning_deciding_full_gc = 1,
+ tuning_deciding_compaction = 2,
+ tuning_deciding_expansion = 3,
+ tuning_deciding_promote_ephemeral = 4,
+ tuning_deciding_short_on_seg = 5
};
#if defined(TRACE_GC) && defined(BACKGROUND_GC)
@@ -431,10 +432,11 @@ enum allocation_state
a_state_start = 0,
a_state_can_allocate,
a_state_cant_allocate,
+ // This could be due to having to wait till a GC is done,
+ // or having to try a different heap.
+ a_state_retry_allocate,
a_state_try_fit,
a_state_try_fit_new_seg,
- a_state_try_fit_new_seg_after_cg,
- a_state_try_fit_no_seg,
a_state_try_fit_after_cg,
a_state_try_fit_after_bgc,
a_state_try_free_full_seg_in_bgc,
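The new a_state_retry_allocate state, per its comment above, covers two situations: the allocation has to wait till a GC is done, or it has to be retried on a different heap. Below is a minimal, self-contained sketch of how a caller might act on that state; gc_in_progress, wait_for_gc_to_finish and the cut-down enum are illustrative stand-ins, and only should_retry_other_heap corresponds to a declaration added in this diff.

#include <cstddef>

// Cut-down stand-in for the allocation_state enum in gcpriv.h.
enum allocation_state_sketch
{
    a_state_start,
    a_state_can_allocate,
    a_state_cant_allocate,
    a_state_retry_allocate
};

static bool gc_in_progress ()        { return false; }   // stub
static void wait_for_gc_to_finish () {}                   // stub
static bool should_retry_other_heap (size_t size)         // declared later in this diff
{
    (void)size;
    return true;
}

// Hypothetical dispatch on a_state_retry_allocate.
static allocation_state_sketch on_retry_allocate (size_t size)
{
    // "having to wait till a GC is done"
    if (gc_in_progress ())
    {
        wait_for_gc_to_finish ();
        return a_state_start;   // rerun the allocation state machine
    }
    // "or having to try a different heap"
    return should_retry_other_heap (size) ? a_state_start : a_state_cant_allocate;
}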
@@ -1202,6 +1204,14 @@ public:
static
void shutdown_gc();
+ // If a hard limit is specified, take it into consideration; this
+ // means the # of heaps may be modified.
+ PER_HEAP_ISOLATED
+ size_t get_segment_size_hard_limit (uint32_t* num_heaps, bool should_adjust_num_heaps);
+
+ PER_HEAP_ISOLATED
+ bool should_retry_other_heap (size_t size);
+
PER_HEAP
CObjectHeader* allocate (size_t jsize,
alloc_context* acontext);
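get_segment_size_hard_limit is the piece that derives the segment size (and possibly a new heap count) from the hard limit. Here is a minimal sketch of the policy described in point 2 of the comment block added later in this diff: segment size = limit / number of heaps with a 16mb floor, optionally lowering the heap count instead of letting segments shrink below the floor. The function and constant names are illustrative, not the gc.cpp implementation, and it assumes *num_heaps >= 1.

#include <cstddef>
#include <cstdint>

static size_t segment_size_under_hard_limit (size_t heap_hard_limit,
                                             uint32_t* num_heaps,
                                             bool should_adjust_num_heaps)
{
    const size_t min_seg_size = 16 * 1024 * 1024;   // 16mb minimum per the policy

    size_t seg_size = heap_hard_limit / *num_heaps;
    if ((seg_size < min_seg_size) && should_adjust_num_heaps)
    {
        // Prefer fewer, larger heaps over many tiny segments.
        uint32_t adjusted = (uint32_t)(heap_hard_limit / min_seg_size);
        *num_heaps = (adjusted == 0) ? 1 : adjusted;
        seg_size = heap_hard_limit / *num_heaps;
    }
    return (seg_size < min_seg_size) ? min_seg_size : seg_size;
}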
@@ -1214,8 +1224,6 @@ public:
void gc_thread_stub (void* arg);
#endif //MULTIPLE_HEAPS
- CObjectHeader* try_fast_alloc (size_t jsize);
-
// For LOH allocations we only update the alloc_bytes_loh in allocation
// context - we don't actually use the ptr/limit from it so I am
// making this explicit by not passing in the alloc_context.
@@ -1431,8 +1439,8 @@ protected:
size_t limit_from_size (size_t size, size_t room, int gen_number,
int align_const);
PER_HEAP
- int try_allocate_more_space (alloc_context* acontext, size_t jsize,
- int alloc_generation_number);
+ allocation_state try_allocate_more_space (alloc_context* acontext, size_t jsize,
+ int alloc_generation_number);
PER_HEAP
BOOL allocate_more_space (alloc_context* acontext, size_t jsize,
int alloc_generation_number);
@@ -1561,10 +1569,10 @@ protected:
oom_reason* oom_r);
PER_HEAP
- BOOL allocate_small (int gen_number,
- size_t size,
- alloc_context* acontext,
- int align_const);
+ allocation_state allocate_small (int gen_number,
+ size_t size,
+ alloc_context* acontext,
+ int align_const);
#ifdef RECORD_LOH_STATE
#define max_saved_loh_states 12
@@ -1583,10 +1591,10 @@ protected:
void add_saved_loh_state (allocation_state loh_state_to_save, EEThreadId thread_id);
#endif //RECORD_LOH_STATE
PER_HEAP
- BOOL allocate_large (int gen_number,
- size_t size,
- alloc_context* acontext,
- int align_const);
+ allocation_state allocate_large (int gen_number,
+ size_t size,
+ alloc_context* acontext,
+ int align_const);
PER_HEAP_ISOLATED
int init_semi_shared();
@@ -1655,6 +1663,12 @@ protected:
void decommit_heap_segment_pages (heap_segment* seg, size_t extra_space);
PER_HEAP
void decommit_heap_segment (heap_segment* seg);
+ PER_HEAP_ISOLATED
+ bool virtual_alloc_commit_for_heap (void* addr, size_t size, int h_number);
+ PER_HEAP_ISOLATED
+ bool virtual_commit (void* address, size_t size, int h_number=-1, bool* hard_limit_exceeded_p=NULL);
+ PER_HEAP_ISOLATED
+ bool virtual_decommit (void* address, size_t size, int h_number=-1);
PER_HEAP
void clear_gen0_bricks();
#ifdef BACKGROUND_GC
@@ -1749,7 +1763,7 @@ protected:
BOOL find_card (uint32_t* card_table, size_t& card,
size_t card_word_end, size_t& end_card);
PER_HEAP
- BOOL grow_heap_segment (heap_segment* seg, uint8_t* high_address);
+ BOOL grow_heap_segment (heap_segment* seg, uint8_t* high_address, bool* hard_limit_exceeded_p=NULL);
PER_HEAP
int grow_heap_segment (heap_segment* seg, uint8_t* high_address, uint8_t* old_loc, size_t size, BOOL pad_front_p REQD_ALIGN_AND_OFFSET_DCL);
PER_HEAP
@@ -2489,6 +2503,9 @@ protected:
PER_HEAP
void save_ephemeral_generation_starts();
+ PER_HEAP_ISOLATED
+ size_t get_gen0_min_size();
+
PER_HEAP
void set_static_data();
@@ -2523,7 +2540,10 @@ protected:
size_t get_total_committed_size();
PER_HEAP_ISOLATED
size_t get_total_fragmentation();
-
+ PER_HEAP_ISOLATED
+ size_t get_total_gen_fragmentation (int gen_number);
+ PER_HEAP_ISOLATED
+ size_t get_total_gen_estimated_reclaim (int gen_number);
PER_HEAP_ISOLATED
void get_memory_info (uint32_t* memory_load,
uint64_t* available_physical=NULL,
@@ -2532,6 +2552,9 @@ protected:
size_t generation_size (int gen_number);
PER_HEAP_ISOLATED
size_t get_total_survived_size();
+ // this also resets allocated_since_last_gc
+ PER_HEAP_ISOLATED
+ size_t get_total_allocated_since_last_gc();
PER_HEAP
size_t get_current_allocated();
PER_HEAP_ISOLATED
@@ -2559,14 +2582,22 @@ protected:
PER_HEAP
size_t committed_size();
PER_HEAP
+ size_t committed_size (bool loh_p, size_t* allocated);
+ PER_HEAP
size_t approximate_new_allocation();
PER_HEAP
size_t end_space_after_gc();
PER_HEAP
+ size_t estimated_reclaim (int gen_number);
+ PER_HEAP
BOOL decide_on_compacting (int condemned_gen_number,
size_t fragmentation,
BOOL& should_expand);
PER_HEAP
+ BOOL sufficient_space_end_seg (uint8_t* start, uint8_t* seg_end,
+ size_t end_space_required,
+ gc_tuning_point tp);
+ PER_HEAP
BOOL ephemeral_gen_fit_p (gc_tuning_point tp);
PER_HEAP
void reset_large_object (uint8_t* o);
@@ -3036,6 +3067,79 @@ public:
PER_HEAP_ISOLATED
uint64_t entry_available_physical_mem;
+ // Hard limit for the heap, only supported on 64-bit.
+ //
+ // Users can specify a hard limit for the GC heap via GCHeapHardLimit or
+ // a percentage of the physical memory this process is allowed to use via
+ // GCHeapHardLimitPercent. This is the maximum commit size the GC heap
+ // can consume.
+ //
+ // The way the hard limit is decided is:
+ //
+ // If the GCHeapHardLimit config is specified that's the value we use;
+ // else if the GCHeapHardLimitPercent config is specified we use that
+ // value;
+ // else if the process is running inside a container with a memory limit,
+ // the hard limit is
+ // max (20mb, 75% of the memory limit on the container).
+ //
+ // Due to the different perf characteristics of containers we make the
+ // following policy changes:
+ //
+ // 1) We no longer affinitize Server GC threads by default, because we
+ // wouldn't want all the containers on the machine to affinitize to only
+ // the first few CPUs (and we don't know which CPUs are already used). You
+ // can, however, override this by specifying the GCHeapAffinitizeMask
+ // config, which decides which CPUs the process will affinitize the
+ // Server GC threads to.
+ //
+ // 2) Segment size is determined by limit / number of heaps but has a
+ // minimum value of 16mb. This can be changed by specifying the number
+ // of heaps via the GCHeapCount config. The minimum size is to avoid
+ // the scenario where the hard limit is small but the process can use
+ // many procs, which would leave us with tiny segments that make no sense.
+ //
+ // 3) LOH compaction occurs automatically if needed.
+ //
+ // Since we allow both gen0 and gen3 allocations, and we don't know
+ // the distribution (and it's unrealistic to ask users to specify
+ // it), we reserve memory this way -
+ //
+ // For SOH we reserve (limit / number of heaps) per heap.
+ // For LOH we reserve (limit * 2 / number of heaps) per heap.
+ //
+ // This means the following -
+ //
+ // + we never need to acquire new segments. This simplifies the perf
+ // calculations a lot.
+ //
+ // + we now need a different definition of "end of seg" because we
+ // need to make sure the total does not exceed the limit.
+ //
+ // + if we detect that we exceed the commit limit in the allocator we
+ // wouldn't want to treat that as a normal commit failure because that
+ // would mean we always do full compacting GCs.
+ //
+ // TODO: some of the logic here applies to the general case as well,
+ // such as LOH automatic compaction. However, it will require more
+ // testing to change the general case.
+ PER_HEAP_ISOLATED
+ size_t heap_hard_limit;
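The selection policy spelled out above (explicit GCHeapHardLimit, then GCHeapHardLimitPercent, then max(20mb, 75%) of a container's memory limit) is simple enough to sketch directly. The function below is an illustrative, self-contained rendering of that decision; the parameter names are assumptions, and the real logic lives in gc.cpp rather than this header.

#include <cstddef>
#include <cstdint>
#include <algorithm>

static size_t compute_heap_hard_limit (size_t   config_hard_limit,         // GCHeapHardLimit, 0 if unset
                                       uint32_t config_hard_limit_percent, // GCHeapHardLimitPercent, 0 if unset
                                       uint64_t total_physical_mem,
                                       uint64_t container_memory_limit)    // 0 if not in a container
{
    if (config_hard_limit != 0)
        return config_hard_limit;

    if (config_hard_limit_percent != 0)
        return (size_t)(total_physical_mem * config_hard_limit_percent / 100);

    if (container_memory_limit != 0)
    {
        const uint64_t min_limit = 20 * 1024 * 1024;                          // 20mb floor
        return (size_t)std::max (min_limit, container_memory_limit * 3 / 4); // 75% of the container limit
    }

    return 0; // no hard limit
}

With a limit in hand, the reservation policy above then sets aside (limit / number of heaps) per heap for SOH and (limit * 2 / number of heaps) per heap for LOH, which is why new segments never need to be acquired.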
+
+ PER_HEAP_ISOLATED
+ CLRCriticalSection check_commit_cs;
+
+ PER_HEAP_ISOLATED
+ size_t current_total_committed;
+
+ // This is what GC uses for its own bookkeeping.
+ PER_HEAP_ISOLATED
+ size_t current_total_committed_bookkeeping;
+
+ // This is what GC's own bookkeeping consumes.
+ PER_HEAP_ISOLATED
+ size_t current_total_committed_gc_own;
+
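check_commit_cs and current_total_committed support enforcing the limit at commit time: virtual_commit (declared earlier in this diff with an optional hard_limit_exceeded_p out-parameter) can account every commit against heap_hard_limit and report "limit exceeded" separately from an ordinary OS commit failure, as the comment block above calls for. Below is a self-contained sketch under those assumptions, using std::mutex as a stand-in for CLRCriticalSection and a fake OS commit; it is not the gc.cpp implementation.

#include <cstddef>
#include <mutex>

static std::mutex s_check_commit_cs;             // stand-in for CLRCriticalSection check_commit_cs
static size_t     s_current_total_committed = 0;
static size_t     s_heap_hard_limit = 0;         // 0 means no hard limit configured

static bool os_commit (void* address, size_t size) { (void)address; (void)size; return true; } // stub

static bool virtual_commit_sketch (void* address, size_t size, bool* hard_limit_exceeded_p = NULL)
{
    if (hard_limit_exceeded_p)
        *hard_limit_exceeded_p = false;

    if (s_heap_hard_limit != 0)
    {
        std::lock_guard<std::mutex> lock (s_check_commit_cs);
        if ((s_current_total_committed + size) > s_heap_hard_limit)
        {
            // Exceeding the hard limit is reported separately so callers
            // don't treat it like a normal commit failure.
            if (hard_limit_exceeded_p)
                *hard_limit_exceeded_p = true;
            return false;
        }
        s_current_total_committed += size;
    }

    if (os_commit (address, size))
        return true;

    // Roll back the accounting if the OS-level commit failed.
    if (s_heap_hard_limit != 0)
    {
        std::lock_guard<std::mutex> lock (s_check_commit_cs);
        s_current_total_committed -= size;
    }
    return false;
}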
PER_HEAP_ISOLATED
size_t last_gc_index;
@@ -3618,6 +3722,9 @@ protected:
PER_HEAP_ISOLATED
size_t num_provisional_triggered;
+ PER_HEAP
+ size_t allocated_since_last_gc;
+
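allocated_since_last_gc is the per-heap counter behind get_total_allocated_since_last_gc(), whose comment notes that reading the total also resets it. A tiny illustrative sketch of that sum-and-reset behavior with a stand-in heap type (not the gc.cpp implementation):

#include <cstddef>
#include <vector>

struct heap_stub { size_t allocated_since_last_gc = 0; };   // stand-in for a gc_heap

static size_t total_allocated_since_last_gc (std::vector<heap_stub>& heaps)
{
    size_t total = 0;
    for (auto& hp : heaps)
    {
        total += hp.allocated_since_last_gc;
        hp.allocated_since_last_gc = 0;   // "this also resets allocated_since_last_gc"
    }
    return total;
}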
#ifdef BACKGROUND_GC
PER_HEAP_ISOLATED
size_t ephemeral_fgc_counts[max_generation];