summaryrefslogtreecommitdiff
path: root/src/gc
diff options
context:
space:
mode:
Diffstat (limited to 'src/gc')
-rw-r--r--src/gc/gc.cpp74
-rw-r--r--src/gc/gc.h8
-rw-r--r--src/gc/gcee.cpp12
-rw-r--r--src/gc/gcimpl.h42
-rw-r--r--src/gc/gcinterface.h62
-rw-r--r--src/gc/gcpriv.h16
-rw-r--r--src/gc/gcscan.h7
-rw-r--r--src/gc/objecthandle.cpp4
-rw-r--r--src/gc/objecthandle.h2
-rw-r--r--src/gc/sample/gcenv.ee.cpp4
10 files changed, 111 insertions, 120 deletions
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index bacef7f968..31ceeca113 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -5857,7 +5857,7 @@ struct fix_alloc_context_args
void fix_alloc_context(gc_alloc_context* acontext, void* param)
{
fix_alloc_context_args* args = (fix_alloc_context_args*)param;
- g_theGCHeap->FixAllocContext(acontext, FALSE, (void*)(size_t)(args->for_gc_p), args->heap);
+ g_theGCHeap->FixAllocContext(acontext, false, (void*)(size_t)(args->for_gc_p), args->heap);
}
void gc_heap::fix_allocation_contexts(BOOL for_gc_p)
@@ -21332,7 +21332,7 @@ void gc_heap::relocate_in_loh_compact()
generation_free_obj_space (gen)));
}
-void gc_heap::walk_relocation_for_loh (size_t profiling_context, record_surv_fn fn)
+void gc_heap::walk_relocation_for_loh (void* profiling_context, record_surv_fn fn)
{
generation* gen = large_object_generation;
heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
@@ -21362,7 +21362,7 @@ void gc_heap::walk_relocation_for_loh (size_t profiling_context, record_surv_fn
STRESS_LOG_PLUG_MOVE(o, (o + size), -reloc);
- fn (o, (o + size), reloc, profiling_context, settings.compaction, FALSE);
+ fn (o, (o + size), reloc, profiling_context, !!settings.compaction, false);
o = o + size;
if (o < heap_segment_allocated (seg))
@@ -24177,7 +24177,7 @@ void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, w
STRESS_LOG_PLUG_MOVE(plug, (plug + size), -last_plug_relocation);
ptrdiff_t reloc = settings.compaction ? last_plug_relocation : 0;
- (args->fn) (plug, (plug + size), reloc, args->profiling_context, settings.compaction, FALSE);
+ (args->fn) (plug, (plug + size), reloc, args->profiling_context, !!settings.compaction, false);
if (check_last_object_p)
{
@@ -24245,7 +24245,7 @@ void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args)
}
}
-void gc_heap::walk_relocation (size_t profiling_context, record_surv_fn fn)
+void gc_heap::walk_relocation (void* profiling_context, record_surv_fn fn)
{
generation* condemned_gen = generation_of (settings.condemned_generation);
uint8_t* start_address = generation_allocation_start (condemned_gen);
@@ -24301,7 +24301,7 @@ void gc_heap::walk_relocation (size_t profiling_context, record_surv_fn fn)
}
}
-void gc_heap::walk_survivors (record_surv_fn fn, size_t context, walk_surv_type type)
+void gc_heap::walk_survivors (record_surv_fn fn, void* context, walk_surv_type type)
{
if (type == walk_for_gc)
walk_survivors_relocation (context, fn);
@@ -24316,7 +24316,7 @@ void gc_heap::walk_survivors (record_surv_fn fn, size_t context, walk_surv_type
}
#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
-void gc_heap::walk_survivors_for_bgc (size_t profiling_context, record_surv_fn fn)
+void gc_heap::walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn)
{
// This should only be called for BGCs
assert(settings.concurrent);
@@ -24377,8 +24377,8 @@ void gc_heap::walk_survivors_for_bgc (size_t profiling_context, record_surv_fn f
plug_end,
0, // Reloc distance == 0 as this is non-compacting
profiling_context,
- FALSE, // Non-compacting
- TRUE); // BGC
+ false, // Non-compacting
+ true); // BGC
}
seg = heap_segment_next (seg);
@@ -30863,7 +30863,7 @@ BOOL gc_heap::large_object_marked (uint8_t* o, BOOL clearp)
return m;
}
-void gc_heap::walk_survivors_relocation (size_t profiling_context, record_surv_fn fn)
+void gc_heap::walk_survivors_relocation (void* profiling_context, record_surv_fn fn)
{
// Now walk the portion of memory that is actually being relocated.
walk_relocation (profiling_context, fn);
@@ -30876,7 +30876,7 @@ void gc_heap::walk_survivors_relocation (size_t profiling_context, record_surv_f
#endif //FEATURE_LOH_COMPACTION
}
-void gc_heap::walk_survivors_for_loh (size_t profiling_context, record_surv_fn fn)
+void gc_heap::walk_survivors_for_loh (void* profiling_context, record_surv_fn fn)
{
generation* gen = large_object_generation;
heap_segment* seg = heap_segment_rw (generation_start_segment (gen));;
@@ -30914,7 +30914,7 @@ void gc_heap::walk_survivors_for_loh (size_t profiling_context, record_surv_fn f
plug_end = o;
- fn (plug_start, plug_end, 0, profiling_context, FALSE, FALSE);
+ fn (plug_start, plug_end, 0, profiling_context, false, false);
}
else
{
@@ -33750,7 +33750,7 @@ HRESULT GCHeap::Initialize ()
////
// GC callback functions
-BOOL GCHeap::IsPromoted(Object* object)
+bool GCHeap::IsPromoted(Object* object)
{
#ifdef _DEBUG
((CObjectHeader*)object)->Validate();
@@ -33769,7 +33769,7 @@ BOOL GCHeap::IsPromoted(Object* object)
#ifdef BACKGROUND_GC
if (gc_heap::settings.concurrent)
{
- BOOL is_marked = (!((o < hp->background_saved_highest_address) && (o >= hp->background_saved_lowest_address))||
+ bool is_marked = (!((o < hp->background_saved_highest_address) && (o >= hp->background_saved_lowest_address))||
hp->background_marked (o));
return is_marked;
}
@@ -33810,11 +33810,11 @@ unsigned int GCHeap::WhichGeneration (Object* object)
return g;
}
-BOOL GCHeap::IsEphemeral (Object* object)
+bool GCHeap::IsEphemeral (Object* object)
{
uint8_t* o = (uint8_t*)object;
gc_heap* hp = gc_heap::heap_of (o);
- return hp->ephemeral_pointer_p (o);
+ return !!hp->ephemeral_pointer_p (o);
}
// Return NULL if can't find next object. When EE is not suspended,
@@ -33888,7 +33888,7 @@ BOOL GCHeap::IsInFrozenSegment (Object * object)
#endif //VERIFY_HEAP
// returns TRUE if the pointer is in one of the GC heaps.
-BOOL GCHeap::IsHeapPointer (void* vpObject, BOOL small_heap_only)
+bool GCHeap::IsHeapPointer (void* vpObject, bool small_heap_only)
{
STATIC_CONTRACT_SO_TOLERANT;
@@ -34059,7 +34059,7 @@ void GCHeap::Relocate (Object** ppObject, ScanContext* sc,
STRESS_LOG_ROOT_RELOCATE(ppObject, object, pheader, ((!(flags & GC_CALL_INTERIOR)) ? ((Object*)object)->GetGCSafeMethodTable() : 0));
}
-/*static*/ BOOL GCHeap::IsObjectInFixedHeap(Object *pObj)
+/*static*/ bool GCHeap::IsObjectInFixedHeap(Object *pObj)
{
// For now we simply look at the size of the object to determine if it in the
// fixed heap or not. If the bit indicating this gets set at some point
@@ -34105,7 +34105,7 @@ int StressRNG(int iMaxValue)
// free up object so that things will move and then do a GC
//return TRUE if GC actually happens, otherwise FALSE
-BOOL GCHeap::StressHeap(gc_alloc_context * context)
+bool GCHeap::StressHeap(gc_alloc_context * context)
{
#if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
alloc_context* acontext = static_cast<alloc_context*>(context);
@@ -34603,7 +34603,7 @@ GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_
}
void
-GCHeap::FixAllocContext (gc_alloc_context* context, BOOL lockp, void* arg, void *heap)
+GCHeap::FixAllocContext (gc_alloc_context* context, bool lockp, void* arg, void *heap)
{
alloc_context* acontext = static_cast<alloc_context*>(context);
#ifdef MULTIPLE_HEAPS
@@ -34681,7 +34681,7 @@ BOOL should_collect_optimized (dynamic_data* dd, BOOL low_memory_p)
// API to ensure that a complete new garbage collection takes place
//
HRESULT
-GCHeap::GarbageCollect (int generation, BOOL low_memory_p, int mode)
+GCHeap::GarbageCollect (int generation, bool low_memory_p, int mode)
{
#if defined(BIT64)
if (low_memory_p)
@@ -35512,7 +35512,7 @@ void GCHeap::SetLOHCompactionMode (int newLOHCompactionyMode)
#endif //FEATURE_LOH_COMPACTION
}
-BOOL GCHeap::RegisterForFullGCNotification(uint32_t gen2Percentage,
+bool GCHeap::RegisterForFullGCNotification(uint32_t gen2Percentage,
uint32_t lohPercentage)
{
#ifdef MULTIPLE_HEAPS
@@ -35535,7 +35535,7 @@ BOOL GCHeap::RegisterForFullGCNotification(uint32_t gen2Percentage,
return TRUE;
}
-BOOL GCHeap::CancelFullGCNotification()
+bool GCHeap::CancelFullGCNotification()
{
pGenGCHeap->fgn_maxgen_percent = 0;
pGenGCHeap->fgn_loh_percent = 0;
@@ -35562,7 +35562,7 @@ int GCHeap::WaitForFullGCComplete(int millisecondsTimeout)
return result;
}
-int GCHeap::StartNoGCRegion(uint64_t totalSize, BOOL lohSizeKnown, uint64_t lohSize, BOOL disallowFullBlockingGC)
+int GCHeap::StartNoGCRegion(uint64_t totalSize, bool lohSizeKnown, uint64_t lohSize, bool disallowFullBlockingGC)
{
NoGCRegionLockHolder lh;
@@ -35640,7 +35640,7 @@ HRESULT GCHeap::GetGcCounters(int gen, gc_counters* counters)
}
// Get the segment size to use, making sure it conforms.
-size_t GCHeap::GetValidSegmentSize(BOOL large_seg)
+size_t GCHeap::GetValidSegmentSize(bool large_seg)
{
return get_valid_segment_size (large_seg);
}
@@ -35764,15 +35764,15 @@ size_t GCHeap::GetFinalizablePromotedCount()
#endif //MULTIPLE_HEAPS
}
-BOOL GCHeap::FinalizeAppDomain(AppDomain *pDomain, BOOL fRunFinalizers)
+bool GCHeap::FinalizeAppDomain(AppDomain *pDomain, bool fRunFinalizers)
{
#ifdef MULTIPLE_HEAPS
- BOOL foundp = FALSE;
+ bool foundp = false;
for (int hn = 0; hn < gc_heap::n_heaps; hn++)
{
gc_heap* hp = gc_heap::g_heaps [hn];
if (hp->finalize_queue->FinalizeAppDomain (pDomain, fRunFinalizers))
- foundp = TRUE;
+ foundp = true;
}
return foundp;
@@ -35781,13 +35781,13 @@ BOOL GCHeap::FinalizeAppDomain(AppDomain *pDomain, BOOL fRunFinalizers)
#endif //MULTIPLE_HEAPS
}
-BOOL GCHeap::ShouldRestartFinalizerWatchDog()
+bool GCHeap::ShouldRestartFinalizerWatchDog()
{
// This condition was historically used as part of the condition to detect finalizer thread timeouts
return gc_heap::gc_lock.lock != -1;
}
-void GCHeap::SetFinalizeQueueForShutdown(BOOL fHasLock)
+void GCHeap::SetFinalizeQueueForShutdown(bool fHasLock)
{
#ifdef MULTIPLE_HEAPS
for (int hn = 0; hn < gc_heap::n_heaps; hn++)
@@ -36173,10 +36173,10 @@ CFinalize::FinalizeSegForAppDomain (AppDomain *pDomain,
return finalizedFound;
}
-BOOL
-CFinalize::FinalizeAppDomain (AppDomain *pDomain, BOOL fRunFinalizers)
+bool
+CFinalize::FinalizeAppDomain (AppDomain *pDomain, bool fRunFinalizers)
{
- BOOL finalizedFound = FALSE;
+ bool finalizedFound = false;
unsigned int startSeg = gen_segment (max_generation);
@@ -36186,7 +36186,7 @@ CFinalize::FinalizeAppDomain (AppDomain *pDomain, BOOL fRunFinalizers)
{
if (FinalizeSegForAppDomain (pDomain, fRunFinalizers, Seg))
{
- finalizedFound = TRUE;
+ finalizedFound = true;
}
}
@@ -36594,13 +36594,13 @@ void GCHeap::DiagWalkObject (Object* obj, walk_fn fn, void* context)
}
}
-void GCHeap::DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, size_t diag_context, walk_surv_type type)
+void GCHeap::DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, void* diag_context, walk_surv_type type)
{
gc_heap* hp = (gc_heap*)gc_context;
hp->walk_survivors (fn, diag_context, type);
}
-void GCHeap::DiagWalkHeap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p)
+void GCHeap::DiagWalkHeap (walk_fn fn, void* context, int gen_number, bool walk_large_object_heap_p)
{
gc_heap::walk_heap (fn, context, gen_number, walk_large_object_heap_p);
}
@@ -36920,7 +36920,7 @@ void GCHeap::TemporaryDisableConcurrentGC()
#endif //BACKGROUND_GC
}
-BOOL GCHeap::IsConcurrentGCEnabled()
+bool GCHeap::IsConcurrentGCEnabled()
{
#ifdef BACKGROUND_GC
return (gc_heap::gc_can_use_concurrent && !(gc_heap::temp_disable_concurrent_p));
diff --git a/src/gc/gc.h b/src/gc/gc.h
index d521f93a99..478e8cdb99 100644
--- a/src/gc/gc.h
+++ b/src/gc/gc.h
@@ -220,13 +220,13 @@ public:
return IGCHeap::maxGeneration;
}
- BOOL IsValidSegmentSize(size_t cbSize)
+ bool IsValidSegmentSize(size_t cbSize)
{
//Must be aligned on a Mb and greater than 4Mb
return (((cbSize & (1024*1024-1)) ==0) && (cbSize >> 22));
}
- BOOL IsValidGen0MaxSize(size_t cbSize)
+ bool IsValidGen0MaxSize(size_t cbSize)
{
return (cbSize >= 64*1024);
}
@@ -263,7 +263,7 @@ extern void FinalizeWeakReference(Object * obj);
extern IGCHeapInternal* g_theGCHeap;
#ifndef DACCESS_COMPILE
-inline BOOL IsGCInProgress(bool bConsiderGCStart = FALSE)
+inline bool IsGCInProgress(bool bConsiderGCStart = false)
{
WRAPPER_NO_CONTRACT;
@@ -271,7 +271,7 @@ inline BOOL IsGCInProgress(bool bConsiderGCStart = FALSE)
}
#endif // DACCESS_COMPILE
-inline BOOL IsServerHeap()
+inline bool IsServerHeap()
{
LIMITED_METHOD_CONTRACT;
#ifdef FEATURE_SVR_GC
diff --git a/src/gc/gcee.cpp b/src/gc/gcee.cpp
index 6513fde51f..a736a596e7 100644
--- a/src/gc/gcee.cpp
+++ b/src/gc/gcee.cpp
@@ -381,12 +381,12 @@ size_t GCHeap::GetNow()
return GetHighPrecisionTimeStamp();
}
-BOOL GCHeap::IsGCInProgressHelper (BOOL bConsiderGCStart)
+bool GCHeap::IsGCInProgressHelper (bool bConsiderGCStart)
{
return GcInProgress || (bConsiderGCStart? VolatileLoad(&gc_heap::gc_started) : FALSE);
}
-uint32_t GCHeap::WaitUntilGCComplete(BOOL bConsiderGCStart)
+uint32_t GCHeap::WaitUntilGCComplete(bool bConsiderGCStart)
{
if (bConsiderGCStart)
{
@@ -427,7 +427,7 @@ BlockAgain:
return dwWaitResult;
}
-void GCHeap::SetGCInProgress(BOOL fInProgress)
+void GCHeap::SetGCInProgress(bool fInProgress)
{
GcInProgress = fInProgress;
}
@@ -445,12 +445,12 @@ void GCHeap::WaitUntilConcurrentGCComplete()
#endif //BACKGROUND_GC
}
-BOOL GCHeap::IsConcurrentGCInProgress()
+bool GCHeap::IsConcurrentGCInProgress()
{
#ifdef BACKGROUND_GC
- return pGenGCHeap->settings.concurrent;
+ return !!pGenGCHeap->settings.concurrent;
#else
- return FALSE;
+ return false;
#endif //BACKGROUND_GC
}
diff --git a/src/gc/gcimpl.h b/src/gc/gcimpl.h
index e0008b97be..2a51d477b0 100644
--- a/src/gc/gcimpl.h
+++ b/src/gc/gcimpl.h
@@ -85,11 +85,11 @@ public:
void DiagTraceGCSegments ();
void PublishObject(uint8_t* obj);
- BOOL IsGCInProgressHelper (BOOL bConsiderGCStart = FALSE);
+ bool IsGCInProgressHelper (bool bConsiderGCStart = false);
- uint32_t WaitUntilGCComplete (BOOL bConsiderGCStart = FALSE);
+ uint32_t WaitUntilGCComplete (bool bConsiderGCStart = false);
- void SetGCInProgress(BOOL fInProgress);
+ void SetGCInProgress(bool fInProgress);
bool RuntimeStructuresValid();
@@ -106,7 +106,7 @@ public:
Object* Alloc (gc_alloc_context* acontext, size_t size, uint32_t flags);
void FixAllocContext (gc_alloc_context* acontext,
- BOOL lockp, void* arg, void *heap);
+ bool lockp, void* arg, void *heap);
Object* GetContainingObject(void *pInteriorPtr, bool fCollectedGenOnly);
@@ -121,15 +121,15 @@ public:
void HideAllocContext(alloc_context*);
void RevealAllocContext(alloc_context*);
- BOOL IsObjectInFixedHeap(Object *pObj);
+ bool IsObjectInFixedHeap(Object *pObj);
- HRESULT GarbageCollect (int generation = -1, BOOL low_memory_p=FALSE, int mode=collection_blocking);
+ HRESULT GarbageCollect (int generation = -1, bool low_memory_p=false, int mode=collection_blocking);
////
// GC callback functions
// Check if an argument is promoted (ONLY CALL DURING
// THE PROMOTIONSGRANTED CALLBACK.)
- BOOL IsPromoted (Object *object);
+ bool IsPromoted (Object *object);
size_t GetPromotedBytes (int heap_index);
@@ -157,8 +157,8 @@ public:
//returns the generation number of an object (not valid during relocation)
unsigned WhichGeneration (Object* object);
// returns TRUE is the object is ephemeral
- BOOL IsEphemeral (Object* object);
- BOOL IsHeapPointer (void* object, BOOL small_heap_only = FALSE);
+ bool IsEphemeral (Object* object);
+ bool IsHeapPointer (void* object, bool small_heap_only = false);
void ValidateObjectMember (Object *obj);
@@ -173,13 +173,13 @@ public:
int GetLOHCompactionMode();
void SetLOHCompactionMode(int newLOHCompactionyMode);
- BOOL RegisterForFullGCNotification(uint32_t gen2Percentage,
+ bool RegisterForFullGCNotification(uint32_t gen2Percentage,
uint32_t lohPercentage);
- BOOL CancelFullGCNotification();
+ bool CancelFullGCNotification();
int WaitForFullGCApproach(int millisecondsTimeout);
int WaitForFullGCComplete(int millisecondsTimeout);
- int StartNoGCRegion(uint64_t totalSize, BOOL lohSizeKnown, uint64_t lohSize, BOOL disallowFullBlockingGC);
+ int StartNoGCRegion(uint64_t totalSize, bool lohSizeKnown, uint64_t lohSize, bool disallowFullBlockingGC);
int EndNoGCRegion();
unsigned GetGcCount();
@@ -189,7 +189,7 @@ public:
PER_HEAP_ISOLATED HRESULT GetGcCounters(int gen, gc_counters* counters);
- size_t GetValidSegmentSize(BOOL large_seg = FALSE);
+ size_t GetValidSegmentSize(bool large_seg = false);
static size_t GetValidGen0MaxSize(size_t seg_size);
@@ -199,9 +199,9 @@ public:
PER_HEAP_ISOLATED size_t GetNumberFinalizableObjects();
PER_HEAP_ISOLATED size_t GetFinalizablePromotedCount();
- void SetFinalizeQueueForShutdown(BOOL fHasLock);
- BOOL FinalizeAppDomain(AppDomain *pDomain, BOOL fRunFinalizers);
- BOOL ShouldRestartFinalizerWatchDog();
+ void SetFinalizeQueueForShutdown(bool fHasLock);
+ bool FinalizeAppDomain(AppDomain *pDomain, bool fRunFinalizers);
+ bool ShouldRestartFinalizerWatchDog();
void DiagWalkObject (Object* obj, walk_fn fn, void* context);
void SetFinalizeRunOnShutdown(bool value);
@@ -235,12 +235,12 @@ public: // FIX
#ifndef DACCESS_COMPILE
HRESULT WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout); // Use in native threads. TRUE if succeed. FALSE if failed or timeout
#endif
- BOOL IsConcurrentGCInProgress();
+ bool IsConcurrentGCInProgress();
// Enable/disable concurrent GC
void TemporaryEnableConcurrentGC();
void TemporaryDisableConcurrentGC();
- BOOL IsConcurrentGCEnabled();
+ bool IsConcurrentGCEnabled();
PER_HEAP_ISOLATED CLREvent *WaitForGCEvent; // used for syncing w/GC
@@ -259,7 +259,7 @@ private:
}
public:
//return TRUE if GC actually happens, otherwise FALSE
- BOOL StressHeap(gc_alloc_context * acontext = 0);
+ bool StressHeap(gc_alloc_context * acontext);
#ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
#ifdef STRESS_HEAP
@@ -279,7 +279,7 @@ protected:
virtual void DiagDescrGenerations (gen_walk_fn fn, void *context);
- virtual void DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, size_t diag_context, walk_surv_type type);
+ virtual void DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, void* diag_context, walk_surv_type type);
virtual void DiagWalkFinalizeQueue (void* gc_context, fq_walk_fn fn);
@@ -289,7 +289,7 @@ protected:
virtual void DiagScanDependentHandles (handle_scan_fn fn, int gen_number, ScanContext* context);
- virtual void DiagWalkHeap(walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
+ virtual void DiagWalkHeap(walk_fn fn, void* context, int gen_number, bool walk_large_object_heap_p);
public:
Object * NextObj (Object * object);
diff --git a/src/gc/gcinterface.h b/src/gc/gcinterface.h
index 215f6cedfe..7aae605633 100644
--- a/src/gc/gcinterface.h
+++ b/src/gc/gcinterface.h
@@ -378,12 +378,12 @@ typedef enum
HNDTYPE_WEAK_WINRT = 9
} HandleType;
-typedef BOOL (* walk_fn)(Object*, void*);
+typedef bool (* walk_fn)(Object*, void*);
typedef void (* gen_walk_fn)(void* context, int generation, uint8_t* range_start, uint8_t* range_end, uint8_t* range_reserved);
-typedef void (* record_surv_fn)(uint8_t* begin, uint8_t* end, ptrdiff_t reloc, size_t context, BOOL compacting_p, BOOL bgc_p);
-typedef void (* fq_walk_fn)(BOOL, void*);
+typedef void (* record_surv_fn)(uint8_t* begin, uint8_t* end, ptrdiff_t reloc, void* context, bool compacting_p, bool bgc_p);
+typedef void (* fq_walk_fn)(bool, void*);
typedef void (* fq_scan_fn)(Object** ppObject, ScanContext *pSC, uint32_t dwFlags);
-typedef void (* handle_scan_fn)(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, BOOL isDependent);
+typedef void (* handle_scan_fn)(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, bool isDependent);
// IGCHeap is the interface that the VM will use when interacting with the GC.
class IGCHeap {
@@ -398,13 +398,13 @@ public:
*/
// Returns whether or not the given size is a valid segment size.
- virtual BOOL IsValidSegmentSize(size_t size) = 0;
+ virtual bool IsValidSegmentSize(size_t size) = 0;
// Returns whether or not the given size is a valid gen 0 max size.
- virtual BOOL IsValidGen0MaxSize(size_t size) = 0;
+ virtual bool IsValidGen0MaxSize(size_t size) = 0;
// Gets a valid segment size.
- virtual size_t GetValidSegmentSize(BOOL large_seg = FALSE) = 0;
+ virtual size_t GetValidSegmentSize(bool large_seg = false) = 0;
// Sets the limit for reserved virtual memory.
virtual void SetReservedVMLimit(size_t vmlimit) = 0;
@@ -424,7 +424,7 @@ public:
virtual void WaitUntilConcurrentGCComplete() = 0;
// Returns true if a concurrent GC is in progress, false otherwise.
- virtual BOOL IsConcurrentGCInProgress() = 0;
+ virtual bool IsConcurrentGCInProgress() = 0;
// Temporarily enables concurrent GC, used during profiling.
virtual void TemporaryEnableConcurrentGC() = 0;
@@ -433,7 +433,7 @@ public:
virtual void TemporaryDisableConcurrentGC() = 0;
// Returns whether or not Concurrent GC is enabled.
- virtual BOOL IsConcurrentGCEnabled() = 0;
+ virtual bool IsConcurrentGCEnabled() = 0;
// Wait for a concurrent GC to complete if one is in progress, with the given timeout.
virtual HRESULT WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout) = 0; // Use in native threads. TRUE if succeed. FALSE if failed or timeout
@@ -447,17 +447,17 @@ public:
*/
// Finalizes an app domain by finalizing objects within that app domain.
- virtual BOOL FinalizeAppDomain(AppDomain* pDomain, BOOL fRunFinalizers) = 0;
+ virtual bool FinalizeAppDomain(AppDomain* pDomain, bool fRunFinalizers) = 0;
// Finalizes all registered objects for shutdown, even if they are still reachable.
- virtual void SetFinalizeQueueForShutdown(BOOL fHasLock) = 0;
+ virtual void SetFinalizeQueueForShutdown(bool fHasLock) = 0;
// Gets the number of finalizable objects.
virtual size_t GetNumberOfFinalizable() = 0;
// Traditionally used by the finalizer thread on shutdown to determine
// whether or not to time out. Returns true if the GC lock has not been taken.
- virtual BOOL ShouldRestartFinalizerWatchDog() = 0;
+ virtual bool ShouldRestartFinalizerWatchDog() = 0;
// Gets the next finalizable object.
virtual Object* GetNextFinalizable() = 0;
@@ -490,10 +490,10 @@ public:
// Registers for a full GC notification, raising a notification if the gen 2 or
// LOH object heap thresholds are exceeded.
- virtual BOOL RegisterForFullGCNotification(uint32_t gen2Percentage, uint32_t lohPercentage) = 0;
+ virtual bool RegisterForFullGCNotification(uint32_t gen2Percentage, uint32_t lohPercentage) = 0;
// Cancels a full GC notification that was requested by `RegisterForFullGCNotification`.
- virtual BOOL CancelFullGCNotification() = 0;
+ virtual bool CancelFullGCNotification() = 0;
// Returns the status of a registered notification for determining whether a blocking
// Gen 2 collection is about to be initiated, with the given timeout.
@@ -514,7 +514,7 @@ public:
// Begins a no-GC region, returning a code indicating whether entering the no-GC
// region was successful.
- virtual int StartNoGCRegion(uint64_t totalSize, BOOL lohSizeKnown, uint64_t lohSize, BOOL disallowFullBlockingGC) = 0;
+ virtual int StartNoGCRegion(uint64_t totalSize, bool lohSizeKnown, uint64_t lohSize, bool disallowFullBlockingGC) = 0;
// Exits a no-GC region.
virtual int EndNoGCRegion() = 0;
@@ -524,7 +524,7 @@ public:
// Forces a garbage collection of the given generation. Also used extensively
// throughout the VM.
- virtual HRESULT GarbageCollect(int generation = -1, BOOL low_memory_p = FALSE, int mode = collection_blocking) = 0;
+ virtual HRESULT GarbageCollect(int generation = -1, bool low_memory_p = false, int mode = collection_blocking) = 0;
// Gets the largest GC generation. Also used extensively throughout the VM.
virtual unsigned GetMaxGeneration() = 0;
@@ -546,16 +546,16 @@ public:
virtual HRESULT Initialize() = 0;
// Returns whether nor this GC was promoted by the last GC.
- virtual BOOL IsPromoted(Object* object) = 0;
+ virtual bool IsPromoted(Object* object) = 0;
// Returns true if this pointer points into a GC heap, false otherwise.
- virtual BOOL IsHeapPointer(void* object, BOOL small_heap_only = FALSE) = 0;
+ virtual bool IsHeapPointer(void* object, bool small_heap_only = false) = 0;
// Return the generation that has been condemned by the current GC.
virtual unsigned GetCondemnedGeneration() = 0;
// Returns whether or not a GC is in progress.
- virtual BOOL IsGCInProgressHelper(BOOL bConsiderGCStart = FALSE) = 0;
+ virtual bool IsGCInProgressHelper(bool bConsiderGCStart = false) = 0;
// Returns the number of GCs that have occured. Mainly used for
// sanity checks asserting that a GC has not occured.
@@ -566,20 +566,20 @@ public:
virtual bool IsThreadUsingAllocationContextHeap(gc_alloc_context* acontext, int thread_number) = 0;
// Returns whether or not this object resides in an ephemeral generation.
- virtual BOOL IsEphemeral(Object* object) = 0;
+ virtual bool IsEphemeral(Object* object) = 0;
// Blocks until a GC is complete, returning a code indicating the wait was successful.
- virtual uint32_t WaitUntilGCComplete(BOOL bConsiderGCStart = FALSE) = 0;
+ virtual uint32_t WaitUntilGCComplete(bool bConsiderGCStart = false) = 0;
// "Fixes" an allocation context by binding its allocation pointer to a
// location on the heap.
- virtual void FixAllocContext(gc_alloc_context* acontext, BOOL lockp, void* arg, void* heap) = 0;
+ virtual void FixAllocContext(gc_alloc_context* acontext, bool lockp, void* arg, void* heap) = 0;
// Gets the total survived size plus the total allocated bytes on the heap.
virtual size_t GetCurrentObjSize() = 0;
// Sets whether or not a GC is in progress.
- virtual void SetGCInProgress(BOOL fInProgress) = 0;
+ virtual void SetGCInProgress(bool fInProgress) = 0;
// Gets whether or not the GC runtime structures are in a valid state for heap traversal.
virtual bool RuntimeStructuresValid() = 0;
@@ -642,7 +642,7 @@ public:
===========================================================================
*/
// Returns whether or not this object is in the fixed heap.
- virtual BOOL IsObjectInFixedHeap(Object* pObj) = 0;
+ virtual bool IsObjectInFixedHeap(Object* pObj) = 0;
// Walks an object and validates its members.
virtual void ValidateObjectMember(Object* obj) = 0;
@@ -669,10 +669,10 @@ public:
virtual void DiagWalkObject(Object* obj, walk_fn fn, void* context) = 0;
// Walk the heap object by object.
- virtual void DiagWalkHeap(walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p) = 0;
+ virtual void DiagWalkHeap(walk_fn fn, void* context, int gen_number, bool walk_large_object_heap_p) = 0;
// Walks the survivors and get the relocation information if objects have moved.
- virtual void DiagWalkSurvivorsWithType(void* gc_context, record_surv_fn fn, size_t diag_context, walk_surv_type type) = 0;
+ virtual void DiagWalkSurvivorsWithType(void* gc_context, record_surv_fn fn, void* diag_context, walk_surv_type type) = 0;
// Walks the finalization queue.
virtual void DiagWalkFinalizeQueue(void* gc_context, fq_walk_fn fn) = 0;
@@ -700,7 +700,7 @@ public:
// Returns TRUE if GC actually happens, otherwise FALSE. The passed alloc context
// must not be null.
- virtual BOOL StressHeap(gc_alloc_context* acontext) = 0;
+ virtual bool StressHeap(gc_alloc_context* acontext) = 0;
/*
===========================================================================
@@ -753,8 +753,8 @@ struct ScanContext
Thread* thread_under_crawl;
int thread_number;
uintptr_t stack_limit; // Lowest point on the thread stack that the scanning logic is permitted to read
- BOOL promotion; //TRUE: Promotion, FALSE: Relocation.
- BOOL concurrent; //TRUE: concurrent scanning
+ bool promotion; //true: Promotion, false: Relocation.
+ bool concurrent; //true: concurrent scanning
#if CHECK_APP_DOMAIN_LEAKS || defined (FEATURE_APPDOMAIN_RESOURCE_MONITORING) || defined (DACCESS_COMPILE)
AppDomain *pCurrentDomain;
#endif //CHECK_APP_DOMAIN_LEAKS || FEATURE_APPDOMAIN_RESOURCE_MONITORING || DACCESS_COMPILE
@@ -775,8 +775,8 @@ struct ScanContext
thread_under_crawl = 0;
thread_number = -1;
stack_limit = 0;
- promotion = FALSE;
- concurrent = FALSE;
+ promotion = false;
+ concurrent = false;
#ifdef GC_PROFILING
pMD = NULL;
#endif //GC_PROFILING
diff --git a/src/gc/gcpriv.h b/src/gc/gcpriv.h
index b929198fdf..92868bbbbe 100644
--- a/src/gc/gcpriv.h
+++ b/src/gc/gcpriv.h
@@ -1298,19 +1298,19 @@ protected:
uint8_t* last_plug;
BOOL is_shortened;
mark* pinned_plug_entry;
- size_t profiling_context;
+ void* profiling_context;
record_surv_fn fn;
};
PER_HEAP
- void walk_survivors (record_surv_fn fn, size_t context, walk_surv_type type);
+ void walk_survivors (record_surv_fn fn, void* context, walk_surv_type type);
PER_HEAP
void walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p,
walk_relocate_args* args);
PER_HEAP
- void walk_relocation (size_t profiling_context, record_surv_fn fn);
+ void walk_relocation (void* profiling_context, record_surv_fn fn);
PER_HEAP
void walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args);
@@ -1320,14 +1320,14 @@ protected:
#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
PER_HEAP
- void walk_survivors_for_bgc (size_t profiling_context, record_surv_fn fn);
+ void walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn);
#endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
// used in blocking GCs after plan phase so this walks the plugs.
PER_HEAP
- void walk_survivors_relocation (size_t profiling_context, record_surv_fn fn);
+ void walk_survivors_relocation (void* profiling_context, record_surv_fn fn);
PER_HEAP
- void walk_survivors_for_loh (size_t profiling_context, record_surv_fn fn);
+ void walk_survivors_for_loh (void* profiling_context, record_surv_fn fn);
PER_HEAP
int generation_to_condemn (int n,
@@ -2168,7 +2168,7 @@ protected:
void relocate_in_loh_compact();
PER_HEAP
- void walk_relocation_for_loh (size_t profiling_context, record_surv_fn fn);
+ void walk_relocation_for_loh (void* profiling_context, record_surv_fn fn);
PER_HEAP
BOOL loh_enque_pinned_plug (uint8_t* plug, size_t len);
@@ -3767,7 +3767,7 @@ public:
void DiscardNonCriticalObjects();
//Methods used by the app domain unloading call to finalize objects in an app domain
- BOOL FinalizeAppDomain (AppDomain *pDomain, BOOL fRunFinalizers);
+ bool FinalizeAppDomain (AppDomain *pDomain, bool fRunFinalizers);
void CheckFinalizerObjects();
diff --git a/src/gc/gcscan.h b/src/gc/gcscan.h
index 306d5f23a3..c7060f3f51 100644
--- a/src/gc/gcscan.h
+++ b/src/gc/gcscan.h
@@ -92,11 +92,4 @@ class GCScan
static VOLATILE(int32_t) m_GcStructuresInvalidCnt;
};
-// These two functions are utilized to scan the heap if requested by ETW
-// or a profiler. The implementations of these two functions are in profheapwalkhelper.cpp.
-#if defined(FEATURE_EVENT_TRACE) | defined(GC_PROFILING)
-void ScanRootsHelper(Object* pObj, Object** ppRoot, ScanContext * pSC, DWORD dwFlags);
-BOOL HeapWalkHelper(Object * pBO, void * pvContext);
-#endif
-
#endif // _GCSCAN_H_
diff --git a/src/gc/objecthandle.cpp b/src/gc/objecthandle.cpp
index e8eed93006..5f5ecbf556 100644
--- a/src/gc/objecthandle.cpp
+++ b/src/gc/objecthandle.cpp
@@ -441,13 +441,13 @@ void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, uintpt
ScanContext *pSC = (ScanContext *)lp1;
uint32_t rootFlags = 0;
- BOOL isDependent = FALSE;
+ bool isDependent = false;
OBJECTHANDLE handle = (OBJECTHANDLE)(pRef);
switch (HandleFetchType(handle))
{
case HNDTYPE_DEPENDENT:
- isDependent = TRUE;
+ isDependent = true;
break;
case HNDTYPE_WEAK_SHORT:
case HNDTYPE_WEAK_LONG:
diff --git a/src/gc/objecthandle.h b/src/gc/objecthandle.h
index 9c885bbfc6..b86572b276 100644
--- a/src/gc/objecthandle.h
+++ b/src/gc/objecthandle.h
@@ -511,8 +511,6 @@ void Ref_ScanSizedRefHandles(uint32_t condemned, uint32_t maxgen, ScanContext* s
void Ref_ScanPointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn);
#endif
-typedef void (* handle_scan_fn)(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, BOOL isDependent);
-
void Ref_CheckReachable (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1);
void Ref_CheckAlive (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1);
void Ref_ScanHandlesForProfilerAndETW(uint32_t uMaxGeneration, uintptr_t lp1, handle_scan_fn fn);
diff --git a/src/gc/sample/gcenv.ee.cpp b/src/gc/sample/gcenv.ee.cpp
index b339fcc619..07be244375 100644
--- a/src/gc/sample/gcenv.ee.cpp
+++ b/src/gc/sample/gcenv.ee.cpp
@@ -137,7 +137,7 @@ void ThreadStore::AttachCurrentThread()
void GCToEEInterface::SuspendEE(SUSPEND_REASON reason)
{
- g_theGCHeap->SetGCInProgress(TRUE);
+ g_theGCHeap->SetGCInProgress(true);
// TODO: Implement
}
@@ -146,7 +146,7 @@ void GCToEEInterface::RestartEE(bool bFinishedGC)
{
// TODO: Implement
- g_theGCHeap->SetGCInProgress(FALSE);
+ g_theGCHeap->SetGCInProgress(false);
}
void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc)