author     Jan Vorlicek <janvorli@microsoft.com>  2015-11-19 10:38:17 +0100
committer  Jan Vorlicek <janvorli@microsoft.com>  2015-11-19 10:38:17 +0100
commit     ca854edf49a0dc91cc6f80f10b8fdfba39878e07 (patch)
tree       43481e386bf19ad19c2189d68dcd65a981f8a0e7 /src/gc
parent     8a1abac8f17553f97b2613784db4bde1f5edda96 (diff)
download   coreclr-ca854edf49a0dc91cc6f80f10b8fdfba39878e07.tar.gz
           coreclr-ca854edf49a0dc91cc6f80f10b8fdfba39878e07.tar.bz2
           coreclr-ca854edf49a0dc91cc6f80f10b8fdfba39878e07.zip
Changed LONG and ULONG to the fixed-width types int32_t and uint32_t
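The motivation, in brief: Windows is LLP64 (long stays 32-bit), while 64-bit
Linux and OS X are LP64 (long is 64-bit), so any Unix definition of LONG/ULONG
in terms of long would silently change width. A minimal illustration of the
mismatch that the fixed-width types avoid (a hypothetical standalone program,
not part of the commit):

// check_widths.cpp - sizeof(long) differs between LLP64 and LP64; int32_t does not.
#include <cstdint>
#include <cstdio>

int main()
{
    // int32_t is exactly 32 bits on every conforming platform.
    static_assert(sizeof(int32_t) == 4, "int32_t must be 4 bytes");

    // Prints 4 on Windows (LLP64) but 8 on 64-bit Linux/OS X (LP64).
    printf("sizeof(long) = %zu, sizeof(int32_t) = %zu\n",
           sizeof(long), sizeof(int32_t));
    return 0;
}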
Diffstat (limited to 'src/gc')
-rw-r--r--  src/gc/env/gcenv.base.h       18
-rw-r--r--  src/gc/env/gcenv.unix.cpp      6
-rw-r--r--  src/gc/env/gcenv.windows.cpp  12
-rw-r--r--  src/gc/gc.cpp                 90
-rw-r--r--  src/gc/gc.h                    4
-rw-r--r--  src/gc/gccommon.cpp            4
-rw-r--r--  src/gc/gcee.cpp               14
-rw-r--r--  src/gc/gcimpl.h                2
-rw-r--r--  src/gc/gcpriv.h               10
-rw-r--r--  src/gc/gcscan.cpp             12
-rw-r--r--  src/gc/gcscan.h                4
-rw-r--r--  src/gc/handletable.cpp         4
-rw-r--r--  src/gc/handletablecache.cpp   22
-rw-r--r--  src/gc/handletablepriv.h       4
-rw-r--r--  src/gc/objecthandle.cpp        2
15 files changed, 104 insertions, 104 deletions
diff --git a/src/gc/env/gcenv.base.h b/src/gc/env/gcenv.base.h
index a5aac73129..dabca8daed 100644
--- a/src/gc/env/gcenv.base.h
+++ b/src/gc/env/gcenv.base.h
@@ -29,15 +29,15 @@ typedef uint32_t DWORD;
typedef uintptr_t DWORD_PTR;
typedef void* LPVOID;
typedef uint32_t UINT;
-typedef int32_t LONG;
-typedef uint32_t ULONG;
+//typedef int32_t LONG;
+//typedef uint32_t ULONG;
typedef uintptr_t ULONG_PTR;
typedef void VOID;
typedef void* PVOID;
typedef uintptr_t LPARAM;
typedef void * LPSECURITY_ATTRIBUTES;
typedef void const * LPCVOID;
-typedef uint32_t * PULONG;
+//typedef uint32_t * PULONG;
typedef wchar_t * PWSTR, *LPWSTR;
typedef const wchar_t *LPCWSTR, *PCWSTR;
typedef size_t SIZE_T;
@@ -130,8 +130,8 @@ typedef struct _RTL_CRITICAL_SECTION {
// section for the resource
//
- LONG LockCount;
- LONG RecursionCount;
+ int32_t LockCount;
+ int32_t RecursionCount;
HANDLE OwningThread; // from the thread's ClientId->UniqueThread
HANDLE LockSemaphore;
ULONG_PTR SpinCount; // force size on 64-bit systems when packed
@@ -191,7 +191,7 @@ GetWriteWatch(
SIZE_T dwRegionSize,
PVOID *lpAddresses,
ULONG_PTR * lpdwCount,
- ULONG * lpdwGranularity
+ uint32_t * lpdwGranularity
);
WINBASEAPI
@@ -260,8 +260,8 @@ DWORD
WINAPI
SetFilePointer(
HANDLE hFile,
- LONG lDistanceToMove,
- LONG * lpDistanceToMoveHigh,
+ int32_t lDistanceToMove,
+ int32_t * lpDistanceToMoveHigh,
DWORD dwMoveMethod);
WINBASEAPI
@@ -430,7 +430,7 @@ struct _DacGlobals;
int32_t FastInterlockIncrement(int32_t volatile *lpAddend);
int32_t FastInterlockDecrement(int32_t volatile *lpAddend);
-int32_t FastInterlockExchange(int32_t volatile *Target, LONG Value);
+int32_t FastInterlockExchange(int32_t volatile *Target, int32_t Value);
int32_t FastInterlockCompareExchange(int32_t volatile *Destination, int32_t Exchange, int32_t Comperand);
int32_t FastInterlockExchangeAdd(int32_t volatile *Addend, int32_t Value);
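For reference, the FastInterlock* family declared above mirrors the Win32
Interlocked* semantics. A minimal sketch of the Unix side, assuming GCC/Clang
__atomic builtins are available (the real gcenv.unix.cpp implementation may
differ):

#include <cstdint>

int32_t FastInterlockIncrement(int32_t volatile *lpAddend)
{
    // Matches InterlockedIncrement: returns the *new* value.
    return __atomic_add_fetch(lpAddend, 1, __ATOMIC_SEQ_CST);
}

int32_t FastInterlockDecrement(int32_t volatile *lpAddend)
{
    return __atomic_sub_fetch(lpAddend, 1, __ATOMIC_SEQ_CST);
}

int32_t FastInterlockExchange(int32_t volatile *Target, int32_t Value)
{
    // Matches InterlockedExchange: returns the previous value.
    return __atomic_exchange_n(Target, Value, __ATOMIC_SEQ_CST);
}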
diff --git a/src/gc/env/gcenv.unix.cpp b/src/gc/env/gcenv.unix.cpp
index 249453a0b4..8fe8708744 100644
--- a/src/gc/env/gcenv.unix.cpp
+++ b/src/gc/env/gcenv.unix.cpp
@@ -389,7 +389,7 @@ GetWriteWatch(
SIZE_T dwRegionSize,
PVOID *lpAddresses,
ULONG_PTR * lpdwCount,
- ULONG * lpdwGranularity
+ uint32_t * lpdwGranularity
)
{
// TODO: Implement for background GC
@@ -519,8 +519,8 @@ DWORD
WINAPI
SetFilePointer(
HANDLE hFile,
- LONG lDistanceToMove,
- LONG * lpDistanceToMoveHigh,
+ int32_t lDistanceToMove,
+ int32_t * lpDistanceToMoveHigh,
DWORD dwMoveMethod)
{
// TODO: Reimplement callers using CRT
diff --git a/src/gc/env/gcenv.windows.cpp b/src/gc/env/gcenv.windows.cpp
index aba1a828d1..db1bd5148e 100644
--- a/src/gc/env/gcenv.windows.cpp
+++ b/src/gc/env/gcenv.windows.cpp
@@ -16,12 +16,12 @@
int32_t FastInterlockIncrement(int32_t volatile *lpAddend)
{
- return InterlockedIncrement((LONG *)lpAddend);
+ return InterlockedIncrement((int32_t *)lpAddend);
}
int32_t FastInterlockDecrement(int32_t volatile *lpAddend)
{
- return InterlockedDecrement((LONG *)lpAddend);
+ return InterlockedDecrement((int32_t *)lpAddend);
}
int32_t FastInterlockExchange(int32_t volatile *Target, int32_t Value)
@@ -31,12 +31,12 @@ int32_t FastInterlockExchange(int32_t volatile *Target, int32_t Value)
int32_t FastInterlockCompareExchange(int32_t volatile *Destination, int32_t Exchange, int32_t Comperand)
{
- return InterlockedCompareExchange((LONG *)Destination, Exchange, Comperand);
+ return InterlockedCompareExchange((int32_t *)Destination, Exchange, Comperand);
}
int32_t FastInterlockExchangeAdd(int32_t volatile *Addend, int32_t Value)
{
- return InterlockedExchangeAdd((LONG *)Addend, Value);
+ return InterlockedExchangeAdd((int32_t *)Addend, Value);
}
void * _FastInterlockExchangePointer(void * volatile *Target, void * Value)
@@ -51,12 +51,12 @@ void * _FastInterlockCompareExchangePointer(void * volatile *Destination, void *
void FastInterlockOr(uint32_t volatile *p, uint32_t msk)
{
- InterlockedOr((LONG *)p, msk);
+ InterlockedOr((int32_t *)p, msk);
}
void FastInterlockAnd(uint32_t volatile *p, uint32_t msk)
{
- InterlockedAnd((LONG *)p, msk);
+ InterlockedAnd((int32_t *)p, msk);
}
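The (int32_t *) casts above hand GC-side variables to the Win32 Interlocked*
APIs, which is only sound while int32_t and Windows' LONG share size and
alignment. An illustrative compile-time check of that assumption (not part of
the commit):

#include <windows.h>
#include <cstdint>

static_assert(sizeof(int32_t) == sizeof(LONG),
              "Interlocked* casts assume int32_t and LONG are the same size");
static_assert(alignof(int32_t) == alignof(LONG),
              "and identically aligned");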
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index eeb85bdada..c24370eca4 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -293,7 +293,7 @@ uint32_t bgc_alloc_spin = 2;
inline
void c_write (uint32_t& place, uint32_t value)
{
- FastInterlockExchange (&(LONG&)place, value);
+ FastInterlockExchange (&(int32_t&)place, value);
//place = value;
}
@@ -301,7 +301,7 @@ void c_write (uint32_t& place, uint32_t value)
inline
void c_write_volatile (BOOL* place, uint32_t value)
{
- FastInterlockExchange ((LONG*)place, value);
+ FastInterlockExchange ((int32_t*)place, value);
//place = value;
}
@@ -607,10 +607,10 @@ enum gc_join_flavor
struct join_structure
{
CLREvent joined_event[3]; // the last event in the array is only used for first_thread_arrived.
- VOLATILE(LONG) join_lock;
- VOLATILE(LONG) r_join_lock;
- VOLATILE(LONG) join_restart;
- VOLATILE(LONG) r_join_restart; // only used by get_here_first and friends.
+ VOLATILE(int32_t) join_lock;
+ VOLATILE(int32_t) r_join_lock;
+ VOLATILE(int32_t) join_restart;
+ VOLATILE(int32_t) r_join_restart; // only used by get_here_first and friends.
int n_threads;
VOLATILE(BOOL) joined_p;
// avoid lock_color and join_lock being on same cache line
@@ -643,7 +643,7 @@ enum join_heap_index
struct join_event
{
- ULONG heap;
+ uint32_t heap;
join_time time;
join_type type;
};
@@ -731,7 +731,7 @@ public:
if (FastInterlockDecrement(&join_struct.join_lock) != 0)
{
dprintf (JOIN_LOG, ("join%d(%d): Join() Waiting...join_lock is now %d",
- flavor, join_id, (LONG)(join_struct.join_lock)));
+ flavor, join_id, (int32_t)(join_struct.join_lock)));
fire_event (gch->heap_number, time_start, type_join, join_id);
@@ -753,7 +753,7 @@ respin:
if (color == join_struct.lock_color)
{
dprintf (JOIN_LOG, ("join%d(%d): Join() hard wait on reset event %d, join_lock is now %d",
- flavor, join_id, color, (LONG)(join_struct.join_lock)));
+ flavor, join_id, color, (int32_t)(join_struct.join_lock)));
//Thread* current_thread = GetThread();
//BOOL cooperative_mode = gc_heap::enable_preemptive (current_thread);
@@ -774,7 +774,7 @@ respin:
}
dprintf (JOIN_LOG, ("join%d(%d): Join() done, join_lock is %d",
- flavor, join_id, (LONG)(join_struct.join_lock)));
+ flavor, join_id, (int32_t)(join_struct.join_lock)));
}
fire_event (gch->heap_number, time_end, type_join, join_id);
@@ -931,7 +931,7 @@ respin:
assert (join_struct.joined_p);
join_struct.joined_p = FALSE;
join_struct.join_lock = join_struct.n_threads;
- dprintf (JOIN_LOG, ("join%d(%d): Restarting from join: join_lock is %d", flavor, id, (LONG)(join_struct.join_lock)));
+ dprintf (JOIN_LOG, ("join%d(%d): Restarting from join: join_lock is %d", flavor, id, (int32_t)(join_struct.join_lock)));
// printf("restart from join #%d at cycle %u from start of gc\n", join_id, GetCycleCount32() - gc_start);
int color = join_struct.lock_color;
join_struct.lock_color = !color;
@@ -948,7 +948,7 @@ respin:
BOOL joined()
{
- dprintf (JOIN_LOG, ("join%d(%d): joined, join_lock is %d", flavor, id, (LONG)(join_struct.join_lock)));
+ dprintf (JOIN_LOG, ("join%d(%d): joined, join_lock is %d", flavor, id, (int32_t)(join_struct.join_lock)));
return join_struct.joined_p;
}
@@ -1008,11 +1008,11 @@ class exclusive_sync
{
// TODO - verify that this is the right syntax for Volatile.
VOLATILE(uint8_t*) rwp_object;
- VOLATILE(LONG) needs_checking;
+ VOLATILE(int32_t) needs_checking;
int spin_count;
- uint8_t cache_separator[HS_CACHE_LINE_SIZE - sizeof (int) - sizeof (LONG)];
+ uint8_t cache_separator[HS_CACHE_LINE_SIZE - sizeof (int) - sizeof (int32_t)];
// TODO - perhaps each object should be on its own cache line...
VOLATILE(uint8_t*) alloc_objects[max_pending_allocs];
@@ -1183,9 +1183,9 @@ retry:
// class to do synchronization between FGCs and BGC.
class recursive_gc_sync
{
- static VOLATILE(LONG) foreground_request_count;//initial state 0
+ static VOLATILE(int32_t) foreground_request_count;//initial state 0
static VOLATILE(BOOL) gc_background_running; //initial state FALSE
- static VOLATILE(LONG) foreground_count; // initial state 0;
+ static VOLATILE(int32_t) foreground_count; // initial state 0;
static VOLATILE(uint32_t) foreground_gate; // initial state FALSE;
static CLREvent foreground_complete;//Auto Reset
static CLREvent foreground_allowed;//Auto Reset
@@ -1200,8 +1200,8 @@ public:
static BOOL background_running_p() {return gc_background_running;}
};
-VOLATILE(LONG) recursive_gc_sync::foreground_request_count = 0;//initial state 0
-VOLATILE(LONG) recursive_gc_sync::foreground_count = 0; // initial state 0;
+VOLATILE(int32_t) recursive_gc_sync::foreground_request_count = 0;//initial state 0
+VOLATILE(int32_t) recursive_gc_sync::foreground_count = 0; // initial state 0;
VOLATILE(BOOL) recursive_gc_sync::gc_background_running = FALSE; //initial state FALSE
VOLATILE(uint32_t) recursive_gc_sync::foreground_gate = 0;
CLREvent recursive_gc_sync::foreground_complete;//Auto Reset
@@ -1289,7 +1289,7 @@ try_again_no_inc:
if (foreground_gate)
{
FastInterlockIncrement (&foreground_count);
- dprintf (2, ("foreground_count: %d", (LONG)foreground_count));
+ dprintf (2, ("foreground_count: %d", (int32_t)foreground_count));
if (foreground_gate)
{
gc_heap::settings.concurrent = FALSE;
@@ -1314,7 +1314,7 @@ void recursive_gc_sync::end_foreground()
if (gc_background_running)
{
FastInterlockDecrement (&foreground_request_count);
- dprintf (2, ("foreground_count before decrement: %d", (LONG)foreground_count));
+ dprintf (2, ("foreground_count before decrement: %d", (int32_t)foreground_count));
if (FastInterlockDecrement (&foreground_count) == 0)
{
//c_write_volatile ((BOOL*)&foreground_gate, 0);
@@ -1335,7 +1335,7 @@ BOOL recursive_gc_sync::allow_foreground()
{
assert (gc_heap::settings.concurrent);
dprintf (100, ("enter allow_foreground, f_req_count: %d, f_count: %d",
- (LONG)foreground_request_count, (LONG)foreground_count));
+ (int32_t)foreground_request_count, (int32_t)foreground_count));
BOOL did_fgc = FALSE;
@@ -1551,7 +1551,7 @@ static void safe_switch_to_thread()
// raw pointers in addition to the results of the & operator on Volatile<T>.
//
inline
-static void enter_spin_lock_noinstru (RAW_KEYWORD(volatile) LONG* lock)
+static void enter_spin_lock_noinstru (RAW_KEYWORD(volatile) int32_t* lock)
{
retry:
@@ -1595,15 +1595,15 @@ retry:
}
inline
-static BOOL try_enter_spin_lock_noinstru(RAW_KEYWORD(volatile) LONG* lock)
+static BOOL try_enter_spin_lock_noinstru(RAW_KEYWORD(volatile) int32_t* lock)
{
return (FastInterlockExchange (&*lock, 0) < 0);
}
inline
-static void leave_spin_lock_noinstru (RAW_KEYWORD(volatile) LONG* lock)
+static void leave_spin_lock_noinstru (RAW_KEYWORD(volatile) int32_t* lock)
{
- VolatileStore<LONG>((LONG*)lock, -1);
+ VolatileStore<int32_t>((int32_t*)lock, -1);
}
#ifdef _DEBUG
@@ -4896,7 +4896,7 @@ public:
if (GCGetCurrentProcessorNumber)
return proc_no_to_heap_no[GCGetCurrentProcessorNumber() % gc_heap::n_heaps];
- unsigned sniff_index = FastInterlockIncrement((LONG *)&cur_sniff_index);
+ unsigned sniff_index = FastInterlockIncrement((int32_t *)&cur_sniff_index);
sniff_index %= n_sniff_buffers;
int best_heap = 0;
@@ -6643,7 +6643,7 @@ void gc_heap::mark_array_set_marked (uint8_t* add)
size_t index = mark_word_of (add);
uint32_t val = (1 << mark_bit_bit_of (add));
#ifdef MULTIPLE_HEAPS
- InterlockedOr ((LONG*)&(mark_array [index]), val);
+ InterlockedOr ((int32_t*)&(mark_array [index]), val);
#else
mark_array [index] |= val;
#endif
@@ -9151,7 +9151,7 @@ void gc_heap::update_card_table_bundle()
uint8_t* base_address = (uint8_t*)(&card_table[card_word (card_of (lowest_address))]);
uint8_t* saved_base_address = base_address;
ULONG_PTR bcount = array_size;
- ULONG granularity = 0;
+ uint32_t granularity = 0;
uint8_t* high_address = (uint8_t*)(&card_table[card_word (card_of (highest_address))]);
size_t saved_region_size = align_on_page (high_address) - saved_base_address;
@@ -10000,7 +10000,7 @@ int gc_heap::loh_state_index = 0;
gc_heap::loh_state_info gc_heap::last_loh_states[max_saved_loh_states];
#endif //RECORD_LOH_STATE
-VOLATILE(LONG) gc_heap::gc_done_event_lock;
+VOLATILE(int32_t) gc_heap::gc_done_event_lock;
VOLATILE(bool) gc_heap::gc_done_event_set;
CLREvent gc_heap::gc_done_event;
#endif //!MULTIPLE_HEAPS
@@ -12888,7 +12888,7 @@ int gc_heap::try_allocate_more_space (alloc_context* acontext, size_t size,
if (etw_allocation_running_amount[etw_allocation_index] > etw_allocation_tick)
{
#ifdef FEATURE_REDHAWK
- FireEtwGCAllocationTick_V1((ULONG)etw_allocation_running_amount[etw_allocation_index],
+ FireEtwGCAllocationTick_V1((uint32_t)etw_allocation_running_amount[etw_allocation_index],
((gen_number == 0) ? ETW::GCLog::ETW_GC_INFO::AllocationSmall : ETW::GCLog::ETW_GC_INFO::AllocationLarge),
GetClrInstanceId());
#else
@@ -24541,11 +24541,11 @@ void gc_heap::compact_phase (int condemned_gen_number,
//
// Also, any exceptions that escape out of the GC thread are fatal. Thus, once
// we do our unhandled exception processing, we shall failfast.
-inline LONG GCUnhandledExceptionFilter(EXCEPTION_POINTERS* pExceptionPointers, void* pv)
+inline int32_t GCUnhandledExceptionFilter(EXCEPTION_POINTERS* pExceptionPointers, void* pv)
{
WRAPPER_NO_CONTRACT;
- LONG result = CLRVectoredExceptionHandler(pExceptionPointers);
+ int32_t result = CLRVectoredExceptionHandler(pExceptionPointers);
if (result == EXCEPTION_CONTINUE_EXECUTION)
{
// Since VEH has asked to continue execution, lets do that...
@@ -32062,7 +32062,7 @@ size_t GCHeap::totalSurvivedSize = 0;
#ifdef FEATURE_PREMORTEM_FINALIZATION
CFinalize* GCHeap::m_Finalize = 0;
BOOL GCHeap::GcCollectClasses = FALSE;
-VOLATILE(LONG) GCHeap::m_GCFLock = 0;
+VOLATILE(int32_t) GCHeap::m_GCFLock = 0;
#ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
#ifdef STRESS_HEAP
@@ -33709,13 +33709,13 @@ void GCHeap::Relocate (Object** ppObject, ScanContext* sc,
void StressHeapDummy ();
-static LONG GCStressStartCount = -1;
-static LONG GCStressCurCount = 0;
-static LONG GCStressStartAtJit = -1;
+static int32_t GCStressStartCount = -1;
+static int32_t GCStressCurCount = 0;
+static int32_t GCStressStartAtJit = -1;
// the maximum number of foreground GCs we'll induce during one BGC
// (this number does not include "naturally" occuring GCs).
-static LONG GCStressMaxFGCsPerBGC = -1;
+static int32_t GCStressMaxFGCsPerBGC = -1;
// CLRRandom implementation can produce FPU exceptions if
// the test/application run by CLR is enabling any FPU exceptions.
@@ -33823,7 +33823,7 @@ BOOL GCHeap::StressHeap(alloc_context * acontext)
{
#ifndef MULTIPLE_HEAPS
- static LONG OneAtATime = -1;
+ static int32_t OneAtATime = -1;
if (acontext == 0)
acontext = generation_alloc_context (pGenGCHeap->generation_of(0));
@@ -33837,7 +33837,7 @@ BOOL GCHeap::StressHeap(alloc_context * acontext)
// at a time. A secondary advantage is that we release part of our StressObjs
// buffer sparingly but just as effectively.
- if (FastInterlockIncrement((LONG *) &OneAtATime) == 0 &&
+ if (FastInterlockIncrement((int32_t *) &OneAtATime) == 0 &&
!TrackAllocations()) // Messing with object sizes can confuse the profiler (see ICorProfilerInfo::GetObjectSize)
{
StringObject* str;
@@ -33899,7 +33899,7 @@ BOOL GCHeap::StressHeap(alloc_context * acontext)
}
}
}
- FastInterlockDecrement((LONG *) &OneAtATime);
+ FastInterlockDecrement((int32_t *) &OneAtATime);
#endif // !MULTIPLE_HEAPS
if (IsConcurrentGCEnabled())
{
@@ -34668,8 +34668,8 @@ void gc_heap::do_pre_gc()
#ifdef STRESS_LOG
STRESS_LOG_GC_START(VolatileLoad(&settings.gc_index),
- (ULONG)settings.condemned_generation,
- (ULONG)settings.reason);
+ (uint32_t)settings.condemned_generation,
+ (uint32_t)settings.reason);
#endif // STRESS_LOG
#ifdef MULTIPLE_HEAPS
@@ -34887,8 +34887,8 @@ void gc_heap::do_post_gc()
#ifdef STRESS_LOG
STRESS_LOG_GC_END(VolatileLoad(&settings.gc_index),
- (ULONG)settings.condemned_generation,
- (ULONG)settings.reason);
+ (uint32_t)settings.condemned_generation,
+ (uint32_t)settings.reason);
#endif // STRESS_LOG
#ifdef GC_CONFIG_DRIVEN
@@ -36504,7 +36504,7 @@ void TouchPages(LPVOID pStart, uint32_t cb)
{
char a;
a = VolatileLoad(p);
- //printf("Touching page %lxh\n", (ULONG)p);
+ //printf("Touching page %lxh\n", (uint32_t)p);
p += pagesize;
}
}
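The spin-lock hunks above all follow the convention documented in gcpriv.h:
lock == -1 means free, lock == 0 means held. A condensed sketch of that
protocol using GCC/Clang builtins (hypothetical helper names; the real
enter_spin_lock_noinstru adds spinning, thread yields, and debug asserts):

#include <cstdint>

static bool spin_lock_try_enter(int32_t volatile *lock)
{
    // Atomically write 0 ("held"); the acquire succeeded only if the
    // previous value was negative ("free"), as in try_enter_spin_lock_noinstru.
    return __atomic_exchange_n(lock, 0, __ATOMIC_ACQUIRE) < 0;
}

static void spin_lock_leave(int32_t volatile *lock)
{
    // Storing -1 releases the lock, mirroring
    // VolatileStore<int32_t>((int32_t*)lock, -1) in the diff.
    __atomic_store_n(lock, -1, __ATOMIC_RELEASE);
}

static void spin_lock_enter(int32_t volatile *lock)
{
    while (!spin_lock_try_enter(lock))
    {
        // The real code spins with pauses and eventually sleeps;
        // a bare busy-wait suffices for the sketch.
    }
}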
diff --git a/src/gc/gc.h b/src/gc/gc.h
index 48f3cd6624..dafa7f88fe 100644
--- a/src/gc/gc.h
+++ b/src/gc/gc.h
@@ -660,13 +660,13 @@ public:
#endif //VERIFY_HEAP
};
-extern VOLATILE(LONG) m_GCLock;
+extern VOLATILE(int32_t) m_GCLock;
// Go through and touch (read) each page straddled by a memory block.
void TouchPages(LPVOID pStart, uint32_t cb);
// For low memory notification from host
-extern LONG g_bLowMemoryFromHost;
+extern int32_t g_bLowMemoryFromHost;
#ifdef WRITE_BARRIER_CHECK
void updateGCShadow(Object** ptr, Object* val);
diff --git a/src/gc/gccommon.cpp b/src/gc/gccommon.cpp
index dcfa6318fb..c414eedfb0 100644
--- a/src/gc/gccommon.cpp
+++ b/src/gc/gccommon.cpp
@@ -43,7 +43,7 @@ uint8_t* g_GCShadowEnd;
uint8_t* g_shadow_lowest_address = NULL;
#endif
-VOLATILE(LONG) m_GCLock = -1;
+VOLATILE(int32_t) m_GCLock = -1;
#ifdef GC_CONFIG_DRIVEN
void record_global_mechanism (int mech_index)
@@ -52,7 +52,7 @@ void record_global_mechanism (int mech_index)
}
#endif //GC_CONFIG_DRIVEN
-LONG g_bLowMemoryFromHost = 0;
+int32_t g_bLowMemoryFromHost = 0;
#ifdef WRITE_BARRIER_CHECK
diff --git a/src/gc/gcee.cpp b/src/gc/gcee.cpp
index d8821eb366..138ec6102e 100644
--- a/src/gc/gcee.cpp
+++ b/src/gc/gcee.cpp
@@ -84,8 +84,8 @@ void GCHeap::UpdatePreGCCounters()
#ifdef FEATURE_EVENT_TRACE
ETW::GCLog::ETW_GC_INFO Info;
- Info.GCStart.Count = (ULONG)pSettings->gc_index;
- Info.GCStart.Depth = (ULONG)pSettings->condemned_generation;
+ Info.GCStart.Count = (uint32_t)pSettings->gc_index;
+ Info.GCStart.Depth = (uint32_t)pSettings->condemned_generation;
Info.GCStart.Reason = (ETW::GCLog::ETW_GC_INFO::GC_REASON)((int)(pSettings->reason));
Info.GCStart.Type = ETW::GCLog::ETW_GC_INFO::GC_NGC;
@@ -119,7 +119,7 @@ void GCHeap::UpdatePostGCCounters()
int condemned_gen = pSettings->condemned_generation;
Info.GCEnd.Depth = condemned_gen;
- Info.GCEnd.Count = (ULONG)pSettings->gc_index;
+ Info.GCEnd.Count = (uint32_t)pSettings->gc_index;
ETW::GCLog::FireGcEndAndGenerationRanges(Info.GCEnd.Count, Info.GCEnd.Depth);
int xGen;
@@ -334,9 +334,9 @@ void GCHeap::UpdatePostGCCounters()
g_TotalTimeSinceLastGCEnd = _currentPerfCounterTimer;
- HeapInfo.HeapStats.PinnedObjectCount = (ULONG)(GetPerfCounters().m_GC.cPinnedObj);
- HeapInfo.HeapStats.SinkBlockCount = (ULONG)(GetPerfCounters().m_GC.cSinkBlocks);
- HeapInfo.HeapStats.GCHandleCount = (ULONG)(GetPerfCounters().m_GC.cHandles);
+ HeapInfo.HeapStats.PinnedObjectCount = (uint32_t)(GetPerfCounters().m_GC.cPinnedObj);
+ HeapInfo.HeapStats.SinkBlockCount = (uint32_t)(GetPerfCounters().m_GC.cSinkBlocks);
+ HeapInfo.HeapStats.GCHandleCount = (uint32_t)(GetPerfCounters().m_GC.cHandles);
#endif //ENABLE_PERF_COUNTERS
FireEtwGCHeapStats_V1(HeapInfo.HeapStats.GenInfo[0].GenerationSize, HeapInfo.HeapStats.GenInfo[0].TotalPromotedSize,
@@ -634,7 +634,7 @@ void gc_heap::fire_etw_allocation_event (size_t allocation_amount, int gen_numbe
InlineSString<MAX_CLASSNAME_LENGTH> strTypeName;
th.GetName(strTypeName);
- FireEtwGCAllocationTick_V3((ULONG)allocation_amount,
+ FireEtwGCAllocationTick_V3((uint32_t)allocation_amount,
((gen_number == 0) ? ETW::GCLog::ETW_GC_INFO::AllocationSmall : ETW::GCLog::ETW_GC_INFO::AllocationLarge),
GetClrInstanceId(),
allocation_amount,
diff --git a/src/gc/gcimpl.h b/src/gc/gcimpl.h
index 101565761c..05c55137e7 100644
--- a/src/gc/gcimpl.h
+++ b/src/gc/gcimpl.h
@@ -233,7 +233,7 @@ public: // FIX
// Lock for finalization
PER_HEAP_ISOLATED
- VOLATILE(LONG) m_GCFLock;
+ VOLATILE(int32_t) m_GCFLock;
PER_HEAP_ISOLATED BOOL GcCollectClasses;
PER_HEAP_ISOLATED
diff --git a/src/gc/gcpriv.h b/src/gc/gcpriv.h
index fc690b150d..5f6122daf6 100644
--- a/src/gc/gcpriv.h
+++ b/src/gc/gcpriv.h
@@ -400,7 +400,7 @@ void GCLog (const char *fmt, ... );
#ifdef _DEBUG
struct GCDebugSpinLock {
- VOLATILE(LONG) lock; // -1 if free, 0 if held
+ VOLATILE(int32_t) lock; // -1 if free, 0 if held
VOLATILE(Thread *) holding_thread; // -1 if no thread holds the lock.
VOLATILE(BOOL) released_by_gc_p; // a GC thread released the lock.
@@ -414,7 +414,7 @@ typedef GCDebugSpinLock GCSpinLock;
#elif defined (SYNCHRONIZATION_STATS)
struct GCSpinLockInstru {
- VOLATILE(LONG) lock;
+ VOLATILE(int32_t) lock;
// number of times we went into SwitchToThread in enter_spin_lock.
unsigned int num_switch_thread;
// number of times we went into WaitLonger.
@@ -443,7 +443,7 @@ typedef GCSpinLockInstru GCSpinLock;
#else
struct GCDebugSpinLock {
- VOLATILE(LONG) lock; // -1 if free, 0 if held
+ VOLATILE(int32_t) lock; // -1 if free, 0 if held
GCDebugSpinLock()
: lock(-1)
@@ -2830,7 +2830,7 @@ public:
#endif // MULTIPLE_HEAPS
PER_HEAP
- VOLATILE(LONG) gc_done_event_lock;
+ VOLATILE(int32_t) gc_done_event_lock;
PER_HEAP
VOLATILE(bool) gc_done_event_set;
@@ -3767,7 +3767,7 @@ private:
PTR_PTR_Object m_EndArray;
size_t m_PromotedCount;
- VOLATILE(LONG) lock;
+ VOLATILE(int32_t) lock;
#ifdef _DEBUG
uint32_t lockowner_threadid;
#endif // _DEBUG
diff --git a/src/gc/gcscan.cpp b/src/gc/gcscan.cpp
index c4a29f28c8..d6a36089e4 100644
--- a/src/gc/gcscan.cpp
+++ b/src/gc/gcscan.cpp
@@ -22,17 +22,17 @@
//#define CATCH_GC //catches exception during GC
#ifdef DACCESS_COMPILE
-SVAL_IMPL_INIT(LONG, CNameSpace, m_GcStructuresInvalidCnt, 1);
+SVAL_IMPL_INIT(int32_t, CNameSpace, m_GcStructuresInvalidCnt, 1);
#else //DACCESS_COMPILE
-VOLATILE(LONG) CNameSpace::m_GcStructuresInvalidCnt = 1;
+VOLATILE(int32_t) CNameSpace::m_GcStructuresInvalidCnt = 1;
#endif //DACCESS_COMPILE
BOOL CNameSpace::GetGcRuntimeStructuresValid ()
{
LIMITED_METHOD_CONTRACT;
SUPPORTS_DAC;
- _ASSERTE ((LONG)m_GcStructuresInvalidCnt >= 0);
- return (LONG)m_GcStructuresInvalidCnt == 0;
+ _ASSERTE ((int32_t)m_GcStructuresInvalidCnt >= 0);
+ return (int32_t)m_GcStructuresInvalidCnt == 0;
}
#ifdef DACCESS_COMPILE
@@ -291,13 +291,13 @@ void CNameSpace::GcRuntimeStructuresValid (BOOL bValid)
WRAPPER_NO_CONTRACT;
if (!bValid)
{
- LONG result;
+ int32_t result;
result = FastInterlockIncrement (&m_GcStructuresInvalidCnt);
_ASSERTE (result > 0);
}
else
{
- LONG result;
+ int32_t result;
result = FastInterlockDecrement (&m_GcStructuresInvalidCnt);
_ASSERTE (result >= 0);
}
diff --git a/src/gc/gcscan.h b/src/gc/gcscan.h
index 9b198b0782..64280c3fac 100644
--- a/src/gc/gcscan.h
+++ b/src/gc/gcscan.h
@@ -110,9 +110,9 @@ class CNameSpace
private:
#ifdef DACCESS_COMPILE
- SVAL_DECL(LONG, m_GcStructuresInvalidCnt);
+ SVAL_DECL(int32_t, m_GcStructuresInvalidCnt);
#else
- static VOLATILE(LONG) m_GcStructuresInvalidCnt;
+ static VOLATILE(int32_t) m_GcStructuresInvalidCnt;
#endif //DACCESS_COMPILE
};
diff --git a/src/gc/handletable.cpp b/src/gc/handletable.cpp
index 03a6b19531..c5b425854d 100644
--- a/src/gc/handletable.cpp
+++ b/src/gc/handletable.cpp
@@ -1184,8 +1184,8 @@ uint32_t HndCountHandles(HHANDLETABLE hTable)
for (; pCache != pCacheEnd; ++pCache)
{
// get relevant indexes for the reserve bank and the free bank
- LONG lFreeIndex = pCache->lFreeIndex;
- LONG lReserveIndex = pCache->lReserveIndex;
+ int32_t lFreeIndex = pCache->lFreeIndex;
+ int32_t lReserveIndex = pCache->lReserveIndex;
// clamp the min free index and min reserve index to be non-negative;
// this is necessary since interlocked operations can set these variables
diff --git a/src/gc/handletablecache.cpp b/src/gc/handletablecache.cpp
index 359d3939e3..2008dc817b 100644
--- a/src/gc/handletablecache.cpp
+++ b/src/gc/handletablecache.cpp
@@ -340,8 +340,8 @@ void SyncTransferCacheHandles(OBJECTHANDLE *pDst, OBJECTHANDLE *pSrc, uint32_t u
void TableFullRebalanceCache(HandleTable *pTable,
HandleTypeCache *pCache,
uint32_t uType,
- LONG lMinReserveIndex,
- LONG lMinFreeIndex,
+ int32_t lMinReserveIndex,
+ int32_t lMinFreeIndex,
OBJECTHANDLE *pExtraOutHandle,
OBJECTHANDLE extraInHandle)
{
@@ -483,7 +483,7 @@ void TableFullRebalanceCache(HandleTable *pTable,
}
// compute the index to start serving handles from
- lMinReserveIndex = (LONG)uHandleCount;
+ lMinReserveIndex = (int32_t)uHandleCount;
// update the read index for the reserve bank
// NOTE: we use an interlocked exchange here to guarantee relative store order on MP
@@ -504,8 +504,8 @@ void TableFullRebalanceCache(HandleTable *pTable,
void TableQuickRebalanceCache(HandleTable *pTable,
HandleTypeCache *pCache,
uint32_t uType,
- LONG lMinReserveIndex,
- LONG lMinFreeIndex,
+ int32_t lMinReserveIndex,
+ int32_t lMinFreeIndex,
OBJECTHANDLE *pExtraOutHandle,
OBJECTHANDLE extraInHandle)
{
@@ -630,13 +630,13 @@ OBJECTHANDLE TableCacheMissOnAlloc(HandleTable *pTable, HandleTypeCache *pCache,
CrstHolder ch(&pTable->Lock);
// try again to take a handle (somebody else may have rebalanced)
- LONG lReserveIndex = FastInterlockDecrement(&pCache->lReserveIndex);
+ int32_t lReserveIndex = FastInterlockDecrement(&pCache->lReserveIndex);
// are we still waiting for handles?
if (lReserveIndex < 0)
{
// yup, suspend free list usage...
- LONG lFreeIndex = FastInterlockExchange(&pCache->lFreeIndex, 0L);
+ int32_t lFreeIndex = FastInterlockExchange(&pCache->lFreeIndex, 0L);
// ...and rebalance the cache...
TableQuickRebalanceCache(pTable, pCache, uType, lReserveIndex, lFreeIndex, &handle, NULL);
@@ -680,13 +680,13 @@ void TableCacheMissOnFree(HandleTable *pTable, HandleTypeCache *pCache, uint32_t
CrstHolder ch(&pTable->Lock);
// try again to take a slot (somebody else may have rebalanced)
- LONG lFreeIndex = FastInterlockDecrement(&pCache->lFreeIndex);
+ int32_t lFreeIndex = FastInterlockDecrement(&pCache->lFreeIndex);
// are we still waiting for free slots?
if (lFreeIndex < 0)
{
// yup, suspend reserve list usage...
- LONG lReserveIndex = FastInterlockExchange(&pCache->lReserveIndex, 0L);
+ int32_t lReserveIndex = FastInterlockExchange(&pCache->lReserveIndex, 0L);
// ...and rebalance the cache...
TableQuickRebalanceCache(pTable, pCache, uType, lReserveIndex, lFreeIndex, NULL, handle);
@@ -729,7 +729,7 @@ OBJECTHANDLE TableAllocSingleHandleFromCache(HandleTable *pTable, uint32_t uType
HandleTypeCache *pCache = pTable->rgMainCache + uType;
// try to take a handle from the main cache
- LONG lReserveIndex = FastInterlockDecrement(&pCache->lReserveIndex);
+ int32_t lReserveIndex = FastInterlockDecrement(&pCache->lReserveIndex);
// did we underflow?
if (lReserveIndex < 0)
@@ -798,7 +798,7 @@ void TableFreeSingleHandleToCache(HandleTable *pTable, uint32_t uType, OBJECTHAN
HandleTypeCache *pCache = pTable->rgMainCache + uType;
// try to take a free slot from the main cache
- LONG lFreeIndex = FastInterlockDecrement(&pCache->lFreeIndex);
+ int32_t lFreeIndex = FastInterlockDecrement(&pCache->lFreeIndex);
// did we underflow?
if (lFreeIndex < 0)
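The pattern these handletablecache.cpp hunks adjust is a lock-free fast path:
a thread claims a handle (or a free slot) by atomically decrementing a bank
index, and a negative result signals underflow, at which point the caller
takes pTable->Lock and rebalances. A simplified sketch of the allocation side
(hypothetical names, modeled on TableAllocSingleHandleFromCache):

#include <cstdint>

struct HandleTypeCacheSketch
{
    int32_t volatile lReserveIndex;  // next available slot in the reserve bank
    int32_t volatile lFreeIndex;     // next empty slot in the free bank
};

static bool try_take_from_reserve(HandleTypeCacheSketch *pCache, int32_t *pSlot)
{
    // Decrement first, inspect the result after: many threads may race here,
    // and each non-negative result names a distinct reserved slot.
    int32_t lReserveIndex = __atomic_sub_fetch(&pCache->lReserveIndex, 1,
                                               __ATOMIC_SEQ_CST);
    if (lReserveIndex < 0)
        return false;   // underflow: fall back to the TableCacheMissOnAlloc path
    *pSlot = lReserveIndex;
    return true;
}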
diff --git a/src/gc/handletablepriv.h b/src/gc/handletablepriv.h
index c9ea8bddbf..2740c5bd7d 100644
--- a/src/gc/handletablepriv.h
+++ b/src/gc/handletablepriv.h
@@ -323,7 +323,7 @@ struct HandleTypeCache
/*
* index of next available handle slot in the reserve bank
*/
- LONG lReserveIndex;
+ int32_t lReserveIndex;
/*---------------------------------------------------------------------------------
@@ -339,7 +339,7 @@ struct HandleTypeCache
/*
* index of next empty slot in the free bank
*/
- LONG lFreeIndex;
+ int32_t lFreeIndex;
};
diff --git a/src/gc/objecthandle.cpp b/src/gc/objecthandle.cpp
index 6929fdf86e..a37c888ae6 100644
--- a/src/gc/objecthandle.cpp
+++ b/src/gc/objecthandle.cpp
@@ -1540,7 +1540,7 @@ void Ref_CheckAlive(uint32_t condemned, uint32_t maxgen, LPARAM lp1)
TraceVariableHandles(CheckPromoted, lp1, 0, VHT_WEAK_SHORT, condemned, maxgen, flags);
}
-static VOLATILE(LONG) uCount = 0;
+static VOLATILE(int32_t) uCount = 0;
// NOTE: Please: if you update this function, update the very similar profiling function immediately below!!!
void Ref_UpdatePointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn)