Diffstat (limited to 'src')
-rw-r--r--  src/gc/CMakeLists.txt          50
-rw-r--r--  src/gc/env/gcenv.base.h       337
-rw-r--r--  src/gc/env/gcenv.unix.cpp      21
-rw-r--r--  src/gc/env/gcenv.windows.cpp   25
-rw-r--r--  src/gc/gc.cpp                 185
-rw-r--r--  src/gc/gc.h                     4
-rw-r--r--  src/gc/gcee.cpp                25
-rw-r--r--  src/gc/gcpriv.h                 4
-rw-r--r--  src/gc/gcscan.cpp               2
-rw-r--r--  src/gc/gcscan.h                 2
-rw-r--r--  src/gc/handletable.cpp         15
-rw-r--r--  src/gc/handletablecore.cpp      4
-rw-r--r--  src/gc/handletablescan.cpp     17
-rw-r--r--  src/gc/objecthandle.cpp       154
-rw-r--r--  src/gc/objecthandle.h           3
-rw-r--r--  src/gc/sample/CMakeLists.txt   33
16 files changed, 537 insertions(+), 344 deletions(-)
diff --git a/src/gc/CMakeLists.txt b/src/gc/CMakeLists.txt
index 990d8e611b..71dd46fee8 100644
--- a/src/gc/CMakeLists.txt
+++ b/src/gc/CMakeLists.txt
@@ -1,49 +1 @@
-project(clrgc)
-
-set(CMAKE_INCLUDE_CURRENT_DIR ON)
-
-include_directories(env)
-
-set(SOURCES
- gccommon.cpp
- gceewks.cpp
- gcscan.cpp
- gcwks.cpp
- handletable.cpp
- handletablecache.cpp
- handletablecore.cpp
- handletablescan.cpp
- objecthandle.cpp
-)
-
-if(WIN32)
- list(APPEND SOURCES
- env/gcenv.windows.cpp)
-else()
- list(APPEND SOURCES
- env/gcenv.unix.cpp)
-endif()
-
-if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
- add_definitions(-D_TARGET_AMD64_=1)
- add_definitions(-D_WIN64=1)
-elseif(CLR_CMAKE_PLATFORM_ARCH_I386)
- add_definitions(-D_TARGET_X86_=1)
- add_definitions(-D_WIN32=1)
-elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
- add_definitions(-D_TARGET_ARM_=1)
- add_definitions(-D_WIN32=1)
-elseif(CLR_CMAKE_PLATFORM_ARCH_ARM64)
- add_definitions(-D_TARGET_ARM64_=1)
- add_definitions(-D_WIN64=1)
-else()
- clr_unknown_arch()
-endif()
-
-add_compile_options(-Wno-format)
-add_compile_options(-Wno-unused-variable)
-add_compile_options(-Wno-unused-private-field)
-add_compile_options(-Wno-tautological-undefined-compare)
-
-add_library(clrgc STATIC ${SOURCES})
-
+add_subdirectory(sample)
diff --git a/src/gc/env/gcenv.base.h b/src/gc/env/gcenv.base.h
index 88821b14b5..72b74bd14d 100644
--- a/src/gc/env/gcenv.base.h
+++ b/src/gc/env/gcenv.base.h
@@ -2,9 +2,10 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
-
+#ifndef __GCENV_BASE_INCLUDED__
+#define __GCENV_BASE_INCLUDED__
//
-// Setups standalone environment for CLR GC
+// Sets up basic environment for CLR GC
//
#define FEATURE_REDHAWK 1
@@ -12,13 +13,16 @@
#define GCENV_INCLUDED
+#define REDHAWK_PALIMPORT extern "C"
+#define REDHAWK_PALAPI __stdcall
+
+
#ifndef _MSC_VER
#define __stdcall
#define __forceinline inline
#endif
#ifndef _INC_WINDOWS
-
// -----------------------------------------------------------------------------------------------------------
//
// Aliases for Win32 types
@@ -58,7 +62,12 @@ typedef union _LARGE_INTEGER {
// -----------------------------------------------------------------------------------------------------------
// HRESULT subset.
+#ifdef WIN32
+// this must exactly match the typedef used by windows.h
+typedef long HRESULT;
+#else
typedef int32_t HRESULT;
+#endif
#define SUCCEEDED(_hr) ((HRESULT)(_hr) >= 0)
#define FAILED(_hr) ((HRESULT)(_hr) < 0)
@@ -141,18 +150,6 @@ typedef struct _RTL_CRITICAL_SECTION {
#endif
-typedef struct _MEMORYSTATUSEX {
- uint32_t dwLength;
- uint32_t dwMemoryLoad;
- uint64_t ullTotalPhys;
- uint64_t ullAvailPhys;
- uint64_t ullTotalPageFile;
- uint64_t ullAvailPageFile;
- uint64_t ullTotalVirtual;
- uint64_t ullAvailVirtual;
- uint64_t ullAvailExtendedVirtual;
-} MEMORYSTATUSEX, *LPMEMORYSTATUSEX;
-
#define WINBASEAPI extern "C"
#define WINAPI __stdcall
@@ -264,24 +261,47 @@ WINAPI
FlushFileBuffers(
HANDLE hFile);
-#ifdef _MSC_VER
-
-extern "C" VOID
-_mm_pause (
- VOID
- );
-
-extern "C" VOID
-_mm_mfence (
- VOID
- );
-
-#pragma intrinsic(_mm_pause)
-#pragma intrinsic(_mm_mfence)
-
-#define YieldProcessor _mm_pause
-#define MemoryBarrier _mm_mfence
-
+#if defined(_MSC_VER)
+ #if defined(_ARM_)
+
+ __forceinline void YieldProcessor() { }
+ extern "C" void __emit(const unsigned __int32 opcode);
+ #pragma intrinsic(__emit)
+ #define MemoryBarrier() { __emit(0xF3BF); __emit(0x8F5F); }
+
+ #elif defined(_AMD64_)
+
+ extern "C" VOID
+ _mm_pause (
+ VOID
+ );
+
+ extern "C" VOID
+ _mm_mfence (
+ VOID
+ );
+
+ #pragma intrinsic(_mm_pause)
+ #pragma intrinsic(_mm_mfence)
+
+ #define YieldProcessor _mm_pause
+ #define MemoryBarrier _mm_mfence
+
+ #elif defined(_X86_)
+
+ #define YieldProcessor() __asm { rep nop }
+
+ __forceinline void MemoryBarrier()
+ {
+ int32_t Barrier;
+ __asm {
+ xchg Barrier, eax
+ }
+ }
+
+ #else // !_ARM_ && !_AMD64_ && !_X86_
+ #error Unsupported architecture
+ #endif
#else // _MSC_VER
WINBASEAPI
@@ -296,6 +316,12 @@ MemoryBarrier();
#endif // _MSC_VER
+typedef struct _GUID {
+ unsigned long Data1;
+ unsigned short Data2;
+ unsigned short Data3;
+ unsigned char Data4[8];
+} GUID;
#endif // _INC_WINDOWS
// -----------------------------------------------------------------------------------------------------------
@@ -354,7 +380,9 @@ MemoryBarrier();
//
// Data access macros
//
-
+#ifdef DACCESS_COMPILE
+#include "daccess.h"
+#else // DACCESS_COMPILE
typedef uintptr_t TADDR;
#define PTR_TO_TADDR(ptr) ((TADDR)(ptr))
@@ -395,11 +423,12 @@ typedef uintptr_t TADDR;
#define GARY_IMPL(type, var, size) \
type var[size]
+struct _DacGlobals;
+#endif // DACCESS_COMPILE
+
typedef DPTR(size_t) PTR_size_t;
typedef DPTR(uint8_t) PTR_uint8_t;
-struct _DacGlobals;
-
// -----------------------------------------------------------------------------------------------------------
#define DATA_ALIGNMENT sizeof(uintptr_t)
@@ -436,7 +465,7 @@ inline T FastInterlockExchangePointer(
T volatile * target,
T value)
{
- return (T)_FastInterlockExchangePointer((void **)target, value);
+ return (T)((TADDR)_FastInterlockExchangePointer((void **)target, value));
}
template <typename T>
@@ -444,7 +473,7 @@ inline T FastInterlockExchangePointer(
T volatile * target,
nullptr_t value)
{
- return (T)_FastInterlockExchangePointer((void **)target, value);
+ return (T)((TADDR)_FastInterlockExchangePointer((void **)target, value));
}
template <typename T>
@@ -453,7 +482,7 @@ inline T FastInterlockCompareExchangePointer(
T exchange,
T comparand)
{
- return (T)_FastInterlockCompareExchangePointer((void **)destination, exchange, comparand);
+ return (T)((TADDR)_FastInterlockCompareExchangePointer((void **)destination, exchange, comparand));
}
template <typename T>
@@ -462,7 +491,7 @@ inline T FastInterlockCompareExchangePointer(
T exchange,
nullptr_t comparand)
{
- return (T)_FastInterlockCompareExchangePointer((void **)destination, exchange, comparand);
+ return (T)((TADDR)_FastInterlockCompareExchangePointer((void **)destination, exchange, comparand));
}
@@ -501,7 +530,7 @@ typedef TADDR OBJECTHANDLE;
#define ObjectToOBJECTREF(_obj) (OBJECTREF)(_obj)
#define OBJECTREFToObject(_obj) (Object*)(_obj)
-#define VALIDATEOBJECTREF(_objref)
+#define VALIDATEOBJECTREF(_objref) _objref;
#define VOLATILE(T) T volatile
@@ -548,9 +577,23 @@ struct GCSystemInfo
extern GCSystemInfo g_SystemInfo;
void InitializeSystemInfo();
+// An 'abstract' definition of Windows MEMORYSTATUSEX. In practice, the only difference is the missing struct size
+// field and one field that Windows documents to always be 0. If additional information is available on other OSes,
+// this information should be surfaced through this structure as additional fields that the GC may optionally depend on.
+struct GCMemoryStatus
+{
+ uint32_t dwMemoryLoad;
+ uint64_t ullTotalPhys;
+ uint64_t ullAvailPhys;
+ uint64_t ullTotalPageFile;
+ uint64_t ullAvailPageFile;
+ uint64_t ullTotalVirtual;
+ uint64_t ullAvailVirtual;
+};
+
void
GetProcessMemoryLoad(
- LPMEMORYSTATUSEX lpBuffer);
+ GCMemoryStatus* lpBuffer);
extern MethodTable * g_pFreeObjectMethodTable;
@@ -653,9 +696,9 @@ public:
static bool RefCountedHandleCallbacks(Object * pObject);
// Sync block cache management
- static void SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2) { }
- static void SyncBlockCacheDemote(int max_gen) { }
- static void SyncBlockCachePromotionsGranted(int max_gen) { }
+ static void SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2);
+ static void SyncBlockCacheDemote(int max_gen);
+ static void SyncBlockCachePromotionsGranted(int max_gen);
// Thread functions
static bool IsPreemptiveGCDisabled(Thread * pThread);
@@ -683,10 +726,11 @@ public:
static bool WatchDog();
static void SignalFinalizationDone(bool fFinalizer);
static void SetFinalizerThread(Thread * pThread);
+ static HANDLE GetFinalizerEvent();
};
typedef uint32_t (__stdcall *BackgroundCallback)(void* pCallbackContext);
-bool PalStartBackgroundGCThread(BackgroundCallback callback, void* pCallbackContext);
+REDHAWK_PALIMPORT bool REDHAWK_PALAPI PalStartBackgroundGCThread(BackgroundCallback callback, void* pCallbackContext);
void DestroyThread(Thread * pThread);
@@ -711,91 +755,74 @@ void UnsafeDeleteCriticalSection(CRITICAL_SECTION *lpCriticalSection);
#define COUNTER_ONLY(x)
-#include "etmdummy.h"
-
-#define ETW_EVENT_ENABLED(e,f) false
+//#include "etmdummy.h"
+//#define ETW_EVENT_ENABLED(e,f) false
namespace ETW
{
- class GCLog
- {
- public:
- struct ETW_GC_INFO
- {
- typedef enum _GC_ROOT_KIND {
- GC_ROOT_STACK = 0,
- GC_ROOT_FQ = 1,
- GC_ROOT_HANDLES = 2,
- GC_ROOT_OLDER = 3,
- GC_ROOT_SIZEDREF = 4,
- GC_ROOT_OVERFLOW = 5
- } GC_ROOT_KIND;
- };
- };
+ typedef enum _GC_ROOT_KIND {
+ GC_ROOT_STACK = 0,
+ GC_ROOT_FQ = 1,
+ GC_ROOT_HANDLES = 2,
+ GC_ROOT_OLDER = 3,
+ GC_ROOT_SIZEDREF = 4,
+ GC_ROOT_OVERFLOW = 5
+ } GC_ROOT_KIND;
};
//
// Logging
//
-#define LOG(x)
+#ifdef _MSC_VER
+#define SUPPRESS_WARNING_4127 \
+ __pragma(warning(push)) \
+ __pragma(warning(disable:4127)) /* conditional expression is constant*/
+#define POP_WARNING_STATE \
+ __pragma(warning(pop))
+#else // _MSC_VER
+#define SUPPRESS_WARNING_4127
+#define POP_WARNING_STATE
+#endif // _MSC_VER
-inline VOID LogSpewAlways(const char *fmt, ...)
-{
-}
+#define WHILE_0 \
+ SUPPRESS_WARNING_4127 \
+ while(0) \
+ POP_WARNING_STATE \
+
+#define LOG(x)
-#define LL_INFO10 0
-
-#define STRESS_LOG_VA(msg) do { } while(0)
-#define STRESS_LOG0(facility, level, msg) do { } while(0)
-#define STRESS_LOG1(facility, level, msg, data1) do { } while(0)
-#define STRESS_LOG2(facility, level, msg, data1, data2) do { } while(0)
-#define STRESS_LOG3(facility, level, msg, data1, data2, data3) do { } while(0)
-#define STRESS_LOG4(facility, level, msg, data1, data2, data3, data4) do { } while(0)
-#define STRESS_LOG5(facility, level, msg, data1, data2, data3, data4, data5) do { } while(0)
-#define STRESS_LOG6(facility, level, msg, data1, data2, data3, data4, data5, data6) do { } while(0)
-#define STRESS_LOG7(facility, level, msg, data1, data2, data3, data4, data5, data6, data7) do { } while(0)
-#define STRESS_LOG_PLUG_MOVE(plug_start, plug_end, plug_delta) do { } while(0)
-#define STRESS_LOG_ROOT_PROMOTE(root_addr, objPtr, methodTable) do { } while(0)
-#define STRESS_LOG_ROOT_RELOCATE(root_addr, old_value, new_value, methodTable) do { } while(0)
-#define STRESS_LOG_GC_START(gcCount, Gen, collectClasses) do { } while(0)
-#define STRESS_LOG_GC_END(gcCount, Gen, collectClasses) do { } while(0)
+VOID LogSpewAlways(const char *fmt, ...);
+
+#define LL_INFO10 4
+
+#define STRESS_LOG_VA(msg) do { } WHILE_0
+#define STRESS_LOG0(facility, level, msg) do { } WHILE_0
+#define STRESS_LOG1(facility, level, msg, data1) do { } WHILE_0
+#define STRESS_LOG2(facility, level, msg, data1, data2) do { } WHILE_0
+#define STRESS_LOG3(facility, level, msg, data1, data2, data3) do { } WHILE_0
+#define STRESS_LOG4(facility, level, msg, data1, data2, data3, data4) do { } WHILE_0
+#define STRESS_LOG5(facility, level, msg, data1, data2, data3, data4, data5) do { } WHILE_0
+#define STRESS_LOG6(facility, level, msg, data1, data2, data3, data4, data5, data6) do { } WHILE_0
+#define STRESS_LOG7(facility, level, msg, data1, data2, data3, data4, data5, data6, data7) do { } WHILE_0
+#define STRESS_LOG_PLUG_MOVE(plug_start, plug_end, plug_delta) do { } WHILE_0
+#define STRESS_LOG_ROOT_PROMOTE(root_addr, objPtr, methodTable) do { } WHILE_0
+#define STRESS_LOG_ROOT_RELOCATE(root_addr, old_value, new_value, methodTable) do { } WHILE_0
+#define STRESS_LOG_GC_START(gcCount, Gen, collectClasses) do { } WHILE_0
+#define STRESS_LOG_GC_END(gcCount, Gen, collectClasses) do { } WHILE_0
#define STRESS_LOG_OOM_STACK(size) do { } while(0)
#define STRESS_LOG_RESERVE_MEM(numChunks) do {} while (0)
#define STRESS_LOG_GC_STACK
-typedef void* MUTEX_COOKIE;
+typedef void* CLR_MUTEX_ATTRIBUTES;
+typedef void* CLR_MUTEX_COOKIE;
-inline MUTEX_COOKIE ClrCreateMutex(LPSECURITY_ATTRIBUTES lpMutexAttributes, BOOL bInitialOwner, LPCWSTR lpName)
-{
- _ASSERTE(!"ClrCreateMutex");
- return NULL;
-}
-
-inline void ClrCloseMutex(MUTEX_COOKIE mutex)
-{
- _ASSERTE(!"ClrCloseMutex");
-}
-
-inline BOOL ClrReleaseMutex(MUTEX_COOKIE mutex)
-{
- _ASSERTE(!"ClrReleaseMutex");
- return true;
-}
-
-inline DWORD ClrWaitForMutex(MUTEX_COOKIE mutex, DWORD dwMilliseconds, BOOL bAlertable)
-{
- _ASSERTE(!"ClrWaitForMutex");
- return WAIT_OBJECT_0;
-}
-
-inline
-HANDLE
-PalCreateFileW(LPCWSTR pFileName, DWORD desiredAccess, DWORD shareMode, void* pSecurityAttributes, DWORD creationDisposition, DWORD flagsAndAttributes, HANDLE hTemplateFile)
-{
- return INVALID_HANDLE_VALUE;
-}
+CLR_MUTEX_COOKIE ClrCreateMutex(CLR_MUTEX_ATTRIBUTES lpMutexAttributes, bool bInitialOwner, LPCWSTR lpName);
+void ClrCloseMutex(CLR_MUTEX_COOKIE mutex);
+bool ClrReleaseMutex(CLR_MUTEX_COOKIE mutex);
+uint32_t ClrWaitForMutex(CLR_MUTEX_COOKIE mutex, uint32_t dwMilliseconds, bool bAlertable);
+REDHAWK_PALIMPORT HANDLE REDHAWK_PALAPI PalCreateFileW(_In_z_ LPCWSTR pFileName, uint32_t desiredAccess, uint32_t shareMode, _In_opt_ void* pSecurityAttributes, uint32_t creationDisposition, uint32_t flagsAndAttributes, HANDLE hTemplateFile);
#define DEFAULT_GC_PRN_LVL 3
@@ -808,15 +835,10 @@ enum PalCapability
GetCurrentProcessorNumberCapability = 0x00000004, // GetCurrentProcessorNumber()
};
-bool PalHasCapability(PalCapability capability);
+REDHAWK_PALIMPORT bool REDHAWK_PALAPI PalHasCapability(PalCapability capability);
-inline void StompWriteBarrierEphemeral()
-{
-}
-
-inline void StompWriteBarrierResize(BOOL bReqUpperBoundsCheck)
-{
-}
+void StompWriteBarrierEphemeral();
+void StompWriteBarrierResize(bool bReqUpperBoundsCheck);
class CLRConfig
{
@@ -839,72 +861,8 @@ public:
typedef CLRConfigTypes ConfigDWORDInfo;
typedef CLRConfigTypes ConfigStringInfo;
- static DWORD GetConfigValue(ConfigDWORDInfo eType)
- {
- switch (eType)
- {
- case UNSUPPORTED_BGCSpinCount:
- return 140;
-
- case UNSUPPORTED_BGCSpin:
- return 2;
-
- case UNSUPPORTED_GCLogEnabled:
- case UNSUPPORTED_GCLogFile:
- case UNSUPPORTED_GCLogFileSize:
- case EXTERNAL_GCStressStart:
- case INTERNAL_GCStressStartAtJit:
- case INTERNAL_DbgDACSkipVerifyDlls:
- return 0;
-
- case Config_COUNT:
- default:
-#ifdef _MSC_VER
-#pragma warning(suppress:4127) // Constant conditional expression in ASSERT below
-#endif
- ASSERT(!"Unknown config value type");
- return 0;
- }
- }
-
- static HRESULT GetConfigValue(ConfigStringInfo eType, PWSTR * outVal)
- {
- *outVal = NULL;
- return 0;
- }
-};
-
-template <typename TYPE>
-class NewHolder
-{
- TYPE * m_value;
- bool m_fSuppressRelease;
-
-public:
- NewHolder(TYPE * value)
- : m_value(value), m_fSuppressRelease(false)
- {
- }
-
- FORCEINLINE operator TYPE *() const
- {
- return this->m_value;
- }
- FORCEINLINE const TYPE * &operator->() const
- {
- return this->m_value;
- }
-
- void SuppressRelease()
- {
- m_fSuppressRelease = true;
- }
-
- ~NewHolder()
- {
- if (!m_fSuppressRelease)
- delete m_value;
- }
+ static uint32_t GetConfigValue(ConfigDWORDInfo eType);
+ static HRESULT GetConfigValue(ConfigStringInfo /*eType*/, wchar_t * * outVal);
};
inline bool FitsInU1(uint64_t val)
@@ -943,7 +901,7 @@ class SystemDomain
{
public:
static SystemDomain *System() { return NULL; }
- static AppDomain *GetAppDomainAtIndex(ADIndex index) { return (AppDomain *)-1; }
+ static AppDomain *GetAppDomainAtIndex(ADIndex /*index*/) { return (AppDomain *)-1; }
static AppDomain *AppDomainBeingUnloaded() { return NULL; }
AppDomain *DefaultDomain() { return NULL; }
DWORD GetTotalNumSizedRefHandles() { return 0; }
@@ -975,3 +933,4 @@ public:
};
#endif // STRESS_HEAP
+#endif // __GCENV_BASE_INCLUDED__
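The WHILE_0 helper introduced above exists so the empty STRESS_LOG macros can keep the standard do { } while(0) single-statement shape without tripping MSVC's C4127 ("conditional expression is constant") at high warning levels. A minimal stand-alone sketch of the same pattern (the LOG_PAIR macro is illustrative, not part of this change):

    #include <cstdio>

    #ifdef _MSC_VER
    #define SUPPRESS_WARNING_4127 __pragma(warning(push)) __pragma(warning(disable:4127))
    #define POP_WARNING_STATE     __pragma(warning(pop))
    #else
    #define SUPPRESS_WARNING_4127
    #define POP_WARNING_STATE
    #endif

    #define WHILE_0 SUPPRESS_WARNING_4127 while (0) POP_WARNING_STATE

    // do { ... } WHILE_0 expands to a single statement, so the macro composes
    // safely with an unbraced if/else; the pragmas silence C4127 on MSVC only.
    #define LOG_PAIR(a, b) do { printf("%d %d\n", (a), (b)); } WHILE_0

    int main()
    {
        int n = 2;
        if (n > 1)
            LOG_PAIR(1, 2);
        else
            LOG_PAIR(3, 4);
        return 0;
    }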
diff --git a/src/gc/env/gcenv.unix.cpp b/src/gc/env/gcenv.unix.cpp
index 82cfe752fd..c9186d5d43 100644
--- a/src/gc/env/gcenv.unix.cpp
+++ b/src/gc/env/gcenv.unix.cpp
@@ -82,7 +82,7 @@ void UnsafeDeleteCriticalSection(CRITICAL_SECTION *lpCriticalSection)
}
-void GetProcessMemoryLoad(LPMEMORYSTATUSEX pMSEX)
+void GetProcessMemoryLoad(GCMemoryStatus* pGCMemStatus)
{
CONTRACTL
{
@@ -91,28 +91,27 @@ void GetProcessMemoryLoad(LPMEMORYSTATUSEX pMSEX)
}
CONTRACTL_END;
- pMSEX->dwMemoryLoad = 0;
- pMSEX->ullTotalPageFile = 0;
- pMSEX->ullAvailPageFile = 0;
- pMSEX->ullAvailExtendedVirtual = 0;
+ pGCMemStatus->dwMemoryLoad = 0;
+ pGCMemStatus->ullTotalPageFile = 0;
+ pGCMemStatus->ullAvailPageFile = 0;
// There is no API to get the total virtual address space size on
// Unix, so we use a constant value representing 128TB, which is
// the approximate size of total user virtual address space on
// the currently supported Unix systems.
static const uint64_t _128TB = (1ull << 47);
- pMSEX->ullTotalVirtual = _128TB;
- pMSEX->ullAvailVirtual = _128TB;
+ pGCMemStatus->ullTotalVirtual = _128TB;
+ pGCMemStatus->ullAvailVirtual = _128TB;
// TODO: Implement
- pMSEX->ullTotalPhys = _128TB;
- pMSEX->ullAvailPhys = _128TB;
+ pGCMemStatus->ullTotalPhys = _128TB;
+ pGCMemStatus->ullAvailPhys = _128TB;
// If the machine has more RAM than virtual address limit, let us cap it.
// Our GC can never use more than virtual address limit.
- if (pMSEX->ullAvailPhys > pMSEX->ullTotalVirtual)
+ if (pGCMemStatus->ullAvailPhys > pGCMemStatus->ullTotalVirtual)
{
- pMSEX->ullAvailPhys = pMSEX->ullAvailVirtual;
+ pGCMemStatus->ullAvailPhys = pGCMemStatus->ullAvailVirtual;
}
}
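For reference, the _128TB constant above is exactly 2^47 bytes: 2^47 = 2^7 * 2^40 = 128 TiB, the approximate user-mode address space on the supported 64-bit Unix systems. A self-contained check of that arithmetic (illustrative only):

    #include <cstdint>

    int main()
    {
        constexpr uint64_t _128TB = (1ull << 47);   // as in gcenv.unix.cpp
        static_assert(_128TB == 128ull * 1024 * 1024 * 1024 * 1024,
                      "2^47 bytes is exactly 128 TiB");
        return 0;
    }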
diff --git a/src/gc/env/gcenv.windows.cpp b/src/gc/env/gcenv.windows.cpp
index e9fb38eef2..0882165126 100644
--- a/src/gc/env/gcenv.windows.cpp
+++ b/src/gc/env/gcenv.windows.cpp
@@ -51,12 +51,12 @@ void * _FastInterlockCompareExchangePointer(void * volatile *Destination, void *
void FastInterlockOr(uint32_t volatile *p, uint32_t msk)
{
- InterlockedOr((int32_t *)p, msk);
+ InterlockedOr((int32_t volatile *)p, msk);
}
void FastInterlockAnd(uint32_t volatile *p, uint32_t msk)
{
- InterlockedAnd((int32_t *)p, msk);
+ InterlockedAnd((int32_t volatile *)p, msk);
}
@@ -81,7 +81,7 @@ void UnsafeDeleteCriticalSection(CRITICAL_SECTION *lpCriticalSection)
}
-void GetProcessMemoryLoad(LPMEMORYSTATUSEX pMSEX)
+void GetProcessMemoryLoad(GCMemoryStatus* pGCMemStatus)
{
CONTRACTL
{
@@ -90,16 +90,27 @@ void GetProcessMemoryLoad(LPMEMORYSTATUSEX pMSEX)
}
CONTRACTL_END;
- pMSEX->dwLength = sizeof(MEMORYSTATUSEX);
- BOOL fRet = GlobalMemoryStatusEx(pMSEX);
+ MEMORYSTATUSEX memStatus;
+
+ memStatus.dwLength = sizeof(MEMORYSTATUSEX);
+ BOOL fRet = GlobalMemoryStatusEx(&memStatus);
_ASSERTE (fRet);
// If the machine has more RAM than virtual address limit, let us cap it.
// Our GC can never use more than virtual address limit.
- if (pMSEX->ullAvailPhys > pMSEX->ullTotalVirtual)
+ if (memStatus.ullAvailPhys > memStatus.ullTotalVirtual)
{
- pMSEX->ullAvailPhys = pMSEX->ullAvailVirtual;
+ memStatus.ullAvailPhys = memStatus.ullAvailVirtual;
}
+
+ // Convert Windows struct to abstract struct
+ pGCMemStatus->dwMemoryLoad = memStatus.dwMemoryLoad ;
+ pGCMemStatus->ullTotalPhys = memStatus.ullTotalPhys ;
+ pGCMemStatus->ullAvailPhys = memStatus.ullAvailPhys ;
+ pGCMemStatus->ullTotalPageFile = memStatus.ullTotalPageFile ;
+ pGCMemStatus->ullAvailPageFile = memStatus.ullAvailPageFile ;
+ pGCMemStatus->ullTotalVirtual = memStatus.ullTotalVirtual ;
+ pGCMemStatus->ullAvailVirtual = memStatus.ullAvailVirtual ;
}
void CLREventStatic::CreateManualEvent(bool bInitialState)
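With both the Unix and Windows ports now filling the same abstract struct, GC code can be written once against GCMemoryStatus. A reduced sketch of a consumer (the stub values and the IsLowMemory helper are hypothetical; the 95% threshold mirrors the check in gc.cpp's wait_for_bgc_high_memory):

    #include <cstdint>

    // Abstract memory status, as introduced in gcenv.base.h.
    struct GCMemoryStatus
    {
        uint32_t dwMemoryLoad;
        uint64_t ullTotalPhys;
        uint64_t ullAvailPhys;
        uint64_t ullTotalPageFile;
        uint64_t ullAvailPageFile;
        uint64_t ullTotalVirtual;
        uint64_t ullAvailVirtual;
    };

    // Stand-in for the per-OS implementations in gcenv.{unix,windows}.cpp.
    static void GetProcessMemoryLoad(GCMemoryStatus* lpBuffer)
    {
        *lpBuffer = GCMemoryStatus{};
        lpBuffer->dwMemoryLoad    = 42;            // fake: 42% of RAM in use
        lpBuffer->ullTotalPhys    = 16ull << 30;   // fake: 16 GiB
        lpBuffer->ullAvailPhys    = 8ull << 30;
        lpBuffer->ullTotalVirtual = 1ull << 47;
        lpBuffer->ullAvailVirtual = 1ull << 47;
    }

    // Hypothetical consumer, written once for both OSes.
    bool IsLowMemory()
    {
        GCMemoryStatus ms;
        GetProcessMemoryLoad(&ms);
        return ms.dwMemoryLoad >= 95;
    }

    int main() { return IsLowMemory() ? 1 : 0; }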
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index 13f4e4dcb8..c341acd226 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -373,7 +373,7 @@ size_t gc_log_file_size = 0;
size_t gc_buffer_index = 0;
size_t max_gc_buffers = 0;
-static MUTEX_COOKIE gc_log_lock = 0;
+static CLR_MUTEX_COOKIE gc_log_lock = 0;
// we keep this much in a buffer and only flush when the buffer is full
#define gc_log_buffer_size (1024*1024)
@@ -1902,6 +1902,8 @@ BOOL same_large_alignment_p (uint8_t* p1, uint8_t* p2)
#ifdef RESPECT_LARGE_ALIGNMENT
return ((((size_t)p1 ^ (size_t)p2) & 7) == 0);
#else
+ UNREFERENCED_PARAMETER(p1);
+ UNREFERENCED_PARAMETER(p2);
return TRUE;
#endif //RESPECT_LARGE_ALIGNMENT
}
@@ -2320,9 +2322,9 @@ CLREvent gc_heap::background_gc_done_event;
CLREvent gc_heap::ee_proceed_event;
-BOOL gc_heap::gc_can_use_concurrent = FALSE;
+bool gc_heap::gc_can_use_concurrent = false;
-BOOL gc_heap::temp_disable_concurrent_p = FALSE;
+bool gc_heap::temp_disable_concurrent_p = false;
uint32_t gc_heap::cm_in_progress = FALSE;
@@ -2720,6 +2722,8 @@ void gen_to_condemn_tuning::print (int heap_num)
}
dprintf (DT_LOG_0, ("[%2d]%s", heap_num, str_reasons_condition));
+#else
+ UNREFERENCED_PARAMETER(heap_num);
#endif //DT_LOG
}
@@ -2734,6 +2738,9 @@ void gc_generation_data::print (int heap_num, int gen_num)
free_list_space_after, free_obj_space_after,
in, pinned_surv, npinned_surv,
new_allocation));
+#else
+ UNREFERENCED_PARAMETER(heap_num);
+ UNREFERENCED_PARAMETER(gen_num);
#endif //SIMPLE_DPRINTF && DT_LOG
}
@@ -3419,6 +3426,7 @@ void seg_mapping_table_add_ro_segment (heap_segment* seg)
void seg_mapping_table_remove_ro_segment (heap_segment* seg)
{
+ UNREFERENCED_PARAMETER(seg);
#if 0
// POSSIBLE PERF TODO: right now we are not doing anything because we can't simply remove the flag. If it proves
// to be a perf problem, we can search in the current ro segs and see if any lands in this range and only
@@ -3466,6 +3474,8 @@ void gc_heap::seg_mapping_table_add_segment (heap_segment* seg, gc_heap* hp)
end_entry->h0 = hp;
assert (begin_entry->h1 == 0);
begin_entry->h1 = hp;
+#else
+ UNREFERENCED_PARAMETER(hp);
#endif //MULTIPLE_HEAPS
end_entry->boundary = (uint8_t*)seg_end;
@@ -3691,6 +3701,8 @@ public:
void Validate(BOOL bDeep=TRUE, BOOL bVerifyNextHeader = TRUE)
{
+ UNREFERENCED_PARAMETER(bVerifyNextHeader);
+
if (this == NULL)
return;
@@ -3714,11 +3726,16 @@ public:
void ValidatePromote(ScanContext *sc, uint32_t flags)
{
+ UNREFERENCED_PARAMETER(sc);
+ UNREFERENCED_PARAMETER(flags);
+
Validate();
}
void ValidateHeap(Object *from, BOOL bDeep)
{
+ UNREFERENCED_PARAMETER(from);
+
Validate(bDeep, FALSE);
}
@@ -5297,6 +5314,8 @@ void* virtual_alloc_commit_for_heap(void* addr, size_t size, uint32_t type,
return ret;
}
}
+#else
+ UNREFERENCED_PARAMETER(h_number);
#endif
//numa aware not enabled, or call failed --> fallback to VirtualAlloc()
@@ -5674,6 +5693,8 @@ void gc_heap::fix_youngest_allocation_area (BOOL for_gc_p)
void gc_heap::fix_large_allocation_area (BOOL for_gc_p)
{
+ UNREFERENCED_PARAMETER(for_gc_p);
+
#ifdef _DEBUG
alloc_context* acontext =
#endif // DEBUG
@@ -5855,11 +5876,13 @@ void gc_heap::reset_allocation_pointers (generation* gen, uint8_t* start)
void
gc_heap::disallow_new_allocation (int gen_number)
{
+ UNREFERENCED_PARAMETER(gen_number);
settings.allocations_allowed = FALSE;
}
void
gc_heap::allow_new_allocation (int gen_number)
{
+ UNREFERENCED_PARAMETER(gen_number);
settings.allocations_allowed = TRUE;
}
@@ -6099,6 +6122,8 @@ void gc_heap::set_allocator_next_pin (generation* gen)
// After we set the info, we increase tos.
void gc_heap::set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, uint8_t* alloc_pointer, uint8_t*& alloc_limit)
{
+ UNREFERENCED_PARAMETER(last_pinned_plug);
+
mark& m = mark_stack_array[mark_stack_tos];
assert (m.first == last_pinned_plug);
@@ -6110,6 +6135,8 @@ void gc_heap::set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, uint8
// After we set the info, we increase tos.
void gc_heap::set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, generation* gen)
{
+ UNREFERENCED_PARAMETER(last_pinned_plug);
+
mark& m = mark_stack_array[mark_stack_tos];
assert (m.first == last_pinned_plug);
@@ -6927,7 +6954,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
{
//modify the highest address so the span covered
//is twice the previous one.
- MEMORYSTATUSEX st;
+ GCMemoryStatus st;
GetProcessMemoryLoad (&st);
uint8_t* top = (uint8_t*)0 + Align ((size_t)(st.ullTotalVirtual));
// On non-Windows systems, we get only an approximate ullTotalVirtual
@@ -7195,6 +7222,7 @@ void gc_heap::copy_brick_card_range (uint8_t* la, uint32_t* old_card_table,
uint32_t* old_ct = &old_card_table[card_word (card_of (la))];
#ifdef MARK_ARRAY
#ifdef BACKGROUND_GC
+ UNREFERENCED_PARAMETER(seg);
if (recursive_gc_sync::background_running_p())
{
uint32_t* old_mark_array = card_table_mark_array (old_ct);
@@ -9478,6 +9506,7 @@ HANDLE CreateLogFile(const CLRConfig::ConfigStringInfo & info, BOOL is_config)
CLRConfig::GetConfigValue(info, &temp_logfile_name);
#ifdef FEATURE_REDHAWK
+ UNREFERENCED_PARAMETER(is_config);
return PalCreateFileW(
temp_logfile_name,
GENERIC_WRITE,
@@ -9610,12 +9639,12 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
#ifdef BACKGROUND_GC
if (can_use_write_watch () && g_pConfig->GetGCconcurrent()!=0)
{
- gc_can_use_concurrent = TRUE;
+ gc_can_use_concurrent = true;
mem_reserve = MEM_WRITE_WATCH | MEM_RESERVE;
}
else
{
- gc_can_use_concurrent = FALSE;
+ gc_can_use_concurrent = false;
}
#endif //BACKGROUND_GC
#endif //WRITE_WATCH
@@ -10625,6 +10654,7 @@ int gc_heap::grow_heap_segment (heap_segment* seg, uint8_t* allocated, uint8_t*
void gc_heap::adjust_limit (uint8_t* start, size_t limit_size, generation* gen,
int gennum)
{
+ UNREFERENCED_PARAMETER(gennum);
dprintf (3, ("gc Expanding segment allocation"));
heap_segment* seg = generation_allocation_segment (gen);
if ((generation_allocation_limit (gen) != start) || (start != heap_segment_plan_allocated (seg)))
@@ -11197,6 +11227,8 @@ size_t gc_heap::limit_from_size (size_t size, size_t room, int gen_number,
void gc_heap::handle_oom (int heap_num, oom_reason reason, size_t alloc_size,
uint8_t* allocated, uint8_t* reserved)
{
+ UNREFERENCED_PARAMETER(heap_num);
+
if (reason == oom_budget)
{
alloc_size = dd_min_gc_size (dynamic_data_of (0)) / 2;
@@ -11474,6 +11506,7 @@ BOOL gc_heap::short_on_end_of_seg (int gen_number,
heap_segment* seg,
int align_const)
{
+ UNREFERENCED_PARAMETER(gen_number);
uint8_t* allocated = heap_segment_allocated(seg);
return (!a_size_fit_p (end_space_after_gc(),
@@ -11904,7 +11937,7 @@ void gc_heap::wait_for_bgc_high_memory (alloc_wait_reason awr)
{
if (recursive_gc_sync::background_running_p())
{
- MEMORYSTATUSEX ms;
+ GCMemoryStatus ms;
memset (&ms, 0, sizeof(ms));
GetProcessMemoryLoad(&ms);
if (ms.dwMemoryLoad >= 95)
@@ -12317,6 +12350,9 @@ BOOL gc_heap::loh_get_new_seg (generation* gen,
BOOL* did_full_compact_gc,
oom_reason* oom_r)
{
+ UNREFERENCED_PARAMETER(gen);
+ UNREFERENCED_PARAMETER(align_const);
+
*did_full_compact_gc = FALSE;
size_t seg_size = get_large_seg_size (size);
@@ -13282,6 +13318,8 @@ void gc_heap::print_free_and_plug (const char* msg)
}
}
}
+#else
+ UNREFERENCED_PARAMETER(msg);
#endif //FREE_USAGE_STATS && SIMPLE_DPRINTF
}
@@ -13303,6 +13341,9 @@ void gc_heap::add_gen_plug (int gen_number, size_t plug_size)
}
(gen->gen_plugs[i])++;
+#else
+ UNREFERENCED_PARAMETER(gen_number);
+ UNREFERENCED_PARAMETER(plug_size);
#endif //FREE_USAGE_STATS
}
@@ -13328,6 +13369,9 @@ void gc_heap::add_item_to_current_pinned_free (int gen_number, size_t free_size)
free_size, (i + 10), gen_number,
generation_pinned_free_obj_space (gen),
gen->gen_current_pinned_free_spaces[i]));
+#else
+ UNREFERENCED_PARAMETER(gen_number);
+ UNREFERENCED_PARAMETER(free_size);
#endif //FREE_USAGE_STATS
}
@@ -13349,6 +13393,9 @@ void gc_heap::add_gen_free (int gen_number, size_t free_size)
}
(gen->gen_free_spaces[i])++;
+#else
+ UNREFERENCED_PARAMETER(gen_number);
+ UNREFERENCED_PARAMETER(free_size);
#endif //FREE_USAGE_STATS
}
@@ -13370,6 +13417,9 @@ void gc_heap::remove_gen_free (int gen_number, size_t free_size)
}
(gen->gen_free_spaces[i])--;
+#else
+ UNREFERENCED_PARAMETER(gen_number);
+ UNREFERENCED_PARAMETER(free_size);
#endif //FREE_USAGE_STATS
}
@@ -13629,6 +13679,7 @@ uint8_t* gc_heap::allocate_in_expanded_heap (generation* gen,
int active_new_gen_number
REQD_ALIGN_AND_OFFSET_DCL)
{
+ UNREFERENCED_PARAMETER(active_new_gen_number);
dprintf (3, ("aie: P: %Ix, size: %Ix", old_loc, size));
size = Align (size);
@@ -14319,7 +14370,7 @@ int gc_heap::generation_to_condemn (int n_initial,
}
int i = 0;
int temp_gen = 0;
- MEMORYSTATUSEX ms;
+ GCMemoryStatus ms;
memset (&ms, 0, sizeof(ms));
BOOL low_memory_detected = g_low_memory_status;
BOOL check_memory = FALSE;
@@ -14923,11 +14974,14 @@ void gc_heap::concurrent_print_time_delta (const char* msg)
time_bgc_last = current_time;
dprintf (2, ("h%d: %s T %Id ms", heap_number, msg, elapsed_time));
+#else
+ UNREFERENCED_PARAMETER(msg);
#endif //TRACE_GC
}
void gc_heap::free_list_info (int gen_num, const char* msg)
{
+ UNREFERENCED_PARAMETER(gen_num);
#if defined (BACKGROUND_GC) && defined (TRACE_GC)
dprintf (3, ("h%d: %s", heap_number, msg));
for (int i = 0; i <= (max_generation + 1); i++)
@@ -14948,6 +15002,8 @@ void gc_heap::free_list_info (int gen_num, const char* msg)
generation_free_obj_space (gen)));
}
}
+#else
+ UNREFERENCED_PARAMETER(msg);
#endif // BACKGROUND_GC && TRACE_GC
}
@@ -16737,6 +16793,7 @@ gc_heap* gc_heap::heap_of (uint8_t* o)
return (seg ? heap_segment_heap (seg) : g_heaps [0]);
#endif //SEG_MAPPING_TABLE
#else //MULTIPLE_HEAPS
+ UNREFERENCED_PARAMETER(o);
return __this;
#endif //MULTIPLE_HEAPS
}
@@ -16756,6 +16813,7 @@ gc_heap* gc_heap::heap_of_gc (uint8_t* o)
return (seg ? heap_segment_heap (seg) : g_heaps [0]);
#endif //SEG_MAPPING_TABLE
#else //MULTIPLE_HEAPS
+ UNREFERENCED_PARAMETER(o);
return __this;
#endif //MULTIPLE_HEAPS
}
@@ -17210,6 +17268,8 @@ void gc_heap::enque_pinned_plug (uint8_t* plug,
void gc_heap::save_post_plug_info (uint8_t* last_pinned_plug, uint8_t* last_object_in_last_plug, uint8_t* post_plug)
{
+ UNREFERENCED_PARAMETER(last_pinned_plug);
+
mark& m = mark_stack_array[mark_stack_tos - 1];
assert (last_pinned_plug == m.first);
m.saved_post_plug_info_start = (uint8_t*)&(((plug_and_gap*)post_plug)[-1]);
@@ -17854,7 +17914,7 @@ gc_heap::ha_mark_object_simple (uint8_t** po THREAD_NUMBER_DCL)
{
size_t new_size = 2*internal_root_array_length;
- MEMORYSTATUSEX statex;
+ GCMemoryStatus statex;
GetProcessMemoryLoad(&statex);
if (new_size > (size_t)(statex.ullAvailPhys / 10))
{
@@ -18208,6 +18268,8 @@ uint8_t* gc_heap::background_mark_object (uint8_t* o THREAD_NUMBER_DCL)
void gc_heap::background_verify_mark (Object*& object, ScanContext* sc, uint32_t flags)
{
+ UNREFERENCED_PARAMETER(sc);
+
assert (settings.concurrent);
uint8_t* o = (uint8_t*)object;
@@ -18785,6 +18847,7 @@ inline
void gc_heap::mark_through_object (uint8_t* oo, BOOL mark_class_object_p THREAD_NUMBER_DCL)
{
#ifndef COLLECTIBLE_CLASS
+ UNREFERENCED_PARAMETER(mark_class_object_p);
BOOL to_mark_class_object = FALSE;
#else //COLLECTIBLE_CLASS
BOOL to_mark_class_object = (mark_class_object_p && (is_collectible(oo)));
@@ -18881,7 +18944,7 @@ recheck:
size_t current_promoted_bytes = promoted_bytes (heap_number);
if (current_promoted_bytes != last_promoted_bytes)
- fire_mark_event (heap_number, ETW::GCLog::ETW_GC_INFO::GC_ROOT_OVERFLOW, (current_promoted_bytes - last_promoted_bytes));
+ fire_mark_event (heap_number, ETW::GC_ROOT_OVERFLOW, (current_promoted_bytes - last_promoted_bytes));
return overflow_p;
}
@@ -19098,6 +19161,8 @@ void gc_heap::scan_dependent_handles (int condemned_gen_number, ScanContext *sc,
// threads synchronized.
void gc_heap::scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p)
{
+ UNREFERENCED_PARAMETER(initial_scan_p);
+
// Whenever we call this method there may have been preceding object promotions. So set
// fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set
// based on the how the scanning proceeded).
@@ -19266,7 +19331,7 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
if ((condemned_gen_number == max_generation) && (num_sizedrefs > 0))
{
CNameSpace::GcScanSizedRefs(GCHeap::Promote, condemned_gen_number, max_generation, &sc);
- fire_mark_event (heap_number, ETW::GCLog::ETW_GC_INFO::GC_ROOT_SIZEDREF, (promoted_bytes (heap_number) - last_promoted_bytes));
+ fire_mark_event (heap_number, ETW::GC_ROOT_SIZEDREF, (promoted_bytes (heap_number) - last_promoted_bytes));
last_promoted_bytes = promoted_bytes (heap_number);
#ifdef MULTIPLE_HEAPS
@@ -19285,7 +19350,7 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
condemned_gen_number, max_generation,
&sc);
- fire_mark_event (heap_number, ETW::GCLog::ETW_GC_INFO::GC_ROOT_STACK, (promoted_bytes (heap_number) - last_promoted_bytes));
+ fire_mark_event (heap_number, ETW::GC_ROOT_STACK, (promoted_bytes (heap_number) - last_promoted_bytes));
last_promoted_bytes = promoted_bytes (heap_number);
#ifdef BACKGROUND_GC
@@ -19300,7 +19365,7 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
finalize_queue->GcScanRoots(GCHeap::Promote, heap_number, 0);
#endif // FEATURE_PREMORTEM_FINALIZATION
- fire_mark_event (heap_number, ETW::GCLog::ETW_GC_INFO::GC_ROOT_FQ, (promoted_bytes (heap_number) - last_promoted_bytes));
+ fire_mark_event (heap_number, ETW::GC_ROOT_FQ, (promoted_bytes (heap_number) - last_promoted_bytes));
last_promoted_bytes = promoted_bytes (heap_number);
// MTHTS
@@ -19310,7 +19375,7 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
CNameSpace::GcScanHandles(GCHeap::Promote,
condemned_gen_number, max_generation,
&sc);
- fire_mark_event (heap_number, ETW::GCLog::ETW_GC_INFO::GC_ROOT_HANDLES, (promoted_bytes (heap_number) - last_promoted_bytes));
+ fire_mark_event (heap_number, ETW::GC_ROOT_HANDLES, (promoted_bytes (heap_number) - last_promoted_bytes));
last_promoted_bytes = promoted_bytes (heap_number);
}
@@ -19355,7 +19420,7 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
dprintf (3, ("marked by cards: %Id",
(promoted_bytes (heap_number) - promoted_before_cards)));
- fire_mark_event (heap_number, ETW::GCLog::ETW_GC_INFO::GC_ROOT_OLDER, (promoted_bytes (heap_number) - last_promoted_bytes));
+ fire_mark_event (heap_number, ETW::GC_ROOT_OLDER, (promoted_bytes (heap_number) - last_promoted_bytes));
last_promoted_bytes = promoted_bytes (heap_number);
}
}
@@ -19498,7 +19563,7 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
#endif //MARK_LIST
//decide on promotion
- if (settings.promotion != TRUE)
+ if (!settings.promotion)
{
size_t m = 0;
for (int n = 0; n <= condemned_gen_number;n++)
@@ -19850,6 +19915,8 @@ void clear_node_realigned(uint8_t* node)
{
#ifdef RESPECT_LARGE_ALIGNMENT
((plug_and_reloc*)(node))[-1].reloc &= ~1;
+#else
+ UNREFERENCED_PARAMETER(node);
#endif //RESPECT_LARGE_ALIGNMENT
}
#endif // FEATURE_STRUCTALIGN
@@ -20272,7 +20339,7 @@ void gc_heap::seg_clear_mark_bits (heap_segment* seg)
void gc_heap::sweep_ro_segments (heap_segment* start_seg)
{
-
+ UNREFERENCED_PARAMETER(start_seg);
#if 0
//go through all of the segment in range and reset the mark bit
//TODO works only on small object segments
@@ -20393,6 +20460,8 @@ BOOL gc_heap::loh_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* allo
uint8_t* gc_heap::loh_allocate_in_condemned (uint8_t* old_loc, size_t size)
{
+ UNREFERENCED_PARAMETER(old_loc);
+
generation* gen = large_object_generation;
dprintf (1235, ("E: p:%Ix, l:%Ix, s: %Id",
generation_allocation_pointer (gen),
@@ -20980,6 +21049,8 @@ void gc_heap::store_plug_gap_info (uint8_t* plug_start,
// this is only for verification purpose
size_t last_plug_len)
{
+ UNREFERENCED_PARAMETER(last_plug_len);
+
if (!last_npinned_plug_p && !last_pinned_plug_p)
{
//dprintf (3, ("last full plug end: %Ix, full plug start: %Ix", plug_end, plug_start));
@@ -21047,6 +21118,8 @@ void gc_heap::record_interesting_data_point (interesting_data_point idp)
{
#ifdef GC_CONFIG_DRIVEN
(interesting_data_per_gc[idp])++;
+#else
+ UNREFERENCED_PARAMETER(idp);
#endif //GC_CONFIG_DRIVEN
}
@@ -22575,6 +22648,8 @@ Called after compact phase to fix all generation gaps
void gc_heap::fix_generation_bounds (int condemned_gen_number,
generation* consing_gen)
{
+ UNREFERENCED_PARAMETER(consing_gen);
+
assert (generation_allocation_segment (consing_gen) ==
ephemeral_heap_segment);
@@ -22626,8 +22701,10 @@ void gc_heap::fix_generation_bounds (int condemned_gen_number,
uint8_t* start = generation_allocation_start (youngest_generation);
MAYBE_UNUSED_VAR(start);
if (settings.promotion && !settings.demotion)
+ {
assert ((start + Align (size (start))) ==
heap_segment_plan_allocated(ephemeral_heap_segment));
+ }
heap_segment_allocated(ephemeral_heap_segment)=
heap_segment_plan_allocated(ephemeral_heap_segment);
@@ -23021,6 +23098,8 @@ void gc_heap::clear_unused_array (uint8_t* x, size_t size)
((CObjectHeader*)tmp)->UnsetFree();
}
+#else
+ UNREFERENCED_PARAMETER(size);
#endif
}
@@ -23142,6 +23221,8 @@ gc_heap::check_class_object_demotion (uint8_t* obj)
{
check_class_object_demotion_internal (obj);
}
+#else
+ UNREFERENCED_PARAMETER(obj);
#endif //COLLECTIBLE_CLASS
}
@@ -23432,6 +23513,8 @@ void gc_heap::verify_pins_with_post_plug_info (const char* msg)
dprintf (3, ("%s verified", msg));
}
+#else // _DEBUG && VERIFY_HEAP
+ UNREFERENCED_PARAMETER(msg);
#endif // _DEBUG && VERIFY_HEAP
}
@@ -23763,7 +23846,9 @@ void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args,
BOOL check_last_object_p = (args->is_shortened || has_pre_plug_info_p);
if (!check_last_object_p)
+ {
assert (last_plug_size >= Align (min_obj_size));
+ }
walk_plug (args->last_plug, last_plug_size, check_last_object_p, args, profiling_context);
}
@@ -24348,7 +24433,9 @@ void gc_heap::compact_in_brick (uint8_t* tree, compact_args* args)
BOOL check_last_object_p = (args->is_shortened || has_pre_plug_info_p);
if (!check_last_object_p)
+ {
assert (last_plug_size >= Align (min_obj_size));
+ }
compact_plug (args->last_plug, last_plug_size, check_last_object_p, args);
}
@@ -24597,6 +24684,7 @@ uint32_t __stdcall gc_heap::gc_thread_stub (void* arg)
ClrFlsSetThreadType (ThreadType_GC);
STRESS_LOG_RESERVE_MEM (GC_STRESSLOG_MULTIPLY);
+#ifndef FEATURE_REDHAWK
// We commit the thread's entire stack to ensure we're robust in low memory conditions.
BOOL fSuccess = Thread::CommitThreadStack(NULL);
@@ -24611,7 +24699,7 @@ uint32_t __stdcall gc_heap::gc_thread_stub (void* arg)
EEPOLICY_HANDLE_FATAL_ERROR(COR_E_STACKOVERFLOW);
#endif //BACKGROUND_GC
}
-
+#endif // FEATURE_REDHAWK
#ifndef NO_CATCH_HANDLERS
PAL_TRY
{
@@ -24694,6 +24782,8 @@ uint32_t __stdcall gc_heap::bgc_thread_stub (void* arg)
void gc_heap::background_drain_mark_list (int thread)
{
+ UNREFERENCED_PARAMETER(thread);
+
size_t saved_c_mark_list_index = c_mark_list_index;
if (saved_c_mark_list_index)
@@ -24950,6 +25040,10 @@ void gc_heap::verify_mark_array_cleared (uint8_t* begin, uint8_t* end, uint32_t*
}
markw++;
}
+#else // _DEBUG
+ UNREFERENCED_PARAMETER(begin);
+ UNREFERENCED_PARAMETER(end);
+ UNREFERENCED_PARAMETER(mark_array_addr);
#endif //_DEBUG
}
@@ -24962,6 +25056,8 @@ BOOL gc_heap::commit_mark_array_new_seg (gc_heap* hp,
heap_segment* seg,
uint8_t* new_lowest_address)
{
+ UNREFERENCED_PARAMETER(hp); // compiler bug? -- this *is*, indeed, referenced
+
uint8_t* start = (uint8_t*)seg;
uint8_t* end = heap_segment_reserved (seg);
@@ -25095,6 +25191,8 @@ BOOL gc_heap::commit_mark_array_by_seg (heap_segment* seg, uint32_t* mark_array_
BOOL gc_heap::commit_mark_array_bgc_init (uint32_t* mark_array_addr)
{
+ UNREFERENCED_PARAMETER(mark_array_addr);
+
dprintf (GC_TABLE_LOG, ("BGC init commit: lowest: %Ix, highest: %Ix, mark_array: %Ix",
lowest_address, highest_address, mark_array));
@@ -25888,6 +25986,8 @@ void gc_heap::revisit_written_page (uint8_t* page,
BOOL large_objects_p,
size_t& num_marked_objects)
{
+ UNREFERENCED_PARAMETER(seg);
+
uint8_t* start_address = page;
uint8_t* o = 0;
int align_const = get_alignment_constant (!large_objects_p);
@@ -26484,6 +26584,8 @@ BOOL gc_heap::create_bgc_threads_support (int number_of_heaps)
#ifdef MULTIPLE_HEAPS
bgc_t_join.init (number_of_heaps, join_flavor_bgc);
+#else
+ UNREFERENCED_PARAMETER(number_of_heaps);
#endif //MULTIPLE_HEAPS
ret = TRUE;
@@ -27292,6 +27394,8 @@ uint8_t*
gc_heap::compute_next_boundary (uint8_t* low, int gen_number,
BOOL relocating)
{
+ UNREFERENCED_PARAMETER(low);
+
//when relocating, the fault line is the plan start of the younger
//generation because the generation is promoted.
if (relocating && (gen_number == (settings.condemned_generation + 1)))
@@ -28356,6 +28460,8 @@ BOOL gc_heap::process_free_space (heap_segment* seg,
settings.condemned_generation,
*total_free_space, min_free_size, *largest_free_space, min_cont_size,
(size_t)seg));
+#else
+ UNREFERENCED_PARAMETER(seg);
#endif //SIMPLE_DPRINTF
return TRUE;
}
@@ -28985,6 +29091,7 @@ generation* gc_heap::expand_heap (int condemned_generation,
generation* consing_gen,
heap_segment* new_heap_segment)
{
+ UNREFERENCED_PARAMETER(condemned_generation);
assert (condemned_generation >= (max_generation -1));
unsigned int active_new_gen_number = max_generation; //Set one too high to get generation gap
uint8_t* start_address = generation_limit (max_generation);
@@ -29351,6 +29458,8 @@ static size_t linear_allocation_model (float allocation_fraction, size_t new_all
if (smoothing > collection_count)
smoothing = collection_count;
new_allocation = new_allocation / smoothing + ((previous_desired_allocation / smoothing) * (smoothing-1));
+#else
+ UNREFERENCED_PARAMETER(collection_count);
#endif //0
return new_allocation;
}
@@ -29419,7 +29528,7 @@ size_t gc_heap::desired_new_allocation (dynamic_data* dd,
}
else //large object heap
{
- MEMORYSTATUSEX ms;
+ GCMemoryStatus ms;
GetProcessMemoryLoad (&ms);
uint64_t available_ram = ms.ullAvailPhys;
@@ -29655,7 +29764,7 @@ size_t gc_heap::joined_youngest_desired (size_t new_allocation)
(total_new_allocation > max (youngest_gen_desired_th, total_min_allocation)))
{
uint32_t dwMemoryLoad = 0;
- MEMORYSTATUSEX ms;
+ GCMemoryStatus ms;
GetProcessMemoryLoad(&ms);
dprintf (2, ("Current memory load: %d", ms.dwMemoryLoad));
dwMemoryLoad = ms.dwMemoryLoad;
@@ -31744,7 +31853,6 @@ go_through_refs:
void gc_heap::descr_segment (heap_segment* seg )
{
-
#ifdef TRACE_GC
uint8_t* x = heap_segment_mem (seg);
while (x < heap_segment_allocated (seg))
@@ -31752,7 +31860,9 @@ void gc_heap::descr_segment (heap_segment* seg )
dprintf(2, ( "%Ix: %d ", (size_t)x, size (x)));
x = x + Align(size (x));
}
-#endif //TRACE_GC
+#else // TRACE_GC
+ UNREFERENCED_PARAMETER(seg);
+#endif // TRACE_GC
}
void gc_heap::descr_card_table ()
@@ -31872,6 +31982,8 @@ void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context)
// Note that when logging is on it can take a long time to go through the free items.
void gc_heap::print_free_list (int gen, heap_segment* seg)
{
+ UNREFERENCED_PARAMETER(gen);
+ UNREFERENCED_PARAMETER(seg);
/*
if (settings.concurrent == FALSE)
{
@@ -31912,6 +32024,7 @@ void gc_heap::print_free_list (int gen, heap_segment* seg)
void gc_heap::descr_generations (BOOL begin_gc_p)
{
+ UNREFERENCED_PARAMETER(begin_gc_p);
#ifdef STRESS_LOG
if (StressLog::StressLogOn(LF_GC, LL_INFO10))
{
@@ -32701,6 +32814,7 @@ gc_heap::verify_heap (BOOL begin_gc_p)
#endif //BACKGROUND_GC
#endif //MULTIPLE_HEAPS
+ UNREFERENCED_PARAMETER(begin_gc_p);
#ifdef BACKGROUND_GC
dprintf (2,("[%s]GC#%d(%s): Verifying heap - begin",
(begin_gc_p ? "BEG" : "END"),
@@ -33173,6 +33287,7 @@ void GCHeap::ValidateObjectMember (Object* obj)
void DestructObject (CObjectHeader* hdr)
{
+ UNREFERENCED_PARAMETER(hdr); // compiler bug? -- this *is*, indeed, referenced
hdr->~CObjectHeader();
}
@@ -33303,6 +33418,7 @@ HRESULT GCHeap::Init(size_t hn)
if ((pGenGCHeap = gc_heap::make_gc_heap(this, (int)hn)) == 0)
hres = E_OUTOFMEMORY;
#else
+ UNREFERENCED_PARAMETER(hn);
if (!gc_heap::make_gc_heap())
hres = E_OUTOFMEMORY;
#endif //MULTIPLE_HEAPS
@@ -33340,7 +33456,7 @@ HRESULT GCHeap::Initialize ()
return hr;
#if defined(_WIN64)
- MEMORYSTATUSEX ms;
+ GCMemoryStatus ms;
GetProcessMemoryLoad (&ms);
gc_heap::total_physical_mem = ms.ullTotalPhys;
gc_heap::mem_one_percent = gc_heap::total_physical_mem / 100;
@@ -33601,6 +33717,8 @@ void GCHeap::Promote(Object** ppObject, ScanContext* sc, uint32_t flags)
#ifdef _DEBUG
((CObjectHeader*)o)->ValidatePromote(sc, flags);
+#else
+ UNREFERENCED_PARAMETER(sc);
#endif //_DEBUG
if (flags & GC_CALL_PINNED)
@@ -33637,6 +33755,8 @@ void GCHeap::Promote(Object** ppObject, ScanContext* sc, uint32_t flags)
void GCHeap::Relocate (Object** ppObject, ScanContext* sc,
uint32_t flags)
{
+ UNREFERENCED_PARAMETER(sc);
+
uint8_t* object = (uint8_t*)(Object*)(*ppObject);
THREAD_NUMBER_FROM_CONTEXT;
@@ -35236,6 +35356,8 @@ bool GCHeap::IsThreadUsingAllocationContextHeap(alloc_context* acontext, int thr
return ((acontext->home_heap == GetHeap(thread_number)) ||
((acontext->home_heap == 0) && (thread_number == 0)));
#else
+ UNREFERENCED_PARAMETER(acontext);
+ UNREFERENCED_PARAMETER(thread_number);
return true;
#endif //MULTIPLE_HEAPS
}
@@ -35490,7 +35612,7 @@ size_t GCHeap::GetValidGen0MaxSize(size_t seg_size)
GetLargestOnDieCacheSize(TRUE),
GetLogicalCpuCount()));
- MEMORYSTATUSEX ms;
+ GCMemoryStatus ms;
GetProcessMemoryLoad (&ms);
// if the total min GC across heaps will exceed 1/6th of available memory,
// then reduce the min GC size until it either fits or has been reduced to cache size.
@@ -36192,6 +36314,8 @@ CFinalize::ScanForFinalization (promote_func* pfn, int gen, BOOL mark_only_p,
sc.promotion = TRUE;
#ifdef MULTIPLE_HEAPS
sc.thread_number = hp->heap_number;
+#else
+ UNREFERENCED_PARAMETER(hp);
#endif //MULTIPLE_HEAPS
BOOL finalizedFound = FALSE;
@@ -36302,6 +36426,8 @@ CFinalize::RelocateFinalizationData (int gen, gc_heap* hp)
sc.promotion = FALSE;
#ifdef MULTIPLE_HEAPS
sc.thread_number = hp->heap_number;
+#else
+ UNREFERENCED_PARAMETER(hp);
#endif //MULTIPLE_HEAPS
unsigned int Seg = gen_segment (gen);
@@ -36704,7 +36830,12 @@ void checkGCWriteBarrier()
#ifdef FEATURE_BASICFREEZE
void gc_heap::walk_read_only_segment(heap_segment *seg, void *pvContext, object_callback_func pfnMethodTable, object_callback_func pfnObjRef)
{
-#ifndef DACCESS_COMPILE
+#ifdef DACCESS_COMPILE
+ UNREFERENCED_PARAMETER(seg);
+ UNREFERENCED_PARAMETER(pvContext);
+ UNREFERENCED_PARAMETER(pfnMethodTable);
+ UNREFERENCED_PARAMETER(pfnObjRef);
+#else
uint8_t *o = heap_segment_mem(seg);
// small heap alignment constant
@@ -36754,14 +36885,14 @@ HRESULT GCHeap::WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout)
void GCHeap::TemporaryEnableConcurrentGC()
{
#ifdef BACKGROUND_GC
- gc_heap::temp_disable_concurrent_p = FALSE;
+ gc_heap::temp_disable_concurrent_p = false;
#endif //BACKGROUND_GC
}
void GCHeap::TemporaryDisableConcurrentGC()
{
#ifdef BACKGROUND_GC
- gc_heap::temp_disable_concurrent_p = TRUE;
+ gc_heap::temp_disable_concurrent_p = true;
#endif //BACKGROUND_GC
}
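Most of the gc.cpp churn above is one mechanical pattern: when a parameter is only consumed inside a conditional-compilation branch, the opposite branch gets an UNREFERENCED_PARAMETER so the build stays warning-clean at high warning levels. A reduced sketch of the idiom (the function shape mirrors free_list_info; the portable macro definition is a stand-in for the Windows one):

    #include <cstdio>

    // Portable stand-in for the Windows UNREFERENCED_PARAMETER macro.
    #ifndef UNREFERENCED_PARAMETER
    #define UNREFERENCED_PARAMETER(P) (void)(P)
    #endif

    // gen_num and msg only matter when GC tracing is compiled in, so the
    // other branch must consume them to avoid unused-parameter warnings.
    void free_list_info(int gen_num, const char* msg)
    {
    #if defined(TRACE_GC)
        printf("gen %d: %s\n", gen_num, msg);
    #else
        UNREFERENCED_PARAMETER(gen_num);
        UNREFERENCED_PARAMETER(msg);
    #endif
    }

    int main()
    {
        free_list_info(0, "after sweep");
        return 0;
    }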
diff --git a/src/gc/gc.h b/src/gc/gc.h
index dafa7f88fe..838f3bac7e 100644
--- a/src/gc/gc.h
+++ b/src/gc/gc.h
@@ -210,9 +210,11 @@ struct ScanContext
AppDomain *pCurrentDomain;
#endif //CHECK_APP_DOMAIN_LEAKS || FEATURE_APPDOMAIN_RESOURCE_MONITORING || DACCESS_COMPILE
+#ifndef FEATURE_REDHAWK
#if defined(GC_PROFILING) || defined (DACCESS_COMPILE)
MethodDesc *pMD;
#endif //GC_PROFILING || DACCESS_COMPILE
+#endif // FEATURE_REDHAWK
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
EtwGCRootKind dwEtwRootKind;
#endif // GC_PROFILING || FEATURE_EVENT_TRACE
@@ -363,6 +365,7 @@ void record_global_mechanism (int mech_index);
#define GC_ALLOC_FINALIZE 0x1
#define GC_ALLOC_CONTAINS_REF 0x2
#define GC_ALLOC_ALIGN8_BIAS 0x4
+#define GC_ALLOC_ALIGN8 0x8
class GCHeap {
friend struct ::_DacGlobals;
@@ -433,6 +436,7 @@ public:
}
#endif
#else // FEATURE_SVR_GC
+ UNREFERENCED_PARAMETER(bServerHeap);
CONSISTENCY_CHECK(bServerHeap == false);
#endif // FEATURE_SVR_GC
}
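The new GC_ALLOC_ALIGN8 flag extends an existing bitmask, so each flag must occupy a distinct bit: callers OR flags into one word and the allocator tests each bit independently. A quick illustration (flag values copied from the diff):

    #include <cstdint>

    #define GC_ALLOC_FINALIZE     0x1
    #define GC_ALLOC_CONTAINS_REF 0x2
    #define GC_ALLOC_ALIGN8_BIAS  0x4
    #define GC_ALLOC_ALIGN8       0x8   // added by this change

    int main()
    {
        static_assert((GC_ALLOC_ALIGN8 &
                       (GC_ALLOC_FINALIZE | GC_ALLOC_CONTAINS_REF | GC_ALLOC_ALIGN8_BIAS)) == 0,
                      "flag bits must not overlap");
        uint32_t flags = GC_ALLOC_CONTAINS_REF | GC_ALLOC_ALIGN8;   // combine
        return (flags & GC_ALLOC_ALIGN8) ? 0 : 1;                   // test one bit
    }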
diff --git a/src/gc/gcee.cpp b/src/gc/gcee.cpp
index 138ec6102e..8270799807 100644
--- a/src/gc/gcee.cpp
+++ b/src/gc/gcee.cpp
@@ -74,6 +74,7 @@ void GCHeap::UpdatePreGCCounters()
#endif //ENABLE_PERF_COUNTERS
+#ifdef FEATURE_EVENT_TRACE
#ifdef MULTIPLE_HEAPS
//take the first heap....
gc_mechanisms *pSettings = &gc_heap::g_heaps[0]->settings;
@@ -81,7 +82,6 @@ void GCHeap::UpdatePreGCCounters()
gc_mechanisms *pSettings = &gc_heap::settings;
#endif //MULTIPLE_HEAPS
-#ifdef FEATURE_EVENT_TRACE
ETW::GCLog::ETW_GC_INFO Info;
Info.GCStart.Count = (uint32_t)pSettings->gc_index;
@@ -627,19 +627,29 @@ BOOL GCHeap::IsConcurrentGCInProgress()
#ifdef FEATURE_EVENT_TRACE
void gc_heap::fire_etw_allocation_event (size_t allocation_amount, int gen_number, uint8_t* object_address)
{
+ void * typeId = nullptr;
+ wchar_t * name = nullptr;
+#ifdef FEATURE_REDHAWK
+ typeId = RedhawkGCInterface::GetLastAllocEEType();
+#else
TypeHandle th = GetThread()->GetTHAllocContextObj();
-
if (th != 0)
{
- InlineSString<MAX_CLASSNAME_LENGTH> strTypeName;
+ InlineSString<MAX_CLASSNAME_LENGTH> strTypeName;
th.GetName(strTypeName);
+ typeId = th.GetMethodTable();
+ name = strTypeName.GetUnicode();
+ }
+#endif
+ if (typeId != nullptr)
+ {
FireEtwGCAllocationTick_V3((uint32_t)allocation_amount,
((gen_number == 0) ? ETW::GCLog::ETW_GC_INFO::AllocationSmall : ETW::GCLog::ETW_GC_INFO::AllocationLarge),
GetClrInstanceId(),
allocation_amount,
- th.GetMethodTable(),
- strTypeName.GetUnicode(),
+ typeId,
+ name,
heap_number,
object_address
);
@@ -647,6 +657,10 @@ void gc_heap::fire_etw_allocation_event (size_t allocation_amount, int gen_numbe
}
void gc_heap::fire_etw_pin_object_event (uint8_t* object, uint8_t** ppObject)
{
+#ifdef FEATURE_REDHAWK
+ UNREFERENCED_PARAMETER(object);
+ UNREFERENCED_PARAMETER(ppObject);
+#else
Object* obj = (Object*)object;
InlineSString<MAX_CLASSNAME_LENGTH> strTypeName;
@@ -669,6 +683,7 @@ void gc_heap::fire_etw_pin_object_event (uint8_t* object, uint8_t** ppObject)
}
EX_CATCH {}
EX_END_CATCH(SwallowAllExceptions)
+#endif // FEATURE_REDHAWK
}
#endif // FEATURE_EVENT_TRACE
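The fire_etw_allocation_event rewrite hoists the type identity out of the CLR-only branch so both runtimes share a single event-firing call site. A reduced sketch of that shape (all names below are simplified stand-ins, not the real ETW API):

    #include <cstdio>

    static void FireAllocationTick(const void* typeId, const wchar_t* name)
    {
        printf("alloc tick: typeId=%p name=%ls\n", typeId, name ? name : L"<unknown>");
    }

    void fire_allocation_event()
    {
        const void* typeId = nullptr;
        const wchar_t* name = nullptr;
    #if defined(FEATURE_REDHAWK)
        typeId = nullptr;            // stand-in for RedhawkGCInterface::GetLastAllocEEType()
    #else
        static int fakeMethodTable;
        typeId = &fakeMethodTable;   // stand-in for th.GetMethodTable()
        name   = L"System.String";   // stand-in for th.GetName(...)
    #endif
        if (typeId != nullptr)       // fire once from common code, as in the diff
            FireAllocationTick(typeId, name);
    }

    int main() { fire_allocation_event(); return 0; }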
diff --git a/src/gc/gcpriv.h b/src/gc/gcpriv.h
index 5f6122daf6..c5d88c2d1f 100644
--- a/src/gc/gcpriv.h
+++ b/src/gc/gcpriv.h
@@ -3152,10 +3152,10 @@ protected:
CLREvent gc_lh_block_event;
PER_HEAP_ISOLATED
- BOOL gc_can_use_concurrent;
+ bool gc_can_use_concurrent;
PER_HEAP_ISOLATED
- BOOL temp_disable_concurrent_p;
+ bool temp_disable_concurrent_p;
PER_HEAP_ISOLATED
BOOL do_ephemeral_gc_p;
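These two flags move from Win32 BOOL (a typedef for int) to C++ bool; besides size, the difference is that bool normalizes every nonzero value to true while BOOL preserves the raw integer. A small stand-alone check (the BOOL typedef is assumed here, matching the usual Windows definition):

    #include <cstdio>

    typedef int BOOL;   // the conventional Win32 typedef (assumption for this sketch)

    int main()
    {
        static_assert(sizeof(BOOL) == sizeof(int), "Win32 BOOL is just an int");
        printf("sizeof(BOOL)=%zu sizeof(bool)=%zu\n", sizeof(BOOL), sizeof(bool));
        BOOL raw  = 2;           // legal: any int value
        bool norm = (raw != 0);  // bool only ever holds true or false
        return norm ? 0 : 1;
    }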
diff --git a/src/gc/gcscan.cpp b/src/gc/gcscan.cpp
index 078d7404a0..0bf039702a 100644
--- a/src/gc/gcscan.cpp
+++ b/src/gc/gcscan.cpp
@@ -27,7 +27,7 @@ SVAL_IMPL_INIT(int32_t, CNameSpace, m_GcStructuresInvalidCnt, 1);
VOLATILE(int32_t) CNameSpace::m_GcStructuresInvalidCnt = 1;
#endif //DACCESS_COMPILE
-BOOL CNameSpace::GetGcRuntimeStructuresValid ()
+bool CNameSpace::GetGcRuntimeStructuresValid ()
{
LIMITED_METHOD_CONTRACT;
SUPPORTS_DAC;
diff --git a/src/gc/gcscan.h b/src/gc/gcscan.h
index 64280c3fac..956622abb4 100644
--- a/src/gc/gcscan.h
+++ b/src/gc/gcscan.h
@@ -60,7 +60,7 @@ class CNameSpace
static void GcRuntimeStructuresValid (BOOL bValid);
- static BOOL GetGcRuntimeStructuresValid ();
+ static bool GetGcRuntimeStructuresValid ();
#ifdef DACCESS_COMPILE
static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
#endif // DACCESS_COMPILE
diff --git a/src/gc/handletable.cpp b/src/gc/handletable.cpp
index 645d18e298..be415bccf1 100644
--- a/src/gc/handletable.cpp
+++ b/src/gc/handletable.cpp
@@ -441,8 +441,9 @@ void ValidateAppDomainForHandle(OBJECTHANDLE handle)
// Verify that we are not trying to access freed handle.
_ASSERTE("Attempt to access destroyed handle." && *(_UNCHECKED_OBJECTREF *)handle != DEBUG_DestroyedHandleValue);
#endif
-#ifndef DACCESS_COMPILE
-
+#ifdef DACCESS_COMPILE
+ UNREFERENCED_PARAMETER(handle);
+#else
BEGIN_DEBUG_ONLY_CODE;
ADIndex id = HndGetHandleADIndex(handle);
AppDomain *pUnloadingDomain = SystemDomain::AppDomainBeingUnloaded();
@@ -755,6 +756,7 @@ void HndLogSetEvent(OBJECTHANDLE handle, _UNCHECKED_OBJECTREF value)
FireEtwSetGCHandle((void*) handle, value, hndType, generation, (int64_t) pAppDomain, GetClrInstanceId());
FireEtwPrvSetGCHandle((void*) handle, value, hndType, generation, (int64_t) pAppDomain, GetClrInstanceId());
+#ifndef FEATURE_REDHAWK
// Also fire the things pinned by Async pinned handles
if (hndType == HNDTYPE_ASYNCPINNED)
{
@@ -781,7 +783,11 @@ void HndLogSetEvent(OBJECTHANDLE handle, _UNCHECKED_OBJECTREF value)
}
}
}
+#endif // FEATURE_REDHAWK
}
+#else
+ UNREFERENCED_PARAMETER(handle);
+ UNREFERENCED_PARAMETER(value);
#endif
}
@@ -1149,6 +1155,9 @@ void HndNotifyGcCycleComplete(HHANDLETABLE hTable, uint32_t condemned, uint32_t
}
#else
LIMITED_METHOD_CONTRACT;
+ UNREFERENCED_PARAMETER(hTable);
+ UNREFERENCED_PARAMETER(condemned);
+ UNREFERENCED_PARAMETER(maxgen);
#endif
}
@@ -1354,6 +1363,7 @@ BOOL Ref_ContainHandle(HandleTableBucket *pBucket, OBJECTHANDLE handle)
void DEBUG_PostGCScanHandler(HandleTable *pTable, const uint32_t *types, uint32_t typeCount, uint32_t condemned, uint32_t maxgen, ScanCallbackInfo *info)
{
LIMITED_METHOD_CONTRACT;
+ UNREFERENCED_PARAMETER(types);
// looks like the GC supports more generations than we expected
_ASSERTE(condemned < MAXSTATGEN);
@@ -1408,6 +1418,7 @@ void DEBUG_PostGCScanHandler(HandleTable *pTable, const uint32_t *types, uint32_
void DEBUG_LogScanningStatistics(HandleTable *pTable, uint32_t level)
{
WRAPPER_NO_CONTRACT;
+ UNREFERENCED_PARAMETER(level);
// have we done any GC's yet?
if (pTable->_DEBUG_iMaxGen >= 0)
diff --git a/src/gc/handletablecore.cpp b/src/gc/handletablecore.cpp
index 6384531bdb..8435f94165 100644
--- a/src/gc/handletablecore.cpp
+++ b/src/gc/handletablecore.cpp
@@ -203,6 +203,8 @@ void ZeroHandles(OBJECTHANDLE *pHandleBase, uint32_t uCount)
void CALLBACK DbgCountEnumeratedBlocks(TableSegment *pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo)
{
LIMITED_METHOD_CONTRACT;
+ UNREFERENCED_PARAMETER(pSegment);
+ UNREFERENCED_PARAMETER(uBlock);
// accumulate the block count in pInfo->param1
pInfo->param1 += uCount;
@@ -1878,6 +1880,7 @@ uint32_t BlockAllocHandlesInMask(TableSegment *pSegment, uint32_t uBlock,
OBJECTHANDLE *pHandleBase, uint32_t uCount)
{
LIMITED_METHOD_CONTRACT;
+ UNREFERENCED_PARAMETER(uBlock);
// keep track of how many handles we have left to allocate
uint32_t uRemain = uCount;
@@ -1953,6 +1956,7 @@ uint32_t BlockAllocHandlesInitial(TableSegment *pSegment, uint32_t uType, uint32
OBJECTHANDLE *pHandleBase, uint32_t uCount)
{
LIMITED_METHOD_CONTRACT;
+ UNREFERENCED_PARAMETER(uType);
// sanity check
_ASSERTE(uCount);
diff --git a/src/gc/handletablescan.cpp b/src/gc/handletablescan.cpp
index 5f88118d8b..2f83ac1b74 100644
--- a/src/gc/handletablescan.cpp
+++ b/src/gc/handletablescan.cpp
@@ -515,8 +515,13 @@ void CALLBACK ScanConsecutiveHandlesWithUserData(PTR_UNCHECKED_OBJECTREF pValue,
void CALLBACK BlockAgeBlocks(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo)
{
LIMITED_METHOD_CONTRACT;
+ UNREFERENCED_PARAMETER(pInfo);
-#ifndef DACCESS_COMPILE
+#ifdef DACCESS_COMPILE
+ UNREFERENCED_PARAMETER(pSegment);
+ UNREFERENCED_PARAMETER(uBlock);
+ UNREFERENCED_PARAMETER(uCount);
+#else
// set up to update the specified blocks
uint32_t *pdwGen = (uint32_t *)pSegment->rgGeneration + uBlock;
uint32_t *pdwGenLast = pdwGen + uCount;
@@ -908,6 +913,7 @@ void CALLBACK BlockResetAgeMapForBlocks(TableSegment *pSegment, uint32_t uBlock,
static void VerifyObject(_UNCHECKED_OBJECTREF from, _UNCHECKED_OBJECTREF obj)
{
#ifdef FEATURE_REDHAWK
+ UNREFERENCED_PARAMETER(from);
MethodTable* pMT = (MethodTable*)(obj->GetGCSafeMethodTable());
pMT->SanityCheck();
#else
@@ -917,6 +923,7 @@ static void VerifyObject(_UNCHECKED_OBJECTREF from, _UNCHECKED_OBJECTREF obj)
static void VerifyObjectAndAge(_UNCHECKED_OBJECTREF *pValue, _UNCHECKED_OBJECTREF from, _UNCHECKED_OBJECTREF obj, uint8_t minAge)
{
+ UNREFERENCED_PARAMETER(pValue);
VerifyObject(from, obj);
int thisAge = GCHeap::GetGCHeap()->WhichGeneration(obj);
@@ -1294,7 +1301,7 @@ void CALLBACK UnlockAndForgetQueuedBlocks(AsyncScanInfo *pAsyncInfo, ScanQNode *
* Frees the specified ScanQNode
*
*/
-void CALLBACK FreeScanQNode(AsyncScanInfo *pAsyncInfo, ScanQNode *pQNode, uintptr_t)
+void CALLBACK FreeScanQNode(AsyncScanInfo *, ScanQNode *pQNode, uintptr_t)
{
LIMITED_METHOD_CONTRACT;
@@ -1380,7 +1387,7 @@ void xxxTableScanQueuedBlocksAsync(PTR_HandleTable pTable, PTR_TableSegment pSeg
* Returns the next segment to be scanned in a scanning loop.
*
*/
-PTR_TableSegment CALLBACK QuickSegmentIterator(PTR_HandleTable pTable, PTR_TableSegment pPrevSegment, CrstHolderWithState *pCrstHolder)
+PTR_TableSegment CALLBACK QuickSegmentIterator(PTR_HandleTable pTable, PTR_TableSegment pPrevSegment, CrstHolderWithState *)
{
LIMITED_METHOD_CONTRACT;
@@ -1413,7 +1420,7 @@ PTR_TableSegment CALLBACK QuickSegmentIterator(PTR_HandleTable pTable, PTR_Table
* g0 scans are more likely to operate on contiguous blocks.
*
*/
-PTR_TableSegment CALLBACK StandardSegmentIterator(PTR_HandleTable pTable, PTR_TableSegment pPrevSegment, CrstHolderWithState *pCrstHolder)
+PTR_TableSegment CALLBACK StandardSegmentIterator(PTR_HandleTable pTable, PTR_TableSegment pPrevSegment, CrstHolderWithState *)
{
CONTRACTL
{
@@ -1447,7 +1454,7 @@ PTR_TableSegment CALLBACK StandardSegmentIterator(PTR_HandleTable pTable, PTR_Ta
* including freeing those it notices are empty along the way.
*
*/
-PTR_TableSegment CALLBACK FullSegmentIterator(PTR_HandleTable pTable, PTR_TableSegment pPrevSegment, CrstHolderWithState *pCrstHolder)
+PTR_TableSegment CALLBACK FullSegmentIterator(PTR_HandleTable pTable, PTR_TableSegment pPrevSegment, CrstHolderWithState *)
{
CONTRACTL
{
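FreeScanQNode and the three segment iterators above take the complementary approach to UNREFERENCED_PARAMETER: a parameter that the signature requires but the body never reads is simply left unnamed, which suppresses the warning in portable C++ with no macro at all. A minimal sketch:

    // The signature is dictated by a callback table; the second argument is
    // never used here, so it stays unnamed and draws no unused-parameter warning.
    int FirstElement(const int* pTable, void* /*pLockHolder*/)
    {
        return pTable[0];
    }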
diff --git a/src/gc/objecthandle.cpp b/src/gc/objecthandle.cpp
index e16e39b8b0..93cf38fd5b 100644
--- a/src/gc/objecthandle.cpp
+++ b/src/gc/objecthandle.cpp
@@ -83,6 +83,7 @@ void CALLBACK VariableTraceDispatcher(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *
void CALLBACK PromoteRefCounted(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
WRAPPER_NO_CONTRACT;
+ UNREFERENCED_PARAMETER(pExtraInfo);
    // there are too many races when asynchronously scanning ref-counted handles so we no longer support it
_ASSERTE(!((ScanContext*)lp1)->concurrent);
@@ -210,7 +211,7 @@ void CALLBACK PromoteDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *p
}
}
-void CALLBACK ClearDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
+void CALLBACK ClearDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t /*lp1*/, uintptr_t /*lp2*/)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(pExtraInfo);
@@ -248,6 +249,7 @@ void CALLBACK PinObject(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, ui
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_MODE_COOPERATIVE;
+ UNREFERENCED_PARAMETER(pExtraInfo);
// PINNING IS BAD - DON'T DO IT IF YOU CAN AVOID IT
LOG((LF_GC, LL_WARNING, LOG_HANDLE_OBJECT_CLASS("WARNING: ", pObjRef, "causes pinning of ", *pObjRef)));
@@ -302,13 +304,14 @@ void CALLBACK PinObject(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, ui
void CALLBACK PromoteObject(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
WRAPPER_NO_CONTRACT;
+ UNREFERENCED_PARAMETER(pExtraInfo);
LOG((LF_GC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("", pObjRef, "causes promotion of ", *pObjRef)));
Object **ppRef = (Object **)pObjRef;
- _ASSERTE(lp2);
- promote_func* callback = (promote_func*) lp2;
- callback(ppRef, (ScanContext *)lp1, 0);
+ _ASSERTE(lp2);
+ promote_func* callback = (promote_func*) lp2;
+ callback(ppRef, (ScanContext *)lp1, 0);
}
@@ -321,6 +324,9 @@ void CALLBACK PromoteObject(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo
void CALLBACK CheckPromoted(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
WRAPPER_NO_CONTRACT;
+ UNREFERENCED_PARAMETER(pExtraInfo);
+ UNREFERENCED_PARAMETER(lp1);
+ UNREFERENCED_PARAMETER(lp2);
LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Checking referent of Weak-", pObjRef, "to ", *pObjRef)));
@@ -365,6 +371,7 @@ void CALLBACK CalculateSizedRefSize(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pE
void CALLBACK UpdatePointer(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
LIMITED_METHOD_CONTRACT;
+ UNREFERENCED_PARAMETER(pExtraInfo);
LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT("Querying for new location of ", pObjRef, "to ", *pObjRef)));
@@ -374,9 +381,9 @@ void CALLBACK UpdatePointer(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo
Object *pOldLocation = *ppRef;
#endif
- _ASSERTE(lp2);
- promote_func* callback = (promote_func*) lp2;
- callback(ppRef, (ScanContext *)lp1, 0);
+ _ASSERTE(lp2);
+ promote_func* callback = (promote_func*) lp2;
+ callback(ppRef, (ScanContext *)lp1, 0);
#ifdef _DEBUG
if (pOldLocation != *pObjRef)
@@ -398,6 +405,7 @@ void CALLBACK UpdatePointer(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo
*/
void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
+#ifndef FEATURE_REDHAWK
CONTRACTL
{
NOTHROW;
@@ -405,6 +413,9 @@ void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, uintpt
if (GetThreadNULLOk()) { MODE_COOPERATIVE; }
}
CONTRACTL_END;
+#endif // FEATURE_REDHAWK
+ UNREFERENCED_PARAMETER(pExtraInfo);
+ UNREFERENCED_PARAMETER(lp2);
LOG((LF_GC | LF_CORPROF, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Notifying profiler of ", pObjRef, "to ", *pObjRef)));
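Under FEATURE_REDHAWK the CONTRACTL block is compiled out because the standalone GC environment does not carry the CLR contract machinery; the lighter contract macros used elsewhere in this file are supplied as no-ops by the environment headers. A sketch of that stubbing pattern (the actual macro set in gcenv.base.h may differ):

    #ifdef FEATURE_REDHAWK
    // Standalone build: contract annotations compile away to nothing.
    #define LIMITED_METHOD_CONTRACT
    #define WRAPPER_NO_CONTRACT
    #define STATIC_CONTRACT_GC_NOTRIGGER
    #endif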
@@ -538,6 +549,7 @@ void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, uintpt
void CALLBACK UpdatePointerPinned(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
LIMITED_METHOD_CONTRACT;
+ UNREFERENCED_PARAMETER(pExtraInfo);
Object **ppRef = (Object **)pObjRef;
@@ -639,7 +651,7 @@ bool Ref_Initialize()
_ASSERTE(g_HandleTableMap.pBuckets == NULL);
// Create an array of INITIAL_HANDLE_TABLE_ARRAY_SIZE HandleTableBuckets to hold the handle table sets
- NewHolder<HandleTableBucket*> pBuckets(new (nothrow) HandleTableBucket * [ INITIAL_HANDLE_TABLE_ARRAY_SIZE ]);
+ HandleTableBucket** pBuckets = new (nothrow) HandleTableBucket * [ INITIAL_HANDLE_TABLE_ARRAY_SIZE ];
if (pBuckets == NULL)
return false;
@@ -648,45 +660,50 @@ bool Ref_Initialize()
    // Create the first bucket
HandleTableBucket * pBucket = new (nothrow) HandleTableBucket;
- if (pBucket == NULL)
- return false;
- pBucket->HandleTableIndex = 0;
+ if (pBucket != NULL)
+ {
+ pBucket->HandleTableIndex = 0;
- int n_slots = getNumberOfSlots();
+ int n_slots = getNumberOfSlots();
- HandleTableBucketHolder bucketHolder(pBucket, n_slots);
+ HandleTableBucketHolder bucketHolder(pBucket, n_slots);
- // create the handle table set for the first bucket
- pBucket->pTable = new (nothrow) HHANDLETABLE [ n_slots ];
- if (pBucket->pTable == NULL)
- return false;
+ // create the handle table set for the first bucket
+ pBucket->pTable = new (nothrow) HHANDLETABLE[n_slots];
+ if (pBucket->pTable == NULL)
+ goto CleanupAndFail;
- ZeroMemory(pBucket->pTable,
- n_slots * sizeof (HHANDLETABLE));
- for (int uCPUindex=0; uCPUindex < n_slots; uCPUindex++)
- {
- pBucket->pTable[uCPUindex] = HndCreateHandleTable(s_rgTypeFlags, _countof(s_rgTypeFlags), ADIndex(1));
- if (pBucket->pTable[uCPUindex] == NULL)
- return false;
-
- HndSetHandleTableIndex(pBucket->pTable[uCPUindex], 0);
- }
+ ZeroMemory(pBucket->pTable,
+ n_slots * sizeof(HHANDLETABLE));
+ for (int uCPUindex = 0; uCPUindex < n_slots; uCPUindex++)
+ {
+ pBucket->pTable[uCPUindex] = HndCreateHandleTable(s_rgTypeFlags, _countof(s_rgTypeFlags), ADIndex(1));
+ if (pBucket->pTable[uCPUindex] == NULL)
+ goto CleanupAndFail;
- pBuckets[0] = pBucket;
- bucketHolder.SuppressRelease();
+ HndSetHandleTableIndex(pBucket->pTable[uCPUindex], 0);
+ }
- g_HandleTableMap.pBuckets = pBuckets;
- g_HandleTableMap.dwMaxIndex = INITIAL_HANDLE_TABLE_ARRAY_SIZE;
- g_HandleTableMap.pNext = NULL;
- pBuckets.SuppressRelease();
+ pBuckets[0] = pBucket;
+ bucketHolder.SuppressRelease();
- // Allocate contexts used during dependent handle promotion scanning. There's one of these for every GC
- // heap since they're scanned in parallel.
- g_pDependentHandleContexts = new (nothrow) DhContext[n_slots];
- if (g_pDependentHandleContexts == NULL)
- return false;
+ g_HandleTableMap.pBuckets = pBuckets;
+ g_HandleTableMap.dwMaxIndex = INITIAL_HANDLE_TABLE_ARRAY_SIZE;
+ g_HandleTableMap.pNext = NULL;
- return true;
+ // Allocate contexts used during dependent handle promotion scanning. There's one of these for every GC
+ // heap since they're scanned in parallel.
+ g_pDependentHandleContexts = new (nothrow) DhContext[n_slots];
+ if (g_pDependentHandleContexts == NULL)
+ goto CleanupAndFail;
+
+ return true;
+ }
+
+CleanupAndFail:
+ if (pBuckets != NULL)
+ delete[] pBuckets;
+ return false;
}
void Ref_Shutdown()
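Ref_Initialize now holds pBuckets as a plain pointer and routes every post-allocation failure through a single CleanupAndFail label, replacing the NewHolder auto-release that the standalone build no longer links in. A condensed sketch of the pattern with hypothetical names (the real function also keeps a HandleTableBucketHolder for the bucket itself):

    #include <new>

    struct Bucket { bool Populate() { return true; } };   // hypothetical stand-ins
    static const int kInitialSize = 16;

    bool InitTables(Bucket**& out)
    {
        Bucket** buckets = new (std::nothrow) Bucket*[kInitialSize]();
        if (buckets == nullptr)
            return false;

        Bucket* first = new (std::nothrow) Bucket;
        if (first != nullptr)
        {
            if (!first->Populate())
                goto CleanupAndFail;     // every later failure funnels through one exit
            buckets[0] = first;
            out = buckets;               // success: caller takes ownership
            return true;
        }

    CleanupAndFail:
        delete first;                    // null-safe; undoes the partial construction
        delete[] buckets;
        return false;
    }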
@@ -863,6 +880,8 @@ int getSlotNumber(ScanContext* sc)
void Ref_EndSynchronousGC(uint32_t condemned, uint32_t maxgen)
{
LIMITED_METHOD_CONTRACT;
+ UNREFERENCED_PARAMETER(condemned);
+ UNREFERENCED_PARAMETER(maxgen);
    // NOT used; must be modified for MTHTS (scalable HandleTable scan) if it is ever used:
    // ScanContext info would need to be passed to split the HT bucket across threads, or the work performed under t_join::join
@@ -1499,6 +1518,7 @@ void ScanSizedRefByCPU(uint32_t maxgen, HANDLESCANPROC scanProc, ScanContext* sc
void Ref_ScanSizedRefHandles(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn)
{
    LOG((LF_GC, LL_INFO10000, "Scanning SizedRef handles in generation %u\n", condemned));
+ UNREFERENCED_PARAMETER(condemned);
_ASSERTE (condemned == maxgen);
uint32_t flags = (sc->concurrent ? HNDGCF_ASYNC : HNDGCF_NORMAL) | HNDGCF_EXTRAINFO;
@@ -1669,6 +1689,62 @@ void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t maxgen, ProfilingScanCon
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+// Callback to enumerate all object references held in handles.
+void CALLBACK ScanPointer(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
+{
+ WRAPPER_NO_CONTRACT;
+ UNREFERENCED_PARAMETER(pExtraInfo);
+
+ Object **pRef = (Object **)pObjRef;
+ _ASSERTE(lp2);
+ promote_func* callback = (promote_func*)lp2;
+ callback(pRef, (ScanContext *)lp1, 0);
+}
+
+// Enumerate all object references held by any of the handle tables in the system.
+void Ref_ScanPointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn)
+{
+ WRAPPER_NO_CONTRACT;
+
+ uint32_t types[] =
+ {
+ HNDTYPE_WEAK_SHORT,
+ HNDTYPE_WEAK_LONG,
+ HNDTYPE_STRONG,
+#if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK)
+ HNDTYPE_REFCOUNTED,
+#endif // FEATURE_COMINTEROP || FEATURE_REDHAWK
+ HNDTYPE_PINNED,
+ HNDTYPE_ASYNCPINNED,
+ HNDTYPE_SIZEDREF,
+ };
+
+ uint32_t flags = HNDGCF_NORMAL;
+
+ // perform a multi-type scan that enumerates pointers
+ for (HandleTableMap * walk = &g_HandleTableMap;
+ walk != nullptr;
+ walk = walk->pNext)
+ {
+ for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i++)
+ {
+ if (walk->pBuckets[i] != NULL)
+ {
+                // this is one of the Ref_* functions performed by a single thread in the MULTI_HEAPS case, so we need to loop through all handle tables of the bucket
+ for (int uCPUindex = 0; uCPUindex < getNumberOfSlots(); uCPUindex++)
+ {
+ HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex];
+ if (hTable)
+ HndScanHandlesForGC(hTable, &ScanPointer, LPARAM(sc), LPARAM(fn), types, _countof(types), condemned, maxgen, flags);
+ }
+ }
+ }
+ }
+
+ // enumerate pointers in variable handles whose dynamic type is VHT_WEAK_SHORT, VHT_WEAK_LONG or VHT_STRONG
+ TraceVariableHandlesBySingleThread(&ScanPointer, LPARAM(sc), LPARAM(fn), VHT_WEAK_SHORT | VHT_WEAK_LONG | VHT_STRONG, condemned, maxgen, flags);
+}
+
void Ref_UpdatePinnedPointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn)
{
WRAPPER_NO_CONTRACT;
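Ref_ScanPointers gives a standalone host one entry point that reports every handle-rooted reference through the same promote_func shape the scan callbacks above use. A hedged usage sketch; the counting callback and the ScanContext setup are illustrative assumptions, not part of this commit:

    static size_t g_cHandleRefs = 0;

    // Matches the promote_func shape used by ScanPointer above:
    // slot address, scan context, flags.
    static void CountHandleRef(Object** ppObj, ScanContext* sc, uint32_t flags)
    {
        UNREFERENCED_PARAMETER(sc);
        UNREFERENCED_PARAMETER(flags);
        if (*ppObj != nullptr)
            g_cHandleRefs++;
    }

    void DumpHandleRootCount(uint32_t maxGeneration)
    {
        ScanContext sc;    // default-constructed: a plain, non-concurrent scan
        g_cHandleRefs = 0;
        Ref_ScanPointers(maxGeneration, maxGeneration, &sc, &CountHandleRef);
    }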
diff --git a/src/gc/objecthandle.h b/src/gc/objecthandle.h
index b016f9f4e9..42f054a865 100644
--- a/src/gc/objecthandle.h
+++ b/src/gc/objecthandle.h
@@ -669,6 +669,9 @@ bool Ref_ScanDependentHandlesForPromotion(DhContext *pDhContext);
void Ref_ScanDependentHandlesForClearing(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn);
void Ref_ScanDependentHandlesForRelocation(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn);
void Ref_ScanSizedRefHandles(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn);
+#ifdef FEATURE_REDHAWK
+void Ref_ScanPointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn);
+#endif
void Ref_CheckReachable (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1);
void Ref_CheckAlive (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1);
diff --git a/src/gc/sample/CMakeLists.txt b/src/gc/sample/CMakeLists.txt
index 5dc3014120..9a26c26cc4 100644
--- a/src/gc/sample/CMakeLists.txt
+++ b/src/gc/sample/CMakeLists.txt
@@ -4,12 +4,33 @@ include_directories(..)
include_directories(../env)
set(SOURCES
- GCSample.cpp
+ GCSample.cpp
+ gcenv.cpp
+ ../gccommon.cpp
+ ../gceewks.cpp
+ ../gcscan.cpp
+ ../gcwks.cpp
+ ../handletable.cpp
+ ../handletablecache.cpp
+ ../handletablecore.cpp
+ ../handletablescan.cpp
+ ../objecthandle.cpp
)
-add_executable(gcsample
- ${SOURCES}
-)
+if(WIN32)
+ list(APPEND SOURCES
+ ../env/gcenv.windows.cpp)
+else()
+ list(APPEND SOURCES
+ ../env/gcenv.unix.cpp)
+endif()
+
+if(CLR_CMAKE_PLATFORM_UNIX)
+ add_compile_options(-Wno-format)
+ add_compile_options(-Wno-unused-variable)
+ add_compile_options(-Wno-unused-private-field)
+ add_compile_options(-Wno-tautological-undefined-compare)
+endif()
if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
add_definitions(-D_TARGET_AMD64_=1)
@@ -27,6 +48,6 @@ else()
clr_unknown_arch()
endif()
-target_link_libraries(gcsample
- clrgc
+add_executable(gcsample
+ ${SOURCES}
)