summaryrefslogtreecommitdiff
path: root/src/gc/env
diff options
context:
space:
mode:
authorJiyoung Yun <jy910.yun@samsung.com>2016-11-23 19:09:09 +0900
committerJiyoung Yun <jy910.yun@samsung.com>2016-11-23 19:09:09 +0900
commit4b4aad7217d3292650e77eec2cf4c198ea9c3b4b (patch)
tree98110734c91668dfdbb126fcc0e15ddbd93738ca /src/gc/env
parentfa45f57ed55137c75ac870356a1b8f76c84b229c (diff)
downloadcoreclr-4b4aad7217d3292650e77eec2cf4c198ea9c3b4b.tar.gz
coreclr-4b4aad7217d3292650e77eec2cf4c198ea9c3b4b.tar.bz2
coreclr-4b4aad7217d3292650e77eec2cf4c198ea9c3b4b.zip
Imported Upstream version 1.1.0upstream/1.1.0
Diffstat (limited to 'src/gc/env')
-rw-r--r--src/gc/env/common.cpp9
-rw-r--r--src/gc/env/common.h31
-rw-r--r--src/gc/env/etmdummy.h400
-rw-r--r--src/gc/env/gcenv.base.h626
-rw-r--r--src/gc/env/gcenv.ee.h85
-rw-r--r--src/gc/env/gcenv.interlocked.h101
-rw-r--r--src/gc/env/gcenv.interlocked.inl199
-rw-r--r--src/gc/env/gcenv.object.h148
-rw-r--r--src/gc/env/gcenv.os.h283
-rw-r--r--src/gc/env/gcenv.structs.h122
-rw-r--r--src/gc/env/gcenv.sync.h145
11 files changed, 2149 insertions, 0 deletions
diff --git a/src/gc/env/common.cpp b/src/gc/env/common.cpp
new file mode 100644
index 0000000000..313a4e4875
--- /dev/null
+++ b/src/gc/env/common.cpp
@@ -0,0 +1,9 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+// common.cpp : source file that includes just the standard includes
+// GCSample.pch will be the pre-compiled header
+// common.obj will contain the pre-compiled type information
+
+#include "common.h"
diff --git a/src/gc/env/common.h b/src/gc/env/common.h
new file mode 100644
index 0000000000..32c0d93577
--- /dev/null
+++ b/src/gc/env/common.h
@@ -0,0 +1,31 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+// common.h : include file for standard system include files,
+// or project specific include files that are used frequently, but
+// are changed infrequently
+//
+
+#pragma once
+
+#ifndef _CRT_SECURE_NO_WARNINGS
+ #define _CRT_SECURE_NO_WARNINGS
+#endif // _CRT_SECURE_NO_WARNINGS
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include <wchar.h>
+#include <assert.h>
+#include <stdarg.h>
+#include <memory.h>
+
+#include <new>
+
+#ifdef PLATFORM_UNIX
+#include <pthread.h>
+#endif
+
+using namespace std;
diff --git a/src/gc/env/etmdummy.h b/src/gc/env/etmdummy.h
new file mode 100644
index 0000000000..2b47a46e4e
--- /dev/null
+++ b/src/gc/env/etmdummy.h
@@ -0,0 +1,400 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#define FireEtwGCStart(Count, Reason) 0
+#define FireEtwGCStart_V1(Count, Depth, Reason, Type, ClrInstanceID) 0
+#define FireEtwGCStart_V2(Count, Depth, Reason, Type, ClrInstanceID, ClientSequenceNumber) 0
+#define FireEtwGCEnd(Count, Depth) 0
+#define FireEtwGCEnd_V1(Count, Depth, ClrInstanceID) 0
+#define FireEtwGCRestartEEEnd() 0
+#define FireEtwGCRestartEEEnd_V1(ClrInstanceID) 0
+#define FireEtwGCHeapStats(GenerationSize0, TotalPromotedSize0, GenerationSize1, TotalPromotedSize1, GenerationSize2, TotalPromotedSize2, GenerationSize3, TotalPromotedSize3, FinalizationPromotedSize, FinalizationPromotedCount, PinnedObjectCount, SinkBlockCount, GCHandleCount) 0
+#define FireEtwGCHeapStats_V1(GenerationSize0, TotalPromotedSize0, GenerationSize1, TotalPromotedSize1, GenerationSize2, TotalPromotedSize2, GenerationSize3, TotalPromotedSize3, FinalizationPromotedSize, FinalizationPromotedCount, PinnedObjectCount, SinkBlockCount, GCHandleCount, ClrInstanceID) 0
+#define FireEtwGCCreateSegment(Address, Size, Type) 0
+#define FireEtwGCCreateSegment_V1(Address, Size, Type, ClrInstanceID) 0
+#define FireEtwGCFreeSegment(Address) 0
+#define FireEtwGCFreeSegment_V1(Address, ClrInstanceID) 0
+#define FireEtwGCRestartEEBegin() 0
+#define FireEtwGCRestartEEBegin_V1(ClrInstanceID) 0
+#define FireEtwGCSuspendEEEnd() 0
+#define FireEtwGCSuspendEEEnd_V1(ClrInstanceID) 0
+#define FireEtwGCSuspendEEBegin(Reason) 0
+#define FireEtwGCSuspendEEBegin_V1(Reason, Count, ClrInstanceID) 0
+#define FireEtwGCAllocationTick(AllocationAmount, AllocationKind) 0
+#define FireEtwGCAllocationTick_V1(AllocationAmount, AllocationKind, ClrInstanceID) 0
+#define FireEtwGCAllocationTick_V2(AllocationAmount, AllocationKind, ClrInstanceID, AllocationAmount64, TypeID, TypeName, HeapIndex) 0
+#define FireEtwGCAllocationTick_V3(AllocationAmount, AllocationKind, ClrInstanceID, AllocationAmount64, TypeID, TypeName, HeapIndex, Address) 0
+#define FireEtwGCCreateConcurrentThread() 0
+#define FireEtwGCCreateConcurrentThread_V1(ClrInstanceID) 0
+#define FireEtwGCTerminateConcurrentThread() 0
+#define FireEtwGCTerminateConcurrentThread_V1(ClrInstanceID) 0
+#define FireEtwGCFinalizersEnd(Count) 0
+#define FireEtwGCFinalizersEnd_V1(Count, ClrInstanceID) 0
+#define FireEtwGCFinalizersBegin() 0
+#define FireEtwGCFinalizersBegin_V1(ClrInstanceID) 0
+#define FireEtwBulkType(Count, ClrInstanceID, Values_Len_, Values) 0
+#define FireEtwGCBulkRootEdge(Index, Count, ClrInstanceID, Values_Len_, Values) 0
+#define FireEtwGCBulkRootConditionalWeakTableElementEdge(Index, Count, ClrInstanceID, Values_Len_, Values) 0
+#define FireEtwGCBulkNode(Index, Count, ClrInstanceID, Values_Len_, Values) 0
+#define FireEtwGCBulkEdge(Index, Count, ClrInstanceID, Values_Len_, Values) 0
+#define FireEtwGCSampledObjectAllocationHigh(Address, TypeID, ObjectCountForTypeSample, TotalSizeForTypeSample, ClrInstanceID) 0
+#define FireEtwGCBulkSurvivingObjectRanges(Index, Count, ClrInstanceID, Values_Len_, Values) 0
+#define FireEtwGCBulkMovedObjectRanges(Index, Count, ClrInstanceID, Values_Len_, Values) 0
+#define FireEtwGCGenerationRange(Generation, RangeStart, RangeUsedLength, RangeReservedLength, ClrInstanceID) 0
+#define FireEtwGCMarkStackRoots(HeapNum, ClrInstanceID) 0
+#define FireEtwGCMarkFinalizeQueueRoots(HeapNum, ClrInstanceID) 0
+#define FireEtwGCMarkHandles(HeapNum, ClrInstanceID) 0
+#define FireEtwGCMarkOlderGenerationRoots(HeapNum, ClrInstanceID) 0
+#define FireEtwFinalizeObject(TypeID, ObjectID, ClrInstanceID) 0
+#define FireEtwSetGCHandle(HandleID, ObjectID, Kind, Generation, AppDomainID, ClrInstanceID) 0
+#define FireEtwDestroyGCHandle(HandleID, ClrInstanceID) 0
+#define FireEtwGCSampledObjectAllocationLow(Address, TypeID, ObjectCountForTypeSample, TotalSizeForTypeSample, ClrInstanceID) 0
+#define FireEtwPinObjectAtGCTime(HandleID, ObjectID, ObjectSize, TypeName, ClrInstanceID) 0
+#define FireEtwGCTriggered(Reason, ClrInstanceID) 0
+#define FireEtwGCBulkRootCCW(Count, ClrInstanceID, Values_Len_, Values) 0
+#define FireEtwGCBulkRCW(Count, ClrInstanceID, Values_Len_, Values) 0
+#define FireEtwGCBulkRootStaticVar(Count, AppDomainID, ClrInstanceID, Values_Len_, Values) 0
+#define FireEtwWorkerThreadCreate(WorkerThreadCount, RetiredWorkerThreads) 0
+#define FireEtwWorkerThreadTerminate(WorkerThreadCount, RetiredWorkerThreads) 0
+#define FireEtwWorkerThreadRetire(WorkerThreadCount, RetiredWorkerThreads) 0
+#define FireEtwWorkerThreadUnretire(WorkerThreadCount, RetiredWorkerThreads) 0
+#define FireEtwIOThreadCreate(IOThreadCount, RetiredIOThreads) 0
+#define FireEtwIOThreadCreate_V1(IOThreadCount, RetiredIOThreads, ClrInstanceID) 0
+#define FireEtwIOThreadTerminate(IOThreadCount, RetiredIOThreads) 0
+#define FireEtwIOThreadTerminate_V1(IOThreadCount, RetiredIOThreads, ClrInstanceID) 0
+#define FireEtwIOThreadRetire(IOThreadCount, RetiredIOThreads) 0
+#define FireEtwIOThreadRetire_V1(IOThreadCount, RetiredIOThreads, ClrInstanceID) 0
+#define FireEtwIOThreadUnretire(IOThreadCount, RetiredIOThreads) 0
+#define FireEtwIOThreadUnretire_V1(IOThreadCount, RetiredIOThreads, ClrInstanceID) 0
+#define FireEtwThreadpoolSuspensionSuspendThread(ClrThreadID, CpuUtilization) 0
+#define FireEtwThreadpoolSuspensionResumeThread(ClrThreadID, CpuUtilization) 0
+#define FireEtwThreadPoolWorkerThreadStart(ActiveWorkerThreadCount, RetiredWorkerThreadCount, ClrInstanceID) 0
+#define FireEtwThreadPoolWorkerThreadStop(ActiveWorkerThreadCount, RetiredWorkerThreadCount, ClrInstanceID) 0
+#define FireEtwThreadPoolWorkerThreadRetirementStart(ActiveWorkerThreadCount, RetiredWorkerThreadCount, ClrInstanceID) 0
+#define FireEtwThreadPoolWorkerThreadRetirementStop(ActiveWorkerThreadCount, RetiredWorkerThreadCount, ClrInstanceID) 0
+#define FireEtwThreadPoolWorkerThreadAdjustmentSample(Throughput, ClrInstanceID) 0
+#define FireEtwThreadPoolWorkerThreadAdjustmentAdjustment(AverageThroughput, NewWorkerThreadCount, Reason, ClrInstanceID) 0
+#define FireEtwThreadPoolWorkerThreadAdjustmentStats(Duration, Throughput, ThreadWave, ThroughputWave, ThroughputErrorEstimate, AverageThroughputErrorEstimate, ThroughputRatio, Confidence, NewControlSetting, NewThreadWaveMagnitude, ClrInstanceID) 0
+#define FireEtwThreadPoolWorkerThreadWait(ActiveWorkerThreadCount, RetiredWorkerThreadCount, ClrInstanceID) 0
+#define FireEtwThreadPoolWorkingThreadCount(Count, ClrInstanceID) 0
+#define FireEtwThreadPoolEnqueue(WorkID, ClrInstanceID) 0
+#define FireEtwThreadPoolDequeue(WorkID, ClrInstanceID) 0
+#define FireEtwThreadPoolIOEnqueue(NativeOverlapped, Overlapped, MultiDequeues, ClrInstanceID) 0
+#define FireEtwThreadPoolIODequeue(NativeOverlapped, Overlapped, ClrInstanceID) 0
+#define FireEtwThreadPoolIOPack(NativeOverlapped, Overlapped, ClrInstanceID) 0
+#define FireEtwThreadCreating(ID, ClrInstanceID) 0
+#define FireEtwThreadRunning(ID, ClrInstanceID) 0
+#define FireEtwExceptionThrown() 0
+#define FireEtwExceptionThrown_V1(ExceptionType, ExceptionMessage, ExceptionEIP, ExceptionHRESULT, ExceptionFlags, ClrInstanceID) 0
+#define FireEtwExceptionCatchStart(EntryEIP, MethodID, MethodName, ClrInstanceID) 0
+#define FireEtwExceptionCatchStop() 0
+#define FireEtwExceptionFinallyStart(EntryEIP, MethodID, MethodName, ClrInstanceID) 0
+#define FireEtwExceptionFinallyStop() 0
+#define FireEtwExceptionFilterStart(EntryEIP, MethodID, MethodName, ClrInstanceID) 0
+#define FireEtwExceptionFilterStop() 0
+#define FireEtwExceptionThrownStop() 0
+#define FireEtwContention() 0
+#define FireEtwContentionStart_V1(ContentionFlags, ClrInstanceID) 0
+#define FireEtwContentionStop(ContentionFlags, ClrInstanceID) 0
+#define FireEtwCLRStackWalk(ClrInstanceID, Reserved1, Reserved2, FrameCount, Stack) 0
+#define FireEtwAppDomainMemAllocated(AppDomainID, Allocated, ClrInstanceID) 0
+#define FireEtwAppDomainMemSurvived(AppDomainID, Survived, ProcessSurvived, ClrInstanceID) 0
+#define FireEtwThreadCreated(ManagedThreadID, AppDomainID, Flags, ManagedThreadIndex, OSThreadID, ClrInstanceID) 0
+#define FireEtwThreadTerminated(ManagedThreadID, AppDomainID, ClrInstanceID) 0
+#define FireEtwThreadDomainEnter(ManagedThreadID, AppDomainID, ClrInstanceID) 0
+#define FireEtwILStubGenerated(ClrInstanceID, ModuleID, StubMethodID, StubFlags, ManagedInteropMethodToken, ManagedInteropMethodNamespace, ManagedInteropMethodName, ManagedInteropMethodSignature, NativeMethodSignature, StubMethodSignature, StubMethodILCode) 0
+#define FireEtwILStubCacheHit(ClrInstanceID, ModuleID, StubMethodID, ManagedInteropMethodToken, ManagedInteropMethodNamespace, ManagedInteropMethodName, ManagedInteropMethodSignature) 0
+#define FireEtwDCStartCompleteV2() 0
+#define FireEtwDCEndCompleteV2() 0
+#define FireEtwMethodDCStartV2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags) 0
+#define FireEtwMethodDCEndV2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags) 0
+#define FireEtwMethodDCStartVerboseV2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature) 0
+#define FireEtwMethodDCEndVerboseV2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature) 0
+#define FireEtwMethodLoad(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags) 0
+#define FireEtwMethodLoad_V1(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, ClrInstanceID) 0
+#define FireEtwMethodLoad_V2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, ClrInstanceID, ReJITID) 0
+#define FireEtwMethodUnload(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags) 0
+#define FireEtwMethodUnload_V1(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, ClrInstanceID) 0
+#define FireEtwMethodUnload_V2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, ClrInstanceID, ReJITID) 0
+#define FireEtwMethodLoadVerbose(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature) 0
+#define FireEtwMethodLoadVerbose_V1(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature, ClrInstanceID) 0
+#define FireEtwMethodLoadVerbose_V2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature, ClrInstanceID, ReJITID) 0
+#define FireEtwMethodUnloadVerbose(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature) 0
+#define FireEtwMethodUnloadVerbose_V1(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature, ClrInstanceID) 0
+#define FireEtwMethodUnloadVerbose_V2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature, ClrInstanceID, ReJITID) 0
+#define FireEtwMethodJittingStarted(MethodID, ModuleID, MethodToken, MethodILSize, MethodNamespace, MethodName, MethodSignature) 0
+#define FireEtwMethodJittingStarted_V1(MethodID, ModuleID, MethodToken, MethodILSize, MethodNamespace, MethodName, MethodSignature, ClrInstanceID) 0
+#define FireEtwMethodJitInliningSucceeded(MethodBeingCompiledNamespace, MethodBeingCompiledName, MethodBeingCompiledNameSignature, InlinerNamespace, InlinerName, InlinerNameSignature, InlineeNamespace, InlineeName, InlineeNameSignature, ClrInstanceID) 0
+#define FireEtwMethodJitInliningFailed(MethodBeingCompiledNamespace, MethodBeingCompiledName, MethodBeingCompiledNameSignature, InlinerNamespace, InlinerName, InlinerNameSignature, InlineeNamespace, InlineeName, InlineeNameSignature, FailAlways, FailReason, ClrInstanceID) 0
+#define FireEtwMethodJitTailCallSucceeded(MethodBeingCompiledNamespace, MethodBeingCompiledName, MethodBeingCompiledNameSignature, CallerNamespace, CallerName, CallerNameSignature, CalleeNamespace, CalleeName, CalleeNameSignature, TailPrefix, TailCallType, ClrInstanceID) 0
+#define FireEtwMethodJitTailCallFailed(MethodBeingCompiledNamespace, MethodBeingCompiledName, MethodBeingCompiledNameSignature, CallerNamespace, CallerName, CallerNameSignature, CalleeNamespace, CalleeName, CalleeNameSignature, TailPrefix, FailReason, ClrInstanceID) 0
+#define FireEtwMethodILToNativeMap(MethodID, ReJITID, MethodExtent, CountOfMapEntries, ILOffsets, NativeOffsets, ClrInstanceID) 0
+#define FireEtwModuleDCStartV2(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath) 0
+#define FireEtwModuleDCEndV2(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath) 0
+#define FireEtwDomainModuleLoad(ModuleID, AssemblyID, AppDomainID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath) 0
+#define FireEtwDomainModuleLoad_V1(ModuleID, AssemblyID, AppDomainID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID) 0
+#define FireEtwModuleLoad(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath) 0
+#define FireEtwModuleLoad_V1(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID) 0
+#define FireEtwModuleLoad_V2(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID, ManagedPdbSignature, ManagedPdbAge, ManagedPdbBuildPath, NativePdbSignature, NativePdbAge, NativePdbBuildPath) 0
+#define FireEtwModuleUnload(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath) 0
+#define FireEtwModuleUnload_V1(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID) 0
+#define FireEtwModuleUnload_V2(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID, ManagedPdbSignature, ManagedPdbAge, ManagedPdbBuildPath, NativePdbSignature, NativePdbAge, NativePdbBuildPath) 0
+#define FireEtwAssemblyLoad(AssemblyID, AppDomainID, AssemblyFlags, FullyQualifiedAssemblyName) 0
+#define FireEtwAssemblyLoad_V1(AssemblyID, AppDomainID, BindingID, AssemblyFlags, FullyQualifiedAssemblyName, ClrInstanceID) 0
+#define FireEtwAssemblyUnload(AssemblyID, AppDomainID, AssemblyFlags, FullyQualifiedAssemblyName) 0
+#define FireEtwAssemblyUnload_V1(AssemblyID, AppDomainID, BindingID, AssemblyFlags, FullyQualifiedAssemblyName, ClrInstanceID) 0
+#define FireEtwAppDomainLoad(AppDomainID, AppDomainFlags, AppDomainName) 0
+#define FireEtwAppDomainLoad_V1(AppDomainID, AppDomainFlags, AppDomainName, AppDomainIndex, ClrInstanceID) 0
+#define FireEtwAppDomainUnload(AppDomainID, AppDomainFlags, AppDomainName) 0
+#define FireEtwAppDomainUnload_V1(AppDomainID, AppDomainFlags, AppDomainName, AppDomainIndex, ClrInstanceID) 0
+#define FireEtwModuleRangeLoad(ClrInstanceID, ModuleID, RangeBegin, RangeSize, RangeType) 0
+#define FireEtwStrongNameVerificationStart(VerificationFlags, ErrorCode, FullyQualifiedAssemblyName) 0
+#define FireEtwStrongNameVerificationStart_V1(VerificationFlags, ErrorCode, FullyQualifiedAssemblyName, ClrInstanceID) 0
+#define FireEtwStrongNameVerificationStop(VerificationFlags, ErrorCode, FullyQualifiedAssemblyName) 0
+#define FireEtwStrongNameVerificationStop_V1(VerificationFlags, ErrorCode, FullyQualifiedAssemblyName, ClrInstanceID) 0
+#define FireEtwAuthenticodeVerificationStart(VerificationFlags, ErrorCode, ModulePath) 0
+#define FireEtwAuthenticodeVerificationStart_V1(VerificationFlags, ErrorCode, ModulePath, ClrInstanceID) 0
+#define FireEtwAuthenticodeVerificationStop(VerificationFlags, ErrorCode, ModulePath) 0
+#define FireEtwAuthenticodeVerificationStop_V1(VerificationFlags, ErrorCode, ModulePath, ClrInstanceID) 0
+#define FireEtwRuntimeInformationStart(ClrInstanceID, Sku, BclMajorVersion, BclMinorVersion, BclBuildNumber, BclQfeNumber, VMMajorVersion, VMMinorVersion, VMBuildNumber, VMQfeNumber, StartupFlags, StartupMode, CommandLine, ComObjectGuid, RuntimeDllPath) 0
+#define FireEtwIncreaseMemoryPressure(BytesAllocated, ClrInstanceID) 0
+#define FireEtwDecreaseMemoryPressure(BytesFreed, ClrInstanceID) 0
+#define FireEtwGCMarkWithType(HeapNum, ClrInstanceID, Type, Bytes) 0
+#define FireEtwGCJoin_V2(Heap, JoinTime, JoinType, ClrInstanceID, JoinID) 0
+#define FireEtwGCPerHeapHistory_V3(ClrInstanceID, FreeListAllocated, FreeListRejected, EndOfSegAllocated, CondemnedAllocated, PinnedAllocated, PinnedAllocatedAdvance, RunningFreeListEfficiency, CondemnReasons0, CondemnReasons1, CompactMechanisms, ExpandMechanisms, HeapIndex, ExtraGen0Commit, Count, Values_Len_, Values) 0
+#define FireEtwGCGlobalHeapHistory_V2(FinalYoungestDesired, NumHeaps, CondemnedGeneration, Gen0ReductionCount, Reason, GlobalMechanisms, ClrInstanceID, PauseMode, MemoryPressure) 0
+#define FireEtwDebugIPCEventStart() 0
+#define FireEtwDebugIPCEventEnd() 0
+#define FireEtwDebugExceptionProcessingStart() 0
+#define FireEtwDebugExceptionProcessingEnd() 0
+#define FireEtwCodeSymbols(ModuleId, TotalChunks, ChunkNumber, ChunkLength, Chunk, ClrInstanceID) 0
+#define FireEtwCLRStackWalkDCStart(ClrInstanceID, Reserved1, Reserved2, FrameCount, Stack) 0
+#define FireEtwMethodDCStart(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags) 0
+#define FireEtwMethodDCStart_V1(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, ClrInstanceID) 0
+#define FireEtwMethodDCStart_V2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, ClrInstanceID, ReJITID) 0
+#define FireEtwMethodDCEnd(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags) 0
+#define FireEtwMethodDCEnd_V1(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, ClrInstanceID) 0
+#define FireEtwMethodDCEnd_V2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, ClrInstanceID, ReJITID) 0
+#define FireEtwMethodDCStartVerbose(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature) 0
+#define FireEtwMethodDCStartVerbose_V1(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature, ClrInstanceID) 0
+#define FireEtwMethodDCStartVerbose_V2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature, ClrInstanceID, ReJITID) 0
+#define FireEtwMethodDCEndVerbose(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature) 0
+#define FireEtwMethodDCEndVerbose_V1(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature, ClrInstanceID) 0
+#define FireEtwMethodDCEndVerbose_V2(MethodID, ModuleID, MethodStartAddress, MethodSize, MethodToken, MethodFlags, MethodNamespace, MethodName, MethodSignature, ClrInstanceID, ReJITID) 0
+#define FireEtwDCStartComplete() 0
+#define FireEtwDCStartComplete_V1(ClrInstanceID) 0
+#define FireEtwDCEndComplete() 0
+#define FireEtwDCEndComplete_V1(ClrInstanceID) 0
+#define FireEtwDCStartInit() 0
+#define FireEtwDCStartInit_V1(ClrInstanceID) 0
+#define FireEtwDCEndInit() 0
+#define FireEtwDCEndInit_V1(ClrInstanceID) 0
+#define FireEtwMethodDCStartILToNativeMap(MethodID, ReJITID, MethodExtent, CountOfMapEntries, ILOffsets, NativeOffsets, ClrInstanceID) 0
+#define FireEtwMethodDCEndILToNativeMap(MethodID, ReJITID, MethodExtent, CountOfMapEntries, ILOffsets, NativeOffsets, ClrInstanceID) 0
+#define FireEtwDomainModuleDCStart(ModuleID, AssemblyID, AppDomainID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath) 0
+#define FireEtwDomainModuleDCStart_V1(ModuleID, AssemblyID, AppDomainID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID) 0
+#define FireEtwDomainModuleDCEnd(ModuleID, AssemblyID, AppDomainID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath) 0
+#define FireEtwDomainModuleDCEnd_V1(ModuleID, AssemblyID, AppDomainID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID) 0
+#define FireEtwModuleDCStart(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath) 0
+#define FireEtwModuleDCStart_V1(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID) 0
+#define FireEtwModuleDCStart_V2(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID, ManagedPdbSignature, ManagedPdbAge, ManagedPdbBuildPath, NativePdbSignature, NativePdbAge, NativePdbBuildPath) 0
+#define FireEtwModuleDCEnd(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath) 0
+#define FireEtwModuleDCEnd_V1(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID) 0
+#define FireEtwModuleDCEnd_V2(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID, ManagedPdbSignature, ManagedPdbAge, ManagedPdbBuildPath, NativePdbSignature, NativePdbAge, NativePdbBuildPath) 0
+#define FireEtwAssemblyDCStart(AssemblyID, AppDomainID, AssemblyFlags, FullyQualifiedAssemblyName) 0
+#define FireEtwAssemblyDCStart_V1(AssemblyID, AppDomainID, BindingID, AssemblyFlags, FullyQualifiedAssemblyName, ClrInstanceID) 0
+#define FireEtwAssemblyDCEnd(AssemblyID, AppDomainID, AssemblyFlags, FullyQualifiedAssemblyName) 0
+#define FireEtwAssemblyDCEnd_V1(AssemblyID, AppDomainID, BindingID, AssemblyFlags, FullyQualifiedAssemblyName, ClrInstanceID) 0
+#define FireEtwAppDomainDCStart(AppDomainID, AppDomainFlags, AppDomainName) 0
+#define FireEtwAppDomainDCStart_V1(AppDomainID, AppDomainFlags, AppDomainName, AppDomainIndex, ClrInstanceID) 0
+#define FireEtwAppDomainDCEnd(AppDomainID, AppDomainFlags, AppDomainName) 0
+#define FireEtwAppDomainDCEnd_V1(AppDomainID, AppDomainFlags, AppDomainName, AppDomainIndex, ClrInstanceID) 0
+#define FireEtwThreadDC(ManagedThreadID, AppDomainID, Flags, ManagedThreadIndex, OSThreadID, ClrInstanceID) 0
+#define FireEtwModuleRangeDCStart(ClrInstanceID, ModuleID, RangeBegin, RangeSize, RangeType) 0
+#define FireEtwModuleRangeDCEnd(ClrInstanceID, ModuleID, RangeBegin, RangeSize, RangeType) 0
+#define FireEtwRuntimeInformationDCStart(ClrInstanceID, Sku, BclMajorVersion, BclMinorVersion, BclBuildNumber, BclQfeNumber, VMMajorVersion, VMMinorVersion, VMBuildNumber, VMQfeNumber, StartupFlags, StartupMode, CommandLine, ComObjectGuid, RuntimeDllPath) 0
+#define FireEtwStressLogEvent(Facility, LogLevel, Message) 0
+#define FireEtwStressLogEvent_V1(Facility, LogLevel, Message, ClrInstanceID) 0
+#define FireEtwCLRStackWalkStress(ClrInstanceID, Reserved1, Reserved2, FrameCount, Stack) 0
+#define FireEtwGCDecision(DoCompact) 0
+#define FireEtwGCDecision_V1(DoCompact, ClrInstanceID) 0
+#define FireEtwGCSettings(SegmentSize, LargeObjectSegmentSize, ServerGC) 0
+#define FireEtwGCSettings_V1(SegmentSize, LargeObjectSegmentSize, ServerGC, ClrInstanceID) 0
+#define FireEtwGCOptimized(DesiredAllocation, NewAllocation, GenerationNumber) 0
+#define FireEtwGCOptimized_V1(DesiredAllocation, NewAllocation, GenerationNumber, ClrInstanceID) 0
+#define FireEtwGCPerHeapHistory() 0
+#define FireEtwGCPerHeapHistory_V1(ClrInstanceID) 0
+#define FireEtwGCGlobalHeapHistory(FinalYoungestDesired, NumHeaps, CondemnedGeneration, Gen0ReductionCount, Reason, GlobalMechanisms) 0
+#define FireEtwGCGlobalHeapHistory_V1(FinalYoungestDesired, NumHeaps, CondemnedGeneration, Gen0ReductionCount, Reason, GlobalMechanisms, ClrInstanceID) 0
+#define FireEtwGCJoin(Heap, JoinTime, JoinType) 0
+#define FireEtwGCJoin_V1(Heap, JoinTime, JoinType, ClrInstanceID) 0
+#define FireEtwPrvGCMarkStackRoots(HeapNum) 0
+#define FireEtwPrvGCMarkStackRoots_V1(HeapNum, ClrInstanceID) 0
+#define FireEtwPrvGCMarkFinalizeQueueRoots(HeapNum) 0
+#define FireEtwPrvGCMarkFinalizeQueueRoots_V1(HeapNum, ClrInstanceID) 0
+#define FireEtwPrvGCMarkHandles(HeapNum) 0
+#define FireEtwPrvGCMarkHandles_V1(HeapNum, ClrInstanceID) 0
+#define FireEtwPrvGCMarkCards(HeapNum) 0
+#define FireEtwPrvGCMarkCards_V1(HeapNum, ClrInstanceID) 0
+#define FireEtwBGCBegin(ClrInstanceID) 0
+#define FireEtwBGC1stNonConEnd(ClrInstanceID) 0
+#define FireEtwBGC1stConEnd(ClrInstanceID) 0
+#define FireEtwBGC2ndNonConBegin(ClrInstanceID) 0
+#define FireEtwBGC2ndNonConEnd(ClrInstanceID) 0
+#define FireEtwBGC2ndConBegin(ClrInstanceID) 0
+#define FireEtwBGC2ndConEnd(ClrInstanceID) 0
+#define FireEtwBGCPlanEnd(ClrInstanceID) 0
+#define FireEtwBGCSweepEnd(ClrInstanceID) 0
+#define FireEtwBGCDrainMark(Objects, ClrInstanceID) 0
+#define FireEtwBGCRevisit(Pages, Objects, IsLarge, ClrInstanceID) 0
+#define FireEtwBGCOverflow(Min, Max, Objects, IsLarge, ClrInstanceID) 0
+#define FireEtwBGCAllocWaitBegin(Reason, ClrInstanceID) 0
+#define FireEtwBGCAllocWaitEnd(Reason, ClrInstanceID) 0
+#define FireEtwGCFullNotify(GenNumber, IsAlloc) 0
+#define FireEtwGCFullNotify_V1(GenNumber, IsAlloc, ClrInstanceID) 0
+#define FireEtwEEStartupStart() 0
+#define FireEtwEEStartupStart_V1(ClrInstanceID) 0
+#define FireEtwEEStartupEnd() 0
+#define FireEtwEEStartupEnd_V1(ClrInstanceID) 0
+#define FireEtwEEConfigSetup() 0
+#define FireEtwEEConfigSetup_V1(ClrInstanceID) 0
+#define FireEtwEEConfigSetupEnd() 0
+#define FireEtwEEConfigSetupEnd_V1(ClrInstanceID) 0
+#define FireEtwLdSysBases() 0
+#define FireEtwLdSysBases_V1(ClrInstanceID) 0
+#define FireEtwLdSysBasesEnd() 0
+#define FireEtwLdSysBasesEnd_V1(ClrInstanceID) 0
+#define FireEtwExecExe() 0
+#define FireEtwExecExe_V1(ClrInstanceID) 0
+#define FireEtwExecExeEnd() 0
+#define FireEtwExecExeEnd_V1(ClrInstanceID) 0
+#define FireEtwMain() 0
+#define FireEtwMain_V1(ClrInstanceID) 0
+#define FireEtwMainEnd() 0
+#define FireEtwMainEnd_V1(ClrInstanceID) 0
+#define FireEtwApplyPolicyStart() 0
+#define FireEtwApplyPolicyStart_V1(ClrInstanceID) 0
+#define FireEtwApplyPolicyEnd() 0
+#define FireEtwApplyPolicyEnd_V1(ClrInstanceID) 0
+#define FireEtwLdLibShFolder() 0
+#define FireEtwLdLibShFolder_V1(ClrInstanceID) 0
+#define FireEtwLdLibShFolderEnd() 0
+#define FireEtwLdLibShFolderEnd_V1(ClrInstanceID) 0
+#define FireEtwPrestubWorker() 0
+#define FireEtwPrestubWorker_V1(ClrInstanceID) 0
+#define FireEtwPrestubWorkerEnd() 0
+#define FireEtwPrestubWorkerEnd_V1(ClrInstanceID) 0
+#define FireEtwGetInstallationStart() 0
+#define FireEtwGetInstallationStart_V1(ClrInstanceID) 0
+#define FireEtwGetInstallationEnd() 0
+#define FireEtwGetInstallationEnd_V1(ClrInstanceID) 0
+#define FireEtwOpenHModule() 0
+#define FireEtwOpenHModule_V1(ClrInstanceID) 0
+#define FireEtwOpenHModuleEnd() 0
+#define FireEtwOpenHModuleEnd_V1(ClrInstanceID) 0
+#define FireEtwExplicitBindStart() 0
+#define FireEtwExplicitBindStart_V1(ClrInstanceID) 0
+#define FireEtwExplicitBindEnd() 0
+#define FireEtwExplicitBindEnd_V1(ClrInstanceID) 0
+#define FireEtwParseXml() 0
+#define FireEtwParseXml_V1(ClrInstanceID) 0
+#define FireEtwParseXmlEnd() 0
+#define FireEtwParseXmlEnd_V1(ClrInstanceID) 0
+#define FireEtwInitDefaultDomain() 0
+#define FireEtwInitDefaultDomain_V1(ClrInstanceID) 0
+#define FireEtwInitDefaultDomainEnd() 0
+#define FireEtwInitDefaultDomainEnd_V1(ClrInstanceID) 0
+#define FireEtwInitSecurity() 0
+#define FireEtwInitSecurity_V1(ClrInstanceID) 0
+#define FireEtwInitSecurityEnd() 0
+#define FireEtwInitSecurityEnd_V1(ClrInstanceID) 0
+#define FireEtwAllowBindingRedirs() 0
+#define FireEtwAllowBindingRedirs_V1(ClrInstanceID) 0
+#define FireEtwAllowBindingRedirsEnd() 0
+#define FireEtwAllowBindingRedirsEnd_V1(ClrInstanceID) 0
+#define FireEtwEEConfigSync() 0
+#define FireEtwEEConfigSync_V1(ClrInstanceID) 0
+#define FireEtwEEConfigSyncEnd() 0
+#define FireEtwEEConfigSyncEnd_V1(ClrInstanceID) 0
+#define FireEtwFusionBinding() 0
+#define FireEtwFusionBinding_V1(ClrInstanceID) 0
+#define FireEtwFusionBindingEnd() 0
+#define FireEtwFusionBindingEnd_V1(ClrInstanceID) 0
+#define FireEtwLoaderCatchCall() 0
+#define FireEtwLoaderCatchCall_V1(ClrInstanceID) 0
+#define FireEtwLoaderCatchCallEnd() 0
+#define FireEtwLoaderCatchCallEnd_V1(ClrInstanceID) 0
+#define FireEtwFusionInit() 0
+#define FireEtwFusionInit_V1(ClrInstanceID) 0
+#define FireEtwFusionInitEnd() 0
+#define FireEtwFusionInitEnd_V1(ClrInstanceID) 0
+#define FireEtwFusionAppCtx() 0
+#define FireEtwFusionAppCtx_V1(ClrInstanceID) 0
+#define FireEtwFusionAppCtxEnd() 0
+#define FireEtwFusionAppCtxEnd_V1(ClrInstanceID) 0
+#define FireEtwFusion2EE() 0
+#define FireEtwFusion2EE_V1(ClrInstanceID) 0
+#define FireEtwFusion2EEEnd() 0
+#define FireEtwFusion2EEEnd_V1(ClrInstanceID) 0
+#define FireEtwSecurityCatchCall() 0
+#define FireEtwSecurityCatchCall_V1(ClrInstanceID) 0
+#define FireEtwSecurityCatchCallEnd() 0
+#define FireEtwSecurityCatchCallEnd_V1(ClrInstanceID) 0
+#define FireEtwCLRStackWalkPrivate(ClrInstanceID, Reserved1, Reserved2, FrameCount, Stack) 0
+#define FireEtwModuleRangeLoadPrivate(ClrInstanceID, ModuleID, RangeBegin, RangeSize, RangeType, IBCType, SectionType) 0
+#define FireEtwBindingPolicyPhaseStart(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
+#define FireEtwBindingPolicyPhaseEnd(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
+#define FireEtwBindingNgenPhaseStart(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
+#define FireEtwBindingNgenPhaseEnd(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
+#define FireEtwBindingLookupAndProbingPhaseStart(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
+#define FireEtwBindingLookupAndProbingPhaseEnd(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
+#define FireEtwLoaderPhaseStart(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
+#define FireEtwLoaderPhaseEnd(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
+#define FireEtwBindingPhaseStart(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
+#define FireEtwBindingPhaseEnd(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
+#define FireEtwBindingDownloadPhaseStart(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
+#define FireEtwBindingDownloadPhaseEnd(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
+#define FireEtwLoaderAssemblyInitPhaseStart(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
+#define FireEtwLoaderAssemblyInitPhaseEnd(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
+#define FireEtwLoaderMappingPhaseStart(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
+#define FireEtwLoaderMappingPhaseEnd(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
+#define FireEtwLoaderDeliverEventsPhaseStart(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
+#define FireEtwLoaderDeliverEventsPhaseEnd(AppDomainID, LoadContextID, FromLoaderCache, DynamicLoad, AssemblyCodebase, AssemblyName, ClrInstanceID) 0
+#define FireEtwEvidenceGenerated(Type, AppDomain, ILImage, ClrInstanceID) 0
+#define FireEtwModuleTransparencyComputationStart(Module, AppDomainID, ClrInstanceID) 0
+#define FireEtwModuleTransparencyComputationEnd(Module, AppDomainID, IsAllCritical, IsAllTransparent, IsTreatAsSafe, IsOpportunisticallyCritical, SecurityRuleSet, ClrInstanceID) 0
+#define FireEtwTypeTransparencyComputationStart(Type, Module, AppDomainID, ClrInstanceID) 0
+#define FireEtwTypeTransparencyComputationEnd(Type, Module, AppDomainID, IsAllCritical, IsAllTransparent, IsCritical, IsTreatAsSafe, ClrInstanceID) 0
+#define FireEtwMethodTransparencyComputationStart(Method, Module, AppDomainID, ClrInstanceID) 0
+#define FireEtwMethodTransparencyComputationEnd(Method, Module, AppDomainID, IsCritical, IsTreatAsSafe, ClrInstanceID) 0
+#define FireEtwFieldTransparencyComputationStart(Field, Module, AppDomainID, ClrInstanceID) 0
+#define FireEtwFieldTransparencyComputationEnd(Field, Module, AppDomainID, IsCritical, IsTreatAsSafe, ClrInstanceID) 0
+#define FireEtwTokenTransparencyComputationStart(Token, Module, AppDomainID, ClrInstanceID) 0
+#define FireEtwTokenTransparencyComputationEnd(Token, Module, AppDomainID, IsCritical, IsTreatAsSafe, ClrInstanceID) 0
+#define FireEtwNgenBindEvent(ClrInstanceID, BindingID, ReasonCode, AssemblyName) 0
+#define FireEtwFailFast(FailFastUserMessage, FailedEIP, OSExitCode, ClrExitCode, ClrInstanceID) 0
+#define FireEtwPrvFinalizeObject(TypeID, ObjectID, ClrInstanceID, TypeName) 0
+#define FireEtwCCWRefCountChange(HandleID, ObjectID, COMInterfacePointer, NewRefCount, AppDomainID, ClassName, NameSpace, Operation, ClrInstanceID) 0
+#define FireEtwPrvSetGCHandle(HandleID, ObjectID, Kind, Generation, AppDomainID, ClrInstanceID) 0
+#define FireEtwPrvDestroyGCHandle(HandleID, ClrInstanceID) 0
+#define FireEtwFusionMessageEvent(ClrInstanceID, Prepend, Message) 0
+#define FireEtwFusionErrorCodeEvent(ClrInstanceID, Category, ErrorCode) 0
+#define FireEtwPinPlugAtGCTime(PlugStart, PlugEnd, GapBeforeSize, ClrInstanceID) 0
+#define FireEtwAllocRequest(LoaderHeapPtr, MemoryAddress, RequestSize, Unused1, Unused2, ClrInstanceID) 0
+#define FireEtwMulticoreJit(ClrInstanceID, String1, String2, Int1, Int2, Int3) 0
+#define FireEtwMulticoreJitMethodCodeReturned(ClrInstanceID, ModuleID, MethodID) 0
+#define FireEtwIInspectableRuntimeClassName(TypeName, ClrInstanceID) 0
+#define FireEtwWinRTUnbox(TypeName, SecondTypeName, ClrInstanceID) 0
+#define FireEtwCreateRCW(TypeName, ClrInstanceID) 0
+#define FireEtwRCWVariance(TypeName, InterfaceTypeName, VariantInterfaceTypeName, ClrInstanceID) 0
+#define FireEtwRCWIEnumerableCasting(TypeName, SecondTypeName, ClrInstanceID) 0
+#define FireEtwCreateCCW(TypeName, ClrInstanceID) 0
+#define FireEtwCCWVariance(TypeName, InterfaceTypeName, VariantInterfaceTypeName, ClrInstanceID) 0
+#define FireEtwObjectVariantMarshallingToNative(TypeName, Int1, ClrInstanceID) 0
+#define FireEtwGetTypeFromGUID(TypeName, SecondTypeName, ClrInstanceID) 0
+#define FireEtwGetTypeFromProgID(TypeName, SecondTypeName, ClrInstanceID) 0
+#define FireEtwConvertToCallbackEtw(TypeName, SecondTypeName, ClrInstanceID) 0
+#define FireEtwBeginCreateManagedReference(ClrInstanceID) 0
+#define FireEtwEndCreateManagedReference(ClrInstanceID) 0
+#define FireEtwObjectVariantMarshallingToManaged(TypeName, Int1, ClrInstanceID) 0
diff --git a/src/gc/env/gcenv.base.h b/src/gc/env/gcenv.base.h
new file mode 100644
index 0000000000..a94f1a6394
--- /dev/null
+++ b/src/gc/env/gcenv.base.h
@@ -0,0 +1,626 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+#ifndef __GCENV_BASE_INCLUDED__
+#define __GCENV_BASE_INCLUDED__
+//
+// Sets up basic environment for CLR GC
+//
+
+#define FEATURE_REDHAWK 1
+#define FEATURE_CONSERVATIVE_GC 1
+
+#define GCENV_INCLUDED
+
+#define REDHAWK_PALIMPORT extern "C"
+#define REDHAWK_PALAPI __stdcall
+
+#ifndef _MSC_VER
+#define __stdcall
+#ifdef __clang__
+#define __forceinline __attribute__((always_inline)) inline
+#else // __clang__
+#define __forceinline inline
+#endif // __clang__
+#endif // !_MSC_VER
+
+#ifndef SIZE_T_MAX
+#define SIZE_T_MAX ((size_t)-1)
+#endif
+#ifndef SSIZE_T_MAX
+#define SSIZE_T_MAX ((ptrdiff_t)(SIZE_T_MAX / 2))
+#endif
+
+#ifndef _INC_WINDOWS
+// -----------------------------------------------------------------------------------------------------------
+//
+// Aliases for Win32 types
+//
+
+typedef uint32_t BOOL;
+typedef uint32_t DWORD;
+
+// -----------------------------------------------------------------------------------------------------------
+// HRESULT subset.
+
+#ifdef PLATFORM_UNIX
+typedef int32_t HRESULT;
+#else
+// this must exactly match the typedef used by windows.h
+typedef long HRESULT;
+#endif
+
+#define SUCCEEDED(_hr) ((HRESULT)(_hr) >= 0)
+#define FAILED(_hr) ((HRESULT)(_hr) < 0)
+
+// Converts a Win32 error code to an HRESULT: values <= 0 pass through unchanged,
+// positive codes are wrapped with the error-severity bit and FACILITY_WIN32 (7),
+// matching the Windows HRESULT_FROM_WIN32 macro.
+inline HRESULT HRESULT_FROM_WIN32(unsigned long x)
+{
+ return (HRESULT)(x) <= 0 ? (HRESULT)(x) : (HRESULT) (((x) & 0x0000FFFF) | (7 << 16) | 0x80000000);
+}
+
+#define S_OK 0x0
+#define S_FALSE 0x1
+#define E_FAIL 0x80004005
+#define E_OUTOFMEMORY 0x8007000E
+#define E_UNEXPECTED 0x8000FFFF
+#define E_NOTIMPL 0x80004001
+#define E_INVALIDARG 0x80070057
+
+#define NOERROR 0x0
+#define ERROR_TIMEOUT 1460
+
+#define TRUE true
+#define FALSE false
+
+#define CALLBACK __stdcall
+#define FORCEINLINE __forceinline
+
+#define INFINITE 0xFFFFFFFF
+
+#define ZeroMemory(Destination,Length) memset((Destination),0,(Length))
+
+#ifndef _countof
+#define _countof(_array) (sizeof(_array)/sizeof(_array[0]))
+#endif
+
+#ifndef min
+#define min(a,b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef max
+#define max(a,b) (((a) > (b)) ? (a) : (b))
+#endif
+
+#define C_ASSERT(cond) static_assert( cond, #cond )
+
+#define UNREFERENCED_PARAMETER(P) (void)(P)
+
+#ifdef PLATFORM_UNIX
+#define _vsnprintf vsnprintf
+#define sprintf_s snprintf
+#define swprintf_s swprintf
+#endif
+
+#ifdef UNICODE
+#define _tcslen wcslen
+#define _tcscpy wcscpy
+#define _stprintf_s swprintf_s
+#define _tfopen _wfopen
+#else
+#define _tcslen strlen
+#define _tcscpy strcpy
+#define _stprintf_s sprintf_s
+#define _tfopen fopen
+#endif
+
+#define WINAPI __stdcall
+
+typedef DWORD (WINAPI *PTHREAD_START_ROUTINE)(void* lpThreadParameter);
+
+#define WAIT_OBJECT_0 0
+#define WAIT_TIMEOUT 258
+#define WAIT_FAILED 0xFFFFFFFF
+
+#if defined(_MSC_VER)
+ #if defined(_ARM_)
+
+ __forceinline void YieldProcessor() { }
+ extern "C" void __emit(const unsigned __int32 opcode);
+ #pragma intrinsic(__emit)
+ #define MemoryBarrier() { __emit(0xF3BF); __emit(0x8F5F); }
+
+ #elif defined(_ARM64_)
+
+ extern "C" void __yield(void);
+ #pragma intrinsic(__yield)
+ __forceinline void YieldProcessor() { __yield();}
+
+ extern "C" void __dmb(const unsigned __int32 _Type);
+ #pragma intrinsic(__dmb)
+ #define MemoryBarrier() { __dmb(_ARM64_BARRIER_SY); }
+
+ #elif defined(_AMD64_)
+
+ extern "C" void
+ _mm_pause (
+ void
+ );
+
+ extern "C" void
+ _mm_mfence (
+ void
+ );
+
+ #pragma intrinsic(_mm_pause)
+ #pragma intrinsic(_mm_mfence)
+
+ #define YieldProcessor _mm_pause
+ #define MemoryBarrier _mm_mfence
+
+ #elif defined(_X86_)
+
+ #define YieldProcessor() __asm { rep nop }
+
+ __forceinline void MemoryBarrier()
+ {
+ int32_t Barrier;
+ __asm {
+ xchg Barrier, eax
+ }
+ }
+
+ #else // !_ARM_ && !_AMD64_ && !_X86_
+ #error Unsupported architecture
+ #endif
+#else // _MSC_VER
+
+#endif // _MSC_VER
+
+// Win32 PROCESSOR_NUMBER: identifies a logical processor by its processor group
+// and index within that group (consumed by the NUMA/CPU-group helpers below).
+typedef struct _PROCESSOR_NUMBER {
+ uint16_t Group;
+ uint8_t Number;
+ uint8_t Reserved;
+} PROCESSOR_NUMBER, *PPROCESSOR_NUMBER;
+
+#endif // _INC_WINDOWS
+
+// -----------------------------------------------------------------------------------------------------------
+//
+// The subset of the contract code required by the GC/HandleTable sources. If Redhawk moves to support
+// contracts these local definitions will disappear and be replaced by real implementations.
+//
+
+#define LEAF_CONTRACT
+#define LIMITED_METHOD_CONTRACT
+#define LIMITED_METHOD_DAC_CONTRACT
+#define WRAPPER_CONTRACT
+#define WRAPPER_NO_CONTRACT
+#define STATIC_CONTRACT_LEAF
+#define STATIC_CONTRACT_DEBUG_ONLY
+#define STATIC_CONTRACT_NOTHROW
+#define STATIC_CONTRACT_CAN_TAKE_LOCK
+#define STATIC_CONTRACT_SO_TOLERANT
+#define STATIC_CONTRACT_GC_NOTRIGGER
+#define STATIC_CONTRACT_MODE_COOPERATIVE
+#define CONTRACTL
+#define CONTRACT(_expr)
+#define CONTRACT_VOID
+#define THROWS
+#define NOTHROW
+#define INSTANCE_CHECK
+#define MODE_COOPERATIVE
+#define MODE_ANY
+#define SO_INTOLERANT
+#define SO_TOLERANT
+#define GC_TRIGGERS
+#define GC_NOTRIGGER
+#define CAN_TAKE_LOCK
+#define SUPPORTS_DAC
+#define FORBID_FAULT
+#define CONTRACTL_END
+#define CONTRACT_END
+#define TRIGGERSGC()
+#define WRAPPER(_contract)
+#define DISABLED(_contract)
+#define INJECT_FAULT(_expr)
+#define INJECTFAULT_HANDLETABLE 0x1
+#define INJECTFAULT_GCHEAP 0x2
+#define FAULT_NOT_FATAL()
+#define BEGIN_DEBUG_ONLY_CODE
+#define END_DEBUG_ONLY_CODE
+#define BEGIN_GETTHREAD_ALLOWED
+#define END_GETTHREAD_ALLOWED
+#define LEAF_DAC_CONTRACT
+#define PRECONDITION(_expr)
+#define POSTCONDITION(_expr)
+#define RETURN return
+#define CONDITIONAL_CONTRACT_VIOLATION(_violation, _expr)
+
+// -----------------------------------------------------------------------------------------------------------
+//
+// Data access macros
+//
+#ifdef DACCESS_COMPILE
+#include "daccess.h"
+#else // DACCESS_COMPILE
+typedef uintptr_t TADDR;
+
+#define PTR_TO_TADDR(ptr) ((TADDR)(ptr))
+
+#define DPTR(type) type*
+#define SPTR(type) type*
+
+#define GVAL_DECL(type, var) \
+ extern type var
+#define GVAL_IMPL(type, var) \
+ type var
+#define GVAL_IMPL_INIT(type, var, init) \
+ type var = init
+
+#define GPTR_DECL(type, var) \
+ extern type* var
+#define GPTR_IMPL(type, var) \
+ type* var
+#define GPTR_IMPL_INIT(type, var, init) \
+ type* var = init
+
+#define SPTR_DECL(type, var) \
+ static type* var
+#define SPTR_IMPL(type, cls, var) \
+ type * cls::var
+#define SPTR_IMPL_NS(type, ns, cls, var) \
+ type * cls::var
+#define SPTR_IMPL_NS_INIT(type, ns, cls, var, init) \
+ type * cls::var = init
+
+#define SVAL_DECL(type, var) \
+ static type var
+#define SVAL_IMPL_NS(type, ns, cls, var) \
+ type cls::var
+#define SVAL_IMPL_NS_INIT(type, ns, cls, var, init) \
+ type cls::var = init
+
+#define GARY_DECL(type, var, size) \
+ extern type var[size]
+#define GARY_IMPL(type, var, size) \
+ type var[size]
+
+struct _DacGlobals;
+#endif // DACCESS_COMPILE
+
+typedef DPTR(size_t) PTR_size_t;
+typedef DPTR(uint8_t) PTR_uint8_t;
+
+// -----------------------------------------------------------------------------------------------------------
+
+#define DATA_ALIGNMENT sizeof(uintptr_t)
+
+#define RAW_KEYWORD(x) x
+
+#define DECLSPEC_ALIGN(x) __declspec(align(x))
+
+#define OS_PAGE_SIZE 4096
+
+#ifndef _ASSERTE
+#define _ASSERTE(_expr) ASSERT(_expr)
+#endif
+
+#define CONSISTENCY_CHECK(_expr) ASSERT(_expr)
+
+#define PREFIX_ASSUME(cond) ASSERT(cond)
+
+#define EEPOLICY_HANDLE_FATAL_ERROR(error) ASSERT(!"EEPOLICY_HANDLE_FATAL_ERROR")
+
+#define UI64(_literal) _literal##ULL
+
+class ObjHeader;
+class MethodTable;
+class Object;
+class ArrayBase;
+
+// Various types used to refer to object references or handles. This will get more complex if we decide
+// Redhawk wants to wrap object references in the debug build.
+typedef DPTR(Object) PTR_Object;
+typedef DPTR(PTR_Object) PTR_PTR_Object;
+
+typedef PTR_Object OBJECTREF;
+typedef PTR_PTR_Object PTR_OBJECTREF;
+typedef PTR_Object _UNCHECKED_OBJECTREF;
+typedef PTR_PTR_Object PTR_UNCHECKED_OBJECTREF;
+
+#ifndef DACCESS_COMPILE
+// Opaque GC handle type: a pointer to a dummy struct gives callers type safety
+// without exposing the handle table's internal representation.
+struct OBJECTHANDLE__
+{
+ void* unused;
+};
+typedef struct OBJECTHANDLE__* OBJECTHANDLE;
+#else
+// Under DAC, handles are plain target addresses.
+typedef TADDR OBJECTHANDLE;
+#endif
+
+// With no object reference wrapping the following macros are very simple.
+#define ObjectToOBJECTREF(_obj) (OBJECTREF)(_obj)
+#define OBJECTREFToObject(_obj) (Object*)(_obj)
+
+#define VALIDATEOBJECTREF(_objref) _objref;
+
+#define VOLATILE(T) T volatile
+
+//
+// This code is extremely compiler- and CPU-specific, and will need to be altered to
+// support new compilers and/or CPUs. Here we enforce that we can only compile using
+// VC++, or Clang on x86, AMD64, ARM and ARM64.
+//
+#if !defined(_MSC_VER) && !defined(__clang__)
+#error The Volatile type is currently only defined for Visual C++ and Clang
+#endif
+
+#if defined(__clang__) && !defined(_X86_) && !defined(_AMD64_) && !defined(_ARM_) && !defined(_ARM64_)
+#error The Volatile type is currently only defined for Clang when targeting x86, AMD64, ARM or ARM64 CPUs
+#endif
+
+#if defined(__clang__)
+#if defined(_ARM_) || defined(_ARM64_)
+// This is functionally equivalent to the MemoryBarrier() macro used on ARM on Windows.
+#define VOLATILE_MEMORY_BARRIER() asm volatile ("dmb sy" : : : "memory")
+#else
+//
+// For Clang, we prevent reordering by the compiler by inserting the following after a volatile
+// load (to prevent subsequent operations from moving before the read), and before a volatile
+// write (to prevent prior operations from moving past the write). We don't need to do anything
+// special to prevent CPU reorderings, because the x86 and AMD64 architectures are already
+// sufficiently constrained for our purposes. If we ever need to run on weaker CPU architectures
+// (such as PowerPC), then we will need to do more work.
+//
+// Please do not use this macro outside of this file. It is subject to change or removal without
+// notice.
+//
+#define VOLATILE_MEMORY_BARRIER() asm volatile ("" : : : "memory")
+#endif // !_ARM_
+#elif defined(_ARM_) && _ISO_VOLATILE
+// ARM has a very weak memory model and very few tools to control that model. We're forced to perform a full
+// memory barrier to preserve the volatile semantics. Technically this is only necessary on MP systems but we
+// currently don't have a cheap way to determine the number of CPUs from this header file. Revisit this if it
+// turns out to be a performance issue for the uni-proc case.
+#define VOLATILE_MEMORY_BARRIER() MemoryBarrier()
+#else
+//
+// On VC++, reorderings at the compiler and machine level are prevented by the use of the
+// "volatile" keyword in VolatileLoad and VolatileStore. This should work on any CPU architecture
+// targeted by VC++ with /iso_volatile-.
+//
+#define VOLATILE_MEMORY_BARRIER()
+#endif
+
+//
+// VolatileLoad loads a T from a pointer to T. It is guaranteed that this load will not be optimized
+// away by the compiler, and that any operation that occurs after this load, in program order, will
+// not be moved before this load. In general it is not guaranteed that the load will be atomic, though
+// this is the case for most aligned scalar data types. If you need atomic loads or stores, you need
+// to consult the compiler and CPU manuals to find which circumstances allow atomicity.
+//
+// Volatile read of *pt followed by VOLATILE_MEMORY_BARRIER() so that later
+// operations cannot be reordered before the load (contract described above).
+template<typename T>
+inline
+T VolatileLoad(T const * pt)
+{
+ T val = *(T volatile const *)pt;
+ VOLATILE_MEMORY_BARRIER();
+ return val;
+}
+
+// Like VolatileLoad but with no trailing barrier: only the load itself is kept
+// from being optimized away (a plain load suffices under DACCESS_COMPILE).
+template<typename T>
+inline
+T VolatileLoadWithoutBarrier(T const * pt)
+{
+#ifndef DACCESS_COMPILE
+ T val = *(T volatile const *)pt;
+#else
+ T val = *pt;
+#endif
+ return val;
+}
+
+//
+// VolatileStore stores a T into the target of a pointer to T. It is guaranteed that this store will
+// not be optimized away by the compiler, and that any operation that occurs before this store, in program
+// order, will not be moved after this store. In general, it is not guaranteed that the store will be
+// atomic, though this is the case for most aligned scalar data types. If you need atomic loads or stores,
+// you need to consult the compiler and CPU manuals to find which circumstances allow atomicity.
+//
+template<typename T>
+inline
+void VolatileStore(T* pt, T val)
+{
+ // Barrier first: prior operations may not move past the volatile write.
+ VOLATILE_MEMORY_BARRIER();
+ *(T volatile *)pt = val;
+}
+
+extern GCSystemInfo g_SystemInfo;
+
+extern MethodTable * g_pFreeObjectMethodTable;
+
+extern int32_t g_TrapReturningThreads;
+
+extern bool g_fFinalizerRunOnShutDown;
+
+//
+// Locks
+//
+
+struct alloc_context;
+class Thread;
+
+Thread * GetThread();
+
+typedef void (CALLBACK *HANDLESCANPROC)(PTR_UNCHECKED_OBJECTREF pref, uintptr_t *pExtraInfo, uintptr_t param1, uintptr_t param2);
+
+// Static interface to the dedicated finalizer thread; implementations are
+// supplied by the execution engine.
+class FinalizerThread
+{
+public:
+ static bool Initialize();
+ static void EnableFinalization();
+
+ static bool HaveExtraWorkForFinalizer();
+
+ static bool IsCurrentThreadFinalizer();
+ static void Wait(DWORD timeout, bool allowReentrantWait = false);
+ static void SignalFinalizationDone(bool fFinalizer);
+ static void SetFinalizerThread(Thread * pThread);
+ static HANDLE GetFinalizerEvent();
+};
+
+bool IsGCSpecialThread();
+
+// Debug-only query; this environment has no "special" EE threads, so always false.
+inline bool dbgOnly_IsSpecialEEThread()
+{
+ return false;
+}
+
+#define ClrFlsSetThreadType(type)
+
+//
+// Performance logging
+//
+
+#define COUNTER_ONLY(x)
+
+//#include "etmdummy.h"
+//#define ETW_EVENT_ENABLED(e,f) false
+
+// Minimal subset of the ETW namespace required by the GC: the kinds of GC
+// roots that can be reported when scanning the heap.
+namespace ETW
+{
+ typedef enum _GC_ROOT_KIND {
+ GC_ROOT_STACK = 0,
+ GC_ROOT_FQ = 1,
+ GC_ROOT_HANDLES = 2,
+ GC_ROOT_OLDER = 3,
+ GC_ROOT_SIZEDREF = 4,
+ GC_ROOT_OVERFLOW = 5
+ } GC_ROOT_KIND;
+};
+
+//
+// Logging
+//
+
+void LogSpewAlways(const char *fmt, ...);
+
+#define DEFAULT_GC_PRN_LVL 3
+
+// -----------------------------------------------------------------------------------------------------------
+
+void StompWriteBarrierEphemeral(bool isRuntimeSuspended);
+void StompWriteBarrierResize(bool isRuntimeSuspended, bool bReqUpperBoundsCheck);
+bool IsGCThread();
+
+// Stub of the CLR configuration reader: enumerates the config keys the GC
+// consults; the EE supplies the two GetConfigValue implementations.
+class CLRConfig
+{
+public:
+ enum CLRConfigTypes
+ {
+ UNSUPPORTED_GCLogEnabled,
+ UNSUPPORTED_GCLogFile,
+ UNSUPPORTED_GCLogFileSize,
+ UNSUPPORTED_GCConfigLogEnabled,
+ UNSUPPORTED_GCConfigLogFile,
+ UNSUPPORTED_BGCSpinCount,
+ UNSUPPORTED_BGCSpin,
+ EXTERNAL_GCStressStart,
+ INTERNAL_GCStressStartAtJit,
+ INTERNAL_DbgDACSkipVerifyDlls,
+ Config_COUNT
+ };
+
+ // Both aliases map onto the same enum; the overloads below distinguish
+ // DWORD-valued keys from string-valued keys.
+ typedef CLRConfigTypes ConfigDWORDInfo;
+ typedef CLRConfigTypes ConfigStringInfo;
+
+ static uint32_t GetConfigValue(ConfigDWORDInfo eType);
+ static HRESULT GetConfigValue(ConfigStringInfo /*eType*/, __out_z TCHAR * * outVal);
+};
+
+// Returns true when val is exactly representable in an unsigned 8-bit integer.
+inline bool FitsInU1(uint64_t val)
+{
+ return val == (uint64_t)(uint8_t)val;
+}
+
+// -----------------------------------------------------------------------------------------------------------
+//
+// AppDomain emulation. We don't have these in Redhawk, so instead we emulate the bare minimum of the API
+// touched by the GC/HandleTable and pretend we have precisely one (default) appdomain.
+//
+
+#define RH_DEFAULT_DOMAIN_ID 1
+
+// Index of an AppDomain; defaults to the single default-domain id in this emulation.
+struct ADIndex
+{
+ DWORD m_dwIndex;
+
+ ADIndex () : m_dwIndex(RH_DEFAULT_DOMAIN_ID) {}
+ explicit ADIndex (DWORD id) : m_dwIndex(id) {}
+ BOOL operator==(const ADIndex& ad) const { return m_dwIndex == ad.m_dwIndex; }
+ BOOL operator!=(const ADIndex& ad) const { return m_dwIndex != ad.m_dwIndex; }
+};
+
+// Minimal AppDomain stand-in: one default domain that is never being unloaded
+// and always has handle-table access.
+class AppDomain
+{
+public:
+ ADIndex GetIndex() { return ADIndex(RH_DEFAULT_DOMAIN_ID); }
+ BOOL IsRudeUnload() { return FALSE; }
+ BOOL NoAccessToHandleTable() { return FALSE; }
+ void DecNumSizedRefHandles() {}
+};
+
+// Minimal SystemDomain stand-in. Note GetAppDomainAtIndex returns the non-NULL
+// sentinel (AppDomain*)-1 rather than a usable object; callers only test it
+// against NULL.
+class SystemDomain
+{
+public:
+ static SystemDomain *System() { return NULL; }
+ static AppDomain *GetAppDomainAtIndex(ADIndex /*index*/) { return (AppDomain *)-1; }
+ static AppDomain *AppDomainBeingUnloaded() { return NULL; }
+ AppDomain *DefaultDomain() { return NULL; }
+ DWORD GetTotalNumSizedRefHandles() { return 0; }
+};
+
+#ifdef STRESS_HEAP
+// GC stress support: a global disable count gates whether stress-induced GCs
+// may fire; GCStress<tp>::IsEnabled additionally consults the configured
+// stress level.
+namespace GCStressPolicy
+{
+ // Count of outstanding disables; stress is enabled only when it is zero.
+ static volatile int32_t s_cGcStressDisables;
+
+ inline bool IsEnabled() { return s_cGcStressDisables == 0; }
+ inline void GlobalDisable() { Interlocked::Increment(&s_cGcStressDisables); }
+ inline void GlobalEnable() { Interlocked::Decrement(&s_cGcStressDisables); }
+}
+
+enum gcs_trigger_points
+{
+ cfg_any,
+};
+
+template <enum gcs_trigger_points tp>
+class GCStress
+{
+public:
+ static inline bool IsEnabled()
+ {
+ return g_pConfig->GetGCStressLevel() != 0;
+ }
+};
+#endif // STRESS_HEAP
+
+// Queries about NUMA topology used to place GC heaps; implemented by the EE/PAL.
+class NumaNodeInfo
+{
+public:
+ static bool CanEnableGCNumaAware();
+ static void GetGroupForProcessor(uint16_t processor_number, uint16_t * group_number, uint16_t * group_processor_number);
+ static bool GetNumaProcessorNodeEx(PPROCESSOR_NUMBER proc_no, uint16_t * node_no);
+};
+
+// Queries about Windows processor groups used to scale GC threads; implemented
+// by the EE/PAL.
+class CPUGroupInfo
+{
+public:
+ static bool CanEnableGCCPUGroups();
+ static uint32_t GetNumActiveProcessors();
+ static void GetGroupForProcessor(uint16_t processor_number, uint16_t * group_number, uint16_t * group_processor_number);
+};
+
+
+#endif // __GCENV_BASE_INCLUDED__
diff --git a/src/gc/env/gcenv.ee.h b/src/gc/env/gcenv.ee.h
new file mode 100644
index 0000000000..0c1fd4988a
--- /dev/null
+++ b/src/gc/env/gcenv.ee.h
@@ -0,0 +1,85 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+// Interface between the GC and EE
+//
+
+#ifndef __GCENV_EE_H__
+#define __GCENV_EE_H__
+
+struct ScanContext;
+class CrawlFrame;
+
+typedef void promote_func(PTR_PTR_Object, ScanContext*, uint32_t);
+
+typedef void enum_alloc_context_func(alloc_context*, void*);
+
+// Context threaded through stack-walk callbacks during root scanning: the
+// promotion callback, its scan context, and the current crawl frame.
+typedef struct
+{
+ promote_func* f;
+ ScanContext* sc;
+ CrawlFrame * cf;
+} GCCONTEXT;
+
+// GC background thread function prototype
+typedef uint32_t (__stdcall *GCBackgroundThreadFunction)(void* param);
+
+// Static callback surface through which the GC calls back into the execution
+// engine (thread suspension, root reporting, sync-block maintenance, etc.).
+class GCToEEInterface
+{
+public:
+ //
+ // Suspend/Resume callbacks
+ //
+ typedef enum
+ {
+ SUSPEND_FOR_GC = 1,
+ SUSPEND_FOR_GC_PREP = 6
+ } SUSPEND_REASON;
+
+ static void SuspendEE(SUSPEND_REASON reason);
+ static void RestartEE(bool bFinishedGC); //resume threads.
+
+ //
+ // The GC roots enumeration callback
+ //
+ static void GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc);
+
+ //
+ // Callbacks issued during GC so that the execution engine can do its own bookkeeping
+ //
+
+ // start of GC call back - single threaded
+ static void GcStartWork(int condemned, int max_gen);
+
+ //EE can perform post stack scanning action, while the
+ // user threads are still suspended
+ static void AfterGcScanRoots(int condemned, int max_gen, ScanContext* sc);
+
+ // Called before BGC starts sweeping, the heap is walkable
+ static void GcBeforeBGCSweepWork();
+
+ // post-gc callback.
+ static void GcDone(int condemned);
+
+ // Promote refcounted handle callback
+ static bool RefCountedHandleCallbacks(Object * pObject);
+
+ // Sync block cache management
+ static void SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2);
+ static void SyncBlockCacheDemote(int max_gen);
+ static void SyncBlockCachePromotionsGranted(int max_gen);
+
+ // Thread functions
+ static bool IsPreemptiveGCDisabled(Thread * pThread);
+ static void EnablePreemptiveGC(Thread * pThread);
+ static void DisablePreemptiveGC(Thread * pThread);
+
+ static alloc_context * GetAllocContext(Thread * pThread);
+ static bool CatchAtSafePoint(Thread * pThread);
+
+ static void GcEnumAllocContexts(enum_alloc_context_func* fn, void* param);
+
+ static Thread* CreateBackgroundThread(GCBackgroundThreadFunction threadStart, void* arg);
+};
+
+#endif // __GCENV_EE_H__
diff --git a/src/gc/env/gcenv.interlocked.h b/src/gc/env/gcenv.interlocked.h
new file mode 100644
index 0000000000..1b1035958e
--- /dev/null
+++ b/src/gc/env/gcenv.interlocked.h
@@ -0,0 +1,101 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+// Interlocked operations
+//
+
+#ifndef __GCENV_INTERLOCKED_H__
+#define __GCENV_INTERLOCKED_H__
+
+// Interlocked operations — static wrappers over the compiler's atomic
+// intrinsics; implementations live in gcenv.interlocked.inl.
+class Interlocked
+{
+public:
+
+ // Increment the value of the specified 32-bit variable as an atomic operation.
+ // Parameters:
+ // addend - variable to be incremented
+ // Return:
+ // The resulting incremented value
+ template<typename T>
+ static T Increment(T volatile *addend);
+
+ // Decrement the value of the specified 32-bit variable as an atomic operation.
+ // Parameters:
+ // addend - variable to be decremented
+ // Return:
+ // The resulting decremented value
+ template<typename T>
+ static T Decrement(T volatile *addend);
+
+ // Perform an atomic AND operation on the specified values
+ // Parameters:
+ // destination - the first operand and the destination
+ // value - second operand
+ template<typename T>
+ static void And(T volatile *destination, T value);
+
+ // Perform an atomic OR operation on the specified values
+ // Parameters:
+ // destination - the first operand and the destination
+ // value - second operand
+ template<typename T>
+ static void Or(T volatile *destination, T value);
+
+ // Set a 32-bit variable to the specified value as an atomic operation.
+ // Parameters:
+ // destination - value to be exchanged
+ // value - value to set the destination to
+ // Return:
+ // The previous value of the destination
+ template<typename T>
+ static T Exchange(T volatile *destination, T value);
+
+ // Set a pointer variable to the specified value as an atomic operation.
+ // Parameters:
+ // destination - value to be exchanged
+ // value - value to set the destination to
+ // Return:
+ // The previous value of the destination
+ template <typename T>
+ static T ExchangePointer(T volatile * destination, T value);
+
+ // Overload allowing nullptr as the value without naming T explicitly.
+ template <typename T>
+ static T ExchangePointer(T volatile * destination, std::nullptr_t value);
+
+ // Perform an atomic addition of two 32-bit values and return the original value of the addend.
+ // Parameters:
+ // addend - variable to be added to
+ // value - value to add
+ // Return:
+ // The previous value of the addend
+ template<typename T>
+ static T ExchangeAdd(T volatile *addend, T value);
+
+ // Performs an atomic compare-and-exchange operation on the specified values.
+ // Parameters:
+ // destination - value to be exchanged
+ // exchange - value to set the destination to
+ // comparand - value to compare the destination to before setting it to the exchange.
+ // The destination is set only if the destination is equal to the comparand.
+ // Return:
+ // The original value of the destination
+ template<typename T>
+ static T CompareExchange(T volatile *destination, T exchange, T comparand);
+
+ // Performs an atomic compare-and-exchange operation on the specified pointers.
+ // Parameters:
+ // destination - value to be exchanged
+ // exchange - value to set the destination to
+ // comparand - value to compare the destination to before setting it to the exchange.
+ // The destination is set only if the destination is equal to the comparand.
+ // Return:
+ // The original value of the destination
+ template <typename T>
+ static T CompareExchangePointer(T volatile *destination, T exchange, T comparand);
+
+ // Overload allowing nullptr as the comparand without naming T explicitly.
+ template <typename T>
+ static T CompareExchangePointer(T volatile *destination, T exchange, std::nullptr_t comparand);
+};
+
+#endif // __GCENV_INTERLOCKED_H__
diff --git a/src/gc/env/gcenv.interlocked.inl b/src/gc/env/gcenv.interlocked.inl
new file mode 100644
index 0000000000..fd4f839970
--- /dev/null
+++ b/src/gc/env/gcenv.interlocked.inl
@@ -0,0 +1,199 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+// __forceinline implementation of the Interlocked class methods
+//
+
+#ifndef __GCENV_INTERLOCKED_INL__
+#define __GCENV_INTERLOCKED_INL__
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif // _MSC_VER
+
+// Increment the value of the specified 32-bit variable as an atomic operation.
+// Parameters:
+// addend - variable to be incremented
+// Return:
+// The resulting incremented value
+template <typename T>
+__forceinline T Interlocked::Increment(T volatile *addend)
+{
+#ifdef _MSC_VER
+ // T is constrained to a 32-bit type so the cast to long* below is safe.
+ static_assert(sizeof(long) == sizeof(T), "Size of long must be the same as size of T");
+ return _InterlockedIncrement((long*)addend);
+#else
+ // add-and-fetch returns the new (post-increment) value, matching _InterlockedIncrement.
+ return __sync_add_and_fetch(addend, 1);
+#endif
+}
+
+// Decrement the value of the specified 32-bit variable as an atomic operation.
+// Parameters:
+// addend - variable to be decremented
+// Return:
+// The resulting decremented value
+template <typename T>
+__forceinline T Interlocked::Decrement(T volatile *addend)
+{
+#ifdef _MSC_VER
+ // T is constrained to a 32-bit type so the cast to long* below is safe.
+ static_assert(sizeof(long) == sizeof(T), "Size of long must be the same as size of T");
+ return _InterlockedDecrement((long*)addend);
+#else
+ // sub-and-fetch returns the new (post-decrement) value, matching _InterlockedDecrement.
+ return __sync_sub_and_fetch(addend, 1);
+#endif
+}
+
+// Set a 32-bit variable to the specified value as an atomic operation.
+// Parameters:
+// destination - value to be exchanged
+// value - value to set the destination to
+// Return:
+// The previous value of the destination
+template <typename T>
+__forceinline T Interlocked::Exchange(T volatile *destination, T value)
+{
+#ifdef _MSC_VER
+ static_assert(sizeof(long) == sizeof(T), "Size of long must be the same as size of T");
+ return _InterlockedExchange((long*)destination, value);
+#else
+ // NOTE(review): __sync_swap appears to be a Clang-specific builtin (GCC only
+ // documents the __sync_* fetch/op family) -- confirm the non-MSVC toolchain is Clang.
+ return __sync_swap(destination, value);
+#endif
+}
+
+// Performs an atomic compare-and-exchange operation on the specified values.
+// Parameters:
+// destination - value to be exchanged
+// exchange - value to set the destination to
+// comparand - value to compare the destination to before setting it to the exchange.
+// The destination is set only if the destination is equal to the comparand.
+// Return:
+// The original value of the destination
+template <typename T>
+__forceinline T Interlocked::CompareExchange(T volatile *destination, T exchange, T comparand)
+{
+#ifdef _MSC_VER
+ static_assert(sizeof(long) == sizeof(T), "Size of long must be the same as size of T");
+ return _InterlockedCompareExchange((long*)destination, exchange, comparand);
+#else
+ // The builtin takes (ptr, oldval, newval): note that the comparand/exchange
+ // argument order is reversed relative to the MSVC intrinsic above.
+ return __sync_val_compare_and_swap(destination, comparand, exchange);
+#endif
+}
+
+// Perform an atomic addition of two 32-bit values and return the original value of the addend.
+// Parameters:
+// addend - variable to be added to
+// value - value to add
+// Return:
+// The previous value of the addend
+template <typename T>
+__forceinline T Interlocked::ExchangeAdd(T volatile *addend, T value)
+{
+#ifdef _MSC_VER
+ static_assert(sizeof(long) == sizeof(T), "Size of long must be the same as size of T");
+ return _InterlockedExchangeAdd((long*)addend, value);
+#else
+ // fetch-and-add returns the value *before* the addition, matching _InterlockedExchangeAdd.
+ return __sync_fetch_and_add(addend, value);
+#endif
+}
+
+// Perform an atomic AND operation on the specified values.
+// Parameters:
+// destination - the first operand and the destination
+// value - second operand
+// Note: unlike the exchange operations this does not return the previous value.
+template <typename T>
+__forceinline void Interlocked::And(T volatile *destination, T value)
+{
+#ifdef _MSC_VER
+ static_assert(sizeof(long) == sizeof(T), "Size of long must be the same as size of T");
+ _InterlockedAnd((long*)destination, value);
+#else
+ __sync_and_and_fetch(destination, value);
+#endif
+}
+
+// Perform an atomic OR operation on the specified values.
+// Parameters:
+// destination - the first operand and the destination
+// value - second operand
+// Note: unlike the exchange operations this does not return the previous value.
+template <typename T>
+__forceinline void Interlocked::Or(T volatile *destination, T value)
+{
+#ifdef _MSC_VER
+ static_assert(sizeof(long) == sizeof(T), "Size of long must be the same as size of T");
+ _InterlockedOr((long*)destination, value);
+#else
+ __sync_or_and_fetch(destination, value);
+#endif
+}
+
+// Set a pointer variable to the specified value as an atomic operation.
+// Parameters:
+// destination - value to be exchanged
+// value - value to set the destination to
+// Return:
+// The previous value of the destination
+template <typename T>
+__forceinline T Interlocked::ExchangePointer(T volatile * destination, T value)
+{
+#ifdef _MSC_VER
+#ifdef BIT64
+ // 64-bit: dedicated pointer-sized intrinsic.
+ return (T)(TADDR)_InterlockedExchangePointer((void* volatile *)destination, value);
+#else
+ // 32-bit: pointers fit in a long, so route through the 32-bit exchange.
+ return (T)(TADDR)_InterlockedExchange((long volatile *)(void* volatile *)destination, (long)(void*)value);
+#endif
+#else
+ return (T)(TADDR)__sync_swap((void* volatile *)destination, value);
+#endif
+}
+
+// Overload of ExchangePointer for a literal nullptr value: lets callers write
+// ExchangePointer(&p, nullptr) without an explicit cast to T.
+// Parameters:
+// destination - value to be exchanged
+// value - nullptr literal to store into the destination
+// Return:
+// The previous value of the destination
+template <typename T>
+__forceinline T Interlocked::ExchangePointer(T volatile * destination, std::nullptr_t value)
+{
+#ifdef _MSC_VER
+#ifdef BIT64
+ return (T)(TADDR)_InterlockedExchangePointer((void* volatile *)destination, value);
+#else
+ return (T)(TADDR)_InterlockedExchange((long volatile *)(void* volatile *)destination, (long)(void*)value);
+#endif
+#else
+ return (T)(TADDR)__sync_swap((void* volatile *)destination, value);
+#endif
+}
+
+// Performs an atomic compare-and-exchange operation on the specified pointers.
+// Parameters:
+// destination - value to be exchanged
+// exchange - value to set the destination to
+// comparand - value to compare the destination to before setting it to the exchange.
+// The destination is set only if the destination is equal to the comparand.
+// Return:
+// The original value of the destination
+template <typename T>
+__forceinline T Interlocked::CompareExchangePointer(T volatile *destination, T exchange, T comparand)
+{
+#ifdef _MSC_VER
+#ifdef BIT64
+ return (T)(TADDR)_InterlockedCompareExchangePointer((void* volatile *)destination, exchange, comparand);
+#else
+ // 32-bit: pointers fit in a long, so route through the 32-bit compare-exchange.
+ return (T)(TADDR)_InterlockedCompareExchange((long volatile *)(void* volatile *)destination, (long)(void*)exchange, (long)(void*)comparand);
+#endif
+#else
+ // Builtin argument order is (ptr, oldval, newval) -- reversed from the MSVC intrinsic.
+ return (T)(TADDR)__sync_val_compare_and_swap((void* volatile *)destination, comparand, exchange);
+#endif
+}
+
+// Overload of CompareExchangePointer for a literal nullptr comparand: lets callers
+// write CompareExchangePointer(&p, v, nullptr) without an explicit cast to T.
+// Parameters:
+// destination - value to be exchanged
+// exchange - value to set the destination to
+// comparand - nullptr literal the destination is compared against
+// Return:
+// The original value of the destination
+template <typename T>
+__forceinline T Interlocked::CompareExchangePointer(T volatile *destination, T exchange, std::nullptr_t comparand)
+{
+#ifdef _MSC_VER
+#ifdef BIT64
+ return (T)(TADDR)_InterlockedCompareExchangePointer((void* volatile *)destination, (void*)exchange, (void*)comparand);
+#else
+ return (T)(TADDR)_InterlockedCompareExchange((long volatile *)(void* volatile *)destination, (long)(void*)exchange, (long)(void*)comparand);
+#endif
+#else
+ return (T)(TADDR)__sync_val_compare_and_swap((void* volatile *)destination, (void*)comparand, (void*)exchange);
+#endif
+}
+
+#endif // __GCENV_INTERLOCKED_INL__
diff --git a/src/gc/env/gcenv.object.h b/src/gc/env/gcenv.object.h
new file mode 100644
index 0000000000..c999e4538e
--- /dev/null
+++ b/src/gc/env/gcenv.object.h
@@ -0,0 +1,148 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+//-------------------------------------------------------------------------------------------------
+//
+// Low-level types describing GC object layouts.
+//
+
+// Bits stolen from the sync block index that the GC/HandleTable knows about (currently these are at the same
+// positions as the mainline runtime but we can change this below when it becomes apparent how Redhawk will
+// handle sync blocks).
+#define BIT_SBLK_GC_RESERVE 0x20000000
+#define BIT_SBLK_FINALIZER_RUN 0x40000000
+
+// The sync block index header (small structure that immediately precedes every object in the GC heap). Only
+// the GC uses this so far, and only to store a couple of bits of information.
+class ObjHeader
+{
+private:
+#if defined(BIT64)
+ // Padding so the 4-byte sync block value sits immediately below the object on 64-bit.
+ uint32_t m_uAlignpad;
+#endif // BIT64
+ uint32_t m_uSyncBlockValue;
+
+public:
+ // Raw read of the sync block value.
+ uint32_t GetBits() { return m_uSyncBlockValue; }
+ // Atomically set/clear arbitrary bits (interlocked; safe under concurrent mutation).
+ void SetBit(uint32_t uBit) { Interlocked::Or(&m_uSyncBlockValue, uBit); }
+ void ClrBit(uint32_t uBit) { Interlocked::And(&m_uSyncBlockValue, ~uBit); }
+ // Non-atomic set/clear of the GC reserve bit. NOTE(review): plain |=/&= here,
+ // unlike SetBit/ClrBit -- presumably only called while the GC has exclusive
+ // access to the header; confirm before using from mutator threads.
+ void SetGCBit() { m_uSyncBlockValue |= BIT_SBLK_GC_RESERVE; }
+ void ClrGCBit() { m_uSyncBlockValue &= ~BIT_SBLK_GC_RESERVE; }
+};
+
+#define MTFlag_ContainsPointers 1
+#define MTFlag_HasFinalizer 2
+#define MTFlag_IsArray 4
+
+// Minimal method table: just enough type information for the GC to walk the heap
+// (object size, pointer-bearing flag, finalizability, array-ness).
+class MethodTable
+{
+public:
+ uint16_t m_componentSize;
+ uint16_t m_flags;
+ uint32_t m_baseSize;
+
+ MethodTable * m_pRelatedType;
+
+public:
+ // Turn this method table into the one describing free (dead) space in the heap.
+ // NOTE(review): base size of 3 pointers with component size 1 presumably lets a
+ // free block be represented as a byte array whose length encodes its size -- confirm.
+ void InitializeFreeObject()
+ {
+ m_baseSize = 3 * sizeof(void *);
+ m_componentSize = 1;
+ m_flags = 0;
+ }
+
+ // Fixed portion of an instance's size, in bytes.
+ uint32_t GetBaseSize()
+ {
+ return m_baseSize;
+ }
+
+ // Per-element size for arrays/strings; 0 for non-variable-sized types.
+ uint16_t RawGetComponentSize()
+ {
+ return m_componentSize;
+ }
+
+ bool ContainsPointers()
+ {
+ return (m_flags & MTFlag_ContainsPointers) != 0;
+ }
+
+ // This environment has no collectible assemblies, so this reduces to ContainsPointers.
+ bool ContainsPointersOrCollectible()
+ {
+ return ContainsPointers();
+ }
+
+ // True for variable-sized types (arrays/strings), where m_componentSize is meaningful.
+ bool HasComponentSize()
+ {
+ return m_componentSize != 0;
+ }
+
+ bool HasFinalizer()
+ {
+ return (m_flags & MTFlag_HasFinalizer) != 0;
+ }
+
+ // Critical finalizers are not supported in this environment.
+ bool HasCriticalFinalizer()
+ {
+ return false;
+ }
+
+ bool IsArray()
+ {
+ return (m_flags & MTFlag_IsArray) != 0;
+ }
+
+ // For non-array types m_pRelatedType holds the parent type.
+ MethodTable * GetParent()
+ {
+ _ASSERTE(!IsArray());
+ return m_pRelatedType;
+ }
+
+ // Debug hook; no invariants to check in this minimal implementation.
+ bool SanityCheck()
+ {
+ return true;
+ }
+};
+
+// Base layout of every GC heap object: a single method table pointer.
+// The ObjHeader lives immediately *before* the object in memory.
+class Object
+{
+ MethodTable * m_pMethTab;
+
+public:
+ // The sync block header is allocated directly in front of the object.
+ ObjHeader * GetHeader()
+ {
+ return ((ObjHeader *)this) - 1;
+ }
+
+ MethodTable * RawGetMethodTable() const
+ {
+ return m_pMethTab;
+ }
+
+ // Strip the low two bits of the method table pointer. NOTE(review): these bits
+ // are presumably used by the GC as mark/flag bits during collection -- confirm.
+ MethodTable * GetGCSafeMethodTable() const
+ {
+ return (MethodTable *)((uintptr_t)m_pMethTab & ~3);
+ }
+
+ void RawSetMethodTable(MethodTable * pMT)
+ {
+ m_pMethTab = pMT;
+ }
+};
+// Smallest legal object: header plus two pointer-sized fields.
+#define MIN_OBJECT_SIZE (2*sizeof(uint8_t*) + sizeof(ObjHeader))
+
+// Base layout of array objects: the element count immediately follows the
+// method table pointer inherited from Object.
+class ArrayBase : public Object
+{
+ uint32_t m_dwLength;
+
+public:
+ uint32_t GetNumComponents()
+ {
+ return m_dwLength;
+ }
+
+ // Byte offset of the length field, for code that addresses it directly.
+ static size_t GetOffsetOfNumComponents()
+ {
+ return offsetof(ArrayBase, m_dwLength);
+ }
+};
diff --git a/src/gc/env/gcenv.os.h b/src/gc/env/gcenv.os.h
new file mode 100644
index 0000000000..bb0153f117
--- /dev/null
+++ b/src/gc/env/gcenv.os.h
@@ -0,0 +1,283 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+// Interface between GC and the OS specific functionality
+//
+
+#ifndef __GCENV_OS_H__
+#define __GCENV_OS_H__
+
+// Critical section used by the GC
+class CLRCriticalSection
+{
+ CRITICAL_SECTION m_cs;
+
+public:
+ // Initialize the critical section
+ void Initialize();
+
+ // Destroy the critical section
+ void Destroy();
+
+ // Enter the critical section. Blocks until the section can be entered.
+ void Enter();
+
+ // Leave the critical section
+ void Leave();
+};
+
+// Flags for the GCToOSInterface::VirtualReserve method
+struct VirtualReserveFlags
+{
+ enum
+ {
+ None = 0,
+ WriteWatch = 1,
+ };
+};
+
+// Affinity of a GC thread
+struct GCThreadAffinity
+{
+ static const int None = -1;
+
+ // Processor group index, None if no group is specified
+ int Group;
+ // Processor index, None if no affinity is specified
+ int Processor;
+};
+
+// GC thread function prototype
+typedef void (*GCThreadFunction)(void* param);
+
+// Interface that the GC uses to invoke OS specific functionality
+class GCToOSInterface
+{
+public:
+
+ //
+ // Initialization and shutdown of the interface
+ //
+
+ // Initialize the interface implementation
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool Initialize();
+
+ // Shutdown the interface implementation
+ static void Shutdown();
+
+ //
+ // Virtual memory management
+ //
+
+ // Reserve virtual memory range.
+ // Parameters:
+ // address - starting virtual address, it can be NULL to let the function choose the starting address
+ // size - size of the virtual memory range
+ // alignment - requested memory alignment
+ // flags - flags to control special settings like write watching
+ // Return:
+ // Starting virtual address of the reserved range
+ static void* VirtualReserve(void *address, size_t size, size_t alignment, uint32_t flags);
+
+ // Release virtual memory range previously reserved using VirtualReserve
+ // Parameters:
+ // address - starting virtual address
+ // size - size of the virtual memory range
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool VirtualRelease(void *address, size_t size);
+
+ // Commit virtual memory range. It must be part of a range reserved using VirtualReserve.
+ // Parameters:
+ // address - starting virtual address
+ // size - size of the virtual memory range
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool VirtualCommit(void *address, size_t size);
+
+ // Decommit virtual memory range.
+ // Parameters:
+ // address - starting virtual address
+ // size - size of the virtual memory range
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool VirtualDecommit(void *address, size_t size);
+
+ // Reset virtual memory range. Indicates that data in the memory range specified by address and size is no
+ // longer of interest, but it should not be decommitted.
+ // Parameters:
+ // address - starting virtual address
+ // size - size of the virtual memory range
+ // unlock - true if the memory range should also be unlocked
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool VirtualReset(void *address, size_t size, bool unlock);
+
+ //
+ // Write watching
+ //
+
+ // Check if the OS supports write watching
+ static bool SupportsWriteWatch();
+
+ // Reset the write tracking state for the specified virtual memory range.
+ // Parameters:
+ // address - starting virtual address
+ // size - size of the virtual memory range
+ static void ResetWriteWatch(void *address, size_t size);
+
+ // Retrieve addresses of the pages that are written to in a region of virtual memory
+ // Parameters:
+ // resetState - true indicates to reset the write tracking state
+ // address - starting virtual address
+ // size - size of the virtual memory range
+ // pageAddresses - buffer that receives an array of page addresses in the memory region
+ // pageAddressesCount - on input, size of the lpAddresses array, in array elements
+ // on output, the number of page addresses that are returned in the array.
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool GetWriteWatch(bool resetState, void* address, size_t size, void** pageAddresses, uintptr_t* pageAddressesCount);
+
+ //
+ // Thread and process
+ //
+
+ // Create a new thread
+ // Parameters:
+ // function - the function to be executed by the thread
+ // param - parameters of the thread
+ // affinity - processor affinity of the thread
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool CreateThread(GCThreadFunction function, void* param, GCThreadAffinity* affinity);
+
+ // Causes the calling thread to sleep for the specified number of milliseconds
+ // Parameters:
+ // sleepMSec - time to sleep before switching to another thread
+ static void Sleep(uint32_t sleepMSec);
+
+ // Causes the calling thread to yield execution to another thread that is ready to run on the current processor.
+ // Parameters:
+ // switchCount - number of times the YieldThread was called in a loop
+ static void YieldThread(uint32_t switchCount);
+
+ // Get the number of the current processor
+ static uint32_t GetCurrentProcessorNumber();
+
+ // Check if the OS supports getting current processor number
+ static bool CanGetCurrentProcessorNumber();
+
+ // Set ideal processor for the current thread
+ // Parameters:
+ // processorIndex - index of the processor in the group
+ // affinity - ideal processor affinity for the thread
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool SetCurrentThreadIdealAffinity(GCThreadAffinity* affinity);
+
+ // Get numeric id of the current thread if possible on the
+ // current platform. It is intended for logging purposes only.
+ // Return:
+ // Numeric id of the current thread, or 0 if it cannot be
+ // obtained on the current platform.
+ static uint64_t GetCurrentThreadIdForLogging();
+
+ // Get id of the current process
+ // Return:
+ // Id of the current process
+ static uint32_t GetCurrentProcessId();
+
+ //
+ // Processor topology
+ //
+
+ // Get number of logical processors
+ static uint32_t GetLogicalCpuCount();
+
+ // Get size of the largest cache on the processor die
+ // Parameters:
+ // trueSize - true to return true cache size, false to return scaled up size based on
+ // the processor architecture
+ // Return:
+ // Size of the cache
+ static size_t GetLargestOnDieCacheSize(bool trueSize = true);
+
+ // Get number of processors assigned to the current process
+ // Return:
+ // The number of processors
+ static uint32_t GetCurrentProcessCpuCount();
+
+ // Get affinity mask of the current process
+ // Parameters:
+ // processMask - affinity mask for the specified process
+ // systemMask - affinity mask for the system
+ // Return:
+ // true if it has succeeded, false if it has failed
+ // Remarks:
+ // A process affinity mask is a bit vector in which each bit represents the processors that
+ // a process is allowed to run on. A system affinity mask is a bit vector in which each bit
+ // represents the processors that are configured into a system.
+ // A process affinity mask is a subset of the system affinity mask. A process is only allowed
+ // to run on the processors configured into a system. Therefore, the process affinity mask cannot
+ // specify a 1 bit for a processor when the system affinity mask specifies a 0 bit for that processor.
+ static bool GetCurrentProcessAffinityMask(uintptr_t *processMask, uintptr_t *systemMask);
+
+ //
+ // Global memory info
+ //
+
+ // Return the size of the user-mode portion of the virtual address space of this process.
+ // Return:
+ // non zero if it has succeeded, 0 if it has failed
+ static size_t GetVirtualMemoryLimit();
+
+ // Get the physical memory that this process can use.
+ // Return:
+ // non zero if it has succeeded, 0 if it has failed
+ // Remarks:
+ // If a process runs with a restricted memory limit, it returns the limit. If there's no limit
+ // specified, it returns amount of actual physical memory.
+ static uint64_t GetPhysicalMemoryLimit();
+
+ // Get memory status
+ // Parameters:
+ // memory_load - A number between 0 and 100 that specifies the approximate percentage of physical memory
+ // that is in use (0 indicates no memory use and 100 indicates full memory use).
+ // available_physical - The amount of physical memory currently available, in bytes.
+ // available_page_file - The maximum amount of memory the current process can commit, in bytes.
+ // Remarks:
+ // Any parameter can be null.
+ static void GetMemoryStatus(uint32_t* memory_load, uint64_t* available_physical, uint64_t* available_page_file);
+
+ //
+ // Misc
+ //
+
+ // Flush write buffers of processors that are executing threads of the current process
+ static void FlushProcessWriteBuffers();
+
+ // Break into a debugger
+ static void DebugBreak();
+
+ //
+ // Time
+ //
+
+ // Get a high precision performance counter
+ // Return:
+ // The counter value
+ static int64_t QueryPerformanceCounter();
+
+ // Get a frequency of the high precision performance counter
+ // Return:
+ // The counter frequency
+ static int64_t QueryPerformanceFrequency();
+
+ // Get a time stamp with a low precision
+ // Return:
+ // Time stamp in milliseconds
+ static uint32_t GetLowPrecisionTimeStamp();
+};
+
+#endif // __GCENV_OS_H__
diff --git a/src/gc/env/gcenv.structs.h b/src/gc/env/gcenv.structs.h
new file mode 100644
index 0000000000..5887dd7852
--- /dev/null
+++ b/src/gc/env/gcenv.structs.h
@@ -0,0 +1,122 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+#ifndef __GCENV_STRUCTS_INCLUDED__
+#define __GCENV_STRUCTS_INCLUDED__
+//
+// Structs shared between the GC and the environment
+//
+
+struct GCSystemInfo
+{
+ uint32_t dwNumberOfProcessors;
+ uint32_t dwPageSize;
+ uint32_t dwAllocationGranularity;
+};
+
+typedef void * HANDLE;
+
+#ifdef PLATFORM_UNIX
+
+typedef char TCHAR;
+#define _T(s) s
+
+#else
+
+#ifndef _INC_WINDOWS
+typedef wchar_t TCHAR;
+#define _T(s) L##s
+#endif
+
+#endif
+
+#ifdef PLATFORM_UNIX
+
+// Identity of a thread, used to track lock ownership (see CrstStatic).
+class EEThreadId
+{
+ pthread_t m_id;
+ // Indicates whether the m_id is valid or not. pthread_t doesn't have any
+ // portable "invalid" value.
+ bool m_isValid;
+
+public:
+ // True if this id is valid and names the calling thread.
+ // pthread_t values must be compared with pthread_equal, not ==.
+ bool IsCurrentThread()
+ {
+ return m_isValid && pthread_equal(m_id, pthread_self());
+ }
+
+ // Record the calling thread as the owner of this id.
+ void SetToCurrentThread()
+ {
+ m_id = pthread_self();
+ m_isValid = true;
+ }
+
+ // Invalidate the id; subsequent IsCurrentThread() calls return false.
+ void Clear()
+ {
+ m_isValid = false;
+ }
+};
+
+#else // PLATFORM_UNIX
+
+#ifndef _INC_WINDOWS
+extern "C" uint32_t __stdcall GetCurrentThreadId();
+#endif
+
+class EEThreadId
+{
+ uint32_t m_uiId;
+public:
+
+ bool IsCurrentThread()
+ {
+ return m_uiId == ::GetCurrentThreadId();
+ }
+
+ void SetToCurrentThread()
+ {
+ m_uiId = ::GetCurrentThreadId();
+ }
+
+ void Clear()
+ {
+ m_uiId = 0;
+ }
+};
+
+#endif // PLATFORM_UNIX
+
+#ifndef _INC_WINDOWS
+
+#ifdef PLATFORM_UNIX
+
+typedef struct _RTL_CRITICAL_SECTION {
+ pthread_mutex_t mutex;
+} CRITICAL_SECTION, RTL_CRITICAL_SECTION, *PRTL_CRITICAL_SECTION;
+
+#else
+
+#pragma pack(push, 8)
+
+typedef struct _RTL_CRITICAL_SECTION {
+ void* DebugInfo;
+
+ //
+ // The following three fields control entering and exiting the critical
+ // section for the resource
+ //
+
+ int32_t LockCount;
+ int32_t RecursionCount;
+ HANDLE OwningThread; // from the thread's ClientId->UniqueThread
+ HANDLE LockSemaphore;
+ uintptr_t SpinCount; // force size on 64-bit systems when packed
+} CRITICAL_SECTION, RTL_CRITICAL_SECTION, *PRTL_CRITICAL_SECTION;
+
+#pragma pack(pop)
+
+#endif
+
+#endif // _INC_WINDOWS
+
+#endif // __GCENV_STRUCTS_INCLUDED__
diff --git a/src/gc/env/gcenv.sync.h b/src/gc/env/gcenv.sync.h
new file mode 100644
index 0000000000..d6bee05a19
--- /dev/null
+++ b/src/gc/env/gcenv.sync.h
@@ -0,0 +1,145 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+// -----------------------------------------------------------------------------------------------------------
+//
+// Helper classes expected by the GC
+//
+#define CRST_REENTRANCY 0
+#define CRST_UNSAFE_SAMELEVEL 0
+#define CRST_UNSAFE_ANYMODE 0
+#define CRST_DEBUGGER_THREAD 0
+#define CRST_DEFAULT 0
+
+#define CrstHandleTable 0
+
+typedef int CrstFlags;
+typedef int CrstType;
+
+// Critical section wrapper expected by the GC. Thin layer over CLRCriticalSection;
+// in debug builds it additionally records which thread currently holds the lock.
+class CrstStatic
+{
+ CLRCriticalSection m_cs;
+#ifdef _DEBUG
+ // Thread currently inside the critical section (debug-only diagnostics).
+ EEThreadId m_holderThreadId;
+#endif
+
+public:
+ // Initialize the critical section. The type/flags parameters exist for interface
+ // compatibility and are ignored here; always succeeds.
+ bool InitNoThrow(CrstType eType, CrstFlags eFlags = CRST_DEFAULT)
+ {
+ m_cs.Initialize();
+ return true;
+ }
+
+ void Destroy()
+ {
+ m_cs.Destroy();
+ }
+
+ // Enter the critical section; blocks until acquired.
+ void Enter()
+ {
+ m_cs.Enter();
+#ifdef _DEBUG
+ m_holderThreadId.SetToCurrentThread();
+#endif
+ }
+
+ // Leave the critical section. The debug holder id is cleared before the
+ // underlying leave so it never names a thread that no longer holds the lock.
+ void Leave()
+ {
+#ifdef _DEBUG
+ m_holderThreadId.Clear();
+#endif
+ m_cs.Leave();
+ }
+
+#ifdef _DEBUG
+ EEThreadId GetHolderThreadId()
+ {
+ return m_holderThreadId;
+ }
+
+ // True if the calling thread currently holds this lock (debug builds only).
+ bool OwnedByCurrentThread()
+ {
+ return GetHolderThreadId().IsCurrentThread();
+ }
+#endif
+};
+
+// RAII holder: enters the lock on construction and leaves it on destruction.
+// The holder does not own the lock object and must not outlive it.
+class CrstHolder
+{
+ CrstStatic * m_pLock;
+
+public:
+ CrstHolder(CrstStatic * pLock)
+ : m_pLock(pLock)
+ {
+ m_pLock->Enter();
+ }
+
+ ~CrstHolder()
+ {
+ m_pLock->Leave();
+ }
+};
+
+// RAII lock holder that tracks whether the lock is currently held, allowing the
+// lock to be released and re-acquired within the holder's lifetime. The lock is
+// released on destruction only if still held.
+class CrstHolderWithState
+{
+ CrstStatic * m_pLock;
+ bool m_fAcquired;
+
+public:
+ // Enters the lock immediately unless fAcquire is false.
+ CrstHolderWithState(CrstStatic * pLock, bool fAcquire = true)
+ : m_pLock(pLock), m_fAcquired(fAcquire)
+ {
+ if (fAcquire)
+ m_pLock->Enter();
+ }
+
+ ~CrstHolderWithState()
+ {
+ if (m_fAcquired)
+ m_pLock->Leave();
+ }
+
+ // Enter the lock if not already held (idempotent).
+ void Acquire()
+ {
+ if (!m_fAcquired)
+ {
+ m_pLock->Enter();
+ m_fAcquired = true;
+ }
+ }
+
+ // Leave the lock if currently held (idempotent).
+ void Release()
+ {
+ if (m_fAcquired)
+ {
+ m_pLock->Leave();
+ m_fAcquired = false;
+ }
+ }
+
+ // The underlying lock object (non-owning).
+ CrstStatic * GetValue()
+ {
+ return m_pLock;
+ }
+};
+
+class CLREventStatic
+{
+public:
+ bool CreateAutoEventNoThrow(bool bInitialState);
+ bool CreateManualEventNoThrow(bool bInitialState);
+ bool CreateOSAutoEventNoThrow(bool bInitialState);
+ bool CreateOSManualEventNoThrow(bool bInitialState);
+
+ void CloseEvent();
+ bool IsValid() const;
+ bool Set();
+ bool Reset();
+ uint32_t Wait(uint32_t dwMilliseconds, bool bAlertable);
+
+private:
+ HANDLE m_hEvent;
+ bool m_fInitialized;
+};