summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--CMakeLists.txt520
-rw-r--r--src/corefx/System.Globalization.Native/CMakeLists.txt1
-rw-r--r--src/debug/ee/rcthread.cpp4
-rw-r--r--src/dlls/mscoree/coreclr/CMakeLists.txt1
-rw-r--r--src/gc/env/common.h2
-rw-r--r--src/gc/env/gcenv.base.h380
-rw-r--r--src/gc/env/gcenv.ee.h85
-rw-r--r--src/gc/env/gcenv.interlocked.h102
-rw-r--r--src/gc/env/gcenv.interlocked.inl184
-rw-r--r--src/gc/env/gcenv.object.h4
-rw-r--r--src/gc/env/gcenv.os.h274
-rw-r--r--src/gc/env/gcenv.structs.h70
-rw-r--r--src/gc/env/gcenv.sync.h31
-rw-r--r--src/gc/env/gcenv.windows.cpp268
-rw-r--r--src/gc/gc.cpp769
-rw-r--r--src/gc/gcee.cpp10
-rw-r--r--src/gc/gcpriv.h93
-rw-r--r--src/gc/gcscan.cpp4
-rw-r--r--src/gc/handletable.cpp2
-rw-r--r--src/gc/handletable.inl4
-rw-r--r--src/gc/handletablecache.cpp26
-rw-r--r--src/gc/handletablecore.cpp12
-rw-r--r--src/gc/objecthandle.cpp8
-rw-r--r--src/gc/sample/CMakeLists.txt41
-rw-r--r--src/gc/sample/GCSample.cpp9
-rw-r--r--src/gc/sample/GCSample.vcxproj4
-rw-r--r--src/gc/sample/GCSample.vcxproj.filters6
-rw-r--r--src/gc/sample/gcenv.cpp156
-rw-r--r--src/gc/sample/gcenv.ee.cpp289
-rw-r--r--src/gc/sample/gcenv.h9
-rw-r--r--src/gc/sample/gcenv.unix.cpp (renamed from src/gc/env/gcenv.unix.cpp)68
-rw-r--r--src/gc/sample/gcenv.windows.cpp446
-rw-r--r--src/pal/inc/pal.h1
-rw-r--r--src/pal/src/exception/seh.cpp8
-rw-r--r--src/pal/src/misc/time.cpp10
-rw-r--r--src/pal/src/thread/context.cpp1
-rw-r--r--src/pal/tests/palsuite/eventprovider/CMakeLists.txt3
-rw-r--r--src/strongname/api/api.props3
-rw-r--r--src/strongname/api/common.h3
-rw-r--r--src/vm/CMakeLists.txt3
-rw-r--r--src/vm/appdomain.cpp1
-rw-r--r--src/vm/common.h4
-rw-r--r--src/vm/crst.cpp14
-rw-r--r--src/vm/crst.h2
-rw-r--r--src/vm/eehash.inl4
-rw-r--r--src/vm/gcenv.ee.cpp (renamed from src/vm/gcenv.cpp)24
-rw-r--r--src/vm/gcenv.ee.h6
-rw-r--r--src/vm/gcenv.h94
-rw-r--r--src/vm/gcenv.interlocked.h6
-rw-r--r--src/vm/gcenv.interlocked.inl6
-rw-r--r--src/vm/gcenv.os.cpp520
-rw-r--r--src/vm/gcenv.os.h6
-rw-r--r--src/vm/hash.cpp4
-rw-r--r--src/vm/spinlock.cpp6
-rw-r--r--src/vm/syncblk.cpp2
-rw-r--r--src/vm/syncblk.h2
-rw-r--r--src/vm/threads.cpp12
-rw-r--r--src/vm/threadsuspend.cpp8
-rw-r--r--src/vm/util.hpp8
-rw-r--r--src/vm/wks/wks.targets3
60 files changed, 2854 insertions, 1792 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4bdc49fb52..15bed4dd3b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -104,6 +104,17 @@ elseif(WIN32)
endif()
endif()
+if(CLR_CMAKE_PLATFORM_UNIX)
+ # Set flag to indicate if this will be a 64bit build
+ # CMAKE_SYSTEM_PROCESSOR returns the value of `uname -p`.
+ # For the AMD/Intel 64bit architecture two different strings are common.
+ # Linux and Darwin identify it as "x86_64" while FreeBSD uses the
+ # "amd64" string. Accept either of the two here.
+ if(CMAKE_SYSTEM_PROCESSOR STREQUAL x86_64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL amd64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL aarch64)
+ set(IS_64BIT_BUILD 1)
+ endif (CMAKE_SYSTEM_PROCESSOR STREQUAL x86_64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL amd64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL aarch64)
+endif(CLR_CMAKE_PLATFORM_UNIX)
+
if(WIN32)
enable_language(ASM_MASM)
else()
@@ -248,13 +259,6 @@ elseif (CLR_CMAKE_PLATFORM_UNIX)
endif(WIN32)
-if (WIN32 OR CLR_CMAKE_PLATFORM_LINUX)
- add_definitions(-DFEATURE_EVENT_TRACE=1)
-endif (WIN32 OR CLR_CMAKE_PLATFORM_LINUX)
-
-if (CLR_CMAKE_PLATFORM_LINUX)
- add_definitions(-DFEATURE_EVENTSOURCE_XPLAT=1)
-endif (CLR_CMAKE_PLATFORM_LINUX)
if (CLR_CMAKE_PLATFORM_UNIX)
add_definitions(-DPLATFORM_UNIX=1)
@@ -325,125 +329,111 @@ if (CLR_CMAKE_PLATFORM_UNIX)
endif ()
endif(UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG OR UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED)
- add_subdirectory(src/ToolBox/SOS/lldbplugin)
- add_subdirectory(src/pal)
- add_subdirectory(src/corefx)
- add_subdirectory(src/coreclr/hosts/unixcoreruncommon)
- add_subdirectory(src/coreclr/hosts/unixcorerun)
- add_subdirectory(src/coreclr/hosts/unixcoreconsole)
- add_subdirectory(src/ildasm/unixcoreclrloader)
-endif(CLR_CMAKE_PLATFORM_UNIX)
-
-if(CLR_CMAKE_PLATFORM_DARWIN)
- add_subdirectory(src/coreclr/hosts/osxbundlerun)
-endif(CLR_CMAKE_PLATFORM_DARWIN)
-
-# Add this subdir. We install the headers for the jit.
-add_subdirectory(src/pal/prebuilt/inc)
-
-# Set to 1 if you want to clear the CMAKE initial compiler flags and set all the flags explicitly
-# or to 0 if the CMake generated flags should be used
+ add_definitions(-DDISABLE_CONTRACTS)
+ # The -ferror-limit is helpful during porting; it makes sure the compiler doesn't stop
+ # after hitting just about 20 errors.
+ add_compile_options(-ferror-limit=4096)
+
+ # All warnings that are not explicitly disabled are reported as errors
+ add_compile_options(-Werror)
+
+ # Disabled warnings
+ add_compile_options(-Wno-unused-private-field)
+ add_compile_options(-Wno-unused-variable)
+ # Explicit constructor calls are not supported by clang (this->ClassName::ClassName())
+ add_compile_options(-Wno-microsoft)
+ # This warning is caused by comparing 'this' to NULL
+ add_compile_options(-Wno-tautological-compare)
+ # There are constants of type BOOL used in a condition. But BOOL is defined as int
+ # and so the compiler thinks that there is a mistake.
+ add_compile_options(-Wno-constant-logical-operand)
+
+ add_compile_options(-Wno-unknown-warning-option)
+
+ # These seem to indicate real issues
+ add_compile_options(-Wno-invalid-offsetof)
+ # The following warning indicates that an attribute __attribute__((__ms_struct__)) was applied
+ # to a struct or a class that has virtual members or a base class. In that case, clang
+ # may not generate the same object layout as MSVC.
+ add_compile_options(-Wno-incompatible-ms-struct)
-# Enable for UNIX altjit on Windows - set(CLR_CMAKE_PLATFORM_UNIX_TARGET_AMD64 1)
-# Enable for UNIX altjit on Windows - add_definitions(-DCLR_CMAKE_PLATFORM_UNIX=1)
-
-if (WIN32)
- set(OVERRIDE_CMAKE_CXX_FLAGS 1)
-elseif (CLR_CMAKE_PLATFORM_UNIX)
- # Set flag to indicate if this will be a 64bit build
- # CMAKE_SYSTEM_PROCESSOR returns the value of `uname -p`.
- # For the AMD/Intel 64bit architecure two different strings are common.
- # Linux and Darwin identify it as "x86_64" while FreeBSD uses the
- # "amd64" string. Accept either of the two here.
- if(CMAKE_SYSTEM_PROCESSOR STREQUAL x86_64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL amd64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL aarch64)
- set(IS_64BIT_BUILD 1)
- endif (CMAKE_SYSTEM_PROCESSOR STREQUAL x86_64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL amd64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL aarch64)
-endif(WIN32)
-
-if (OVERRIDE_CMAKE_CXX_FLAGS)
+endif(CLR_CMAKE_PLATFORM_UNIX)
-# Compile options for targeting windows
if (WIN32)
-
-# The following options are set by the razzle build
-add_compile_options(/TP) # compile all files as C++
-add_compile_options(/FIWarningControl.h) # force include of WarningControl.h
-add_compile_options(/d2Zi+) # make optimized builds debugging easier
-add_compile_options(/nologo) # Suppress Startup Banner
-add_compile_options(/W3) # set warning level to 3
-add_compile_options(/WX) # treat warnings as errors
-add_compile_options(/Oi) # enable intrinsics
-add_compile_options(/Oy-) # disable suppressing of the creation of frame pointers on the call stack for quicker function calls
-add_compile_options(/U_MT) # undefine the predefined _MT macro
-add_compile_options(/GF) # enable read-only string pooling
-add_compile_options(/Gm-) # disable minimal rebuild
-add_compile_options(/EHa) # enable C++ EH (w/ SEH exceptions)
-add_compile_options(/Zp8) # pack structs on 8-byte boundary
-add_compile_options(/GS) # enable security checks
-add_compile_options(/Gy) # separate functions for linker
-add_compile_options(/Zc:wchar_t-) # C++ language conformance: wchar_t is NOT the native type, but a typedef
-add_compile_options(/Zc:forScope) # C++ language conformance: enforce Standard C++ for scoping rules
-add_compile_options(/GR-) # disable C++ RTTI
-add_compile_options(/FC) # use full pathnames in diagnostics
-add_compile_options(/Zl) # omit default library name in .OBJ
-add_compile_options(/MP) # Build with Multiple Processes (number of processes equal to the number of processors)
-add_compile_options(/GS) # Buffer Security Check
-add_compile_options(/Zm200) # Specify Precompiled Header Memory Allocation Limit of 150MB
-add_compile_options(/wd4960 /wd4961 /wd4603 /wd4627 /wd4838 /wd4456 /wd4457 /wd4458 /wd4459 /wd4091 /we4640)
-add_compile_options(/Zi) # enable debugging information
-
-if (CLR_CMAKE_PLATFORM_ARCH_I386)
- add_compile_options(/Gz)
-endif (CLR_CMAKE_PLATFORM_ARCH_I386)
-
-add_compile_options($<$<OR:$<CONFIG:Release>,$<CONFIG:Relwithdebinfo>>:/GL>)
-add_compile_options($<$<OR:$<OR:$<CONFIG:Release>,$<CONFIG:Relwithdebinfo>>,$<CONFIG:Checked>>:/O1>)
-
-if (IS_64BIT_BUILD EQUAL 1)
-# The generator expression in the following command means that the /homeparams option is added only for debug builds
-add_compile_options($<$<CONFIG:Debug>:/homeparams>) # Force parameters passed in registers to be written to the stack
-endif (IS_64BIT_BUILD EQUAL 1)
-
-# Disable the following line for UNIX altjit on Windows
-set(CMAKE_CXX_STANDARD_LIBRARIES "") # do not link against standard win32 libs i.e. kernel32, uuid, user32, etc.
-
-# Linker flags
-#
-# Disable the following line for UNIX altjit on Windows
-set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /MANIFEST:NO") #Do not create Side-by-Side Assembly Manifest
-set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /SUBSYSTEM:WINDOWS,6.00") #windows subsystem
-set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /LARGEADDRESSAWARE") # can handle addresses larger than 2 gigabytes
-set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /RELEASE") #sets the checksum in the header
-set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /NXCOMPAT") #Compatible with Data Execution Prevention
-set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /DYNAMICBASE") #Use address space layout randomization
-set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /DEBUGTYPE:cv,fixup") #debugging format
-set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /PDBCOMPRESS") #shrink pdb size
-set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /DEBUG")
-set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /IGNORE:4197,4013,4254,4070")
-set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /INCREMENTAL:NO")
-
-set(CMAKE_STATIC_LINKER_FLAGS "${CMAKE_STATIC_LINKER_FLAGS} /IGNORE:4221")
-
-set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /DEBUG /PDBCOMPRESS")
-set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /STACK:1572864")
-
-# Debug build specific flags
-set(CMAKE_SHARED_LINKER_FLAGS_DEBUG "/NOVCFEATURE")
-
-# Checked build specific flags
-set(CMAKE_SHARED_LINKER_FLAGS_CHECKED "${CMAKE_SHARED_LINKER_FLAGS_CHECKED} /OPT:REF /OPT:NOICF /NOVCFEATURE")
-set(CMAKE_STATIC_LINKER_FLAGS_CHECKED "${CMAKE_STATIC_LINKER_FLAGS_CHECKED} /OPT:REF /OPT:NOICF")
-set(CMAKE_EXE_LINKER_FLAGS_CHECKED "${CMAKE_EXE_LINKER_FLAGS_CHECKED} /OPT:REF /OPT:NOICF")
-
-# Release build specific flags
-set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /LTCG /OPT:REF /OPT:ICF")
-set(CMAKE_STATIC_LINKER_FLAGS_RELEASE "${CMAKE_STATIC_LINKER_FLAGS_RELEASE} /LTCG")
-set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /LTCG /OPT:REF /OPT:ICF")
-
-# ReleaseWithDebugInfo build specific flags
-set(CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO} /LTCG /OPT:REF /OPT:ICF")
-set(CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO} /LTCG")
-set(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} /LTCG /OPT:REF /OPT:ICF")
+ # Compile options for targeting windows
+
+ # The following options are set by the razzle build
+ add_compile_options(/TP) # compile all files as C++
+ add_compile_options(/d2Zi+) # make optimized builds debugging easier
+ add_compile_options(/nologo) # Suppress Startup Banner
+ add_compile_options(/W3) # set warning level to 3
+ add_compile_options(/WX) # treat warnings as errors
+ add_compile_options(/Oi) # enable intrinsics
+ add_compile_options(/Oy-) # disable suppressing of the creation of frame pointers on the call stack for quicker function calls
+ add_compile_options(/U_MT) # undefine the predefined _MT macro
+ add_compile_options(/GF) # enable read-only string pooling
+ add_compile_options(/Gm-) # disable minimal rebuild
+ add_compile_options(/EHa) # enable C++ EH (w/ SEH exceptions)
+ add_compile_options(/Zp8) # pack structs on 8-byte boundary
+ add_compile_options(/GS) # enable security checks
+ add_compile_options(/Gy) # separate functions for linker
+ add_compile_options(/Zc:wchar_t-) # C++ language conformance: wchar_t is NOT the native type, but a typedef
+ add_compile_options(/Zc:forScope) # C++ language conformance: enforce Standard C++ for scoping rules
+ add_compile_options(/GR-) # disable C++ RTTI
+ add_compile_options(/FC) # use full pathnames in diagnostics
+ add_compile_options(/MP) # Build with Multiple Processes (number of processes equal to the number of processors)
+ add_compile_options(/GS) # Buffer Security Check
+ add_compile_options(/Zm200) # Specify Precompiled Header Memory Allocation Limit of 150MB
+ add_compile_options(/wd4960 /wd4961 /wd4603 /wd4627 /wd4838 /wd4456 /wd4457 /wd4458 /wd4459 /wd4091 /we4640)
+ add_compile_options(/Zi) # enable debugging information
+
+ if (CLR_CMAKE_PLATFORM_ARCH_I386)
+ add_compile_options(/Gz)
+ endif (CLR_CMAKE_PLATFORM_ARCH_I386)
+
+ add_compile_options($<$<OR:$<CONFIG:Release>,$<CONFIG:Relwithdebinfo>>:/GL>)
+ add_compile_options($<$<OR:$<OR:$<CONFIG:Release>,$<CONFIG:Relwithdebinfo>>,$<CONFIG:Checked>>:/O1>)
+
+ if (IS_64BIT_BUILD EQUAL 1)
+ # The generator expression in the following command means that the /homeparams option is added only for debug builds
+ add_compile_options($<$<CONFIG:Debug>:/homeparams>) # Force parameters passed in registers to be written to the stack
+ endif (IS_64BIT_BUILD EQUAL 1)
+
+ # Linker flags
+ #
+ # Disable the following line for UNIX altjit on Windows
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /MANIFEST:NO") #Do not create Side-by-Side Assembly Manifest
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /SUBSYSTEM:WINDOWS,6.00") #windows subsystem
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /LARGEADDRESSAWARE") # can handle addresses larger than 2 gigabytes
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /RELEASE") #sets the checksum in the header
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /NXCOMPAT") #Compatible with Data Execution Prevention
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /DYNAMICBASE") #Use address space layout randomization
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /DEBUGTYPE:cv,fixup") #debugging format
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /PDBCOMPRESS") #shrink pdb size
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /DEBUG")
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /IGNORE:4197,4013,4254,4070,4221")
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /INCREMENTAL:NO")
+
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /DEBUG /PDBCOMPRESS")
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /STACK:1572864")
+
+ # Debug build specific flags
+ set(CMAKE_SHARED_LINKER_FLAGS_DEBUG "/NOVCFEATURE")
+
+ # Checked build specific flags
+ set(CMAKE_SHARED_LINKER_FLAGS_CHECKED "${CMAKE_SHARED_LINKER_FLAGS_CHECKED} /OPT:REF /OPT:NOICF /NOVCFEATURE")
+ set(CMAKE_STATIC_LINKER_FLAGS_CHECKED "${CMAKE_STATIC_LINKER_FLAGS_CHECKED} /OPT:REF /OPT:NOICF")
+ set(CMAKE_EXE_LINKER_FLAGS_CHECKED "${CMAKE_EXE_LINKER_FLAGS_CHECKED} /OPT:REF /OPT:NOICF")
+
+ # Release build specific flags
+ set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /LTCG /OPT:REF /OPT:ICF")
+ set(CMAKE_STATIC_LINKER_FLAGS_RELEASE "${CMAKE_STATIC_LINKER_FLAGS_RELEASE} /LTCG /OPT:REF /OPT:ICF")
+ set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /LTCG /OPT:REF /OPT:ICF")
+
+ # ReleaseWithDebugInfo build specific flags
+ set(CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO} /LTCG /OPT:REF /OPT:ICF")
+ set(CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO} /LTCG /OPT:REF /OPT:ICF")
+ set(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} /LTCG /OPT:REF /OPT:ICF")
# Temporary until cmake has VS generators for arm64
if(CLR_CMAKE_PLATFORM_ARCH_ARM64)
@@ -454,13 +444,6 @@ endif(CLR_CMAKE_PLATFORM_ARCH_ARM64)
endif (WIN32)
-endif (OVERRIDE_CMAKE_CXX_FLAGS)
-
-if(CLR_CMAKE_PLATFORM_UNIX_TARGET_AMD64)
-add_definitions(-DFEATURE_UNIX_AMD64_STRUCT_PASSING_ITF)
-add_definitions(-DFEATURE_UNIX_AMD64_STRUCT_PASSING)
-endif (CLR_CMAKE_PLATFORM_UNIX_TARGET_AMD64)
-
OPTION(CMAKE_ENABLE_CODE_COVERAGE "Enable code coverage" OFF)
if(CMAKE_ENABLE_CODE_COVERAGE)
@@ -482,39 +465,90 @@ if(CMAKE_ENABLE_CODE_COVERAGE)
endif(CMAKE_ENABLE_CODE_COVERAGE)
+# Start of projects that require usage of platform include files
+
if(CLR_CMAKE_PLATFORM_UNIX)
-add_definitions(-DDISABLE_CONTRACTS)
-# The -ferror-limit is helpful during the porting, it makes sure the compiler doesn't stop
-# after hitting just about 20 errors.
-add_compile_options(-ferror-limit=4096)
-
-# All warnings that are not explicitly disabled are reported as errors
-add_compile_options(-Werror)
-
-# Disabled warnings
-add_compile_options(-Wno-unused-private-field)
-add_compile_options(-Wno-unused-variable)
-# Explicit constructor calls are not supported by clang (this->ClassName::ClassName())
-add_compile_options(-Wno-microsoft)
-# This warning is caused by comparing 'this' to NULL
-add_compile_options(-Wno-tautological-compare)
-# There are constants of type BOOL used in a condition. But BOOL is defined as int
-# and so the compiler thinks that there is a mistake.
-add_compile_options(-Wno-constant-logical-operand)
-
-add_compile_options(-Wno-unknown-warning-option)
-
-#These seem to indicate real issues
-add_compile_options(-Wno-invalid-offsetof)
-# The following warning indicates that an attribute __attribute__((__ms_struct__)) was applied
-# to a struct or a class that has virtual members or a base class. In that case, clang
-# may not generate the same object layout as MSVC.
-add_compile_options(-Wno-incompatible-ms-struct)
+ add_subdirectory(src/corefx)
+endif(CLR_CMAKE_PLATFORM_UNIX)
+
+if(IS_64BIT_BUILD)
+ add_definitions(-DBIT64=1)
+endif(IS_64BIT_BUILD)
+if (CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ if (CLR_CMAKE_PLATFORM_UNIX_TARGET_AMD64)
+ add_definitions(-DDBG_TARGET_AMD64_UNIX)
+ endif()
+ add_definitions(-D_TARGET_AMD64_=1)
+ add_definitions(-DDBG_TARGET_AMD64)
+elseif (CLR_CMAKE_PLATFORM_ARCH_ARM64)
+ if (CLR_CMAKE_PLATFORM_UNIX_TARGET_ARM64)
+ add_definitions(-DDBG_TARGET_ARM64_UNIX)
+ endif()
+ add_definitions(-D_TARGET_ARM64_=1)
+ add_definitions(-DDBG_TARGET_ARM64)
+elseif (CLR_CMAKE_PLATFORM_ARCH_ARM)
+ if (CLR_CMAKE_PLATFORM_UNIX_TARGET_ARM)
+ add_definitions(-DDBG_TARGET_ARM_UNIX)
+ endif (CLR_CMAKE_PLATFORM_UNIX_TARGET_ARM)
+ add_definitions(-D_TARGET_ARM_=1)
+ add_definitions(-DDBG_TARGET_ARM)
+elseif (CLR_CMAKE_PLATFORM_ARCH_I386)
+ add_definitions(-D_TARGET_X86_=1)
+ add_definitions(-DDBG_TARGET_X86)
+else ()
+ clr_unknown_arch()
+endif (CLR_CMAKE_PLATFORM_ARCH_AMD64)
+
+if (CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ add_definitions(-D_AMD64_)
+ add_definitions(-D_WIN64)
+ add_definitions(-DAMD64)
+elseif (CLR_CMAKE_PLATFORM_ARCH_I386)
+ add_definitions(-D_WIN32)
+ add_definitions(-D_X86_)
+elseif (CLR_CMAKE_PLATFORM_ARCH_ARM)
+ add_definitions(-D_ARM_)
+ add_definitions(-DARM)
+ add_definitions(-D_WIN32)
+elseif (CLR_CMAKE_PLATFORM_ARCH_ARM64)
+ add_definitions(-D_ARM64_)
+ add_definitions(-DARM64)
+ add_definitions(-D_WIN64)
+else ()
+ clr_unknown_arch()
+endif ()
+
+if(CLR_CMAKE_PLATFORM_UNIX)
+ add_subdirectory(src/ToolBox/SOS/lldbplugin)
+ add_subdirectory(src/pal)
+ add_subdirectory(src/coreclr/hosts/unixcoreruncommon)
+ add_subdirectory(src/coreclr/hosts/unixcorerun)
+ add_subdirectory(src/coreclr/hosts/unixcoreconsole)
+ add_subdirectory(src/ildasm/unixcoreclrloader)
endif(CLR_CMAKE_PLATFORM_UNIX)
+if(CLR_CMAKE_PLATFORM_DARWIN)
+ add_subdirectory(src/coreclr/hosts/osxbundlerun)
+endif(CLR_CMAKE_PLATFORM_DARWIN)
+
+# Add this subdir. We install the headers for the jit.
+add_subdirectory(src/pal/prebuilt/inc)
+
add_subdirectory(src/debug/debug-pal)
+if(WIN32)
+ add_subdirectory(src/gc/sample)
+endif()
+
+# End of projects that require usage of platform include files
+
+# Enable for UNIX altjit on Windows - set(CLR_CMAKE_PLATFORM_UNIX_TARGET_AMD64 1)
+# Enable for UNIX altjit on Windows - add_definitions(-DCLR_CMAKE_PLATFORM_UNIX=1)
+
+# Disable the following line for UNIX altjit on Windows
+set(CMAKE_CXX_STANDARD_LIBRARIES "") # do not link against standard win32 libs i.e. kernel32, uuid, user32, etc.
+
# Include directory directives
# Include the basic prebuilt headers - required for getting fileversion resource details.
@@ -532,23 +566,22 @@ if (WIN32)
set(STATIC_MT_CRT_LIB "libcmt$<$<OR:$<CONFIG:Debug>,$<CONFIG:Checked>>:d>.lib")
set(STATIC_MT_CPP_LIB "libcpmt$<$<OR:$<CONFIG:Debug>,$<CONFIG:Checked>>:d>.lib")
endif(WIN32)
-# Definition directives
- if(IS_64BIT_BUILD)
- add_definitions(-DBIT64=1)
- endif(IS_64BIT_BUILD)
+# Definition directives
if (CLR_CMAKE_PLATFORM_UNIX)
- add_definitions(-DFEATURE_PAL_SXS)
- add_definitions(-DFEATURE_COREFX_GLOBALIZATION)
- add_definitions(-DFEATURE_PAL)
if(CLR_CMAKE_PLATFORM_DARWIN)
add_definitions(-D_XOPEN_SOURCE)
endif(CLR_CMAKE_PLATFORM_DARWIN)
-endif(CLR_CMAKE_PLATFORM_UNIX)
+ if (CLR_CMAKE_PLATFORM_UNIX_TARGET_AMD64)
+ add_definitions(-DUNIX_AMD64_ABI)
+ elseif (CLR_CMAKE_PLATFORM_UNIX_TARGET_ARM)
+ add_definitions(-DUNIX_ARM_ABI)
+ endif()
+endif(CLR_CMAKE_PLATFORM_UNIX)
add_definitions(-D_CRT_STDIO_ARBITRARY_WIDE_SPECIFIERS)
add_definitions(-DDEV10)
@@ -560,86 +593,63 @@ add_definitions(-DWINNT=1)
add_definitions(-DNT_INST=0)
add_definitions(-DCONDITION_HANDLING=1)
add_definitions(-DNTDDI_VERSION=NTDDI_WIN8)
-
-if (CLR_CMAKE_PLATFORM_ARCH_AMD64)
- if (CLR_CMAKE_PLATFORM_UNIX_TARGET_AMD64)
- add_definitions(-DDBG_TARGET_AMD64_UNIX)
- endif()
- add_definitions(-D_TARGET_AMD64_=1)
- add_definitions(-DDBG_TARGET_AMD64)
-elseif(CLR_CMAKE_PLATFORM_ARCH_ARM64)
- if (CLR_CMAKE_PLATFORM_UNIX_TARGET_ARM64)
- add_definitions(-DDBG_TARGET_ARM64_UNIX)
- endif()
- add_definitions(-D_TARGET_ARM64_=1)
- add_definitions(-DDBG_TARGET_ARM64)
-elseif (CLR_CMAKE_PLATFORM_ARCH_ARM)
- if (CLR_CMAKE_PLATFORM_UNIX_TARGET_ARM)
- add_definitions(-DDBG_TARGET_ARM_UNIX)
- endif (CLR_CMAKE_PLATFORM_UNIX_TARGET_ARM)
- add_definitions(-D_TARGET_ARM_=1)
- add_definitions(-DDBG_TARGET_ARM)
-elseif (CLR_CMAKE_PLATFORM_ARCH_I386)
- add_definitions(-D_TARGET_X86_=1)
- add_definitions(-DDBG_TARGET_X86)
-else ()
- clr_unknown_arch()
-endif (CLR_CMAKE_PLATFORM_ARCH_AMD64)
-
-if(WIN32)
- add_definitions(-D_CRT_SECURE_NO_WARNINGS)
-endif(WIN32)
add_definitions(-DNTMAKEENV)
add_definitions(-D_BLD_CLR)
add_definitions(-DWINVER=0x0602)
add_definitions(-DWIN32_LEAN_AND_MEAN=1)
add_definitions(-DDEBUGGING_SUPPORTED)
+
if(WIN32)
if(CLR_CMAKE_PLATFORM_ARCH_AMD64 OR CLR_CMAKE_PLATFORM_ARCH_I386)
- # Only enable edit and continue on windows x86 and x64
- # exclude Linux, arm & arm64
- add_definitions(-DEnC_SUPPORTED)
+ add_definitions(-D_CRT_SECURE_NO_WARNINGS)
+ # Only enable edit and continue on windows x86 and x64
+ # exclude Linux, arm & arm64
+ add_definitions(-DEnC_SUPPORTED)
endif(CLR_CMAKE_PLATFORM_ARCH_AMD64 OR CLR_CMAKE_PLATFORM_ARCH_I386)
+ add_definitions(-DPROFILING_SUPPORTED)
endif(WIN32)
+
+# Features - please keep them alphabetically sorted
+
add_definitions(-DFEATURE_APPDOMAIN_RESOURCE_MONITORING)
+if(WIN32)
+ add_definitions(-DFEATURE_APPX)
+endif(WIN32)
if(CLR_CMAKE_PLATFORM_ARCH_AMD64 OR CLR_CMAKE_PLATFORM_ARCH_ARM OR CLR_CMAKE_PLATFORM_ARCH_ARM64)
add_definitions(-DFEATURE_ARRAYSTUB_AS_IL)
endif()
-if (CLR_CMAKE_PLATFORM_UNIX OR CLR_CMAKE_PLATFORM_ARCH_ARM64)
- add_definitions(-DFEATURE_STUBS_AS_IL)
- add_definitions(-DFEATURE_IMPLICIT_TLS)
- set(FEATURE_IMPLICIT_TLS 1)
-endif()
-
-if (CLR_CMAKE_PLATFORM_UNIX)
- if (CLR_CMAKE_PLATFORM_UNIX_TARGET_AMD64)
- add_definitions(-DUNIX_AMD64_ABI)
- elseif (CLR_CMAKE_PLATFORM_UNIX_TARGET_ARM)
- add_definitions(-DUNIX_ARM_ABI)
- endif()
-endif(CLR_CMAKE_PLATFORM_UNIX)
add_definitions(-DFEATURE_ASYNC_IO)
add_definitions(-DFEATURE_BCL_FORMATTING)
add_definitions(-DFEATURE_COLLECTIBLE_TYPES)
if(WIN32)
add_definitions(-DFEATURE_CLASSIC_COMINTEROP)
- add_definitions(-DFEATURE_APPX)
add_definitions(-DFEATURE_COMINTEROP)
add_definitions(-DFEATURE_COMINTEROP_APARTMENT_SUPPORT)
add_definitions(-DFEATURE_COMINTEROP_UNMANAGED_ACTIVATION)
add_definitions(-DFEATURE_COMINTEROP_WINRT_MANAGED_ACTIVATION)
endif(WIN32)
-add_definitions(-DFEATURE_ICASTABLE)
-
add_definitions(-DFEATURE_CORECLR)
+if (CLR_CMAKE_PLATFORM_UNIX)
+ add_definitions(-DFEATURE_COREFX_GLOBALIZATION)
+endif(CLR_CMAKE_PLATFORM_UNIX)
add_definitions(-DFEATURE_CORESYSTEM)
add_definitions(-DFEATURE_CORRUPTING_EXCEPTIONS)
if(WIN32)
add_definitions(-DFEATURE_CRYPTO)
endif(WIN32)
+if(CLR_CMAKE_PLATFORM_UNIX)
+ add_definitions(-DFEATURE_DBGIPC_TRANSPORT_DI)
+ add_definitions(-DFEATURE_DBGIPC_TRANSPORT_VM)
+endif(CLR_CMAKE_PLATFORM_UNIX)
+if (WIN32 OR CLR_CMAKE_PLATFORM_LINUX)
+ add_definitions(-DFEATURE_EVENT_TRACE=1)
+endif (WIN32 OR CLR_CMAKE_PLATFORM_LINUX)
+if (CLR_CMAKE_PLATFORM_LINUX)
+ add_definitions(-DFEATURE_EVENTSOURCE_XPLAT=1)
+endif (CLR_CMAKE_PLATFORM_LINUX)
add_definitions(-DFEATURE_EXCEPTIONDISPATCHINFO)
add_definitions(-DFEATURE_FRAMEWORK_INTERNAL)
if(NOT CLR_CMAKE_PLATFORM_UNIX_TARGET_ARM)
@@ -647,6 +657,11 @@ if(NOT CLR_CMAKE_PLATFORM_UNIX_TARGET_ARM)
endif(NOT CLR_CMAKE_PLATFORM_UNIX_TARGET_ARM)
add_definitions(-DFEATURE_HOST_ASSEMBLY_RESOLVER)
add_definitions(-DFEATURE_HOSTED_BINDER)
+add_definitions(-DFEATURE_ICASTABLE)
+if (CLR_CMAKE_PLATFORM_UNIX OR CLR_CMAKE_PLATFORM_ARCH_ARM64)
+ add_definitions(-DFEATURE_IMPLICIT_TLS)
+ set(FEATURE_IMPLICIT_TLS 1)
+endif(CLR_CMAKE_PLATFORM_UNIX OR CLR_CMAKE_PLATFORM_ARCH_ARM64)
if(WIN32)
add_definitions(-DFEATURE_ISOSTORE)
add_definitions(-DFEATURE_ISOSTORE_LIGHT)
@@ -670,7 +685,15 @@ if(WIN32)
# Disable the following or UNIX altjit on Windows
add_definitions(-DFEATURE_MERGE_JIT_AND_ENGINE)
endif(WIN32)
+add_definitions(-DFEATURE_MULTICOREJIT)
add_definitions(-DFEATURE_NORM_IDNA_ONLY)
+if(CLR_CMAKE_PLATFORM_UNIX)
+ add_definitions(-DFEATURE_PAL)
+ add_definitions(-DFEATURE_PAL_SXS)
+endif(CLR_CMAKE_PLATFORM_UNIX)
+if(CLR_CMAKE_PLATFORM_LINUX)
+ add_definitions(-DFEATURE_PERFMAP)
+endif(CLR_CMAKE_PLATFORM_LINUX)
add_definitions(-DFEATURE_PREJIT)
add_definitions(-DFEATURE_RANDOMIZED_STRING_HASHING)
if(NOT DEFINED CLR_CMAKE_PLATFORM_ARCH_ARM64)
@@ -683,11 +706,17 @@ add_definitions(-DFEATURE_STRONGNAME_MIGRATION)
if(WIN32)
add_definitions(-DFEATURE_STRONGNAME_TESTKEY_ALLOWED)
endif(WIN32)
+if (CLR_CMAKE_PLATFORM_UNIX OR CLR_CMAKE_PLATFORM_ARCH_ARM64)
+ add_definitions(-DFEATURE_STUBS_AS_IL)
+endif(CLR_CMAKE_PLATFORM_UNIX OR CLR_CMAKE_PLATFORM_ARCH_ARM64)
add_definitions(-DFEATURE_SVR_GC)
-if(CLR_CMAKE_PLATFORM_LINUX)
- add_definitions(-DFEATURE_PERFMAP)
-endif(CLR_CMAKE_PLATFORM_LINUX)
+add_definitions(-DFEATURE_SYMDIFF)
add_definitions(-DFEATURE_SYNTHETIC_CULTURES)
+if(CLR_CMAKE_PLATFORM_UNIX_TARGET_AMD64)
+ add_definitions(-DFEATURE_UNIX_AMD64_STRUCT_PASSING)
+ add_definitions(-DFEATURE_UNIX_AMD64_STRUCT_PASSING_ITF)
+endif (CLR_CMAKE_PLATFORM_UNIX_TARGET_AMD64)
+add_definitions(-DFEATURE_USE_ASM_GC_WRITE_BARRIERS)
add_definitions(-DFEATURE_VERSIONING)
if(WIN32)
add_definitions(-DFEATURE_VERSIONING_LOG)
@@ -698,55 +727,12 @@ add_definitions(-DFEATURE_WINMD_RESILIENT)
if(WIN32)
add_definitions(-DFEATURE_X509)
add_definitions(-DFEATURE_X509_SECURESTRINGS)
- add_definitions(-DPROFILING_SUPPORTED)
- if (CLR_CMAKE_PLATFORM_ARCH_I386 OR CLR_CMAKE_PLATFORM_ARCH_ARM)
- add_definitions(-DFEATURE_LAZY_COW_PAGES)
- endif()
endif(WIN32)
-add_definitions(-DFEATURE_MULTICOREJIT)
-add_definitions(-DFEATURE_USE_ASM_GC_WRITE_BARRIERS)
-add_definitions(-DFEATURE_SYMDIFF)
-
-if(CLR_CMAKE_PLATFORM_UNIX)
- add_definitions(-DFEATURE_DBGIPC_TRANSPORT_DI)
- add_definitions(-DFEATURE_DBGIPC_TRANSPORT_VM)
-endif(CLR_CMAKE_PLATFORM_UNIX)
-
-if (CLR_CMAKE_PLATFORM_ARCH_AMD64)
- add_definitions(-D_AMD64_)
- add_definitions(-D_AMD64_SIMULATOR_)
- add_definitions(-D_AMD64_SIMULATOR_PERF_)
- add_definitions(-D_AMD64_WORKAROUND_)
- add_definitions(-D_WIN64)
- add_definitions(-DAMD64)
-elseif (CLR_CMAKE_PLATFORM_ARCH_I386)
- add_definitions(-D_WIN32)
- add_definitions(-D_X86_)
-elseif (CLR_CMAKE_PLATFORM_ARCH_ARM)
- add_definitions(-D_ARM_)
- add_definitions(-DARM)
- add_definitions(-D_WIN32)
-elseif (CLR_CMAKE_PLATFORM_ARCH_ARM64)
- add_definitions(-D_ARM64_)
- add_definitions(-DARM64)
- add_definitions(-D_WIN64)
-else ()
- clr_unknown_arch()
-endif ()
if(CLR_CMAKE_BUILD_TESTS)
add_subdirectory(tests)
endif(CLR_CMAKE_BUILD_TESTS)
-add_definitions(-D_SKIP_IF_SIMULATOR_)
-add_definitions(-D_SECURE_SCL=0)
-add_definitions(-D_NEW_SDK=1)
-add_definitions(-DOFFICIAL_BUILD=0)
-add_definitions(-DBETA=0)
-add_definitions(-DFX_BRANCH_SYNC_COUNTER_VALUE=0)
-add_definitions(-DUNICODE)
-add_definitions(-D_UNICODE)
-
if (CLR_CMAKE_PLATFORM_ARCH_AMD64)
set(ARCH_SOURCES_DIR amd64)
elseif (CLR_CMAKE_PLATFORM_ARCH_ARM64)
@@ -759,4 +745,20 @@ else ()
clr_unknown_arch()
endif ()
+add_definitions(-D_SKIP_IF_SIMULATOR_)
+add_definitions(-D_SECURE_SCL=0)
+add_definitions(-D_NEW_SDK=1)
+add_definitions(-DOFFICIAL_BUILD=0)
+add_definitions(-DBETA=0)
+add_definitions(-DFX_BRANCH_SYNC_COUNTER_VALUE=0)
+add_definitions(-DUNICODE)
+add_definitions(-D_UNICODE)
+
+# Compiler options
+
+if(WIN32)
+ add_compile_options(/FIWarningControl.h) # force include of WarningControl.h
+ add_compile_options(/Zl) # omit default library name in .OBJ
+endif(WIN32)
+
add_subdirectory(src)
diff --git a/src/corefx/System.Globalization.Native/CMakeLists.txt b/src/corefx/System.Globalization.Native/CMakeLists.txt
index 08d35a0053..71b5c039d2 100644
--- a/src/corefx/System.Globalization.Native/CMakeLists.txt
+++ b/src/corefx/System.Globalization.Native/CMakeLists.txt
@@ -1,5 +1,6 @@
project(System.Globalization.Native)
+include(CheckCXXSourceCompiles)
set(CMAKE_INCLUDE_CURRENT_DIR ON)
diff --git a/src/debug/ee/rcthread.cpp b/src/debug/ee/rcthread.cpp
index 442c0748a3..d4b8a61c7d 100644
--- a/src/debug/ee/rcthread.cpp
+++ b/src/debug/ee/rcthread.cpp
@@ -882,7 +882,7 @@ void AssertAllocationAllowed()
// Can't call IsDbgHelperSpecialThread() here b/c that changes program state.
// So we use our
- if (DebuggerRCThread::s_DbgHelperThreadId.IsSameThread())
+ if (DebuggerRCThread::s_DbgHelperThreadId.IsCurrentThread())
{
// In case assert allocates, bump up the 'OK' counter to avoid an infinite recursion.
SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
@@ -935,7 +935,7 @@ void DebuggerRCThread::ThreadProc(void)
#ifdef _DEBUG
// Track the helper thread.
- s_DbgHelperThreadId.SetThreadId();
+ s_DbgHelperThreadId.SetToCurrentThread();
#endif
CantAllocHolder caHolder;
diff --git a/src/dlls/mscoree/coreclr/CMakeLists.txt b/src/dlls/mscoree/coreclr/CMakeLists.txt
index 98af5911b8..792a9aa7e8 100644
--- a/src/dlls/mscoree/coreclr/CMakeLists.txt
+++ b/src/dlls/mscoree/coreclr/CMakeLists.txt
@@ -177,4 +177,3 @@ install (TARGETS coreclr DESTINATION .)
if(WIN32)
install (FILES ${CMAKE_CURRENT_BINARY_DIR}/$<CONFIG>/coreclr.pdb DESTINATION PDB)
endif(WIN32)
-
diff --git a/src/gc/env/common.h b/src/gc/env/common.h
index 3e982f8f6c..39e97b3e7a 100644
--- a/src/gc/env/common.h
+++ b/src/gc/env/common.h
@@ -22,7 +22,7 @@
#include <new>
-#ifndef WIN32
+#ifdef PLATFORM_UNIX
#include <pthread.h>
#endif
diff --git a/src/gc/env/gcenv.base.h b/src/gc/env/gcenv.base.h
index 5b8f5f7dd3..628a90cc88 100644
--- a/src/gc/env/gcenv.base.h
+++ b/src/gc/env/gcenv.base.h
@@ -16,11 +16,17 @@
#define REDHAWK_PALIMPORT extern "C"
#define REDHAWK_PALAPI __stdcall
-
#ifndef _MSC_VER
#define __stdcall
+#ifdef __clang__
+#define __forceinline __attribute__((always_inline))
+#else // __clang__
#define __forceinline inline
-#endif
+#endif // __clang__
+#endif // !_MSC_VER
+
+#define SIZE_T_MAX ((size_t)-1)
+#define SSIZE_T_MAX ((ptrdiff_t)(SIZE_T_MAX / 2))
#ifndef _INC_WINDOWS
// -----------------------------------------------------------------------------------------------------------
@@ -44,17 +50,14 @@ typedef size_t SIZE_T;
typedef void * HANDLE;
-#define SIZE_T_MAX ((size_t)-1)
-#define SSIZE_T_MAX ((ptrdiff_t)(SIZE_T_MAX / 2))
-
// -----------------------------------------------------------------------------------------------------------
// HRESULT subset.
-#ifdef WIN32
+#ifdef PLATFORM_UNIX
+typedef int32_t HRESULT;
+#else
// this must exactly match the typedef used by windows.h
typedef long HRESULT;
-#else
-typedef int32_t HRESULT;
#endif
#define SUCCEEDED(_hr) ((HRESULT)(_hr) >= 0)
@@ -104,122 +107,20 @@ inline HRESULT HRESULT_FROM_WIN32(unsigned long x)
#define INVALID_HANDLE_VALUE ((HANDLE)-1)
-#ifndef WIN32
+#ifdef PLATFORM_UNIX
#define _vsnprintf vsnprintf
#define sprintf_s snprintf
+#define swprintf_s swprintf
#endif
-#define WINBASEAPI extern "C"
#define WINAPI __stdcall
typedef DWORD (WINAPI *PTHREAD_START_ROUTINE)(void* lpThreadParameter);
-WINBASEAPI
-void
-WINAPI
-DebugBreak();
-
-WINBASEAPI
-BOOL
-WINAPI
-VirtualUnlock(
- LPVOID lpAddress,
- SIZE_T dwSize
- );
-
-WINBASEAPI
-DWORD
-WINAPI
-GetLastError();
-
-WINBASEAPI
-UINT
-WINAPI
-GetWriteWatch(
- DWORD dwFlags,
- PVOID lpBaseAddress,
- SIZE_T dwRegionSize,
- PVOID *lpAddresses,
- ULONG_PTR * lpdwCount,
- DWORD * lpdwGranularity
-);
-
-WINBASEAPI
-UINT
-WINAPI
-ResetWriteWatch(
- LPVOID lpBaseAddress,
- SIZE_T dwRegionSize
-);
-
-WINBASEAPI
-VOID
-WINAPI
-FlushProcessWriteBuffers();
-
-WINBASEAPI
-DWORD
-WINAPI
-GetTickCount();
-
-WINBASEAPI
-BOOL
-WINAPI
-QueryPerformanceCounter(LARGE_INTEGER *lpPerformanceCount);
-
-WINBASEAPI
-BOOL
-WINAPI
-QueryPerformanceFrequency(LARGE_INTEGER *lpFrequency);
-
-WINBASEAPI
-DWORD
-WINAPI
-GetCurrentThreadId(
- VOID);
-
-WINBASEAPI
-BOOL
-WINAPI
-CloseHandle(
- HANDLE hObject);
-
#define WAIT_OBJECT_0 0
#define WAIT_TIMEOUT 258
#define WAIT_FAILED 0xFFFFFFFF
-#define GENERIC_WRITE 0x40000000
-#define FILE_SHARE_READ 0x00000001
-#define CREATE_ALWAYS 2
-#define FILE_ATTRIBUTE_NORMAL 0x00000080
-
-WINBASEAPI
-BOOL
-WINAPI
-WriteFile(
- HANDLE hFile,
- LPCVOID lpBuffer,
- DWORD nNumberOfBytesToWrite,
- DWORD * lpNumberOfBytesWritten,
- PVOID lpOverlapped);
-
-#define FILE_BEGIN 0
-
-WINBASEAPI
-DWORD
-WINAPI
-SetFilePointer(
- HANDLE hFile,
- int32_t lDistanceToMove,
- int32_t * lpDistanceToMoveHigh,
- DWORD dwMoveMethod);
-
-WINBASEAPI
-BOOL
-WINAPI
-FlushFileBuffers(
- HANDLE hFile);
-
#if defined(_MSC_VER)
#if defined(_ARM_)
@@ -263,24 +164,8 @@ FlushFileBuffers(
#endif
#else // _MSC_VER
-WINBASEAPI
-VOID
-WINAPI
-YieldProcessor();
-
-WINBASEAPI
-VOID
-WINAPI
-MemoryBarrier();
-
#endif // _MSC_VER
-typedef struct _GUID {
- unsigned long Data1;
- unsigned short Data2;
- unsigned short Data3;
- unsigned char Data4[8];
-} GUID;
#endif // _INC_WINDOWS
// -----------------------------------------------------------------------------------------------------------
@@ -410,56 +295,6 @@ typedef DPTR(uint8_t) PTR_uint8_t;
#define UI64(_literal) _literal##ULL
-int32_t FastInterlockIncrement(int32_t volatile *lpAddend);
-int32_t FastInterlockDecrement(int32_t volatile *lpAddend);
-int32_t FastInterlockExchange(int32_t volatile *Target, int32_t Value);
-int32_t FastInterlockCompareExchange(int32_t volatile *Destination, int32_t Exchange, int32_t Comperand);
-int32_t FastInterlockExchangeAdd(int32_t volatile *Addend, int32_t Value);
-
-void * _FastInterlockExchangePointer(void * volatile *Target, void * Value);
-void * _FastInterlockCompareExchangePointer(void * volatile *Destination, void * Exchange, void * Comperand);
-
-template <typename T>
-inline T FastInterlockExchangePointer(
- T volatile * target,
- T value)
-{
- return (T)((TADDR)_FastInterlockExchangePointer((void **)target, value));
-}
-
-template <typename T>
-inline T FastInterlockExchangePointer(
- T volatile * target,
- nullptr_t value)
-{
- return (T)((TADDR)_FastInterlockExchangePointer((void **)target, value));
-}
-
-template <typename T>
-inline T FastInterlockCompareExchangePointer(
- T volatile * destination,
- T exchange,
- T comparand)
-{
- return (T)((TADDR)_FastInterlockCompareExchangePointer((void **)destination, exchange, comparand));
-}
-
-template <typename T>
-inline T FastInterlockCompareExchangePointer(
- T volatile * destination,
- T exchange,
- nullptr_t comparand)
-{
- return (T)((TADDR)_FastInterlockCompareExchangePointer((void **)destination, exchange, comparand));
-}
-
-
-void FastInterlockOr(uint32_t volatile *p, uint32_t msk);
-void FastInterlockAnd(uint32_t volatile *p, uint32_t msk);
-
-#define CALLER_LIMITS_SPINNING 0
-bool __SwitchToThread (uint32_t dwSleepMSec, uint32_t dwSwitchCount);
-
class ObjHeader;
class MethodTable;
class Object;
@@ -493,7 +328,51 @@ typedef TADDR OBJECTHANDLE;
#define VOLATILE(T) T volatile
+//
+// This code is extremely compiler- and CPU-specific, and will need to be altered to
+// support new compilers and/or CPUs. Here we enforce that we can only compile using
+// VC++, or Clang on x86, AMD64, ARM and ARM64.
+//
+#if !defined(_MSC_VER) && !defined(__clang__)
+#error The Volatile type is currently only defined for Visual C++ and Clang
+#endif
+
+#if defined(__clang__) && !defined(_X86_) && !defined(_AMD64_) && !defined(_ARM_) && !defined(_ARM64_)
+#error The Volatile type is currently only defined for Clang when targeting x86, AMD64, ARM or ARM64 CPUs
+#endif
+
+#if defined(__clang__)
+#if defined(_ARM_) || defined(_ARM64_)
+// This is functionally equivalent to the MemoryBarrier() macro used on ARM on Windows.
+#define VOLATILE_MEMORY_BARRIER() asm volatile ("dmb sy" : : : "memory")
+#else
+//
+// For Clang, we prevent reordering by the compiler by inserting the following after a volatile
+// load (to prevent subsequent operations from moving before the read), and before a volatile
+// write (to prevent prior operations from moving past the write). We don't need to do anything
+// special to prevent CPU reorderings, because the x86 and AMD64 architectures are already
+// sufficiently constrained for our purposes. If we ever need to run on weaker CPU architectures
+// (such as PowerPC), then we will need to do more work.
+//
+// Please do not use this macro outside of this file. It is subject to change or removal without
+// notice.
+//
+#define VOLATILE_MEMORY_BARRIER() asm volatile ("" : : : "memory")
+#endif // !_ARM_
+#elif defined(_ARM_) && _ISO_VOLATILE
+// ARM has a very weak memory model and very few tools to control that model. We're forced to perform a full
+// memory barrier to preserve the volatile semantics. Technically this is only necessary on MP systems but we
+// currently don't have a cheap way to determine the number of CPUs from this header file. Revisit this if it
+// turns out to be a performance issue for the uni-proc case.
+#define VOLATILE_MEMORY_BARRIER() MemoryBarrier()
+#else
+//
+// On VC++, reorderings at the compiler and machine level are prevented by the use of the
+// "volatile" keyword in VolatileLoad and VolatileStore. This should work on any CPU architecture
+// targeted by VC++ with /iso_volatile-.
+//
#define VOLATILE_MEMORY_BARRIER()
+#endif
//
// VolatileLoad loads a T from a pointer to T. It is guaranteed that this load will not be optimized
@@ -539,11 +418,6 @@ void VolatileStore(T* pt, T val)
}
extern GCSystemInfo g_SystemInfo;
-void InitializeSystemInfo();
-
-void
-GetProcessMemoryLoad(
- GCMemoryStatus* lpBuffer);
extern MethodTable * g_pFreeObjectMethodTable;
@@ -552,43 +426,6 @@ extern int32_t g_TrapReturningThreads;
extern bool g_fFinalizerRunOnShutDown;
//
-// Memory allocation
-//
-#define MEM_COMMIT 0x1000
-#define MEM_RESERVE 0x2000
-#define MEM_DECOMMIT 0x4000
-#define MEM_RELEASE 0x8000
-#define MEM_RESET 0x80000
-
-#define PAGE_NOACCESS 0x01
-#define PAGE_READWRITE 0x04
-
-void * ClrVirtualAlloc(
- void * lpAddress,
- size_t dwSize,
- uint32_t flAllocationType,
- uint32_t flProtect);
-
-void * ClrVirtualAllocAligned(
- void * lpAddress,
- size_t dwSize,
- uint32_t flAllocationType,
- uint32_t flProtect,
- size_t dwAlignment);
-
-bool ClrVirtualFree(
- void * lpAddress,
- size_t dwSize,
- uint32_t dwFreeType);
-
-bool
-ClrVirtualProtect(
- void * lpAddress,
- size_t dwSize,
- uint32_t flNewProtect,
- uint32_t * lpflOldProtect);
-
-//
// Locks
//
@@ -597,71 +434,8 @@ class Thread;
Thread * GetThread();
-struct ScanContext;
-typedef void promote_func(PTR_PTR_Object, ScanContext*, uint32_t);
-
typedef void (CALLBACK *HANDLESCANPROC)(PTR_UNCHECKED_OBJECTREF pref, uintptr_t *pExtraInfo, uintptr_t param1, uintptr_t param2);
-typedef void enum_alloc_context_func(alloc_context*, void*);
-
-class GCToEEInterface
-{
-public:
- //
- // Suspend/Resume callbacks
- //
- typedef enum
- {
- SUSPEND_FOR_GC,
- SUSPEND_FOR_GC_PREP
- } SUSPEND_REASON;
-
- static void SuspendEE(SUSPEND_REASON reason);
- static void RestartEE(bool bFinishedGC); //resume threads.
-
- //
- // The stack roots enumeration callback
- //
- static void GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc);
-
- //
- // Callbacks issues during GC that the execution engine can do its own bookeeping
- //
-
- // start of GC call back - single threaded
- static void GcStartWork(int condemned, int max_gen);
-
- //EE can perform post stack scanning action, while the
- // user threads are still suspended
- static void AfterGcScanRoots(int condemned, int max_gen, ScanContext* sc);
-
- // Called before BGC starts sweeping, the heap is walkable
- static void GcBeforeBGCSweepWork();
-
- // post-gc callback.
- static void GcDone(int condemned);
-
- // Promote refcounted handle callback
- static bool RefCountedHandleCallbacks(Object * pObject);
-
- // Sync block cache management
- static void SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2);
- static void SyncBlockCacheDemote(int max_gen);
- static void SyncBlockCachePromotionsGranted(int max_gen);
-
- // Thread functions
- static bool IsPreemptiveGCDisabled(Thread * pThread);
- static void EnablePreemptiveGC(Thread * pThread);
- static void DisablePreemptiveGC(Thread * pThread);
- static void SetGCSpecial(Thread * pThread);
- static bool CatchAtSafePoint(Thread * pThread);
- static alloc_context * GetAllocContext(Thread * pThread);
-
- // ThreadStore functions
- static void AttachCurrentThread(); // does not acquire thread store lock
- static void GcEnumAllocContexts (enum_alloc_context_func* fn, void* param);
-};
-
class FinalizerThread
{
public:
@@ -678,9 +452,20 @@ public:
static HANDLE GetFinalizerEvent();
};
+#ifdef FEATURE_REDHAWK
typedef uint32_t (__stdcall *BackgroundCallback)(void* pCallbackContext);
REDHAWK_PALIMPORT bool REDHAWK_PALAPI PalStartBackgroundGCThread(BackgroundCallback callback, void* pCallbackContext);
+enum PalCapability
+{
+ WriteWatchCapability = 0x00000001, // GetWriteWatch() and friends
+ LowMemoryNotificationCapability = 0x00000002, // CreateMemoryResourceNotification() and friends
+ GetCurrentProcessorNumberCapability = 0x00000004, // GetCurrentProcessorNumber()
+};
+
+REDHAWK_PALIMPORT bool REDHAWK_PALAPI PalHasCapability(PalCapability capability);
+#endif // FEATURE_REDHAWK
+
void DestroyThread(Thread * pThread);
bool IsGCSpecialThread();
@@ -692,12 +477,6 @@ inline bool dbgOnly_IsSpecialEEThread()
#define ClrFlsSetThreadType(type)
-void UnsafeInitializeCriticalSection(CRITICAL_SECTION * lpCriticalSection);
-void UnsafeEEEnterCriticalSection(CRITICAL_SECTION *lpCriticalSection);
-void UnsafeEELeaveCriticalSection(CRITICAL_SECTION * lpCriticalSection);
-void UnsafeDeleteCriticalSection(CRITICAL_SECTION *lpCriticalSection);
-
-
//
// Performance logging
//
@@ -763,29 +542,10 @@ VOID LogSpewAlways(const char *fmt, ...);
#define STRESS_LOG_RESERVE_MEM(numChunks) do {} while (0)
#define STRESS_LOG_GC_STACK
-typedef void* CLR_MUTEX_ATTRIBUTES;
-typedef void* CLR_MUTEX_COOKIE;
-
-CLR_MUTEX_COOKIE ClrCreateMutex(CLR_MUTEX_ATTRIBUTES lpMutexAttributes, bool bInitialOwner, LPCWSTR lpName);
-void ClrCloseMutex(CLR_MUTEX_COOKIE mutex);
-bool ClrReleaseMutex(CLR_MUTEX_COOKIE mutex);
-uint32_t ClrWaitForMutex(CLR_MUTEX_COOKIE mutex, uint32_t dwMilliseconds, bool bAlertable);
-
-REDHAWK_PALIMPORT HANDLE REDHAWK_PALAPI PalCreateFileW(_In_z_ LPCWSTR pFileName, uint32_t desiredAccess, uint32_t shareMode, _In_opt_ void* pSecurityAttributes, uint32_t creationDisposition, uint32_t flagsAndAttributes, HANDLE hTemplateFile);
-
#define DEFAULT_GC_PRN_LVL 3
// -----------------------------------------------------------------------------------------------------------
-enum PalCapability
-{
- WriteWatchCapability = 0x00000001, // GetWriteWatch() and friends
- LowMemoryNotificationCapability = 0x00000002, // CreateMemoryResourceNotification() and friends
- GetCurrentProcessorNumberCapability = 0x00000004, // GetCurrentProcessorNumber()
-};
-
-REDHAWK_PALIMPORT bool REDHAWK_PALAPI PalHasCapability(PalCapability capability);
-
void StompWriteBarrierEphemeral();
void StompWriteBarrierResize(bool bReqUpperBoundsCheck);
@@ -862,8 +622,8 @@ namespace GCStressPolicy
static volatile int32_t s_cGcStressDisables;
inline bool IsEnabled() { return s_cGcStressDisables == 0; }
- inline void GlobalDisable() { FastInterlockIncrement(&s_cGcStressDisables); }
- inline void GlobalEnable() { FastInterlockDecrement(&s_cGcStressDisables); }
+ inline void GlobalDisable() { Interlocked::Increment(&s_cGcStressDisables); }
+ inline void GlobalEnable() { Interlocked::Decrement(&s_cGcStressDisables); }
}
enum gcs_trigger_points
diff --git a/src/gc/env/gcenv.ee.h b/src/gc/env/gcenv.ee.h
new file mode 100644
index 0000000000..741337fbbf
--- /dev/null
+++ b/src/gc/env/gcenv.ee.h
@@ -0,0 +1,85 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// Interface between the GC and EE
+//
+
+#ifndef __GCENV_EE_H__
+#define __GCENV_EE_H__
+
+struct ScanContext;
+class CrawlFrame;
+
+typedef void promote_func(PTR_PTR_Object, ScanContext*, uint32_t);
+
+typedef void enum_alloc_context_func(alloc_context*, void*);
+
+typedef struct
+{
+ promote_func* f;
+ ScanContext* sc;
+ CrawlFrame * cf;
+} GCCONTEXT;
+
+
+class GCToEEInterface
+{
+public:
+ //
+ // Suspend/Resume callbacks
+ //
+ typedef enum
+ {
+ SUSPEND_FOR_GC = 1,
+ SUSPEND_FOR_GC_PREP = 6
+ } SUSPEND_REASON;
+
+ static void SuspendEE(SUSPEND_REASON reason);
+ static void RestartEE(bool bFinishedGC); //resume threads.
+
+ //
+ // The GC roots enumeration callback
+ //
+ static void GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc);
+
+ //
+ // Callbacks issued during GC so that the execution engine can do its own bookkeeping
+ //
+
+ // start of GC call back - single threaded
+ static void GcStartWork(int condemned, int max_gen);
+
+ //EE can perform post stack scanning action, while the
+ // user threads are still suspended
+ static void AfterGcScanRoots(int condemned, int max_gen, ScanContext* sc);
+
+ // Called before BGC starts sweeping, the heap is walkable
+ static void GcBeforeBGCSweepWork();
+
+ // post-gc callback.
+ static void GcDone(int condemned);
+
+ // Promote refcounted handle callback
+ static bool RefCountedHandleCallbacks(Object * pObject);
+
+ // Sync block cache management
+ static void SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2);
+ static void SyncBlockCacheDemote(int max_gen);
+ static void SyncBlockCachePromotionsGranted(int max_gen);
+
+ // Thread functions
+ static bool IsPreemptiveGCDisabled(Thread * pThread);
+ static void EnablePreemptiveGC(Thread * pThread);
+ static void DisablePreemptiveGC(Thread * pThread);
+
+ static void SetGCSpecial(Thread * pThread);
+ static alloc_context * GetAllocContext(Thread * pThread);
+ static bool CatchAtSafePoint(Thread * pThread);
+
+ static void GcEnumAllocContexts(enum_alloc_context_func* fn, void* param);
+
+ static void AttachCurrentThread(); // does not acquire thread store lock
+};
+
+#endif // __GCENV_EE_H__
diff --git a/src/gc/env/gcenv.interlocked.h b/src/gc/env/gcenv.interlocked.h
new file mode 100644
index 0000000000..1d6cc8424f
--- /dev/null
+++ b/src/gc/env/gcenv.interlocked.h
@@ -0,0 +1,102 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// Interlocked operations
+//
+
+#ifndef __GCENV_INTERLOCKED_H__
+#define __GCENV_INTERLOCKED_H__
+
+// Interlocked operations
+class Interlocked
+{
+public:
+
+ // Increment the value of the specified 32-bit variable as an atomic operation.
+ // Parameters:
+ // addend - variable to be incremented
+ // Return:
+ // The resulting incremented value
+ template<typename T>
+ static T Increment(T volatile *addend);
+
+ // Decrement the value of the specified 32-bit variable as an atomic operation.
+ // Parameters:
+ // addend - variable to be decremented
+ // Return:
+ // The resulting decremented value
+ template<typename T>
+ static T Decrement(T volatile *addend);
+
+ // Perform an atomic AND operation on the specified values
+ // Parameters:
+ // destination - the first operand and the destination
+ // value - second operand
+ template<typename T>
+ static void And(T volatile *destination, T value);
+
+ // Perform an atomic OR operation on the specified values
+ // Parameters:
+ // destination - the first operand and the destination
+ // value - second operand
+ template<typename T>
+ static void Or(T volatile *destination, T value);
+
+ // Set a 32-bit variable to the specified value as an atomic operation.
+ // Parameters:
+ // destination - value to be exchanged
+ // value - value to set the destination to
+ // Return:
+ // The previous value of the destination
+ template<typename T>
+ static T Exchange(T volatile *destination, T value);
+
+ // Set a pointer variable to the specified value as an atomic operation.
+ // Parameters:
+ // destination - value to be exchanged
+ // value - value to set the destination to
+ // Return:
+ // The previous value of the destination
+ template <typename T>
+ static T ExchangePointer(T volatile * destination, T value);
+
+ template <typename T>
+ static T ExchangePointer(T volatile * destination, std::nullptr_t value);
+
+ // Perform an atomic addition of two 32-bit values and return the original value of the addend.
+ // Parameters:
+ // addend - variable to be added to
+ // value - value to add
+ // Return:
+ // The previous value of the addend
+ template<typename T>
+ static T ExchangeAdd(T volatile *addend, T value);
+
+ // Performs an atomic compare-and-exchange operation on the specified values.
+ // Parameters:
+ // destination - value to be exchanged
+ // exchange - value to set the destination to
+ // comparand - value to compare the destination to before setting it to the exchange.
+ // The destination is set only if the destination is equal to the comparand.
+ // Return:
+ // The original value of the destination
+ template<typename T>
+ static T CompareExchange(T volatile *destination, T exchange, T comparand);
+
+ // Performs an atomic compare-and-exchange operation on the specified pointers.
+ // Parameters:
+ // destination - value to be exchanged
+ // exchange - value to set the destination to
+ // comparand - value to compare the destination to before setting it to the exchange.
+ // The destination is set only if the destination is equal to the comparand.
+ // Return:
+ // The original value of the destination
+ template <typename T>
+ static T CompareExchangePointer(T volatile *destination, T exchange, T comparand);
+
+ template <typename T>
+ static T CompareExchangePointer(T volatile *destination, T exchange, std::nullptr_t comparand);
+};
+
+#endif // __GCENV_INTERLOCKED_H__
diff --git a/src/gc/env/gcenv.interlocked.inl b/src/gc/env/gcenv.interlocked.inl
new file mode 100644
index 0000000000..943bc2ef98
--- /dev/null
+++ b/src/gc/env/gcenv.interlocked.inl
@@ -0,0 +1,184 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// __forceinline implementation of the Interlocked class methods
+//
+
+#ifndef __GCENV_INTERLOCKED_INL__
+#define __GCENV_INTERLOCKED_INL__
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif // _MSC_VER
+
+// Increment the value of the specified 32-bit variable as an atomic operation.
+// Parameters:
+// addend - variable to be incremented
+// Return:
+// The resulting incremented value
+template <typename T>
+__forceinline T Interlocked::Increment(T volatile *addend)
+{
+#ifdef _MSC_VER
+ static_assert(sizeof(long) == sizeof(T), "Size of long must be the same as size of T");
+ return _InterlockedIncrement((long*)addend);
+#else
+ return __sync_add_and_fetch(addend, 1);
+#endif
+}
+
+// Decrement the value of the specified 32-bit variable as an atomic operation.
+// Parameters:
+// addend - variable to be decremented
+// Return:
+// The resulting decremented value
+template <typename T>
+__forceinline T Interlocked::Decrement(T volatile *addend)
+{
+#ifdef _MSC_VER
+ static_assert(sizeof(long) == sizeof(T), "Size of long must be the same as size of T");
+ return _InterlockedDecrement((long*)addend);
+#else
+ return __sync_sub_and_fetch(addend, 1);
+#endif
+}
+
+// Set a 32-bit variable to the specified value as an atomic operation.
+// Parameters:
+// destination - value to be exchanged
+// value - value to set the destination to
+// Return:
+// The previous value of the destination
+template <typename T>
+__forceinline T Interlocked::Exchange(T volatile *destination, T value)
+{
+#ifdef _MSC_VER
+ static_assert(sizeof(long) == sizeof(T), "Size of long must be the same as size of T");
+ return _InterlockedExchange((long*)destination, value);
+#else
+ return __sync_swap(destination, value);
+#endif
+}
+
+// Performs an atomic compare-and-exchange operation on the specified values.
+// Parameters:
+// destination - value to be exchanged
+// exchange - value to set the destination to
+// comparand - value to compare the destination to before setting it to the exchange.
+// The destination is set only if the destination is equal to the comparand.
+// Return:
+// The original value of the destination
+template <typename T>
+__forceinline T Interlocked::CompareExchange(T volatile *destination, T exchange, T comparand)
+{
+#ifdef _MSC_VER
+ static_assert(sizeof(long) == sizeof(T), "Size of long must be the same as size of T");
+ return _InterlockedCompareExchange((long*)destination, exchange, comparand);
+#else
+ return __sync_val_compare_and_swap(destination, comparand, exchange);
+#endif
+}
+
+// Perform an atomic addition of two 32-bit values and return the original value of the addend.
+// Parameters:
+// addend - variable to be added to
+// value - value to add
+// Return:
+// The previous value of the addend
+template <typename T>
+__forceinline T Interlocked::ExchangeAdd(T volatile *addend, T value)
+{
+#ifdef _MSC_VER
+ static_assert(sizeof(long) == sizeof(T), "Size of long must be the same as size of T");
+ return _InterlockedExchangeAdd((long*)addend, value);
+#else
+ return __sync_fetch_and_add(addend, value);
+#endif
+}
+
+// Perform an atomic AND operation on the specified values
+// Parameters:
+// destination - the first operand and the destination
+// value - second operand
+template <typename T>
+__forceinline void Interlocked::And(T volatile *destination, T value)
+{
+#ifdef _MSC_VER
+ static_assert(sizeof(long) == sizeof(T), "Size of long must be the same as size of T");
+ _InterlockedAnd((long*)destination, value);
+#else
+ __sync_and_and_fetch(destination, value);
+#endif
+}
+
+// Perform an atomic OR operation on the specified values
+// Parameters:
+// destination - the first operand and the destination
+// value - second operand
+template <typename T>
+__forceinline void Interlocked::Or(T volatile *destination, T value)
+{
+#ifdef _MSC_VER
+ static_assert(sizeof(long) == sizeof(T), "Size of long must be the same as size of T");
+ _InterlockedOr((long*)destination, value);
+#else
+ __sync_or_and_fetch(destination, value);
+#endif
+}
+
+// Set a pointer variable to the specified value as an atomic operation.
+// Parameters:
+// destination - value to be exchanged
+// value - value to set the destination to
+// Return:
+// The previous value of the destination
+template <typename T>
+__forceinline T Interlocked::ExchangePointer(T volatile * destination, T value)
+{
+#ifdef _MSC_VER
+ return (T)(TADDR)_InterlockedExchangePointer((void* volatile *)destination, value);
+#else
+ return (T)(TADDR)__sync_swap((void* volatile *)destination, value);
+#endif
+}
+
+template <typename T>
+__forceinline T Interlocked::ExchangePointer(T volatile * destination, std::nullptr_t value)
+{
+#ifdef _MSC_VER
+ return (T)(TADDR)_InterlockedExchangePointer((void* volatile *)destination, value);
+#else
+ return (T)(TADDR)__sync_swap((void* volatile *)destination, value);
+#endif
+}
+
+// Performs an atomic compare-and-exchange operation on the specified pointers.
+// Parameters:
+// destination - value to be exchanged
+// exchange - value to set the destination to
+// comparand - value to compare the destination to before setting it to the exchange.
+// The destination is set only if the destination is equal to the comparand.
+// Return:
+// The original value of the destination
+template <typename T>
+__forceinline T Interlocked::CompareExchangePointer(T volatile *destination, T exchange, T comparand)
+{
+#ifdef _MSC_VER
+ return (T)(TADDR)_InterlockedCompareExchangePointer((void* volatile *)destination, exchange, comparand);
+#else
+ return (T)(TADDR)__sync_val_compare_and_swap((void* volatile *)destination, comparand, exchange);
+#endif
+}
+
+template <typename T>
+__forceinline T Interlocked::CompareExchangePointer(T volatile *destination, T exchange, std::nullptr_t comparand)
+{
+#ifdef _MSC_VER
+ return (T)(TADDR)_InterlockedCompareExchangePointer((void* volatile *)destination, exchange, comparand);
+#else
+ return (T)(TADDR)__sync_val_compare_and_swap((void* volatile *)destination, comparand, exchange);
+#endif
+}
+
+#endif // __GCENV_INTERLOCKED_INL__
diff --git a/src/gc/env/gcenv.object.h b/src/gc/env/gcenv.object.h
index 31dfe838dd..d3660173ce 100644
--- a/src/gc/env/gcenv.object.h
+++ b/src/gc/env/gcenv.object.h
@@ -26,8 +26,8 @@ private:
public:
uint32_t GetBits() { return m_uSyncBlockValue; }
- void SetBit(uint32_t uBit) { FastInterlockOr(&m_uSyncBlockValue, uBit); }
- void ClrBit(uint32_t uBit) { FastInterlockAnd(&m_uSyncBlockValue, ~uBit); }
+ void SetBit(uint32_t uBit) { Interlocked::Or(&m_uSyncBlockValue, uBit); }
+ void ClrBit(uint32_t uBit) { Interlocked::And(&m_uSyncBlockValue, ~uBit); }
void SetGCBit() { m_uSyncBlockValue |= BIT_SBLK_GC_RESERVE; }
void ClrGCBit() { m_uSyncBlockValue &= ~BIT_SBLK_GC_RESERVE; }
};
diff --git a/src/gc/env/gcenv.os.h b/src/gc/env/gcenv.os.h
new file mode 100644
index 0000000000..c1ae87a042
--- /dev/null
+++ b/src/gc/env/gcenv.os.h
@@ -0,0 +1,274 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// Interface between GC and the OS specific functionality
+//
+
+#ifndef __GCENV_OS_H__
+#define __GCENV_OS_H__
+
+// Critical section used by the GC
+class CLRCriticalSection
+{
+ CRITICAL_SECTION m_cs;
+
+public:
+ // Initialize the critical section
+ void Initialize();
+
+ // Destroy the critical section
+ void Destroy();
+
+ // Enter the critical section. Blocks until the section can be entered.
+ void Enter();
+
+ // Leave the critical section
+ void Leave();
+};
+
+// Flags for the GCToOSInterface::VirtualReserve method
+struct VirtualReserveFlags
+{
+ enum
+ {
+ None = 0,
+ WriteWatch = 1,
+ };
+};
+
+// Affinity of a GC thread
+struct GCThreadAffinity
+{
+ static const int None = -1;
+
+ // Processor group index, None if no group is specified
+ int Group;
+ // Processor index, None if no affinity is specified
+ int Processor;
+};
+
+// GC thread function prototype
+typedef void (*GCThreadFunction)(void* param);
+
+// Interface that the GC uses to invoke OS specific functionality
+class GCToOSInterface
+{
+public:
+
+ //
+ // Initialization and shutdown of the interface
+ //
+
+ // Initialize the interface implementation
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool Initialize();
+
+ // Shutdown the interface implementation
+ static void Shutdown();
+
+ //
+ // Virtual memory management
+ //
+
+ // Reserve virtual memory range.
+ // Parameters:
+ // address - starting virtual address, it can be NULL to let the function choose the starting address
+ // size - size of the virtual memory range
+ // alignment - requested memory alignment
+ // flags - flags to control special settings like write watching
+ // Return:
+ // Starting virtual address of the reserved range
+ static void* VirtualReserve(void *address, size_t size, size_t alignment, uint32_t flags);
+
+ // Release virtual memory range previously reserved using VirtualReserve
+ // Parameters:
+ // address - starting virtual address
+ // size - size of the virtual memory range
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool VirtualRelease(void *address, size_t size);
+
+ // Commit virtual memory range. It must be part of a range reserved using VirtualReserve.
+ // Parameters:
+ // address - starting virtual address
+ // size - size of the virtual memory range
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool VirtualCommit(void *address, size_t size);
+
+ // Decommit virtual memory range.
+ // Parameters:
+ // address - starting virtual address
+ // size - size of the virtual memory range
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool VirtualDecommit(void *address, size_t size);
+
+ // Reset virtual memory range. Indicates that data in the memory range specified by address and size is no
+ // longer of interest, but it should not be decommitted.
+ // Parameters:
+ // address - starting virtual address
+ // size - size of the virtual memory range
+ // unlock - true if the memory range should also be unlocked
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool VirtualReset(void *address, size_t size, bool unlock);
+
+ //
+ // Write watching
+ //
+
+ // Check if the OS supports write watching
+ static bool SupportsWriteWatch();
+
+ // Reset the write tracking state for the specified virtual memory range.
+ // Parameters:
+ // address - starting virtual address
+ // size - size of the virtual memory range
+ static void ResetWriteWatch(void *address, size_t size);
+
+ // Retrieve addresses of the pages that are written to in a region of virtual memory
+ // Parameters:
+ // resetState - true indicates to reset the write tracking state
+ // address - starting virtual address
+ // size - size of the virtual memory range
+ // pageAddresses - buffer that receives an array of page addresses in the memory region
+ // pageAddressesCount - on input, size of the lpAddresses array, in array elements
+ // on output, the number of page addresses that are returned in the array.
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool GetWriteWatch(bool resetState, void* address, size_t size, void** pageAddresses, uintptr_t* pageAddressesCount);
+
+ //
+ // Thread and process
+ //
+
+ // Create a new thread
+ // Parameters:
+ // function - the function to be executed by the thread
+ // param - parameters of the thread
+ // affinity - processor affinity of the thread
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool CreateThread(GCThreadFunction function, void* param, GCThreadAffinity* affinity);
+
+ // Causes the calling thread to sleep for the specified number of milliseconds
+ // Parameters:
+ // sleepMSec - time to sleep before switching to another thread
+ static void Sleep(uint32_t sleepMSec);
+
+ // Causes the calling thread to yield execution to another thread that is ready to run on the current processor.
+ // Parameters:
+ // switchCount - number of times the YieldThread was called in a loop
+ static void YieldThread(uint32_t switchCount);
+
+ // Get the number of the current processor
+ static uint32_t GetCurrentProcessorNumber();
+
+ // Check if the OS supports getting current processor number
+ static bool CanGetCurrentProcessorNumber();
+
+ // Set ideal processor for the current thread
+ // Parameters:
+ // processorIndex - index of the processor in the group
+ // affinity - ideal processor affinity for the thread
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool SetCurrentThreadIdealAffinity(GCThreadAffinity* affinity);
+
 + // Get numeric id of the current thread if possible on the
 + // current platform. It is intended for logging purposes only.
 + // Return:
 + //  Numeric id of the current thread, or 0 if it cannot be obtained
+ static uint32_t GetCurrentThreadIdForLogging();
+
+ // Get id of the current process
+ // Return:
+ // Id of the current process
+ static uint32_t GetCurrentProcessId();
+
+ //
+ // Processor topology
+ //
+
+ // Get number of logical processors
+ static uint32_t GetLogicalCpuCount();
+
+ // Get size of the largest cache on the processor die
+ // Parameters:
+ // trueSize - true to return true cache size, false to return scaled up size based on
+ // the processor architecture
+ // Return:
+ // Size of the cache
+ static size_t GetLargestOnDieCacheSize(bool trueSize = true);
+
+ // Get number of processors assigned to the current process
+ // Return:
+ // The number of processors
+ static uint32_t GetCurrentProcessCpuCount();
+
+ // Get affinity mask of the current process
+ // Parameters:
+ // processMask - affinity mask for the specified process
+ // systemMask - affinity mask for the system
+ // Return:
+ // true if it has succeeded, false if it has failed
+ // Remarks:
+ // A process affinity mask is a bit vector in which each bit represents the processors that
+ // a process is allowed to run on. A system affinity mask is a bit vector in which each bit
+ // represents the processors that are configured into a system.
+ // A process affinity mask is a subset of the system affinity mask. A process is only allowed
+ // to run on the processors configured into a system. Therefore, the process affinity mask cannot
+ // specify a 1 bit for a processor when the system affinity mask specifies a 0 bit for that processor.
+ static bool GetCurrentProcessAffinityMask(uintptr_t *processMask, uintptr_t *systemMask);
+
+ //
+ // Misc
+ //
+
+ // Get global memory status
+ // Parameters:
+ // ms - pointer to the structure that will be filled in with the memory status
+ static void GetMemoryStatus(GCMemoryStatus* ms);
+
+ // Flush write buffers of processors that are executing threads of the current process
+ static void FlushProcessWriteBuffers();
+
+ // Break into a debugger
+ static void DebugBreak();
+
+ //
+ // Time
+ //
+
+ // Get a high precision performance counter
+ // Return:
+ // The counter value
+ static int64_t QueryPerformanceCounter();
+
+ // Get a frequency of the high precision performance counter
+ // Return:
+ // The counter frequency
+ static int64_t QueryPerformanceFrequency();
+
+ // Get a time stamp with a low precision
+ // Return:
+ // Time stamp in milliseconds
+ static uint32_t GetLowPrecisionTimeStamp();
+
+ //
+ // File
+ //
+
+ // Open a file
+ // Parameters:
+ // filename - name of the file to open
+ // mode - mode to open the file in (like in the CRT fopen)
+ // Return:
+ // FILE* of the opened file
+ static FILE* OpenFile(const WCHAR* filename, const WCHAR* mode);
+};
+
+#endif // __GCENV_OS_H__
diff --git a/src/gc/env/gcenv.structs.h b/src/gc/env/gcenv.structs.h
index e3bfb17f56..7c576a5928 100644
--- a/src/gc/env/gcenv.structs.h
+++ b/src/gc/env/gcenv.structs.h
@@ -31,6 +31,62 @@ struct GCMemoryStatus
typedef void * HANDLE;
+#ifdef PLATFORM_UNIX
+
+class EEThreadId
+{
+ pthread_t m_id;
+ // Indicates whether the m_id is valid or not. pthread_t doesn't have any
+ // portable "invalid" value.
+ bool m_isValid;
+
+public:
+ bool IsCurrentThread()
+ {
+ return m_isValid && pthread_equal(m_id, pthread_self());
+ }
+
+ void SetToCurrentThread()
+ {
+ m_id = pthread_self();
+ m_isValid = true;
+ }
+
+ void Clear()
+ {
+ m_isValid = false;
+ }
+};
+
+#else // PLATFORM_UNIX
+
+#ifndef _INC_WINDOWS
+extern "C" uint32_t __stdcall GetCurrentThreadId();
+#endif
+
+class EEThreadId
+{
+ uint32_t m_uiId;
+public:
+
+ bool IsCurrentThread()
+ {
+ return m_uiId == ::GetCurrentThreadId();
+ }
+
+ void SetToCurrentThread()
+ {
+ m_uiId = ::GetCurrentThreadId();
+ }
+
+ void Clear()
+ {
+ m_uiId = 0;
+ }
+};
+
+#endif // PLATFORM_UNIX
+
#ifndef _INC_WINDOWS
typedef union _LARGE_INTEGER {
@@ -46,7 +102,13 @@ typedef union _LARGE_INTEGER {
int64_t QuadPart;
} LARGE_INTEGER, *PLARGE_INTEGER;
-#ifdef WIN32
+#ifdef PLATFORM_UNIX
+
+typedef struct _RTL_CRITICAL_SECTION {
+ pthread_mutex_t mutex;
+} CRITICAL_SECTION, RTL_CRITICAL_SECTION, *PRTL_CRITICAL_SECTION;
+
+#else
#pragma pack(push, 8)
@@ -67,12 +129,6 @@ typedef struct _RTL_CRITICAL_SECTION {
#pragma pack(pop)
-#else
-
-typedef struct _RTL_CRITICAL_SECTION {
- pthread_mutex_t mutex;
-} CRITICAL_SECTION, RTL_CRITICAL_SECTION, *PRTL_CRITICAL_SECTION;
-
#endif
#endif // _INC_WINDOWS
diff --git a/src/gc/env/gcenv.sync.h b/src/gc/env/gcenv.sync.h
index c3aea23fde..fe619cc696 100644
--- a/src/gc/env/gcenv.sync.h
+++ b/src/gc/env/gcenv.sync.h
@@ -7,19 +7,6 @@
//
// Helper classes expected by the GC
//
-class EEThreadId
-{
-public:
- EEThreadId(uint32_t uiId) : m_uiId(uiId) {}
- bool IsSameThread()
- {
- return m_uiId == GetCurrentThreadId();
- }
-
-private:
- uint32_t m_uiId;
-};
-
#define CRST_REENTRANCY 0
#define CRST_UNSAFE_SAMELEVEL 0
#define CRST_UNSAFE_ANYMODE 0
@@ -33,37 +20,37 @@ typedef int CrstType;
class CrstStatic
{
- CRITICAL_SECTION m_cs;
+ CLRCriticalSection m_cs;
#ifdef _DEBUG
- uint32_t m_holderThreadId;
+ EEThreadId m_holderThreadId;
#endif
public:
bool InitNoThrow(CrstType eType, CrstFlags eFlags = CRST_DEFAULT)
{
- UnsafeInitializeCriticalSection(&m_cs);
+ m_cs.Initialize();
return true;
}
void Destroy()
{
- UnsafeDeleteCriticalSection(&m_cs);
+ m_cs.Destroy();
}
void Enter()
{
- UnsafeEEEnterCriticalSection(&m_cs);
+ m_cs.Enter();
#ifdef _DEBUG
- m_holderThreadId = GetCurrentThreadId();
+ m_holderThreadId.SetToCurrentThread();
#endif
}
void Leave()
{
#ifdef _DEBUG
- m_holderThreadId = 0;
+ m_holderThreadId.Clear();
#endif
- UnsafeEELeaveCriticalSection(&m_cs);
+ m_cs.Leave();
}
#ifdef _DEBUG
@@ -74,7 +61,7 @@ public:
bool OwnedByCurrentThread()
{
- return GetHolderThreadId().IsSameThread();
+ return GetHolderThreadId().IsCurrentThread();
}
#endif
};
diff --git a/src/gc/env/gcenv.windows.cpp b/src/gc/env/gcenv.windows.cpp
deleted file mode 100644
index 1059e5e6a2..0000000000
--- a/src/gc/env/gcenv.windows.cpp
+++ /dev/null
@@ -1,268 +0,0 @@
-//
-// Copyright (c) Microsoft. All rights reserved.
-// Licensed under the MIT license. See LICENSE file in the project root for full license information.
-//
-
-//
-// Implementation of the GC environment
-//
-
-#include "common.h"
-
-#include "windows.h"
-
-#include "gcenv.h"
-#include "gc.h"
-
-int32_t FastInterlockIncrement(int32_t volatile *lpAddend)
-{
- return InterlockedIncrement((LONG *)lpAddend);
-}
-
-int32_t FastInterlockDecrement(int32_t volatile *lpAddend)
-{
- return InterlockedDecrement((LONG *)lpAddend);
-}
-
-int32_t FastInterlockExchange(int32_t volatile *Target, int32_t Value)
-{
- return InterlockedExchange((LONG *)Target, Value);
-}
-
-int32_t FastInterlockCompareExchange(int32_t volatile *Destination, int32_t Exchange, int32_t Comperand)
-{
- return InterlockedCompareExchange((LONG *)Destination, Exchange, Comperand);
-}
-
-int32_t FastInterlockExchangeAdd(int32_t volatile *Addend, int32_t Value)
-{
- return InterlockedExchangeAdd((LONG *)Addend, Value);
-}
-
-void * _FastInterlockExchangePointer(void * volatile *Target, void * Value)
-{
- return InterlockedExchangePointer(Target, Value);
-}
-
-void * _FastInterlockCompareExchangePointer(void * volatile *Destination, void * Exchange, void * Comperand)
-{
- return InterlockedCompareExchangePointer(Destination, Exchange, Comperand);
-}
-
-void FastInterlockOr(uint32_t volatile *p, uint32_t msk)
-{
- InterlockedOr((LONG volatile *)p, msk);
-}
-
-void FastInterlockAnd(uint32_t volatile *p, uint32_t msk)
-{
- InterlockedAnd((LONG volatile *)p, msk);
-}
-
-
-void UnsafeInitializeCriticalSection(CRITICAL_SECTION * lpCriticalSection)
-{
- InitializeCriticalSection(lpCriticalSection);
-}
-
-void UnsafeEEEnterCriticalSection(CRITICAL_SECTION *lpCriticalSection)
-{
- EnterCriticalSection(lpCriticalSection);
-}
-
-void UnsafeEELeaveCriticalSection(CRITICAL_SECTION * lpCriticalSection)
-{
- LeaveCriticalSection(lpCriticalSection);
-}
-
-void UnsafeDeleteCriticalSection(CRITICAL_SECTION *lpCriticalSection)
-{
- DeleteCriticalSection(lpCriticalSection);
-}
-
-
-void GetProcessMemoryLoad(GCMemoryStatus* pGCMemStatus)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- }
- CONTRACTL_END;
-
- MEMORYSTATUSEX memStatus;
-
- memStatus.dwLength = sizeof(MEMORYSTATUSEX);
- BOOL fRet = GlobalMemoryStatusEx(&memStatus);
- _ASSERTE (fRet);
-
- // If the machine has more RAM than virtual address limit, let us cap it.
- // Our GC can never use more than virtual address limit.
- if (memStatus.ullAvailPhys > memStatus.ullTotalVirtual)
- {
- memStatus.ullAvailPhys = memStatus.ullAvailVirtual;
- }
-
- // Convert Windows struct to abstract struct
- pGCMemStatus->dwMemoryLoad = memStatus.dwMemoryLoad ;
- pGCMemStatus->ullTotalPhys = memStatus.ullTotalPhys ;
- pGCMemStatus->ullAvailPhys = memStatus.ullAvailPhys ;
- pGCMemStatus->ullTotalPageFile = memStatus.ullTotalPageFile ;
- pGCMemStatus->ullAvailPageFile = memStatus.ullAvailPageFile ;
- pGCMemStatus->ullTotalVirtual = memStatus.ullTotalVirtual ;
- pGCMemStatus->ullAvailVirtual = memStatus.ullAvailVirtual ;
-}
-
-void CLREventStatic::CreateManualEvent(bool bInitialState)
-{
- m_hEvent = CreateEventW(NULL, TRUE, bInitialState, NULL);
- m_fInitialized = true;
-}
-
-void CLREventStatic::CreateAutoEvent(bool bInitialState)
-{
- m_hEvent = CreateEventW(NULL, FALSE, bInitialState, NULL);
- m_fInitialized = true;
-}
-
-void CLREventStatic::CreateOSManualEvent(bool bInitialState)
-{
- m_hEvent = CreateEventW(NULL, TRUE, bInitialState, NULL);
- m_fInitialized = true;
-}
-
-void CLREventStatic::CreateOSAutoEvent(bool bInitialState)
-{
- m_hEvent = CreateEventW(NULL, FALSE, bInitialState, NULL);
- m_fInitialized = true;
-}
-
-void CLREventStatic::CloseEvent()
-{
- if (m_fInitialized && m_hEvent != INVALID_HANDLE_VALUE)
- {
- CloseHandle(m_hEvent);
- m_hEvent = INVALID_HANDLE_VALUE;
- }
-}
-
-bool CLREventStatic::IsValid() const
-{
- return m_fInitialized && m_hEvent != INVALID_HANDLE_VALUE;
-}
-
-bool CLREventStatic::Set()
-{
- if (!m_fInitialized)
- return false;
- return !!SetEvent(m_hEvent);
-}
-
-bool CLREventStatic::Reset()
-{
- if (!m_fInitialized)
- return false;
- return !!ResetEvent(m_hEvent);
-}
-
-uint32_t CLREventStatic::Wait(uint32_t dwMilliseconds, bool bAlertable)
-{
- DWORD result = WAIT_FAILED;
-
- if (m_fInitialized)
- {
- bool disablePreemptive = false;
- Thread * pCurThread = GetThread();
-
- if (NULL != pCurThread)
- {
- if (pCurThread->PreemptiveGCDisabled())
- {
- pCurThread->EnablePreemptiveGC();
- disablePreemptive = true;
- }
- }
-
- result = WaitForSingleObjectEx(m_hEvent, dwMilliseconds, bAlertable);
-
- if (disablePreemptive)
- {
- pCurThread->DisablePreemptiveGC();
- }
- }
-
- return result;
-}
-
-bool __SwitchToThread(uint32_t dwSleepMSec, uint32_t dwSwitchCount)
-{
- SwitchToThread();
- return true;
-}
-
-void * ClrVirtualAlloc(
- void * lpAddress,
- size_t dwSize,
- uint32_t flAllocationType,
- uint32_t flProtect)
-{
- return VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);
-}
-
-void * ClrVirtualAllocAligned(
- void * lpAddress,
- size_t dwSize,
- uint32_t flAllocationType,
- uint32_t flProtect,
- size_t dwAlignment)
-{
- return VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);
-}
-
-bool ClrVirtualFree(
- void * lpAddress,
- size_t dwSize,
- uint32_t dwFreeType)
-{
- return !!VirtualFree(lpAddress, dwSize, dwFreeType);
-}
-
-bool
-ClrVirtualProtect(
- void * lpAddress,
- size_t dwSize,
- uint32_t flNewProtect,
- uint32_t * lpflOldProtect)
-{
- return !!VirtualProtect(lpAddress, dwSize, flNewProtect, (DWORD *)lpflOldProtect);
-}
-
-MethodTable * g_pFreeObjectMethodTable;
-
-GCSystemInfo g_SystemInfo;
-
-void InitializeSystemInfo()
-{
- SYSTEM_INFO systemInfo;
- GetSystemInfo(&systemInfo);
-
- g_SystemInfo.dwNumberOfProcessors = systemInfo.dwNumberOfProcessors;
- g_SystemInfo.dwPageSize = systemInfo.dwPageSize;
- g_SystemInfo.dwAllocationGranularity = systemInfo.dwAllocationGranularity;
-}
-
-int32_t g_TrapReturningThreads;
-
-bool g_fFinalizerRunOnShutDown;
-
-void DestroyThread(Thread * pThread)
-{
- // TODO: Implement
-}
-
-bool PalHasCapability(PalCapability capability)
-{
- // TODO: Implement for background GC
- return false;
-}
-
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index f89ecbb334..4baf08f540 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -149,6 +149,17 @@ BOOL is_induced_blocking (gc_reason reason)
(reason == reason_induced_compacting));
}
+#ifndef DACCESS_COMPILE
+int64_t qpf;
+
+size_t GetHighPrecisionTimeStamp()
+{
+ int64_t ts = GCToOSInterface::QueryPerformanceCounter();
+
+ return (size_t)(ts / (qpf / 1000));
+}
+#endif
+
#ifdef GC_STATS
// There is a current and a prior copy of the statistics. This allows us to display deltas per reporting
// interval, as well as running totals. The 'min' and 'max' values require special treatment. They are
@@ -296,15 +307,7 @@ uint32_t bgc_alloc_spin = 2;
inline
void c_write (uint32_t& place, uint32_t value)
{
- FastInterlockExchange (&(LONG&)place, value);
- //place = value;
-}
-
-// TODO - can't make it work with the syntax for Volatile<T>
-inline
-void c_write_volatile (BOOL* place, uint32_t value)
-{
- FastInterlockExchange ((LONG*)place, value);
+ Interlocked::Exchange (&place, value);
//place = value;
}
@@ -368,15 +371,15 @@ void gc_heap::add_to_history()
#endif //DACCESS_COMPILE
#endif //BACKGROUND_GC
-#ifdef TRACE_GC
+#if defined(TRACE_GC) && !defined(DACCESS_COMPILE)
BOOL gc_log_on = TRUE;
-HANDLE gc_log = INVALID_HANDLE_VALUE;
+FILE* gc_log = NULL;
size_t gc_log_file_size = 0;
size_t gc_buffer_index = 0;
size_t max_gc_buffers = 0;
-static CLR_MUTEX_COOKIE gc_log_lock = 0;
+static CLRCriticalSection gc_log_lock;
// we keep this much in a buffer and only flush when the buffer is full
#define gc_log_buffer_size (1024*1024)
@@ -385,8 +388,7 @@ size_t gc_log_buffer_offset = 0;
void log_va_msg(const char *fmt, va_list args)
{
- uint32_t status = ClrWaitForMutex(gc_log_lock, INFINITE, FALSE);
- assert (WAIT_OBJECT_0 == status);
+ gc_log_lock.Enter();
const int BUFFERSIZE = 512;
static char rgchBuffer[BUFFERSIZE];
@@ -395,7 +397,7 @@ void log_va_msg(const char *fmt, va_list args)
pBuffer[0] = '\r';
pBuffer[1] = '\n';
int buffer_start = 2;
- int pid_len = sprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, "[%5d]", GetCurrentThreadId());
+ int pid_len = sprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, "[%5d]", GCToOSInterface::GetCurrentThreadIdForLogging());
buffer_start += pid_len;
memset(&pBuffer[buffer_start], '-', BUFFERSIZE - buffer_start);
int msg_len = _vsnprintf(&pBuffer[buffer_start], BUFFERSIZE - buffer_start, fmt, args );
@@ -418,12 +420,11 @@ void log_va_msg(const char *fmt, va_list args)
gc_buffer_index++;
if (gc_buffer_index > max_gc_buffers)
{
- SetFilePointer (gc_log, 0, NULL, FILE_BEGIN);
+ fseek (gc_log, 0, SEEK_SET);
gc_buffer_index = 0;
}
- uint32_t written_to_log = 0;
- WriteFile (gc_log, gc_log_buffer, (uint32_t)gc_log_buffer_size, (DWORD*)&written_to_log, NULL);
- FlushFileBuffers (gc_log);
+ fwrite(gc_log_buffer, gc_log_buffer_size, 1, gc_log);
+ fflush(gc_log);
memset (gc_log_buffer, '*', gc_log_buffer_size);
gc_log_buffer_offset = 0;
}
@@ -431,13 +432,12 @@ void log_va_msg(const char *fmt, va_list args)
memcpy (gc_log_buffer + gc_log_buffer_offset, pBuffer, msg_len);
gc_log_buffer_offset += msg_len;
- status = ClrReleaseMutex(gc_log_lock);
- assert (status);
+ gc_log_lock.Leave();
}
void GCLog (const char *fmt, ... )
{
- if (gc_log_on && (gc_log != INVALID_HANDLE_VALUE))
+ if (gc_log_on && (gc_log != NULL))
{
va_list args;
va_start(args, fmt);
@@ -445,11 +445,12 @@ void GCLog (const char *fmt, ... )
va_end(args);
}
}
-#endif //TRACE_GC
+#endif // TRACE_GC && !DACCESS_COMPILE
+
+#if defined(GC_CONFIG_DRIVEN) && !defined(DACCESS_COMPILE)
-#ifdef GC_CONFIG_DRIVEN
BOOL gc_config_log_on = FALSE;
-HANDLE gc_config_log = INVALID_HANDLE_VALUE;
+FILE* gc_config_log = NULL;
// we keep this much in a buffer and only flush when the buffer is full
#define gc_config_log_buffer_size (1*1024) // TEMP
@@ -473,9 +474,8 @@ void log_va_msg_config(const char *fmt, va_list args)
if ((gc_config_log_buffer_offset + msg_len) > gc_config_log_buffer_size)
{
- uint32_t written_to_log = 0;
- WriteFile (gc_config_log, gc_config_log_buffer, (uint32_t)gc_config_log_buffer_offset, (DWORD*)&written_to_log, NULL);
- FlushFileBuffers (gc_config_log);
+ fwrite(gc_config_log_buffer, gc_config_log_buffer_offset, 1, gc_config_log);
+ fflush(gc_config_log);
gc_config_log_buffer_offset = 0;
}
@@ -485,14 +485,14 @@ void log_va_msg_config(const char *fmt, va_list args)
void GCLogConfig (const char *fmt, ... )
{
- if (gc_config_log_on && (gc_config_log != INVALID_HANDLE_VALUE))
+ if (gc_config_log_on && (gc_config_log != NULL))
{
va_list args;
va_start( args, fmt );
log_va_msg_config (fmt, args);
}
}
-#endif //GC_CONFIG_DRIVEN
+#endif // GC_CONFIG_DRIVEN && !DACCESS_COMPILE
#ifdef SYNCHRONIZATION_STATS
@@ -523,7 +523,7 @@ init_sync_log_stats()
gc_during_log = 0;
gc_lock_contended = 0;
- log_start_tick = GetTickCount();
+ log_start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
}
gc_count_during_log++;
#endif //SYNCHRONIZATION_STATS
@@ -534,7 +534,7 @@ process_sync_log_stats()
{
#ifdef SYNCHRONIZATION_STATS
- unsigned int log_elapsed = GetTickCount() - log_start_tick;
+ unsigned int log_elapsed = GCToOSInterface::GetLowPrecisionTimeStamp() - log_start_tick;
if (log_elapsed > log_interval)
{
@@ -700,7 +700,7 @@ public:
flavor = f;
#ifdef JOIN_STATS
- start_tick = GetTickCount();
+ start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
#endif //JOIN_STATS
return TRUE;
@@ -731,7 +731,7 @@ public:
assert (!join_struct.joined_p);
int color = join_struct.lock_color;
- if (FastInterlockDecrement((LONG*)&join_struct.join_lock) != 0)
+ if (Interlocked::Decrement(&join_struct.join_lock) != 0)
{
dprintf (JOIN_LOG, ("join%d(%d): Join() Waiting...join_lock is now %d",
flavor, join_id, (int32_t)(join_struct.join_lock)));
@@ -783,7 +783,7 @@ respin:
fire_event (gch->heap_number, time_end, type_join, join_id);
// last thread out should reset event
- if (FastInterlockDecrement((LONG*)&join_struct.join_restart) == 0)
+ if (Interlocked::Decrement(&join_struct.join_restart) == 0)
{
// the joined event must be set at this point, because the restarting must have done this
join_struct.join_restart = join_struct.n_threads - 1;
@@ -793,7 +793,7 @@ respin:
#ifdef JOIN_STATS
// parallel execution starts here
start[gch->heap_number] = GetCycleCount32();
- FastInterlockExchangeAdd((int*)&in_join_total[join_id], (start[gch->heap_number] - end[gch->heap_number])/1000);
+ Interlocked::ExchangeAdd(&in_join_total[join_id], (start[gch->heap_number] - end[gch->heap_number])/1000);
#endif //JOIN_STATS
}
else
@@ -810,7 +810,7 @@ respin:
// and keep track of the cycles spent waiting in the join
thd = gch->heap_number;
start_seq = GetCycleCount32();
- FastInterlockExchangeAdd((int*)&in_join_total[join_id], (start_seq - end[gch->heap_number])/1000);
+ Interlocked::ExchangeAdd(&in_join_total[join_id], (start_seq - end[gch->heap_number])/1000);
#endif //JOIN_STATS
}
}
@@ -831,7 +831,7 @@ respin:
return TRUE;
}
- if (FastInterlockDecrement((LONG*)&join_struct.r_join_lock) != (join_struct.n_threads - 1))
+ if (Interlocked::Decrement(&join_struct.r_join_lock) != (join_struct.n_threads - 1))
{
if (!join_struct.wait_done)
{
@@ -879,7 +879,7 @@ respin:
#ifdef JOIN_STATS
// parallel execution starts here
start[gch->heap_number] = GetCycleCount32();
- FastInterlockExchangeAdd((volatile int *)&in_join_total[join_id], (start[gch->heap_number] - end[gch->heap_number])/1000);
+ Interlocked::ExchangeAdd(&in_join_total[join_id], (start[gch->heap_number] - end[gch->heap_number])/1000);
#endif //JOIN_STATS
}
@@ -918,7 +918,7 @@ respin:
par_loss_total[join_id] += par_loss/1000;
// every 10 seconds, print a summary of the time spent in each type of join, in 1000's of clock cycles
- if (GetTickCount() - start_tick > 10*1000)
+ if (GCToOSInterface::GetLowPrecisionTimeStamp() - start_tick > 10*1000)
{
printf("**** summary *****\n");
for (int i = 0; i < 16; i++)
@@ -926,7 +926,7 @@ respin:
printf("join #%3d seq_loss = %8u par_loss = %8u in_join_total = %8u\n", i, seq_loss_total[i], par_loss_total[i], in_join_total[i]);
elapsed_total[i] = seq_loss_total[i] = par_loss_total[i] = in_join_total[i] = 0;
}
- start_tick = GetTickCount();
+ start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
}
#endif //JOIN_STATS
@@ -998,7 +998,7 @@ t_join bgc_t_join;
} \
if (!(expr)) \
{ \
- __SwitchToThread(0, CALLER_LIMITS_SPINNING); \
+ GCToOSInterface::YieldThread(0); \
} \
}
@@ -1051,7 +1051,7 @@ public:
{
if (alloc_objects [i] != (uint8_t*)0)
{
- DebugBreak();
+ GCToOSInterface::DebugBreak();
}
}
}
@@ -1060,7 +1060,7 @@ public:
{
dprintf (3, ("cm: probing %Ix", obj));
retry:
- if (FastInterlockExchange ((LONG*)&needs_checking, 1) == 0)
+ if (Interlocked::Exchange (&needs_checking, 1) == 0)
{
// If we spend too much time spending all the allocs,
// consider adding a high water mark and scan up
@@ -1099,7 +1099,7 @@ retry:
retry:
dprintf (3, ("loh alloc: probing %Ix", obj));
- if (FastInterlockExchange ((LONG*)&needs_checking, 1) == 0)
+ if (Interlocked::Exchange (&needs_checking, 1) == 0)
{
if (obj == rwp_object)
{
@@ -1117,7 +1117,7 @@ retry:
needs_checking = 0;
//if (cookie >= 4)
//{
- // DebugBreak();
+ // GCToOSInterface::DebugBreak();
//}
dprintf (3, ("loh alloc: set %Ix at %d", obj, cookie));
@@ -1273,7 +1273,7 @@ void recursive_gc_sync::begin_foreground()
try_again_top:
- FastInterlockIncrement ((LONG*)&foreground_request_count);
+ Interlocked::Increment (&foreground_request_count);
try_again_no_inc:
dprintf(2, ("Waiting sync gc point"));
@@ -1291,7 +1291,7 @@ try_again_no_inc:
if (foreground_gate)
{
- FastInterlockIncrement ((LONG*)&foreground_count);
+ Interlocked::Increment (&foreground_count);
dprintf (2, ("foreground_count: %d", (int32_t)foreground_count));
if (foreground_gate)
{
@@ -1316,11 +1316,11 @@ void recursive_gc_sync::end_foreground()
dprintf (2, ("end_foreground"));
if (gc_background_running)
{
- FastInterlockDecrement ((LONG*)&foreground_request_count);
+ Interlocked::Decrement (&foreground_request_count);
dprintf (2, ("foreground_count before decrement: %d", (int32_t)foreground_count));
- if (FastInterlockDecrement ((LONG*)&foreground_count) == 0)
+ if (Interlocked::Decrement (&foreground_count) == 0)
{
- //c_write_volatile ((BOOL*)&foreground_gate, 0);
+ //c_write ((BOOL*)&foreground_gate, 0);
// TODO - couldn't make the syntax work with Volatile<T>
foreground_gate = 0;
if (foreground_count == 0)
@@ -1350,7 +1350,7 @@ BOOL recursive_gc_sync::allow_foreground()
//background and foreground
// gc_heap::disallow_new_allocation (0);
- //__SwitchToThread(0, CALLER_LIMITS_SPINNING);
+ //GCToOSInterface::YieldThread(0);
//END of TODO
if (foreground_request_count != 0)
@@ -1362,7 +1362,7 @@ BOOL recursive_gc_sync::allow_foreground()
do
{
did_fgc = TRUE;
- //c_write_volatile ((BOOL*)&foreground_gate, 1);
+ //c_write ((BOOL*)&foreground_gate, 1);
// TODO - couldn't make the syntax work with Volatile<T>
foreground_gate = 1;
foreground_allowed.Set ();
@@ -1411,9 +1411,6 @@ __asm pop EDX
#endif //COUNT_CYCLES || JOIN_STATS || SYNCHRONIZATION_STATS
-LARGE_INTEGER qpf;
-
-
#ifdef TIME_GC
int mark_time, plan_time, sweep_time, reloc_time, compact_time;
#endif //TIME_GC
@@ -1438,13 +1435,9 @@ void reset_memory (uint8_t* o, size_t sizeo);
#ifdef WRITE_WATCH
-#define MEM_WRITE_WATCH 0x200000
+static bool virtual_alloc_write_watch = false;
-static uint32_t mem_reserve = MEM_RESERVE;
-
-#ifndef FEATURE_REDHAWK
-BOOL write_watch_capability = FALSE;
-#endif
+static bool write_watch_capability = false;
#ifndef DACCESS_COMPILE
@@ -1452,34 +1445,22 @@ BOOL write_watch_capability = FALSE;
void write_watch_api_supported()
{
-#ifndef FEATURE_REDHAWK
- // check if the OS will accept the MEM_WRITE_WATCH flag at runtime.
- // Drawbridge does not support write-watch so we still need to do the runtime detection for them.
- // Otherwise, all currently supported OSes do support write-watch.
- void* mem = VirtualAlloc (0, g_SystemInfo.dwAllocationGranularity, MEM_WRITE_WATCH|MEM_RESERVE,
- PAGE_READWRITE);
- if (mem == 0)
+ if (GCToOSInterface::SupportsWriteWatch())
{
- dprintf (2,("WriteWatch not supported"));
+ write_watch_capability = true;
+ dprintf (2, ("WriteWatch supported"));
}
else
{
- write_watch_capability = TRUE;
- dprintf (2, ("WriteWatch supported"));
- VirtualFree (mem, 0, MEM_RELEASE);
+ dprintf (2,("WriteWatch not supported"));
}
-#endif //FEATURE_REDHAWK
}
#endif //!DACCESS_COMPILE
-inline BOOL can_use_write_watch()
+inline bool can_use_write_watch()
{
-#ifdef FEATURE_REDHAWK
- return PalHasCapability(WriteWatchCapability);
-#else //FEATURE_REDHAWK
return write_watch_capability;
-#endif //FEATURE_REDHAWK
}
#else
@@ -1509,12 +1490,12 @@ void WaitLongerNoInstru (int i)
{
YieldProcessor(); // indicate to the processor that we are spining
if (i & 0x01f)
- __SwitchToThread (0, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::YieldThread (0);
else
- __SwitchToThread (5, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::Sleep (5);
}
else
- __SwitchToThread (5, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::Sleep (5);
}
// If CLR is hosted, a thread may reach here while it is in preemptive GC mode,
@@ -1544,7 +1525,7 @@ static void safe_switch_to_thread()
Thread* current_thread = GetThread();
BOOL cooperative_mode = gc_heap::enable_preemptive(current_thread);
- __SwitchToThread(0, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::YieldThread(0);
gc_heap::disable_preemptive(current_thread, cooperative_mode);
}
@@ -1558,7 +1539,7 @@ static void enter_spin_lock_noinstru (RAW_KEYWORD(volatile) int32_t* lock)
{
retry:
- if (FastInterlockExchange ((LONG*)lock, 0) >= 0)
+ if (Interlocked::Exchange (lock, 0) >= 0)
{
unsigned int i = 0;
while (VolatileLoad(lock) >= 0)
@@ -1600,7 +1581,7 @@ retry:
inline
static BOOL try_enter_spin_lock_noinstru(RAW_KEYWORD(volatile) int32_t* lock)
{
- return (FastInterlockExchange ((LONG*)&*lock, 0) < 0);
+ return (Interlocked::Exchange (&*lock, 0) < 0);
}
inline
@@ -1685,12 +1666,12 @@ void WaitLonger (int i
{
YieldProcessor(); // indicate to the processor that we are spining
if (i & 0x01f)
- __SwitchToThread (0, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::YieldThread (0);
else
- __SwitchToThread (5, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::Sleep (5);
}
else
- __SwitchToThread (5, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::Sleep (5);
}
// If CLR is hosted, a thread may reach here while it is in preemptive GC mode,
@@ -1719,7 +1700,7 @@ static void enter_spin_lock (GCSpinLock* spin_lock)
{
retry:
- if (FastInterlockExchange ((LONG*)&spin_lock->lock, 0) >= 0)
+ if (Interlocked::Exchange (&spin_lock->lock, 0) >= 0)
{
unsigned int i = 0;
while (spin_lock->lock >= 0)
@@ -1747,13 +1728,13 @@ retry:
Thread* current_thread = GetThread();
BOOL cooperative_mode = gc_heap::enable_preemptive (current_thread);
- __SwitchToThread(0, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::YieldThread(0);
gc_heap::disable_preemptive (current_thread, cooperative_mode);
}
}
else
- __SwitchToThread(0, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::YieldThread(0);
}
else
{
@@ -1770,7 +1751,7 @@ retry:
inline BOOL try_enter_spin_lock(GCSpinLock* spin_lock)
{
- return (FastInterlockExchange ((LONG*)&spin_lock->lock, 0) < 0);
+ return (Interlocked::Exchange (&spin_lock->lock, 0) < 0);
}
inline
@@ -1783,8 +1764,6 @@ static void leave_spin_lock (GCSpinLock * spin_lock)
#endif //_DEBUG
-#endif // !DACCESS_COMPILE
-
BOOL gc_heap::enable_preemptive (Thread* current_thread)
{
bool cooperative_mode = false;
@@ -1811,6 +1790,8 @@ void gc_heap::disable_preemptive (Thread* current_thread, BOOL restore_cooperati
}
}
+#endif // !DACCESS_COMPILE
+
typedef void ** PTR_PTR;
//This function clears a piece of memory
// size has to be Dword aligned
@@ -2256,8 +2237,6 @@ CLREvent gc_heap::gc_start_event;
SVAL_IMPL_NS(int, SVR, gc_heap, n_heaps);
SPTR_IMPL_NS(PTR_gc_heap, SVR, gc_heap, g_heaps);
-HANDLE* gc_heap::g_gc_threads;
-
size_t* gc_heap::g_promoted;
#ifdef MH_SC_MARK
@@ -2460,7 +2439,7 @@ BOOL gc_heap::loh_compacted_p = FALSE;
#ifdef BACKGROUND_GC
-uint32_t gc_heap::bgc_thread_id = 0;
+EEThreadId gc_heap::bgc_thread_id;
uint8_t* gc_heap::background_written_addresses [array_size+2];
@@ -2518,7 +2497,7 @@ BOOL gc_heap::bgc_thread_running;
CLREvent gc_heap::background_gc_create_event;
-CRITICAL_SECTION gc_heap::bgc_threads_timeout_cs;
+CLRCriticalSection gc_heap::bgc_threads_timeout_cs;
CLREvent gc_heap::gc_lh_block_event;
@@ -4262,7 +4241,8 @@ void* virtual_alloc (size_t size)
}
}
- void* prgmem = ClrVirtualAllocAligned (0, requested_size, mem_reserve, PAGE_READWRITE, card_size * card_word_width);
+ uint32_t flags = virtual_alloc_write_watch ? VirtualReserveFlags::WriteWatch : VirtualReserveFlags::None;
+ void* prgmem = GCToOSInterface::VirtualReserve (0, requested_size, card_size * card_word_width, flags);
void *aligned_mem = prgmem;
// We don't want (prgmem + size) to be right at the end of the address space
@@ -4276,7 +4256,7 @@ void* virtual_alloc (size_t size)
if ((end_mem == 0) || ((size_t)(MAX_PTR - end_mem) <= END_SPACE_AFTER_GC))
{
- VirtualFree (prgmem, 0, MEM_RELEASE);
+ GCToOSInterface::VirtualRelease (prgmem, requested_size);
dprintf (2, ("Virtual Alloc size %Id returned memory right against 4GB [%Ix, %Ix[ - discarding",
requested_size, (size_t)prgmem, (size_t)((uint8_t*)prgmem+requested_size)));
prgmem = 0;
@@ -4297,7 +4277,7 @@ void* virtual_alloc (size_t size)
void virtual_free (void* add, size_t size)
{
- VirtualFree (add, 0, MEM_RELEASE);
+ GCToOSInterface::VirtualRelease (add, size);
gc_heap::reserved_memory -= size;
dprintf (2, ("Virtual Free size %Id: [%Ix, %Ix[",
size, (size_t)add, (size_t)((uint8_t*)add+size)));
@@ -4757,6 +4737,7 @@ gc_heap::get_large_segment (size_t size, BOOL* did_full_compact_gc)
return res;
}
+#if 0
BOOL gc_heap::unprotect_segment (heap_segment* seg)
{
uint8_t* start = align_lower_page (heap_segment_mem (seg));
@@ -4764,16 +4745,15 @@ BOOL gc_heap::unprotect_segment (heap_segment* seg)
if (region_size != 0 )
{
- uint32_t old_protection;
dprintf (3, ("unprotecting segment %Ix:", (size_t)seg));
- BOOL status = VirtualProtect (start, region_size,
- PAGE_READWRITE, (DWORD*)&old_protection);
+ BOOL status = GCToOSInterface::VirtualUnprotect (start, region_size);
assert (status);
return status;
}
return FALSE;
}
+#endif
#ifdef MULTIPLE_HEAPS
#ifdef _X86_
@@ -4836,10 +4816,6 @@ extern "C" uint64_t __rdtsc();
#error NYI platform: get_cycle_count
#endif //_TARGET_X86_
-// The purpose of this whole class is to guess the right heap to use for a given thread.
-typedef
-uint32_t (WINAPI *GetCurrentProcessorNumber_t)(void);
-
class heap_select
{
heap_select() {}
@@ -4865,31 +4841,11 @@ class heap_select
return (int) elapsed_cycles;
}
- static
- GetCurrentProcessorNumber_t GCGetCurrentProcessorNumber;
-
- //check if the new APIs are supported.
- static
- BOOL api_supported()
- {
-#ifdef FEATURE_REDHAWK
- BOOL fSupported = PalHasCapability(GetCurrentProcessorNumberCapability);
- GCGetCurrentProcessorNumber = fSupported ? PalGetCurrentProcessorNumber : NULL;
- return fSupported;
-#elif !defined(FEATURE_PAL)
- // on all platforms we support this API exists.
- GCGetCurrentProcessorNumber = (GetCurrentProcessorNumber_t)&GetCurrentProcessorNumber;
- return TRUE;
-#else
- return FALSE;
-#endif //FEATURE_REDHAWK
- }
-
public:
static BOOL init(int n_heaps)
{
assert (sniff_buffer == NULL && n_sniff_buffers == 0);
- if (!api_supported())
+ if (!GCToOSInterface::CanGetCurrentProcessorNumber())
{
n_sniff_buffers = n_heaps*2+1;
size_t sniff_buf_size = 0;
@@ -4921,10 +4877,10 @@ public:
static void init_cpu_mapping(gc_heap *heap, int heap_number)
{
- if (GCGetCurrentProcessorNumber != 0)
+ if (GCToOSInterface::CanGetCurrentProcessorNumber())
{
- uint32_t proc_no = GCGetCurrentProcessorNumber() % gc_heap::n_heaps;
- // We can safely cast heap_number to a uint8_t 'cause GetCurrentProcessCpuCount
+ uint32_t proc_no = GCToOSInterface::GetCurrentProcessorNumber() % gc_heap::n_heaps;
+ // We can safely cast heap_number to a uint8_t 'cause GetCurrentProcessCpuCount
// only returns up to MAX_SUPPORTED_CPUS procs right now. We only ever create at most
// MAX_SUPPORTED_CPUS GC threads.
proc_no_to_heap_no[proc_no] = (uint8_t)heap_number;
@@ -4933,7 +4889,7 @@ public:
static void mark_heap(int heap_number)
{
- if (GCGetCurrentProcessorNumber != 0)
+ if (GCToOSInterface::CanGetCurrentProcessorNumber())
return;
for (unsigned sniff_index = 0; sniff_index < n_sniff_buffers; sniff_index++)
@@ -4942,10 +4898,10 @@ public:
static int select_heap(alloc_context* acontext, int hint)
{
- if (GCGetCurrentProcessorNumber)
- return proc_no_to_heap_no[GCGetCurrentProcessorNumber() % gc_heap::n_heaps];
+ if (GCToOSInterface::CanGetCurrentProcessorNumber())
+ return proc_no_to_heap_no[GCToOSInterface::GetCurrentProcessorNumber() % gc_heap::n_heaps];
- unsigned sniff_index = FastInterlockIncrement((LONG *)&cur_sniff_index);
+ unsigned sniff_index = Interlocked::Increment(&cur_sniff_index);
sniff_index %= n_sniff_buffers;
int best_heap = 0;
@@ -4983,12 +4939,9 @@ public:
return best_heap;
}
- static BOOL can_find_heap_fast()
+ static bool can_find_heap_fast()
{
- if (GCGetCurrentProcessorNumber)
- return TRUE;
- else
- return FALSE;
+ return GCToOSInterface::CanGetCurrentProcessorNumber();
}
static uint8_t find_proc_no_from_heap_no(int heap_number)
@@ -5057,7 +5010,6 @@ public:
uint8_t* heap_select::sniff_buffer;
unsigned heap_select::n_sniff_buffers;
unsigned heap_select::cur_sniff_index;
-GetCurrentProcessorNumber_t heap_select::GCGetCurrentProcessorNumber;
uint8_t heap_select::proc_no_to_heap_no[MAX_SUPPORTED_CPUS];
uint8_t heap_select::heap_no_to_proc_no[MAX_SUPPORTED_CPUS];
uint8_t heap_select::heap_no_to_numa_node[MAX_SUPPORTED_CPUS];
@@ -5107,17 +5059,15 @@ void gc_heap::destroy_thread_support ()
}
}
-void set_thread_group_affinity_for_heap(HANDLE gc_thread, int heap_number)
-{
#if !defined(FEATURE_REDHAWK) && !defined(FEATURE_CORECLR)
+void set_thread_group_affinity_for_heap(int heap_number, GCThreadAffinity* affinity)
+{
+ affinity->Group = GCThreadAffinity::None;
+ affinity->Processor = GCThreadAffinity::None;
GROUP_AFFINITY ga;
uint16_t gn, gpn;
CPUGroupInfo::GetGroupForProcessor((uint16_t)heap_number, &gn, &gpn);
- ga.Group = gn;
- ga.Reserved[0] = 0; // reserve must be filled with zero
- ga.Reserved[1] = 0; // otherwise call may fail
- ga.Reserved[2] = 0;
int bit_number = 0;
for (uintptr_t mask = 1; mask !=0; mask <<=1)
@@ -5125,8 +5075,8 @@ void set_thread_group_affinity_for_heap(HANDLE gc_thread, int heap_number)
if (bit_number == gpn)
{
dprintf(3, ("using processor group %d, mask %x%Ix for heap %d\n", gn, mask, heap_number));
- ga.Mask = mask;
- CPUGroupInfo::SetThreadGroupAffinity(gc_thread, &ga, NULL);
+ affinity->Processor = gpn;
+ affinity->Group = gn;
heap_select::set_cpu_group_for_heap(heap_number, (uint8_t)gn);
heap_select::set_group_proc_for_heap(heap_number, (uint8_t)gpn);
if (NumaNodeInfo::CanEnableGCNumaAware())
@@ -5148,15 +5098,15 @@ void set_thread_group_affinity_for_heap(HANDLE gc_thread, int heap_number)
}
bit_number++;
}
-#endif
}
-void set_thread_affinity_mask_for_heap(HANDLE gc_thread, int heap_number)
+void set_thread_affinity_mask_for_heap(int heap_number, GCThreadAffinity* affinity)
{
-#if !defined(FEATURE_REDHAWK) && !defined(FEATURE_CORECLR)
+ affinity->Group = GCThreadAffinity::None;
+ affinity->Processor = GCThreadAffinity::None;
DWORD_PTR pmask, smask;
- if (GetProcessAffinityMask(GetCurrentProcess(), &pmask, &smask))
+ if (GCToOSInterface::GetCurrentProcessAffinityMask(&pmask, &smask))
{
pmask &= smask;
int bit_number = 0;
@@ -5167,7 +5117,8 @@ void set_thread_affinity_mask_for_heap(HANDLE gc_thread, int heap_number)
{
if (bit_number == heap_number)
{
- dprintf (3, ("Using processor mask 0x%Ix for heap %d\n", mask, heap_number));
+ dprintf (3, ("Using processor %d for heap %d\n", proc_number, heap_number));
+ affinity->Processor = proc_number;
- SetThreadAffinityMask(gc_thread, mask);
heap_select::set_proc_no_for_heap(heap_number, proc_number);
if (NumaNodeInfo::CanEnableGCNumaAware())
@@ -5198,42 +5149,35 @@ void set_thread_affinity_mask_for_heap(HANDLE gc_thread, int heap_number)
proc_number++;
}
}
-#endif
}
+#endif // !FEATURE_REDHAWK && !FEATURE_CORECLR
-HANDLE gc_heap::create_gc_thread ()
+bool gc_heap::create_gc_thread ()
{
- uint32_t thread_id;
dprintf (3, ("Creating gc thread\n"));
-#ifdef FEATURE_REDHAWK
- HANDLE gc_thread = CreateThread(0, 4096, gc_thread_stub,this, CREATE_SUSPENDED, &thread_id);
-#else //FEATURE_REDHAWK
- HANDLE gc_thread = Thread::CreateUtilityThread(Thread::StackSize_Medium, (DWORD (*)(void*))gc_thread_stub, this, CREATE_SUSPENDED, (DWORD*)&thread_id);
-#endif //FEATURE_REDHAWK
-
- if (!gc_thread)
- {
- return 0;;
- }
- SetThreadPriority(gc_thread, /* THREAD_PRIORITY_ABOVE_NORMAL );*/ THREAD_PRIORITY_HIGHEST );
+ GCThreadAffinity affinity;
+ affinity.Group = GCThreadAffinity::None;
+ affinity.Processor = GCThreadAffinity::None;
+#if !defined(FEATURE_REDHAWK) && !defined(FEATURE_CORECLR)
//We are about to set affinity for GC threads, it is a good place to setup NUMA and
//CPU groups, because the process mask, processor number, group number are all
//readyly available.
if (CPUGroupInfo::CanEnableGCCPUGroups())
- set_thread_group_affinity_for_heap(gc_thread, heap_number);
+ set_thread_group_affinity_for_heap(heap_number, &affinity);
else
- set_thread_affinity_mask_for_heap(gc_thread, heap_number);
+ set_thread_affinity_mask_for_heap(heap_number, &affinity);
- ResumeThread(gc_thread);
- return gc_thread;
+#endif // !FEATURE_REDHAWK && !FEATURE_CORECLR
+
+ return GCToOSInterface::CreateThread(gc_thread_stub, this, &affinity);
}
#ifdef _MSC_VER
#pragma warning(disable:4715) //IA64 xcompiler recognizes that without the 'break;' the while(1) will never end and therefore not return a value for that code path
#endif //_MSC_VER
-uint32_t gc_heap::gc_thread_function ()
+void gc_heap::gc_thread_function ()
{
assert (gc_done_event.IsValid());
assert (gc_start_event.IsValid());
@@ -5321,7 +5265,6 @@ uint32_t gc_heap::gc_thread_function ()
set_gc_done();
}
}
- return 0;
}
#ifdef _MSC_VER
#pragma warning(default:4715) //IA64 xcompiler recognizes that without the 'break;' the while(1) will never end and therefore not return a value for that code path
@@ -5329,8 +5272,7 @@ uint32_t gc_heap::gc_thread_function ()
#endif //MULTIPLE_HEAPS
-void* virtual_alloc_commit_for_heap(void* addr, size_t size, uint32_t type,
- uint32_t prot, int h_number)
+bool virtual_alloc_commit_for_heap(void* addr, size_t size, int h_number)
{
#if defined(MULTIPLE_HEAPS) && !defined(FEATURE_REDHAWK) && !defined(FEATURE_PAL)
// Currently there is no way for us to specific the numa node to allocate on via hosting interfaces to
@@ -5341,17 +5283,17 @@ void* virtual_alloc_commit_for_heap(void* addr, size_t size, uint32_t type,
{
uint32_t numa_node = heap_select::find_numa_node_from_heap_no(h_number);
void * ret = NumaNodeInfo::VirtualAllocExNuma(GetCurrentProcess(), addr, size,
- type, prot, numa_node);
+ MEM_COMMIT, PAGE_READWRITE, numa_node);
if (ret != NULL)
- return ret;
+ return true;
}
}
#else
UNREFERENCED_PARAMETER(h_number);
#endif
- //numa aware not enabled, or call failed --> fallback to VirtualAlloc()
- return VirtualAlloc(addr, size, type, prot);
+ //numa aware not enabled, or call failed --> fallback to VirtualCommit()
+ return GCToOSInterface::VirtualCommit(addr, size);
}
#ifndef SEG_MAPPING_TABLE
@@ -5972,7 +5914,7 @@ bool gc_heap::new_allocation_allowed (int gen_number)
if ((allocation_running_amount - dd_new_allocation (dd0)) >
dd_min_gc_size (dd0))
{
- uint32_t ctime = GetTickCount();
+ uint32_t ctime = GCToOSInterface::GetLowPrecisionTimeStamp();
if ((ctime - allocation_running_time) > 1000)
{
dprintf (2, (">1s since last gen0 gc"));
@@ -6558,21 +6500,22 @@ class card_table_info
{
public:
unsigned recount;
- uint8_t* lowest_address;
- uint8_t* highest_address;
+ uint8_t* lowest_address;
+ uint8_t* highest_address;
short* brick_table;
#ifdef CARD_BUNDLE
- uint32_t* card_bundle_table;
+ uint32_t* card_bundle_table;
#endif //CARD_BUNDLE
// mark_array is always at the end of the data structure because we
// want to be able to make one commit call for everything before it.
#ifdef MARK_ARRAY
- uint32_t* mark_array;
+ uint32_t* mark_array;
#endif //MARK_ARRAY
- uint32_t* next_card_table;
+ size_t size;
+ uint32_t* next_card_table;
};
//These are accessors on untranslated cardtable
@@ -6718,7 +6661,7 @@ void gc_heap::mark_array_set_marked (uint8_t* add)
size_t index = mark_word_of (add);
uint32_t val = (1 << mark_bit_bit_of (add));
#ifdef MULTIPLE_HEAPS
- InterlockedOr ((LONG*)&(mark_array [index]), val);
+ Interlocked::Or (&(mark_array [index]), val);
#else
mark_array [index] |= val;
#endif
@@ -6829,6 +6772,12 @@ uint32_t*& card_table_next (uint32_t* c_table)
return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->next_card_table;
}
+inline
+size_t& card_table_size (uint32_t* c_table)
+{
+ return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->size;
+}
+
void own_card_table (uint32_t* c_table)
{
card_table_refcount (c_table) += 1;
@@ -6880,7 +6829,8 @@ void release_card_table (uint32_t* c_table)
void destroy_card_table (uint32_t* c_table)
{
// delete (uint32_t*)&card_table_refcount(c_table);
- VirtualFree (&card_table_refcount(c_table), 0, MEM_RELEASE);
+
+ GCToOSInterface::VirtualRelease (&card_table_refcount(c_table), card_table_size(c_table));
dprintf (2, ("Table Virtual Free : %Ix", (size_t)&card_table_refcount(c_table)));
}
@@ -6889,7 +6839,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
assert (g_lowest_address == start);
assert (g_highest_address == end);
- uint32_t mem_flags = MEM_RESERVE;
+ uint32_t virtual_reserve_flags = VirtualReserveFlags::None;
size_t bs = size_brick_of (start, end);
size_t cs = size_card_of (start, end);
@@ -6906,7 +6856,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
#ifdef CARD_BUNDLE
if (can_use_write_watch())
{
- mem_flags |= MEM_WRITE_WATCH;
+ virtual_reserve_flags |= VirtualReserveFlags::WriteWatch;
cb = size_card_bundle_of (g_lowest_address, g_highest_address);
}
#endif //CARD_BUNDLE
@@ -6922,8 +6872,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
size_t alloc_size = sizeof (uint8_t)*(bs + cs + cb + ms + st + sizeof (card_table_info));
size_t alloc_size_aligned = Align (alloc_size, g_SystemInfo.dwAllocationGranularity-1);
- uint32_t* ct = (uint32_t*)VirtualAlloc (0, alloc_size_aligned,
- mem_flags, PAGE_READWRITE);
+ uint32_t* ct = (uint32_t*)GCToOSInterface::VirtualReserve (0, alloc_size_aligned, 0, virtual_reserve_flags);
if (!ct)
return 0;
@@ -6933,11 +6882,11 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
// mark array will be committed separately (per segment).
size_t commit_size = alloc_size - ms;
-
- if (!VirtualAlloc ((uint8_t*)ct, commit_size, MEM_COMMIT, PAGE_READWRITE))
+
+ if (!GCToOSInterface::VirtualCommit ((uint8_t*)ct, commit_size))
{
- dprintf (2, ("Table commit failed: %d", GetLastError()));
- VirtualFree ((uint8_t*)ct, 0, MEM_RELEASE);
+ dprintf (2, ("Table commit failed"));
+ GCToOSInterface::VirtualRelease ((uint8_t*)ct, alloc_size_aligned);
return 0;
}
@@ -6947,6 +6896,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
card_table_lowest_address (ct) = start;
card_table_highest_address (ct) = end;
card_table_brick_table (ct) = (short*)((uint8_t*)ct + cs);
+ card_table_size (ct) = alloc_size_aligned;
card_table_next (ct) = 0;
#ifdef CARD_BUNDLE
@@ -7012,7 +6962,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
//modify the higest address so the span covered
//is twice the previous one.
GCMemoryStatus st;
- GetProcessMemoryLoad (&st);
+ GCToOSInterface::GetMemoryStatus (&st);
uint8_t* top = (uint8_t*)0 + Align ((size_t)(st.ullTotalVirtual));
// On non-Windows systems, we get only an approximate ullTotalVirtual
// value that can possibly be slightly lower than the saved_g_highest_address.
@@ -7052,7 +7002,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
(size_t)saved_g_lowest_address,
(size_t)saved_g_highest_address));
- uint32_t mem_flags = MEM_RESERVE;
+ uint32_t virtual_reserve_flags = VirtualReserveFlags::None;
uint32_t* saved_g_card_table = g_card_table;
uint32_t* ct = 0;
short* bt = 0;
@@ -7073,7 +7023,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
#ifdef CARD_BUNDLE
if (can_use_write_watch())
{
- mem_flags |= MEM_WRITE_WATCH;
+ virtual_reserve_flags |= VirtualReserveFlags::WriteWatch;
cb = size_card_bundle_of (saved_g_lowest_address, saved_g_highest_address);
}
#endif //CARD_BUNDLE
@@ -7091,7 +7041,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
dprintf (GC_TABLE_LOG, ("brick table: %Id; card table: %Id; mark array: %Id, card bundle: %Id, seg table: %Id",
bs, cs, ms, cb, st));
- uint8_t* mem = (uint8_t*)VirtualAlloc (0, alloc_size_aligned, mem_flags, PAGE_READWRITE);
+ uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (0, alloc_size_aligned, 0, virtual_reserve_flags);
if (!mem)
{
@@ -7106,7 +7056,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
// mark array will be committed separately (per segment).
size_t commit_size = alloc_size - ms;
- if (!VirtualAlloc (mem, commit_size, MEM_COMMIT, PAGE_READWRITE))
+ if (!GCToOSInterface::VirtualCommit (mem, commit_size))
{
dprintf (GC_TABLE_LOG, ("Table commit failed"));
set_fgm_result (fgm_commit_table, commit_size, loh_p);
@@ -7205,7 +7155,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
// with address that it does not cover. Write barriers access card table
// without memory barriers for performance reasons, so we need to flush
// the store buffers here.
- FlushProcessWriteBuffers();
+ GCToOSInterface::FlushProcessWriteBuffers();
g_lowest_address = saved_g_lowest_address;
VolatileStore(&g_highest_address, saved_g_highest_address);
@@ -7223,9 +7173,9 @@ fail:
}
//delete (uint32_t*)((uint8_t*)ct - sizeof(card_table_info));
- if (!VirtualFree (mem, 0, MEM_RELEASE))
+ if (!GCToOSInterface::VirtualRelease (mem, alloc_size_aligned))
{
- dprintf (GC_TABLE_LOG, ("VirtualFree failed: %d", GetLastError()));
+ dprintf (GC_TABLE_LOG, ("GCToOSInterface::VirtualRelease failed"));
assert (!"release failed");
}
}
@@ -8950,12 +8900,10 @@ int gc_heap::object_gennum_plan (uint8_t* o)
heap_segment* gc_heap::make_heap_segment (uint8_t* new_pages, size_t size, int h_number)
{
- void * res;
size_t initial_commit = SEGMENT_INITIAL_COMMIT;
//Commit the first page
- if ((res = virtual_alloc_commit_for_heap (new_pages, initial_commit,
- MEM_COMMIT, PAGE_READWRITE, h_number)) == 0)
+ if (!virtual_alloc_commit_for_heap (new_pages, initial_commit, h_number))
{
return 0;
}
@@ -9062,7 +9010,7 @@ void gc_heap::reset_heap_segment_pages (heap_segment* seg)
size_t page_start = align_on_page ((size_t)heap_segment_allocated (seg));
size_t size = (size_t)heap_segment_committed (seg) - page_start;
if (size != 0)
- VirtualAlloc ((char*)page_start, size, MEM_RESET, PAGE_READWRITE);
+ GCToOSInterface::VirtualReset((void*)page_start, size, false /* unlock */);
#endif //!FEATURE_PAL
}
@@ -9077,7 +9025,7 @@ void gc_heap::decommit_heap_segment_pages (heap_segment* seg,
page_start += max(extra_space, 32*OS_PAGE_SIZE);
size -= max (extra_space, 32*OS_PAGE_SIZE);
- VirtualFree (page_start, size, MEM_DECOMMIT);
+ GCToOSInterface::VirtualDecommit (page_start, size);
dprintf (3, ("Decommitting heap segment [%Ix, %Ix[(%d)",
(size_t)page_start,
(size_t)(page_start + size),
@@ -9102,7 +9050,7 @@ void gc_heap::decommit_heap_segment (heap_segment* seg)
#endif //BACKGROUND_GC
size_t size = heap_segment_committed (seg) - page_start;
- VirtualFree (page_start, size, MEM_DECOMMIT);
+ GCToOSInterface::VirtualDecommit (page_start, size);
//re-init the segment object
heap_segment_committed (seg) = page_start;
@@ -9239,7 +9187,6 @@ void gc_heap::update_card_table_bundle()
uint8_t* base_address = (uint8_t*)(&card_table[card_word (card_of (lowest_address))]);
uint8_t* saved_base_address = base_address;
uintptr_t bcount = array_size;
- uint32_t granularity = 0;
uint8_t* high_address = (uint8_t*)(&card_table[card_word (card_of (highest_address))]);
size_t saved_region_size = align_on_page (high_address) - saved_base_address;
@@ -9247,16 +9194,15 @@ void gc_heap::update_card_table_bundle()
{
size_t region_size = align_on_page (high_address) - base_address;
dprintf (3,("Probing card table pages [%Ix, %Ix[", (size_t)base_address, (size_t)base_address+region_size));
- uint32_t status = GetWriteWatch (0, base_address, region_size,
- (void**)g_addresses,
- (ULONG_PTR*)&bcount, (DWORD*)&granularity);
- assert (status == 0);
- assert (granularity == OS_PAGE_SIZE);
+ bool success = GCToOSInterface::GetWriteWatch (false /* resetState */ , base_address, region_size,
+ (void**)g_addresses,
+ &bcount);
+ assert (success);
dprintf (3,("Found %d pages written", bcount));
for (unsigned i = 0; i < bcount; i++)
{
size_t bcardw = (uint32_t*)(max(g_addresses[i],base_address)) - &card_table[0];
- size_t ecardw = (uint32_t*)(min(g_addresses[i]+granularity, high_address)) - &card_table[0];
+ size_t ecardw = (uint32_t*)(min(g_addresses[i]+OS_PAGE_SIZE, high_address)) - &card_table[0];
assert (bcardw >= card_word (card_of (g_lowest_address)));
card_bundles_set (cardw_card_bundle (bcardw),
@@ -9283,7 +9229,7 @@ void gc_heap::update_card_table_bundle()
}
} while ((bcount >= array_size) && (base_address < high_address));
- ResetWriteWatch (saved_base_address, saved_region_size);
+ GCToOSInterface::ResetWriteWatch (saved_base_address, saved_region_size);
#ifdef _DEBUG
@@ -9328,7 +9274,7 @@ void gc_heap::switch_one_quantum()
{
Thread* current_thread = GetThread();
enable_preemptive (current_thread);
- __SwitchToThread (1, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::Sleep (1);
disable_preemptive (current_thread, TRUE);
}
@@ -9344,7 +9290,7 @@ void gc_heap::reset_ww_by_chunk (uint8_t* start_address, size_t total_reset_size
next_reset_size = ((remaining_reset_size >= ww_reset_quantum) ? ww_reset_quantum : remaining_reset_size);
if (next_reset_size)
{
- ResetWriteWatch (start_address, next_reset_size);
+ GCToOSInterface::ResetWriteWatch (start_address, next_reset_size);
reset_size += next_reset_size;
switch_one_quantum();
@@ -9354,7 +9300,7 @@ void gc_heap::reset_ww_by_chunk (uint8_t* start_address, size_t total_reset_size
assert (reset_size == total_reset_size);
}
-// This does a __SwitchToThread for every reset ww_reset_quantum bytes of reset
+// This does a Sleep(1) for every reset ww_reset_quantum bytes of reset
// we do concurrently.
void gc_heap::switch_on_reset (BOOL concurrent_p, size_t* current_total_reset_size, size_t last_reset_size)
{
@@ -9413,7 +9359,7 @@ void gc_heap::reset_write_watch (BOOL concurrent_p)
#endif //TIME_WRITE_WATCH
dprintf (3, ("h%d: soh ww: [%Ix(%Id)", heap_number, (size_t)base_address, region_size));
//reset_ww_by_chunk (base_address, region_size);
- ResetWriteWatch (base_address, region_size);
+ GCToOSInterface::ResetWriteWatch (base_address, region_size);
#ifdef TIME_WRITE_WATCH
unsigned int time_stop = GetCycleCount32();
@@ -9456,7 +9402,7 @@ void gc_heap::reset_write_watch (BOOL concurrent_p)
#endif //TIME_WRITE_WATCH
dprintf (3, ("h%d: loh ww: [%Ix(%Id)", heap_number, (size_t)base_address, region_size));
//reset_ww_by_chunk (base_address, region_size);
- ResetWriteWatch (base_address, region_size);
+ GCToOSInterface::ResetWriteWatch (base_address, region_size);
#ifdef TIME_WRITE_WATCH
unsigned int time_stop = GetCycleCount32();
@@ -9560,45 +9506,28 @@ void gc_heap::adjust_ephemeral_limits ()
}
#if defined(TRACE_GC) || defined(GC_CONFIG_DRIVEN)
-HANDLE CreateLogFile(const CLRConfig::ConfigStringInfo & info, BOOL is_config)
+FILE* CreateLogFile(const CLRConfig::ConfigStringInfo & info, BOOL is_config)
{
+ FILE* logFile;
LPWSTR temp_logfile_name = NULL;
CLRConfig::GetConfigValue(info, &temp_logfile_name);
-#ifdef FEATURE_REDHAWK
- UNREFERENCED_PARAMETER(is_config);
- return PalCreateFileW(
- temp_logfile_name,
- GENERIC_WRITE,
- FILE_SHARE_READ,
- NULL,
- CREATE_ALWAYS,
- FILE_ATTRIBUTE_NORMAL,
- NULL);
-#else // FEATURE_REDHAWK
- char logfile_name[MAX_PATH+1];
+ WCHAR logfile_name[MAX_LONGPATH+1] = {0};
if (temp_logfile_name != 0)
{
- int ret;
- ret = WszWideCharToMultiByte(CP_ACP, 0, temp_logfile_name, -1, logfile_name, sizeof(logfile_name)-1, NULL, NULL);
- _ASSERTE(ret != 0);
- delete temp_logfile_name;
- }
-
- char szPid[20];
- sprintf_s(szPid, _countof(szPid), ".%d", GetCurrentProcessId());
- strcat_s(logfile_name, _countof(logfile_name), szPid);
- strcat_s(logfile_name, _countof(logfile_name), (is_config ? ".config.log" : ".log"));
-
- return CreateFileA(
- logfile_name,
- GENERIC_WRITE,
- FILE_SHARE_READ,
- NULL,
- CREATE_ALWAYS,
- FILE_ATTRIBUTE_NORMAL,
- NULL);
-#endif //FEATURE_REDHAWK
+ wcscpy(logfile_name, temp_logfile_name);
+ }
+
+ size_t logfile_name_len = wcslen(logfile_name);
+ WCHAR* szPid = logfile_name + logfile_name_len;
+ size_t remaining_space = MAX_LONGPATH + 1 - logfile_name_len;
+ swprintf_s(szPid, remaining_space, W(".%d%s"), GCToOSInterface::GetCurrentProcessId(), (is_config ? W(".config.log") : W(".log")));
+
+ logFile = GCToOSInterface::OpenFile(logfile_name, W("wb"));
+
+ delete temp_logfile_name;
+
+ return logFile;
}
#endif //TRACE_GC || GC_CONFIG_DRIVEN
@@ -9615,7 +9544,7 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
{
gc_log = CreateLogFile(CLRConfig::UNSUPPORTED_GCLogFile, FALSE);
- if (gc_log == INVALID_HANDLE_VALUE)
+ if (gc_log == NULL)
return E_FAIL;
// GCLogFileSize in MBs.
@@ -9623,15 +9552,15 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
if (gc_log_file_size > 500)
{
- CloseHandle (gc_log);
+ fclose (gc_log);
return E_FAIL;
}
- gc_log_lock = ClrCreateMutex(NULL, FALSE, NULL);
+ gc_log_lock.Initialize();
gc_log_buffer = new (nothrow) uint8_t [gc_log_buffer_size];
if (!gc_log_buffer)
{
- CloseHandle(gc_log);
+ fclose(gc_log);
return E_FAIL;
}
@@ -9647,13 +9576,13 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
{
gc_config_log = CreateLogFile(CLRConfig::UNSUPPORTED_GCConfigLogFile, TRUE);
- if (gc_config_log == INVALID_HANDLE_VALUE)
+ if (gc_config_log == NULL)
return E_FAIL;
gc_config_log_buffer = new (nothrow) uint8_t [gc_config_log_buffer_size];
if (!gc_config_log_buffer)
{
- CloseHandle(gc_config_log);
+ fclose(gc_config_log);
return E_FAIL;
}
@@ -9687,7 +9616,7 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
GCStatistics::logFileName = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_GCMixLog);
if (GCStatistics::logFileName != NULL)
{
- GCStatistics::logFile = _wfopen((LPCWSTR)GCStatistics::logFileName, W("a"));
+ GCStatistics::logFile = GCToOSInterface::OpenFile((LPCWSTR)GCStatistics::logFileName, W("a"));
}
#endif // GC_STATS
@@ -9700,7 +9629,7 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
if (can_use_write_watch () && g_pConfig->GetGCconcurrent()!=0)
{
gc_can_use_concurrent = true;
- mem_reserve = MEM_WRITE_WATCH | MEM_RESERVE;
+ virtual_alloc_write_watch = true;
}
else
{
@@ -9777,10 +9706,6 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
return E_OUTOFMEMORY;
#endif //MH_SC_MARK
- g_gc_threads = new (nothrow) HANDLE [number_of_heaps];
- if (!g_gc_threads)
- return E_OUTOFMEMORY;
-
if (!create_thread_support (number_of_heaps))
return E_OUTOFMEMORY;
@@ -10053,7 +9978,7 @@ gc_heap::enter_gc_done_event_lock()
uint32_t dwSwitchCount = 0;
retry:
- if (FastInterlockExchange ((LONG*)&gc_done_event_lock, 0) >= 0)
+ if (Interlocked::Exchange (&gc_done_event_lock, 0) >= 0)
{
while (gc_done_event_lock >= 0)
{
@@ -10067,10 +9992,10 @@ retry:
YieldProcessor(); // indicate to the processor that we are spining
}
if (gc_done_event_lock >= 0)
- __SwitchToThread(0, ++dwSwitchCount);
+ GCToOSInterface::YieldThread(++dwSwitchCount);
}
else
- __SwitchToThread(0, ++dwSwitchCount);
+ GCToOSInterface::YieldThread(++dwSwitchCount);
}
goto retry;
}
@@ -10105,7 +10030,7 @@ void gc_heap::add_saved_spinlock_info (
current->enter_state = enter_state;
current->take_state = take_state;
- current->thread_id = GetCurrentThreadId();
+ current->thread_id.SetToCurrentThread();
spinlock_info_index++;
@@ -10388,8 +10313,7 @@ gc_heap::init_gc_heap (int h_number)
#ifdef MULTIPLE_HEAPS
//register the heap in the heaps array
- g_gc_threads [heap_number] = create_gc_thread ();
- if (!g_gc_threads [heap_number])
+ if (!create_gc_thread ())
return 0;
g_heaps [heap_number] = this;
@@ -10439,7 +10363,7 @@ gc_heap::init_gc_heap (int h_number)
#endif // MULTIPLE_HEAPS
#ifdef BACKGROUND_GC
- bgc_thread_id = 0;
+ bgc_thread_id.Clear();
if (!create_bgc_thread_support())
{
@@ -10462,7 +10386,7 @@ gc_heap::init_gc_heap (int h_number)
bgc_thread_running = 0;
bgc_thread = 0;
- InitializeCriticalSection (&bgc_threads_timeout_cs);
+ bgc_threads_timeout_cs.Initialize();
expanded_in_fgc = 0;
current_bgc_state = bgc_not_in_process;
background_soh_alloc_count = 0;
@@ -10570,17 +10494,14 @@ void gc_heap::shutdown_gc()
#ifdef MULTIPLE_HEAPS
//delete the heaps array
delete g_heaps;
- for (int i = 0; i < n_heaps; i++)
- {
- CloseHandle (g_gc_threads [i]);
- }
- delete g_gc_threads;
destroy_thread_support();
n_heaps = 0;
#endif //MULTIPLE_HEAPS
//destroy seg_manager
destroy_initial_memory();
+
+ GCToOSInterface::Shutdown();
}
inline
@@ -10667,8 +10588,7 @@ BOOL gc_heap::grow_heap_segment (heap_segment* seg, uint8_t* high_address)
dprintf(3, ("Growing segment allocation %Ix %Ix", (size_t)heap_segment_committed(seg),c_size));
- if (!virtual_alloc_commit_for_heap(heap_segment_committed (seg), c_size,
- MEM_COMMIT, PAGE_READWRITE, heap_number))
+ if (!virtual_alloc_commit_for_heap(heap_segment_committed (seg), c_size, heap_number))
{
dprintf(3, ("Cannot grow heap segment"));
return FALSE;
@@ -11319,7 +11239,7 @@ void gc_heap::handle_oom (int heap_num, oom_reason reason, size_t alloc_size,
// could have allocated on the same heap when OOM happened.
if (g_pConfig->IsGCBreakOnOOMEnabled())
{
- DebugBreak();
+ GCToOSInterface::DebugBreak();
}
}
@@ -11999,7 +11919,7 @@ void gc_heap::wait_for_bgc_high_memory (alloc_wait_reason awr)
{
GCMemoryStatus ms;
memset (&ms, 0, sizeof(ms));
- GetProcessMemoryLoad(&ms);
+ GCToOSInterface::GetMemoryStatus(&ms);
if (ms.dwMemoryLoad >= 95)
{
dprintf (GTC_LOG, ("high mem - wait for BGC to finish, wait reason: %d", awr));
@@ -12089,7 +12009,7 @@ BOOL gc_heap::allocate_small (int gen_number,
dprintf (SPINLOCK_LOG, ("[%d]spin Lmsl", heap_number));
leave_spin_lock (&more_space_lock);
BOOL cooperative_mode = enable_preemptive (current_thread);
- __SwitchToThread (bgc_alloc_spin, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::Sleep (bgc_alloc_spin);
disable_preemptive (current_thread, cooperative_mode);
enter_spin_lock (&more_space_lock);
add_saved_spinlock_info (me_acquire, mt_alloc_small);
@@ -12097,7 +12017,7 @@ BOOL gc_heap::allocate_small (int gen_number,
}
else
{
- //__SwitchToThread (0, CALLER_LIMITS_SPINNING);
+ //GCToOSInterface::YieldThread (0);
}
}
#endif //BACKGROUND_GC && !MULTIPLE_HEAPS
@@ -12577,7 +12497,7 @@ exit:
}
#ifdef RECORD_LOH_STATE
-void gc_heap::add_saved_loh_state (allocation_state loh_state_to_save, uint32_t thread_id)
+void gc_heap::add_saved_loh_state (allocation_state loh_state_to_save, EEThreadId thread_id)
{
// When the state is can_allocate we already have released the more
// space lock. So we are not logging states here since this code
@@ -12618,7 +12538,7 @@ BOOL gc_heap::allocate_large (int gen_number,
dprintf (SPINLOCK_LOG, ("[%d]spin Lmsl loh", heap_number));
leave_spin_lock (&more_space_lock);
BOOL cooperative_mode = enable_preemptive (current_thread);
- __SwitchToThread (bgc_alloc_spin_loh, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::Sleep (bgc_alloc_spin_loh);
disable_preemptive (current_thread, cooperative_mode);
enter_spin_lock (&more_space_lock);
add_saved_spinlock_info (me_acquire, mt_alloc_large);
@@ -12642,7 +12562,8 @@ BOOL gc_heap::allocate_large (int gen_number,
// That's why there are local variable for each state
allocation_state loh_alloc_state = a_state_start;
#ifdef RECORD_LOH_STATE
- uint32_t current_thread_id = GetCurrentThreadId();
+ EEThreadId current_thread_id;
+ current_thread_id.SetToCurrentThread();
#endif //RECORD_LOH_STATE
// If we can get a new seg it means allocation will succeed.
@@ -13126,41 +13047,29 @@ try_again:
{
uint8_t group_proc_no = heap_select::find_group_proc_from_heap_no(max_hp->heap_number);
-#if !defined(FEATURE_CORESYSTEM)
- SetThreadIdealProcessor(GetCurrentThread(), (uint32_t)group_proc_no);
-#else
- PROCESSOR_NUMBER proc;
- proc.Group = org_gn;
- proc.Number = group_proc_no;
- proc.Reserved = 0;
-
- if(!SetThreadIdealProcessorEx(GetCurrentThread(), &proc, NULL))
+ GCThreadAffinity affinity;
+ affinity.Processor = group_proc_no;
+ affinity.Group = org_gn;
+ if (!GCToOSInterface::SetCurrentThreadIdealAffinity(&affinity))
{
dprintf (3, ("Failed to set the ideal processor and group for heap %d.",
org_hp->heap_number));
}
-#endif
}
}
else
{
uint8_t proc_no = heap_select::find_proc_no_from_heap_no(max_hp->heap_number);
-#if !defined(FEATURE_CORESYSTEM)
- SetThreadIdealProcessor(GetCurrentThread(), (uint32_t)proc_no);
-#else
- PROCESSOR_NUMBER proc;
- if(GetThreadIdealProcessorEx(GetCurrentThread(), &proc))
+ GCThreadAffinity affinity;
+ affinity.Processor = proc_no;
+ affinity.Group = GCThreadAffinity::None;
+
+ if (!GCToOSInterface::SetCurrentThreadIdealAffinity(&affinity))
{
- proc.Number = proc_no;
- BOOL result;
- if(!SetThreadIdealProcessorEx(GetCurrentThread(), &proc, NULL))
- {
- dprintf (3, ("Failed to set the ideal processor for heap %d.",
- org_hp->heap_number));
- }
+ dprintf (3, ("Failed to set the ideal processor for heap %d.",
+ org_hp->heap_number));
}
-#endif
}
#endif // !FEATURE_REDHAWK && !FEATURE_PAL
dprintf (3, ("Switching context %p (home heap %d) ",
@@ -14532,11 +14441,7 @@ int gc_heap::generation_to_condemn (int n_initial,
(local_settings->pause_mode == pause_sustained_low_latency))
{
dynamic_data* dd0 = dynamic_data_of (0);
- LARGE_INTEGER ts;
- if (!QueryPerformanceCounter(&ts))
- FATAL_GC_ERROR();
-
- size_t now = (size_t) (ts.QuadPart/(qpf.QuadPart/1000));
+ size_t now = GetHighPrecisionTimeStamp();
temp_gen = n;
for (i = (temp_gen+1); i <= n_time_max; i++)
{
@@ -14655,7 +14560,7 @@ int gc_heap::generation_to_condemn (int n_initial,
if (check_memory)
{
//find out if we are short on memory
- GetProcessMemoryLoad(&ms);
+ GCToOSInterface::GetMemoryStatus(&ms);
if (heap_number == 0)
{
dprintf (GTC_LOG, ("ml: %d", ms.dwMemoryLoad));
@@ -14955,7 +14860,7 @@ size_t gc_heap::min_reclaim_fragmentation_threshold (uint32_t num_heaps)
size_t min_mem_based_on_available =
(500 - (settings.entry_memory_load - high_memory_load_th) * 40) * 1024 * 1024 / num_heaps;
size_t ten_percent_size = (size_t)((float)generation_size (max_generation) * 0.10);
- ULONGLONG three_percent_mem = mem_one_percent * 3 / num_heaps;
+ uint64_t three_percent_mem = mem_one_percent * 3 / num_heaps;
#ifdef SIMPLE_DPRINTF
dprintf (GTC_LOG, ("min av: %Id, 10%% gen2: %Id, 3%% mem: %I64d",
@@ -15036,10 +14941,7 @@ void fire_overflow_event (uint8_t* overflow_min,
void gc_heap::concurrent_print_time_delta (const char* msg)
{
#ifdef TRACE_GC
- LARGE_INTEGER ts;
- QueryPerformanceCounter (&ts);
-
- size_t current_time = (size_t) (ts.QuadPart/(qpf.QuadPart/1000));
+ size_t current_time = GetHighPrecisionTimeStamp();
size_t elapsed_time = current_time - time_bgc_last;
time_bgc_last = current_time;
@@ -15113,7 +15015,7 @@ BOOL gc_heap::should_proceed_with_gc()
void gc_heap::gc1()
{
#ifdef BACKGROUND_GC
- assert (settings.concurrent == (uint32_t)(GetCurrentThreadId() == bgc_thread_id));
+ assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread()));
#endif //BACKGROUND_GC
#ifdef TIME_GC
@@ -15151,10 +15053,7 @@ void gc_heap::gc1()
if (settings.concurrent)
{
#ifdef TRACE_GC
- LARGE_INTEGER ts;
- QueryPerformanceCounter (&ts);
-
- time_bgc_last = (size_t)(ts.QuadPart/(qpf.QuadPart/1000));
+ time_bgc_last = GetHighPrecisionTimeStamp();
#endif //TRACE_GC
fire_bgc_event (BGCBegin);
@@ -15183,17 +15082,7 @@ void gc_heap::gc1()
}
}
- LARGE_INTEGER ts;
- if (!QueryPerformanceCounter(&ts))
- FATAL_GC_ERROR();
-
- size_t end_gc_time = (size_t) (ts.QuadPart/(qpf.QuadPart/1000));
-
-#ifdef GC_CONFIG_DRIVEN
- if (heap_number == 0)
- time_since_init = end_gc_time - time_init;
-#endif //GC_CONFIG_DRIVEN
-
+ size_t end_gc_time = GetHighPrecisionTimeStamp();
// printf ("generation: %d, elapsed time: %Id\n", n, end_gc_time - dd_time_clock (dynamic_data_of (0)));
//adjust the allocation size from the pinned quantities.
@@ -15393,7 +15282,7 @@ void gc_heap::gc1()
#endif //TIME_GC
#ifdef BACKGROUND_GC
- assert (settings.concurrent == (uint32_t)(GetCurrentThreadId() == bgc_thread_id));
+ assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread()));
#endif //BACKGROUND_GC
#if defined(VERIFY_HEAP) || (defined (FEATURE_EVENT_TRACE) && defined(BACKGROUND_GC))
@@ -15443,7 +15332,7 @@ void gc_heap::gc1()
#endif //BACKGROUND_GC
#ifdef BACKGROUND_GC
- assert (settings.concurrent == (uint32_t)(GetCurrentThreadId() == bgc_thread_id));
+ assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread()));
#ifdef FEATURE_EVENT_TRACE
if (ETW::GCLog::ShouldTrackMovementForEtw() && settings.concurrent)
{
@@ -15547,7 +15436,7 @@ void gc_heap::gc1()
size_t min_gc_size = dd_min_gc_size(dd);
// if min GC size larger than true on die cache, then don't bother
// limiting the desired size
- if ((min_gc_size <= GetLargestOnDieCacheSize(TRUE) / GetLogicalCpuCount()) &&
+ if ((min_gc_size <= GCToOSInterface::GetLargestOnDieCacheSize(TRUE) / GCToOSInterface::GetLogicalCpuCount()) &&
desired_per_heap <= 2*min_gc_size)
{
desired_per_heap = min_gc_size;
@@ -16063,11 +15952,7 @@ void gc_heap::update_collection_counts ()
dynamic_data* dd0 = dynamic_data_of (0);
dd_gc_clock (dd0) += 1;
- LARGE_INTEGER ts;
- if (!QueryPerformanceCounter (&ts))
- FATAL_GC_ERROR();
-
- size_t now = (size_t)(ts.QuadPart/(qpf.QuadPart/1000));
+ size_t now = GetHighPrecisionTimeStamp();
for (int i = 0; i <= settings.condemned_generation;i++)
{
@@ -16719,7 +16604,7 @@ int gc_heap::garbage_collect (int n)
gc1();
}
#ifndef MULTIPLE_HEAPS
- allocation_running_time = (size_t)GetTickCount();
+ allocation_running_time = (size_t)GCToOSInterface::GetLowPrecisionTimeStamp();
allocation_running_amount = dd_new_allocation (dynamic_data_of (0));
fgn_last_alloc = dd_new_allocation (dynamic_data_of (0));
#endif //MULTIPLE_HEAPS
@@ -17721,7 +17606,7 @@ gc_heap::mark_steal()
#ifdef SNOOP_STATS
dprintf (SNOOP_LOG, ("(GC%d)heap%d: start snooping %d", settings.gc_index, heap_number, (heap_number+1)%n_heaps));
- uint32_t begin_tick = GetTickCount();
+ uint32_t begin_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
#endif //SNOOP_STATS
int idle_loop_count = 0;
@@ -17822,8 +17707,8 @@ gc_heap::mark_steal()
#ifdef SNOOP_STATS
dprintf (SNOOP_LOG, ("heap%d: marking %Ix from %d [%d] tl:%dms",
heap_number, (size_t)o, (heap_number+1)%n_heaps, level,
- (GetTickCount()-begin_tick)));
- uint32_t start_tick = GetTickCount();
+ (GCToOSInterface::GetLowPrecisionTimeStamp()-begin_tick)));
+ uint32_t start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
#endif //SNOOP_STATS
mark_object_simple1 (o, start, heap_number);
@@ -17831,7 +17716,7 @@ gc_heap::mark_steal()
#ifdef SNOOP_STATS
dprintf (SNOOP_LOG, ("heap%d: done marking %Ix from %d [%d] %dms tl:%dms",
heap_number, (size_t)o, (heap_number+1)%n_heaps, level,
- (GetTickCount()-start_tick),(GetTickCount()-begin_tick)));
+ (GCToOSInterface::GetLowPrecisionTimeStamp()-start_tick),(GCToOSInterface::GetLowPrecisionTimeStamp()-begin_tick)));
#endif //SNOOP_STATS
mark_stack_busy() = 0;
@@ -17872,7 +17757,7 @@ gc_heap::mark_steal()
#ifdef SNOOP_STATS
snoop_stat.switch_to_thread_count++;
#endif //SNOOP_STATS
- __SwitchToThread(1,0);
+ GCToOSInterface::Sleep(1);
}
int free_count = 1;
#ifdef SNOOP_STATS
@@ -17985,7 +17870,7 @@ gc_heap::ha_mark_object_simple (uint8_t** po THREAD_NUMBER_DCL)
size_t new_size = 2*internal_root_array_length;
GCMemoryStatus statex;
- GetProcessMemoryLoad(&statex);
+ GCToOSInterface::GetMemoryStatus(&statex);
if (new_size > (size_t)(statex.ullAvailPhys / 10))
{
heap_analyze_success = FALSE;
@@ -18510,11 +18395,10 @@ void gc_heap::fix_card_table ()
PREFIX_ASSUME(seg != NULL);
- uint32_t granularity;
#ifdef BACKGROUND_GC
- uint32_t mode = settings.concurrent ? 1 : 0;
+ bool reset_watch_state = !!settings.concurrent;
#else //BACKGROUND_GC
- uint32_t mode = 0;
+ bool reset_watch_state = false;
#endif //BACKGROUND_GC
BOOL small_object_segments = TRUE;
while (1)
@@ -18554,10 +18438,10 @@ void gc_heap::fix_card_table ()
#ifdef TIME_WRITE_WATCH
unsigned int time_start = GetCycleCount32();
#endif //TIME_WRITE_WATCH
- uint32_t status = GetWriteWatch (mode, base_address, region_size,
- (void**)g_addresses,
- (ULONG_PTR*)&bcount, (DWORD*)&granularity);
- assert (status == 0);
+ bool success = GCToOSInterface::GetWriteWatch(reset_watch_state, base_address, region_size,
+ (void**)g_addresses,
+ &bcount);
+ assert (success);
#ifdef TIME_WRITE_WATCH
unsigned int time_stop = GetCycleCount32();
@@ -18567,7 +18451,6 @@ void gc_heap::fix_card_table ()
#endif //TIME_WRITE_WATCH
assert( ((card_size * card_word_width)&(OS_PAGE_SIZE-1))==0 );
- assert (granularity == OS_PAGE_SIZE);
//printf ("%Ix written into\n", bcount);
dprintf (3,("Found %Id pages written", bcount));
for (unsigned i = 0; i < bcount; i++)
@@ -18595,7 +18478,7 @@ void gc_heap::fix_card_table ()
align_on_page (generation_allocation_start (generation_of (0)));
size_t region_size =
heap_segment_allocated (ephemeral_heap_segment) - base_address;
- ResetWriteWatch (base_address, region_size);
+ GCToOSInterface::ResetWriteWatch (base_address, region_size);
}
#endif //BACKGROUND_GC
#endif //WRITE_WATCH
@@ -21137,7 +21020,7 @@ void gc_heap::store_plug_gap_info (uint8_t* plug_start,
//if (last_plug_len == Align (min_obj_size))
//{
// dprintf (3, ("debugging only - last npinned plug is min, check to see if it's correct"));
- // DebugBreak();
+ // GCToOSInterface::DebugBreak();
//}
save_pre_plug_info_p = TRUE;
}
@@ -21170,7 +21053,7 @@ void gc_heap::store_plug_gap_info (uint8_t* plug_start,
//if (Align (last_plug_len) < min_pre_pin_obj_size)
//{
// dprintf (3, ("debugging only - last pinned plug is min, check to see if it's correct"));
- // DebugBreak();
+ // GCToOSInterface::DebugBreak();
//}
save_post_plug_info (last_pinned_plug, last_object_in_last_plug, plug_start);
@@ -23486,7 +23369,7 @@ void gc_heap::relocate_shortened_obj_helper (uint8_t* x, size_t s, uint8_t* end,
//{
// dprintf (3, ("obj %Ix needed padding: end %Ix is %d bytes from pinned obj %Ix",
// x, (x + s), (plug- (x + s)), plug));
- // DebugBreak();
+ // GCToOSInterface::DebugBreak();
//}
relocate_pre_plug_info (pinned_plug_entry);
@@ -24770,7 +24653,7 @@ inline int32_t GCUnhandledExceptionFilter(EXCEPTION_POINTERS* pExceptionPointers
#pragma warning(push)
#pragma warning(disable:4702) // C4702: unreachable code: gc_thread_function may not return
#endif //_MSC_VER
-uint32_t __stdcall gc_heap::gc_thread_stub (void* arg)
+void __stdcall gc_heap::gc_thread_stub (void* arg)
{
ClrFlsSetThreadType (ThreadType_GC);
STRESS_LOG_RESERVE_MEM (GC_STRESSLOG_MULTIPLY);
@@ -24783,7 +24666,7 @@ uint32_t __stdcall gc_heap::gc_thread_stub (void* arg)
{
#ifdef BACKGROUND_GC
// For background GC we revert to doing a blocking GC.
- return 0;
+ return;
#else
STRESS_LOG0(LF_GC, LL_ALWAYS, "Thread::CommitThreadStack failed.");
_ASSERTE(!"Thread::CommitThreadStack failed.");
@@ -24797,7 +24680,7 @@ uint32_t __stdcall gc_heap::gc_thread_stub (void* arg)
#endif // NO_CATCH_HANDLERS
gc_heap* heap = (gc_heap*)arg;
_alloca (256*heap->heap_number);
- return heap->gc_thread_function();
+ heap->gc_thread_function();
#ifndef NO_CATCH_HANDLERS
}
@@ -25229,7 +25112,7 @@ BOOL gc_heap::commit_mark_array_by_range (uint8_t* begin, uint8_t* end, uint32_t
size));
#endif //SIMPLE_DPRINTF
- if (VirtualAlloc (commit_start, size, MEM_COMMIT, PAGE_READWRITE))
+ if (GCToOSInterface::VirtualCommit (commit_start, size))
{
// We can only verify the mark array is cleared from begin to end, the first and the last
// page aren't necessarily all cleared 'cause they could be used by other segments or
@@ -25474,10 +25357,10 @@ void gc_heap::decommit_mark_array_by_seg (heap_segment* seg)
if (decommit_start < decommit_end)
{
- if (!VirtualFree (decommit_start, size, MEM_DECOMMIT))
+ if (!GCToOSInterface::VirtualDecommit (decommit_start, size))
{
- dprintf (GC_TABLE_LOG, ("VirtualFree on %Ix for %Id bytes failed: %d",
- decommit_start, size, GetLastError()));
+ dprintf (GC_TABLE_LOG, ("GCToOSInterface::VirtualDecommit on %Ix for %Id bytes failed",
+ decommit_start, size));
assert (!"decommit failed");
}
}
@@ -25603,7 +25486,7 @@ void gc_heap::background_mark_phase ()
dont_restart_ee_p = FALSE;
restart_vm();
- __SwitchToThread (0, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::YieldThread (0);
#ifdef MULTIPLE_HEAPS
dprintf(3, ("Starting all gc threads for gc"));
bgc_t_join.restart();
@@ -26242,8 +26125,7 @@ void gc_heap::revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p)
PREFIX_ASSUME(seg != NULL);
- uint32_t granularity;
- int mode = concurrent_p ? 1 : 0;
+ bool reset_watch_state = !!concurrent_p;
BOOL small_object_segments = TRUE;
int align_const = get_alignment_constant (small_object_segments);
@@ -26348,19 +26230,18 @@ void gc_heap::revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p)
ptrdiff_t region_size = high_address - base_address;
dprintf (3, ("h%d: gw: [%Ix(%Id)", heap_number, (size_t)base_address, (size_t)region_size));
- uint32_t status = GetWriteWatch (mode, base_address, region_size,
- (void**)background_written_addresses,
- (ULONG_PTR*)&bcount, (DWORD*)&granularity);
+ bool success = GCToOSInterface::GetWriteWatch (reset_watch_state, base_address, region_size,
+ (void**)background_written_addresses,
+ &bcount);
//#ifdef _DEBUG
- if (status != 0)
+ if (!success)
{
printf ("GetWriteWatch Error ");
printf ("Probing pages [%Ix, %Ix[\n", (size_t)base_address, (size_t)high_address);
}
//#endif
- assert (status == 0);
- assert (granularity == OS_PAGE_SIZE);
+ assert (success);
if (bcount != 0)
{
@@ -26540,7 +26421,7 @@ BOOL gc_heap::prepare_bgc_thread(gc_heap* gh)
BOOL thread_created = FALSE;
dprintf (2, ("Preparing gc thread"));
- EnterCriticalSection (&(gh->bgc_threads_timeout_cs));
+ gh->bgc_threads_timeout_cs.Enter();
if (!(gh->bgc_thread_running))
{
dprintf (2, ("GC thread not runnning"));
@@ -26555,7 +26436,7 @@ BOOL gc_heap::prepare_bgc_thread(gc_heap* gh)
dprintf (3, ("GC thread already running"));
success = TRUE;
}
- LeaveCriticalSection (&(gh->bgc_threads_timeout_cs));
+ gh->bgc_threads_timeout_cs.Leave();
if(thread_created)
FireEtwGCCreateConcurrentThread_V1(GetClrInstanceId());
@@ -26581,7 +26462,7 @@ BOOL gc_heap::create_bgc_thread(gc_heap* gh)
// finished the event wait below.
rh_bgc_thread_ctx sContext;
- sContext.m_pRealStartRoutine = gh->bgc_thread_stub;
+ sContext.m_pRealStartRoutine = (PTHREAD_START_ROUTINE)gh->bgc_thread_stub;
sContext.m_pRealContext = gh;
if (!PalStartBackgroundGCThread(gh->rh_bgc_thread_stub, &sContext))
@@ -26833,7 +26714,7 @@ void gc_heap::kill_gc_thread()
background_gc_done_event.CloseEvent();
gc_lh_block_event.CloseEvent();
bgc_start_event.CloseEvent();
- DeleteCriticalSection (&bgc_threads_timeout_cs);
+ bgc_threads_timeout_cs.Destroy();
bgc_thread = 0;
recursive_gc_sync::shutdown();
}
@@ -26864,8 +26745,8 @@ uint32_t gc_heap::bgc_thread_function()
bgc_thread_running = TRUE;
Thread* current_thread = GetThread();
BOOL cooperative_mode = TRUE;
- bgc_thread_id = GetCurrentThreadId();
- dprintf (1, ("bgc_thread_id is set to %Ix", bgc_thread_id));
+ bgc_thread_id.SetToCurrentThread();
+ dprintf (1, ("bgc_thread_id is set to %Ix", GCToOSInterface::GetCurrentThreadIdForLogging()));
//this also indicates that the thread is ready.
background_gc_create_event.Set();
while (1)
@@ -26902,7 +26783,7 @@ uint32_t gc_heap::bgc_thread_function()
// Should join the bgc threads and terminate all of them
// at once.
dprintf (1, ("GC thread timeout"));
- EnterCriticalSection (&bgc_threads_timeout_cs);
+ bgc_threads_timeout_cs.Enter();
if (!keep_bgc_threads_p)
{
dprintf (2, ("GC thread exiting"));
@@ -26912,10 +26793,10 @@ uint32_t gc_heap::bgc_thread_function()
// assert if the lock count is not 0.
thread_to_destroy = bgc_thread;
bgc_thread = 0;
- bgc_thread_id = 0;
+ bgc_thread_id.Clear();
do_exit = TRUE;
}
- LeaveCriticalSection (&bgc_threads_timeout_cs);
+ bgc_threads_timeout_cs.Leave();
if (do_exit)
break;
else
@@ -29403,18 +29284,9 @@ generation* gc_heap::expand_heap (int condemned_generation,
bool gc_heap::init_dynamic_data()
{
- LARGE_INTEGER ts;
- if (!QueryPerformanceFrequency(&qpf))
- {
- FATAL_GC_ERROR();
- }
-
- if (!QueryPerformanceCounter(&ts))
- {
- FATAL_GC_ERROR();
- }
+ qpf = GCToOSInterface::QueryPerformanceFrequency();
- uint32_t now = (uint32_t)(ts.QuadPart/(qpf.QuadPart/1000));
+ uint32_t now = (uint32_t)GetHighPrecisionTimeStamp();
//clear some fields
for (int i = 0; i < max_generation+1; i++)
@@ -29536,17 +29408,6 @@ bool gc_heap::init_dynamic_data()
return true;
}
-// This returns a time stamp in milliseconds that is used throughout GC.
-// TODO: Replace all calls to QueryPerformanceCounter with this function.
-size_t gc_heap::get_time_now()
-{
- LARGE_INTEGER ts;
- if (!QueryPerformanceCounter(&ts))
- FATAL_GC_ERROR();
-
- return (size_t)(ts.QuadPart/(qpf.QuadPart/1000));
-}
-
float gc_heap::surv_to_growth (float cst, float limit, float max_limit)
{
if (cst < ((max_limit - limit ) / (limit * (max_limit-1.0f))))
@@ -29643,7 +29504,7 @@ size_t gc_heap::desired_new_allocation (dynamic_data* dd,
else //large object heap
{
GCMemoryStatus ms;
- GetProcessMemoryLoad (&ms);
+ GCToOSInterface::GetMemoryStatus (&ms);
uint64_t available_ram = ms.ullAvailPhys;
if (ms.ullAvailPhys > 1024*1024)
@@ -29879,7 +29740,7 @@ size_t gc_heap::joined_youngest_desired (size_t new_allocation)
{
uint32_t dwMemoryLoad = 0;
GCMemoryStatus ms;
- GetProcessMemoryLoad(&ms);
+ GCToOSInterface::GetMemoryStatus(&ms);
dprintf (2, ("Current memory load: %d", ms.dwMemoryLoad));
dwMemoryLoad = ms.dwMemoryLoad;
@@ -30573,7 +30434,7 @@ CObjectHeader* gc_heap::allocate_large_object (size_t jsize, int64_t& alloc_byte
{
if (g_pConfig->IsGCBreakOnOOMEnabled())
{
- DebugBreak();
+ GCToOSInterface::DebugBreak();
}
#ifndef FEATURE_REDHAWK
@@ -30666,10 +30527,7 @@ void reset_memory (uint8_t* o, size_t sizeo)
// on write watched memory.
if (reset_mm_p)
{
- if (VirtualAlloc ((char*)page_start, size, MEM_RESET, PAGE_READWRITE))
- VirtualUnlock ((char*)page_start, size);
- else
- reset_mm_p = FALSE;
+ reset_mm_p = GCToOSInterface::VirtualReset((void*)page_start, size, true /* unlock */);
}
}
#endif //!FEATURE_PAL
@@ -32524,11 +32382,7 @@ void gc_heap::clear_all_mark_array()
{
#ifdef MARK_ARRAY
//size_t num_dwords_written = 0;
- //LARGE_INTEGER ts;
- //if (!QueryPerformanceCounter(&ts))
- // FATAL_GC_ERROR();
- //
- //size_t begin_time = (size_t) (ts.QuadPart/(qpf.QuadPart/1000));
+ //size_t begin_time = GetHighPrecisionTimeStamp();
generation* gen = generation_of (max_generation);
heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
@@ -32589,10 +32443,7 @@ void gc_heap::clear_all_mark_array()
seg = heap_segment_next_rw (seg);
}
- //if (!QueryPerformanceCounter(&ts))
- // FATAL_GC_ERROR();
- //
- //size_t end_time = (size_t) (ts.QuadPart/(qpf.QuadPart/1000)) - begin_time;
+ //size_t end_time = GetHighPrecisionTimeStamp() - begin_time;
//printf ("took %Id ms to clear %Id bytes\n", end_time, num_dwords_written*sizeof(uint32_t));
@@ -32932,7 +32783,7 @@ gc_heap::verify_heap (BOOL begin_gc_p)
#ifdef MULTIPLE_HEAPS
t_join* current_join = &gc_t_join;
#ifdef BACKGROUND_GC
- if (settings.concurrent && (GetCurrentThreadId() == bgc_thread_id))
+ if (settings.concurrent && (bgc_thread_id.IsCurrentThread()))
{
// We always call verify_heap on entry of GC on the SVR GC threads.
current_join = &bgc_t_join;
@@ -33559,6 +33410,11 @@ HRESULT GCHeap::Initialize ()
HRESULT hr = S_OK;
+ if (!GCToOSInterface::Initialize())
+ {
+ return E_FAIL;
+ }
+
//Initialize the static members.
#ifdef TRACE_GC
GcDuration = 0;
@@ -33572,7 +33428,7 @@ HRESULT GCHeap::Initialize ()
#ifdef MULTIPLE_HEAPS
// GetGCProcessCpuCount only returns up to 64 procs.
unsigned nhp = CPUGroupInfo::CanEnableGCCPUGroups() ? CPUGroupInfo::GetNumActiveProcessors():
- GetCurrentProcessCpuCount();
+ GCToOSInterface::GetCurrentProcessCpuCount();
hr = gc_heap::initialize_gc (seg_size, large_seg_size /*LHEAP_ALLOC*/, nhp);
#else
hr = gc_heap::initialize_gc (seg_size, large_seg_size /*LHEAP_ALLOC*/);
@@ -33582,7 +33438,7 @@ HRESULT GCHeap::Initialize ()
return hr;
GCMemoryStatus ms;
- GetProcessMemoryLoad (&ms);
+ GCToOSInterface::GetMemoryStatus (&ms);
gc_heap::total_physical_mem = ms.ullTotalPhys;
gc_heap::mem_one_percent = gc_heap::total_physical_mem / 100;
#ifndef MULTIPLE_HEAPS
@@ -33596,7 +33452,7 @@ HRESULT GCHeap::Initialize ()
// I am assuming 47 processes using WKS GC and 3 using SVR GC.
// I am assuming 3 in part due to the "very high memory load" is 97%.
int available_mem_th = 10;
- if (gc_heap::total_physical_mem >= ((ULONGLONG)80 * 1024 * 1024 * 1024))
+ if (gc_heap::total_physical_mem >= ((uint64_t)80 * 1024 * 1024 * 1024))
{
int adjusted_available_mem_th = 3 + (int)((float)47 / (float)(g_SystemInfo.dwNumberOfProcessors));
available_mem_th = min (available_mem_th, adjusted_available_mem_th);
@@ -34056,7 +33912,7 @@ BOOL GCHeap::StressHeap(alloc_context * acontext)
// Allow programmer to skip the first N Stress GCs so that you can
// get to the interesting ones faster.
- FastInterlockIncrement((LONG*)&GCStressCurCount);
+ Interlocked::Increment(&GCStressCurCount);
if (GCStressCurCount < GCStressStartCount)
return FALSE;
@@ -34102,7 +33958,7 @@ BOOL GCHeap::StressHeap(alloc_context * acontext)
// at a time. A secondary advantage is that we release part of our StressObjs
// buffer sparingly but just as effectively.
- if (FastInterlockIncrement((LONG *) &OneAtATime) == 0 &&
+ if (Interlocked::Increment(&OneAtATime) == 0 &&
!TrackAllocations()) // Messing with object sizes can confuse the profiler (see ICorProfilerInfo::GetObjectSize)
{
StringObject* str;
@@ -34164,7 +34020,7 @@ BOOL GCHeap::StressHeap(alloc_context * acontext)
}
}
}
- FastInterlockDecrement((LONG *) &OneAtATime);
+ Interlocked::Decrement(&OneAtATime);
#endif // !MULTIPLE_HEAPS
if (IsConcurrentGCEnabled())
{
@@ -35747,18 +35603,18 @@ size_t GCHeap::GetValidGen0MaxSize(size_t seg_size)
#ifdef SERVER_GC
// performance data seems to indicate halving the size results
// in optimal perf. Ask for adjusted gen0 size.
- gen0size = max(GetLargestOnDieCacheSize(FALSE)/GetLogicalCpuCount(),(256*1024));
+ gen0size = max(GCToOSInterface::GetLargestOnDieCacheSize(FALSE)/GCToOSInterface::GetLogicalCpuCount(),(256*1024));
#if (defined(_TARGET_AMD64_))
// if gen0 size is too large given the available memory, reduce it.
// Get true cache size, as we don't want to reduce below this.
- size_t trueSize = max(GetLargestOnDieCacheSize(TRUE)/GetLogicalCpuCount(),(256*1024));
+ size_t trueSize = max(GCToOSInterface::GetLargestOnDieCacheSize(TRUE)/GCToOSInterface::GetLogicalCpuCount(),(256*1024));
dprintf (2, ("cache: %Id-%Id, cpu: %Id",
- GetLargestOnDieCacheSize(FALSE),
- GetLargestOnDieCacheSize(TRUE),
- GetLogicalCpuCount()));
+ GCToOSInterface::GetLargestOnDieCacheSize(FALSE),
+ GCToOSInterface::GetLargestOnDieCacheSize(TRUE),
+ GCToOSInterface::GetLogicalCpuCount()));
GCMemoryStatus ms;
- GetProcessMemoryLoad (&ms);
+ GCToOSInterface::GetMemoryStatus (&ms);
// if the total min GC across heaps will exceed 1/6th of available memory,
// then reduce the min GC size until it either fits or has been reduced to cache size.
while ((gen0size * gc_heap::n_heaps) > (ms.ullAvailPhys / 6))
@@ -35773,7 +35629,7 @@ size_t GCHeap::GetValidGen0MaxSize(size_t seg_size)
#endif //_TARGET_AMD64_
#else //SERVER_GC
- gen0size = max((4*GetLargestOnDieCacheSize(TRUE)/5),(256*1024));
+ gen0size = max((4*GCToOSInterface::GetLargestOnDieCacheSize(TRUE)/5),(256*1024));
#endif //SERVER_GC
#else //!FEATURE_REDHAWK
gen0size = (256*1024);
@@ -35982,8 +35838,7 @@ GCHeap::SetCardsAfterBulkCopy( Object **StartPoint, size_t len )
// Set Bit For Card and advance to next card
size_t card = gcard_of ((uint8_t*)rover);
- FastInterlockOr ((DWORD RAW_KEYWORD(volatile) *)&g_card_table[card/card_word_width],
- (1 << (uint32_t)(card % card_word_width)));
+ Interlocked::Or (&g_card_table[card/card_word_width], (1U << (card % card_word_width)));
// Skip to next card for the object
rover = (Object**)align_on_card ((uint8_t*)(rover+1));
}
@@ -36029,7 +35884,7 @@ bool CFinalize::Initialize()
STRESS_LOG_OOM_STACK(sizeof(Object*[100]));
if (g_pConfig->IsGCBreakOnOOMEnabled())
{
- DebugBreak();
+ GCToOSInterface::DebugBreak();
}
return false;
}
@@ -36042,7 +35897,7 @@ bool CFinalize::Initialize()
m_PromotedCount = 0;
lock = -1;
#ifdef _DEBUG
- lockowner_threadid = (uint32_t) -1;
+ lockowner_threadid.Clear();
#endif // _DEBUG
return true;
@@ -36066,22 +35921,22 @@ void CFinalize::EnterFinalizeLock()
GCToEEInterface::IsPreemptiveGCDisabled(GetThread()));
retry:
- if (FastInterlockExchange ((LONG*)&lock, 0) >= 0)
+ if (Interlocked::Exchange (&lock, 0) >= 0)
{
unsigned int i = 0;
while (lock >= 0)
{
YieldProcessor(); // indicate to the processor that we are spining
if (++i & 7)
- __SwitchToThread (0, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::YieldThread (0);
else
- __SwitchToThread (5, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::Sleep (5);
}
goto retry;
}
#ifdef _DEBUG
- lockowner_threadid = ::GetCurrentThreadId();
+ lockowner_threadid.SetToCurrentThread();
#endif // _DEBUG
}
@@ -36093,7 +35948,7 @@ void CFinalize::LeaveFinalizeLock()
GCToEEInterface::IsPreemptiveGCDisabled(GetThread()));
#ifdef _DEBUG
- lockowner_threadid = (uint32_t) -1;
+ lockowner_threadid.Clear();
#endif // _DEBUG
lock = -1;
}
@@ -36143,7 +35998,7 @@ CFinalize::RegisterForFinalization (int gen, Object* obj, size_t size)
STRESS_LOG_OOM_STACK(0);
if (g_pConfig->IsGCBreakOnOOMEnabled())
{
- DebugBreak();
+ GCToOSInterface::DebugBreak();
}
#ifdef FEATURE_REDHAWK
return false;
@@ -36795,7 +36650,7 @@ void TouchPages(LPVOID pStart, uint32_t cb)
void deleteGCShadow()
{
if (g_GCShadow != 0)
- VirtualFree (g_GCShadow, 0, MEM_RELEASE);
+ GCToOSInterface::VirtualRelease (g_GCShadow, g_GCShadowEnd - g_GCShadow);
g_GCShadow = 0;
g_GCShadowEnd = 0;
}
@@ -36810,7 +36665,7 @@ void initGCShadow()
if (len > (size_t)(g_GCShadowEnd - g_GCShadow))
{
deleteGCShadow();
- g_GCShadowEnd = g_GCShadow = (uint8_t*) VirtualAlloc(0, len, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ g_GCShadowEnd = g_GCShadow = (uint8_t*) GCToOSInterface::VirtualCommit(0, len);
if (g_GCShadow)
{
g_GCShadowEnd += len;
diff --git a/src/gc/gcee.cpp b/src/gc/gcee.cpp
index 7a9bb1891f..8e4e4480b4 100644
--- a/src/gc/gcee.cpp
+++ b/src/gc/gcee.cpp
@@ -379,15 +379,11 @@ size_t GCHeap::GetLastGCDuration(int generation)
return dd_gc_elapsed_time (hp->dynamic_data_of (generation));
}
+size_t GetHighPrecisionTimeStamp();
+
size_t GCHeap::GetNow()
{
-#ifdef MULTIPLE_HEAPS
- gc_heap* hp = gc_heap::g_heaps[0];
-#else
- gc_heap* hp = pGenGCHeap;
-#endif //MULTIPLE_HEAPS
-
- return hp->get_time_now();
+ return GetHighPrecisionTimeStamp();
}
#if defined(GC_PROFILING) //UNIXTODO: Enable this for FEATURE_EVENT_TRACE
diff --git a/src/gc/gcpriv.h b/src/gc/gcpriv.h
index 14bea156a4..fe40c0ccd7 100644
--- a/src/gc/gcpriv.h
+++ b/src/gc/gcpriv.h
@@ -25,7 +25,7 @@
inline void FATAL_GC_ERROR()
{
- DebugBreak();
+ GCToOSInterface::DebugBreak();
_ASSERTE(!"Fatal Error in GC.");
EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
}
@@ -143,12 +143,12 @@ inline void FATAL_GC_ERROR()
#if defined (SYNCHRONIZATION_STATS) || defined (STAGE_STATS)
#define BEGIN_TIMING(x) \
LARGE_INTEGER x##_start; \
- QueryPerformanceCounter (&x##_start)
+ x##_start = GCToOSInterface::QueryPerformanceCounter ()
#define END_TIMING(x) \
LARGE_INTEGER x##_end; \
- QueryPerformanceCounter (&x##_end); \
- x += x##_end.QuadPart - x##_start.QuadPart
+ x##_end = GCToOSInterface::QueryPerformanceCounter (); \
+ x += x##_end - x##_start
#else
#define BEGIN_TIMING(x)
@@ -204,70 +204,7 @@ void GCLogConfig (const char *fmt, ... );
#define CLREvent CLREventStatic
-#ifdef CreateFileMapping
-
-#undef CreateFileMapping
-
-#endif //CreateFileMapping
-
-#define CreateFileMapping WszCreateFileMapping
-
// hosted api
-#ifdef InitializeCriticalSection
-#undef InitializeCriticalSection
-#endif //ifdef InitializeCriticalSection
-#define InitializeCriticalSection UnsafeInitializeCriticalSection
-
-#ifdef DeleteCriticalSection
-#undef DeleteCriticalSection
-#endif //ifdef DeleteCriticalSection
-#define DeleteCriticalSection UnsafeDeleteCriticalSection
-
-#ifdef EnterCriticalSection
-#undef EnterCriticalSection
-#endif //ifdef EnterCriticalSection
-#define EnterCriticalSection UnsafeEEEnterCriticalSection
-
-#ifdef LeaveCriticalSection
-#undef LeaveCriticalSection
-#endif //ifdef LeaveCriticalSection
-#define LeaveCriticalSection UnsafeEELeaveCriticalSection
-
-#ifdef TryEnterCriticalSection
-#undef TryEnterCriticalSection
-#endif //ifdef TryEnterCriticalSection
-#define TryEnterCriticalSection UnsafeEETryEnterCriticalSection
-
-#ifdef CreateSemaphore
-#undef CreateSemaphore
-#endif //CreateSemaphore
-#define CreateSemaphore UnsafeCreateSemaphore
-
-#ifdef CreateEvent
-#undef CreateEvent
-#endif //ifdef CreateEvent
-#define CreateEvent UnsafeCreateEvent
-
-#ifdef VirtualAlloc
-#undef VirtualAlloc
-#endif //ifdef VirtualAlloc
-#define VirtualAlloc ClrVirtualAlloc
-
-#ifdef VirtualFree
-#undef VirtualFree
-#endif //ifdef VirtualFree
-#define VirtualFree ClrVirtualFree
-
-#ifdef VirtualQuery
-#undef VirtualQuery
-#endif //ifdef VirtualQuery
-#define VirtualQuery ClrVirtualQuery
-
-#ifdef VirtualProtect
-#undef VirtualProtect
-#endif //ifdef VirtualProtect
-#define VirtualProtect ClrVirtualProtect
-
#ifdef memcpy
#undef memcpy
#endif //memcpy
@@ -1045,7 +982,7 @@ struct spinlock_info
{
msl_enter_state enter_state;
msl_take_state take_state;
- uint32_t thread_id;
+ EEThreadId thread_id;
};
const unsigned HS_CACHE_LINE_SIZE = 128;
@@ -1293,7 +1230,7 @@ public:
static
gc_heap* balance_heaps_loh (alloc_context* acontext, size_t size);
static
- uint32_t __stdcall gc_thread_stub (void* arg);
+ void __stdcall gc_thread_stub (void* arg);
#endif //MULTIPLE_HEAPS
CObjectHeader* try_fast_alloc (size_t jsize);
@@ -1621,13 +1558,13 @@ protected:
struct loh_state_info
{
allocation_state alloc_state;
- uint32_t thread_id;
+ EEThreadId thread_id;
};
PER_HEAP
loh_state_info last_loh_states[max_saved_loh_states];
PER_HEAP
- void add_saved_loh_state (allocation_state loh_state_to_save, uint32_t thread_id);
+ void add_saved_loh_state (allocation_state loh_state_to_save, EEThreadId thread_id);
#endif //RECORD_LOH_STATE
PER_HEAP
BOOL allocate_large (int gen_number,
@@ -2517,8 +2454,6 @@ protected:
PER_HEAP
void save_ephemeral_generation_starts();
- static size_t get_time_now();
-
PER_HEAP
bool init_dynamic_data ();
PER_HEAP
@@ -2612,9 +2547,9 @@ protected:
PER_HEAP_ISOLATED
void destroy_thread_support ();
PER_HEAP
- HANDLE create_gc_thread();
+ bool create_gc_thread();
PER_HEAP
- uint32_t gc_thread_function();
+ void gc_thread_function();
#ifdef MARK_LIST
#ifdef PARALLEL_MARK_LIST_SORT
PER_HEAP
@@ -3109,7 +3044,7 @@ protected:
#ifdef BACKGROUND_GC
PER_HEAP
- uint32_t bgc_thread_id;
+ EEThreadId bgc_thread_id;
#ifdef WRITE_WATCH
PER_HEAP
@@ -3152,7 +3087,7 @@ protected:
Thread* bgc_thread;
PER_HEAP
- CRITICAL_SECTION bgc_threads_timeout_cs;
+ CLRCriticalSection bgc_threads_timeout_cs;
PER_HEAP_ISOLATED
CLREvent background_gc_done_event;
@@ -3728,8 +3663,6 @@ public:
SPTR_DECL(PTR_gc_heap, g_heaps);
static
- HANDLE* g_gc_threads; // keep all of the gc threads.
- static
size_t* g_promoted;
#ifdef BACKGROUND_GC
static
@@ -3784,7 +3717,7 @@ private:
VOLATILE(int32_t) lock;
#ifdef _DEBUG
- uint32_t lockowner_threadid;
+ EEThreadId lockowner_threadid;
#endif // _DEBUG
BOOL GrowArray();
diff --git a/src/gc/gcscan.cpp b/src/gc/gcscan.cpp
index 621ffad7f3..78e0dd61f8 100644
--- a/src/gc/gcscan.cpp
+++ b/src/gc/gcscan.cpp
@@ -270,13 +270,13 @@ void GCScan::GcRuntimeStructuresValid (BOOL bValid)
if (!bValid)
{
int32_t result;
- result = FastInterlockIncrement ((LONG*)&m_GcStructuresInvalidCnt);
+ result = Interlocked::Increment (&m_GcStructuresInvalidCnt);
_ASSERTE (result > 0);
}
else
{
int32_t result;
- result = FastInterlockDecrement ((LONG*)&m_GcStructuresInvalidCnt);
+ result = Interlocked::Decrement (&m_GcStructuresInvalidCnt);
_ASSERTE (result >= 0);
}
}
diff --git a/src/gc/handletable.cpp b/src/gc/handletable.cpp
index e14316bd08..7f855bba29 100644
--- a/src/gc/handletable.cpp
+++ b/src/gc/handletable.cpp
@@ -688,7 +688,7 @@ uintptr_t HndCompareExchangeHandleExtraInfo(OBJECTHANDLE handle, uint32_t uType,
if (pUserData)
{
// yes - attempt to store the info
- return (uintptr_t)FastInterlockCompareExchangePointer((void**)pUserData, (void*)lNewExtraInfo, (void*)lOldExtraInfo);
+ return (uintptr_t)Interlocked::CompareExchangePointer((void**)pUserData, (void*)lNewExtraInfo, (void*)lOldExtraInfo);
}
_ASSERTE(!"Shouldn't be trying to call HndCompareExchangeHandleExtraInfo on handle types without extra info");
diff --git a/src/gc/handletable.inl b/src/gc/handletable.inl
index 29594d0a7c..15c38fdd9a 100644
--- a/src/gc/handletable.inl
+++ b/src/gc/handletable.inl
@@ -67,7 +67,7 @@ inline void* HndInterlockedCompareExchangeHandle(OBJECTHANDLE handle, OBJECTREF
// store the pointer
- void* ret = FastInterlockCompareExchangePointer(reinterpret_cast<_UNCHECKED_OBJECTREF volatile*>(handle), value, oldValue);
+ void* ret = Interlocked::CompareExchangePointer(reinterpret_cast<_UNCHECKED_OBJECTREF volatile*>(handle), value, oldValue);
if (ret == oldValue)
HndLogSetEvent(handle, value);
@@ -101,7 +101,7 @@ inline BOOL HndFirstAssignHandle(OBJECTHANDLE handle, OBJECTREF objref)
_UNCHECKED_OBJECTREF null = NULL;
// store the pointer if we are the first ones here
- BOOL success = (NULL == FastInterlockCompareExchangePointer(reinterpret_cast<_UNCHECKED_OBJECTREF volatile*>(handle),
+ BOOL success = (NULL == Interlocked::CompareExchangePointer(reinterpret_cast<_UNCHECKED_OBJECTREF volatile*>(handle),
value,
null));
diff --git a/src/gc/handletablecache.cpp b/src/gc/handletablecache.cpp
index 717348fdb4..33cc08e82f 100644
--- a/src/gc/handletablecache.cpp
+++ b/src/gc/handletablecache.cpp
@@ -86,7 +86,7 @@ void SpinUntil(void *pCond, BOOL fNonZero)
#endif //_DEBUG
// sleep for a little while
- __SwitchToThread(dwThisSleepPeriod, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::Sleep(dwThisSleepPeriod);
// now update our sleep period
dwThisSleepPeriod = dwNextSleepPeriod;
@@ -471,7 +471,7 @@ void TableFullRebalanceCache(HandleTable *pTable,
// update the write index for the free bank
// NOTE: we use an interlocked exchange here to guarantee relative store order on MP
// AFTER THIS POINT THE FREE BANK IS LIVE AND COULD RECEIVE NEW HANDLES
- FastInterlockExchange((LONG*)&pCache->lFreeIndex, lMinFreeIndex);
+ Interlocked::Exchange(&pCache->lFreeIndex, lMinFreeIndex);
// now if we have any handles left, store them in the reserve bank
if (uHandleCount)
@@ -488,7 +488,7 @@ void TableFullRebalanceCache(HandleTable *pTable,
// update the read index for the reserve bank
// NOTE: we use an interlocked exchange here to guarantee relative store order on MP
// AT THIS POINT THE RESERVE BANK IS LIVE AND HANDLES COULD BE ALLOCATED FROM IT
- FastInterlockExchange((LONG*)&pCache->lReserveIndex, lMinReserveIndex);
+ Interlocked::Exchange(&pCache->lReserveIndex, lMinReserveIndex);
}
@@ -599,12 +599,12 @@ void TableQuickRebalanceCache(HandleTable *pTable,
// update the write index for the free bank
// NOTE: we use an interlocked exchange here to guarantee relative store order on MP
// AFTER THIS POINT THE FREE BANK IS LIVE AND COULD RECEIVE NEW HANDLES
- FastInterlockExchange((LONG*)&pCache->lFreeIndex, lMinFreeIndex);
+ Interlocked::Exchange(&pCache->lFreeIndex, lMinFreeIndex);
// update the read index for the reserve bank
// NOTE: we use an interlocked exchange here to guarantee relative store order on MP
// AT THIS POINT THE RESERVE BANK IS LIVE AND HANDLES COULD BE ALLOCATED FROM IT
- FastInterlockExchange((LONG*)&pCache->lReserveIndex, lMinReserveIndex);
+ Interlocked::Exchange(&pCache->lReserveIndex, lMinReserveIndex);
}
@@ -630,13 +630,13 @@ OBJECTHANDLE TableCacheMissOnAlloc(HandleTable *pTable, HandleTypeCache *pCache,
CrstHolder ch(&pTable->Lock);
// try again to take a handle (somebody else may have rebalanced)
- int32_t lReserveIndex = FastInterlockDecrement((LONG*)&pCache->lReserveIndex);
+ int32_t lReserveIndex = Interlocked::Decrement(&pCache->lReserveIndex);
// are we still waiting for handles?
if (lReserveIndex < 0)
{
// yup, suspend free list usage...
- int32_t lFreeIndex = FastInterlockExchange((LONG*)&pCache->lFreeIndex, 0L);
+ int32_t lFreeIndex = Interlocked::Exchange(&pCache->lFreeIndex, 0);
// ...and rebalance the cache...
TableQuickRebalanceCache(pTable, pCache, uType, lReserveIndex, lFreeIndex, &handle, NULL);
@@ -680,13 +680,13 @@ void TableCacheMissOnFree(HandleTable *pTable, HandleTypeCache *pCache, uint32_t
CrstHolder ch(&pTable->Lock);
// try again to take a slot (somebody else may have rebalanced)
- int32_t lFreeIndex = FastInterlockDecrement((LONG*)&pCache->lFreeIndex);
+ int32_t lFreeIndex = Interlocked::Decrement(&pCache->lFreeIndex);
// are we still waiting for free slots?
if (lFreeIndex < 0)
{
// yup, suspend reserve list usage...
- int32_t lReserveIndex = FastInterlockExchange((LONG*)&pCache->lReserveIndex, 0L);
+ int32_t lReserveIndex = Interlocked::Exchange(&pCache->lReserveIndex, 0);
// ...and rebalance the cache...
TableQuickRebalanceCache(pTable, pCache, uType, lReserveIndex, lFreeIndex, NULL, handle);
@@ -718,7 +718,7 @@ OBJECTHANDLE TableAllocSingleHandleFromCache(HandleTable *pTable, uint32_t uType
if (pTable->rgQuickCache[uType])
{
// try to grab the handle we saw
- handle = FastInterlockExchangePointer(pTable->rgQuickCache + uType, (OBJECTHANDLE)NULL);
+ handle = Interlocked::ExchangePointer(pTable->rgQuickCache + uType, (OBJECTHANDLE)NULL);
// if it worked then we're done
if (handle)
@@ -729,7 +729,7 @@ OBJECTHANDLE TableAllocSingleHandleFromCache(HandleTable *pTable, uint32_t uType
HandleTypeCache *pCache = pTable->rgMainCache + uType;
// try to take a handle from the main cache
- int32_t lReserveIndex = FastInterlockDecrement((LONG*)&pCache->lReserveIndex);
+ int32_t lReserveIndex = Interlocked::Decrement(&pCache->lReserveIndex);
// did we underflow?
if (lReserveIndex < 0)
@@ -787,7 +787,7 @@ void TableFreeSingleHandleToCache(HandleTable *pTable, uint32_t uType, OBJECTHAN
if (!pTable->rgQuickCache[uType])
{
// yup - try to stuff our handle in the slot we saw
- handle = FastInterlockExchangePointer(&pTable->rgQuickCache[uType], handle);
+ handle = Interlocked::ExchangePointer(&pTable->rgQuickCache[uType], handle);
// if we didn't end up with another handle then we're done
if (!handle)
@@ -798,7 +798,7 @@ void TableFreeSingleHandleToCache(HandleTable *pTable, uint32_t uType, OBJECTHAN
HandleTypeCache *pCache = pTable->rgMainCache + uType;
// try to take a free slot from the main cache
- int32_t lFreeIndex = FastInterlockDecrement((LONG*)&pCache->lFreeIndex);
+ int32_t lFreeIndex = Interlocked::Decrement(&pCache->lFreeIndex);
// did we underflow?
if (lFreeIndex < 0)
diff --git a/src/gc/handletablecore.cpp b/src/gc/handletablecore.cpp
index 8435f94165..d302087ec9 100644
--- a/src/gc/handletablecore.cpp
+++ b/src/gc/handletablecore.cpp
@@ -239,7 +239,7 @@ BOOL TableCanFreeSegmentNow(HandleTable *pTable, TableSegment *pSegment)
// fail but by the time a dump was created the lock was unowned so
// there was no way to tell who the previous owner was.
EEThreadId threadId = pTable->Lock.GetHolderThreadId();
- _ASSERTE(threadId.IsSameThread());
+ _ASSERTE(threadId.IsCurrentThread());
#endif // _DEBUG
// deterine if any segment is currently being scanned asynchronously
@@ -526,7 +526,7 @@ BOOL SegmentInitialize(TableSegment *pSegment, HandleTable *pTable)
dwCommit &= ~(g_SystemInfo.dwPageSize - 1);
// commit the header
- if (!ClrVirtualAlloc(pSegment, dwCommit, MEM_COMMIT, PAGE_READWRITE))
+ if (!GCToOSInterface::VirtualCommit(pSegment, dwCommit))
{
//_ASSERTE(FALSE);
return FALSE;
@@ -581,7 +581,7 @@ void SegmentFree(TableSegment *pSegment)
*/
// free the segment's memory
- ClrVirtualFree(pSegment, 0, MEM_RELEASE);
+ GCToOSInterface::VirtualRelease(pSegment, HANDLE_SEGMENT_SIZE);
}
@@ -611,7 +611,7 @@ TableSegment *SegmentAlloc(HandleTable *pTable)
_ASSERTE(HANDLE_SEGMENT_ALIGNMENT >= HANDLE_SEGMENT_SIZE);
_ASSERTE(HANDLE_SEGMENT_ALIGNMENT == 0x10000);
- pSegment = (TableSegment *)ClrVirtualAllocAligned(NULL, HANDLE_SEGMENT_SIZE, MEM_RESERVE, PAGE_NOACCESS, HANDLE_SEGMENT_ALIGNMENT);
+ pSegment = (TableSegment *)GCToOSInterface::VirtualReserve(NULL, HANDLE_SEGMENT_SIZE, HANDLE_SEGMENT_ALIGNMENT, VirtualReserveFlags::None);
_ASSERTE(((size_t)pSegment % HANDLE_SEGMENT_ALIGNMENT) == 0);
// bail out if we couldn't get any memory
@@ -1440,7 +1440,7 @@ uint32_t SegmentInsertBlockFromFreeListWorker(TableSegment *pSegment, uint32_t u
uint32_t dwCommit = g_SystemInfo.dwPageSize;
// commit the memory
- if (!ClrVirtualAlloc(pvCommit, dwCommit, MEM_COMMIT, PAGE_READWRITE))
+ if (!GCToOSInterface::VirtualCommit(pvCommit, dwCommit))
return BLOCK_INVALID;
// use the previous commit line as the new decommit line
@@ -1844,7 +1844,7 @@ void SegmentTrimExcessPages(TableSegment *pSegment)
if (dwHi > dwLo)
{
// decommit the memory
- ClrVirtualFree((LPVOID)dwLo, dwHi - dwLo, MEM_DECOMMIT);
+ GCToOSInterface::VirtualDecommit((LPVOID)dwLo, dwHi - dwLo);
// update the commit line
pSegment->bCommitLine = (uint8_t)((dwLo - (size_t)pSegment->rgValue) / HANDLE_BYTES_PER_BLOCK);
diff --git a/src/gc/objecthandle.cpp b/src/gc/objecthandle.cpp
index 8b72d0d430..1654cf9b94 100644
--- a/src/gc/objecthandle.cpp
+++ b/src/gc/objecthandle.cpp
@@ -787,7 +787,7 @@ HandleTableBucket *Ref_CreateHandleTableBucket(ADIndex uADIndex)
HndSetHandleTableIndex(result->pTable[uCPUindex], i+offset);
result->HandleTableIndex = i+offset;
- if (FastInterlockCompareExchangePointer(&walk->pBuckets[i], result, NULL) == 0) {
+ if (Interlocked::CompareExchangePointer(&walk->pBuckets[i], result, NULL) == 0) {
// Get a free slot.
bucketHolder.SuppressRelease();
return result;
@@ -812,7 +812,7 @@ HandleTableBucket *Ref_CreateHandleTableBucket(ADIndex uADIndex)
ZeroMemory(newMap->pBuckets,
INITIAL_HANDLE_TABLE_ARRAY_SIZE * sizeof (HandleTableBucket *));
- if (FastInterlockCompareExchangePointer(&last->pNext, newMap.GetValue(), NULL) != NULL)
+ if (Interlocked::CompareExchangePointer(&last->pNext, newMap.GetValue(), NULL) != NULL)
{
// This thread loses.
delete [] newMap->pBuckets;
@@ -1575,8 +1575,8 @@ void Ref_UpdatePointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Re
if (GCHeap::IsServerHeap())
{
- bDo = (FastInterlockIncrement((LONG*)&uCount) == 1);
- FastInterlockCompareExchange ((LONG*)&uCount, 0, GCHeap::GetGCHeap()->GetNumberOfHeaps());
+ bDo = (Interlocked::Increment(&uCount) == 1);
+ Interlocked::CompareExchange (&uCount, 0, GCHeap::GetGCHeap()->GetNumberOfHeaps());
_ASSERTE (uCount <= GCHeap::GetGCHeap()->GetNumberOfHeaps());
}
diff --git a/src/gc/sample/CMakeLists.txt b/src/gc/sample/CMakeLists.txt
index 8bed3adee2..a46f9aeb8b 100644
--- a/src/gc/sample/CMakeLists.txt
+++ b/src/gc/sample/CMakeLists.txt
@@ -1,11 +1,13 @@
project(clrgcsample)
+set(CMAKE_INCLUDE_CURRENT_DIR ON)
+
include_directories(..)
include_directories(../env)
set(SOURCES
GCSample.cpp
- gcenv.cpp
+ gcenv.ee.cpp
../gccommon.cpp
../gceewks.cpp
../gcscan.cpp
@@ -19,45 +21,12 @@ set(SOURCES
if(WIN32)
list(APPEND SOURCES
- ../env/gcenv.windows.cpp)
+ gcenv.windows.cpp)
else()
list(APPEND SOURCES
- ../env/gcenv.unix.cpp)
-endif()
-
-if(CLR_CMAKE_PLATFORM_UNIX)
- add_compile_options(-Wno-format)
- add_compile_options(-Wno-unused-variable)
- add_compile_options(-Wno-unused-private-field)
- add_compile_options(-Wno-tautological-undefined-compare)
+ gcenv.unix.cpp)
endif()
-if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
- add_definitions(-D_TARGET_AMD64_=1)
- set(IS_64BIT_BUILD 1)
-elseif(CLR_CMAKE_PLATFORM_ARCH_I386)
- add_definitions(-D_TARGET_X86_=1)
-elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
- add_definitions(-D_TARGET_ARM_=1)
-elseif(CLR_CMAKE_PLATFORM_ARCH_ARM64)
- add_definitions(-D_TARGET_ARM64_=1)
- set(IS_64BIT_BUILD 1)
-else()
- clr_unknown_arch()
-endif()
-
-if(IS_64BIT_BUILD)
- add_definitions(-DBIT64=1)
-endif(IS_64BIT_BUILD)
-
-if(WIN32)
- add_definitions(-DWIN32)
- add_definitions(-D_WIN32=1)
- if(IS_64BIT_BUILD)
- add_definitions(-D_WIN64=1)
- endif()
-endif(WIN32)
-
add_executable(gcsample
${SOURCES}
)
diff --git a/src/gc/sample/GCSample.cpp b/src/gc/sample/GCSample.cpp
index 446956110a..eb2c9aa9c4 100644
--- a/src/gc/sample/GCSample.cpp
+++ b/src/gc/sample/GCSample.cpp
@@ -111,12 +111,15 @@ void WriteBarrier(Object ** dst, Object * ref)
ErectWriteBarrier(dst, ref);
}
-int main(int argc, char* argv[])
+int __cdecl main(int argc, char* argv[])
{
//
// Initialize system info
//
- InitializeSystemInfo();
+ if (!GCToOSInterface::Initialize())
+ {
+ return -1;
+ }
//
// Initialize free object methodtable. The GC uses a special array-like methodtable as placeholder
@@ -170,7 +173,7 @@ int main(int argc, char* argv[])
My_MethodTable;
// 'My' contains the MethodTable*
- size_t baseSize = sizeof(My);
+ uint32_t baseSize = sizeof(My);
// GC expects the size of ObjHeader (extra void*) to be included in the size.
baseSize = baseSize + sizeof(ObjHeader);
// Add padding as necessary. GC requires the object size to be at least MIN_OBJECT_SIZE.
diff --git a/src/gc/sample/GCSample.vcxproj b/src/gc/sample/GCSample.vcxproj
index a0a79c59eb..eefca17fb3 100644
--- a/src/gc/sample/GCSample.vcxproj
+++ b/src/gc/sample/GCSample.vcxproj
@@ -83,7 +83,8 @@
<ClInclude Include="gcenv.h" />
</ItemGroup>
<ItemGroup>
- <ClCompile Include="gcenv.cpp" />
+ <ClCompile Include="gcenv.ee.cpp" />
+ <ClCompile Include="gcenv.windows.cpp" />
<ClCompile Include="GCSample.cpp" />
<ClCompile Include="..\gccommon.cpp" />
<ClCompile Include="..\gceewks.cpp" />
@@ -94,7 +95,6 @@
<ClCompile Include="..\handletablecore.cpp" />
<ClCompile Include="..\handletablescan.cpp" />
<ClCompile Include="..\objecthandle.cpp" />
- <ClCompile Include="..\env\gcenv.windows.cpp" />
<ClCompile Include="..\env\common.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Create</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Create</PrecompiledHeader>
diff --git a/src/gc/sample/GCSample.vcxproj.filters b/src/gc/sample/GCSample.vcxproj.filters
index 1e9facde09..e46c054565 100644
--- a/src/gc/sample/GCSample.vcxproj.filters
+++ b/src/gc/sample/GCSample.vcxproj.filters
@@ -53,13 +53,13 @@
<ClCompile Include="..\gccommon.cpp">
<Filter>Source Files</Filter>
</ClCompile>
- <ClCompile Include="..\env\gcenv.windows.cpp">
+ <ClCompile Include="..\env\common.cpp">
<Filter>Source Files</Filter>
</ClCompile>
- <ClCompile Include="..\env\common.cpp">
+ <ClCompile Include="gcenv.ee.cpp">
<Filter>Source Files</Filter>
</ClCompile>
- <ClCompile Include="gcenv.cpp">
+ <ClCompile Include="gcenv.windows.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
diff --git a/src/gc/sample/gcenv.cpp b/src/gc/sample/gcenv.cpp
deleted file mode 100644
index 2164fb0c5e..0000000000
--- a/src/gc/sample/gcenv.cpp
+++ /dev/null
@@ -1,156 +0,0 @@
-//
-// Copyright (c) Microsoft. All rights reserved.
-// Licensed under the MIT license. See LICENSE file in the project root for full license information.
-//
-
-#include "common.h"
-
-#include "windows.h"
-
-#include "gcenv.h"
-#include "gc.h"
-
-EEConfig * g_pConfig;
-
-#ifdef _MSC_VER
-__declspec(thread)
-#else
-__thread
-#endif
-Thread * pCurrentThread;
-
-Thread * GetThread()
-{
- return pCurrentThread;
-}
-
-Thread * g_pThreadList = NULL;
-
-Thread * ThreadStore::GetThreadList(Thread * pThread)
-{
- if (pThread == NULL)
- return g_pThreadList;
-
- return pThread->m_pNext;
-}
-
-void ThreadStore::AttachCurrentThread()
-{
- // TODO: Locks
-
- Thread * pThread = new Thread();
- pThread->GetAllocContext()->init();
- pCurrentThread = pThread;
-
- pThread->m_pNext = g_pThreadList;
- g_pThreadList = pThread;
-}
-
-void GCToEEInterface::SuspendEE(GCToEEInterface::SUSPEND_REASON reason)
-{
- GCHeap::GetGCHeap()->SetGCInProgress(TRUE);
-
- // TODO: Implement
-}
-
-void GCToEEInterface::RestartEE(bool bFinishedGC)
-{
- // TODO: Implement
-
- GCHeap::GetGCHeap()->SetGCInProgress(FALSE);
-}
-
-void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
-{
- // TODO: Implement - Scan stack roots on given thread
-}
-
-void GCToEEInterface::GcStartWork(int condemned, int max_gen)
-{
-}
-
-void GCToEEInterface::AfterGcScanRoots(int condemned, int max_gen, ScanContext* sc)
-{
-}
-
-void GCToEEInterface::GcBeforeBGCSweepWork()
-{
-}
-
-void GCToEEInterface::GcDone(int condemned)
-{
-}
-
-bool GCToEEInterface::RefCountedHandleCallbacks(Object * pObject)
-{
- return false;
-}
-
-bool GCToEEInterface::IsPreemptiveGCDisabled(Thread * pThread)
-{
- return pThread->PreemptiveGCDisabled();
-}
-
-void GCToEEInterface::EnablePreemptiveGC(Thread * pThread)
-{
- return pThread->EnablePreemptiveGC();
-}
-
-void GCToEEInterface::DisablePreemptiveGC(Thread * pThread)
-{
- pThread->DisablePreemptiveGC();
-}
-
-void GCToEEInterface::SetGCSpecial(Thread * pThread)
-{
- pThread->SetGCSpecial(true);
-}
-
-alloc_context * GCToEEInterface::GetAllocContext(Thread * pThread)
-{
- return pThread->GetAllocContext();
-}
-
-bool GCToEEInterface::CatchAtSafePoint(Thread * pThread)
-{
- return pThread->CatchAtSafePoint();
-}
-
-// does not acquire thread store lock
-void GCToEEInterface::AttachCurrentThread()
-{
- ThreadStore::AttachCurrentThread();
-}
-
-void GCToEEInterface::GcEnumAllocContexts (enum_alloc_context_func* fn, void* param)
-{
- Thread * pThread = NULL;
- while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
- {
- fn(pThread->GetAllocContext(), param);
- }
-}
-
-
-void FinalizerThread::EnableFinalization()
-{
- // Signal to finalizer thread that there are objects to finalize
- // TODO: Implement for finalization
-}
-
-bool FinalizerThread::HaveExtraWorkForFinalizer()
-{
- return false;
-}
-
-bool PalStartBackgroundGCThread(BackgroundCallback callback, void* pCallbackContext)
-{
- // TODO: Implement for background GC
- return false;
-}
-
-bool IsGCSpecialThread()
-{
- // TODO: Implement for background GC
- return false;
-}
diff --git a/src/gc/sample/gcenv.ee.cpp b/src/gc/sample/gcenv.ee.cpp
new file mode 100644
index 0000000000..a1a0360f20
--- /dev/null
+++ b/src/gc/sample/gcenv.ee.cpp
@@ -0,0 +1,289 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+#include "common.h"
+
+#include "windows.h"
+
+#include "gcenv.h"
+#include "gc.h"
+
+EEConfig * g_pConfig;
+
+void CLREventStatic::CreateManualEvent(bool bInitialState)
+{
+ m_hEvent = CreateEventW(NULL, TRUE, bInitialState, NULL);
+ m_fInitialized = true;
+}
+
+void CLREventStatic::CreateAutoEvent(bool bInitialState)
+{
+ m_hEvent = CreateEventW(NULL, FALSE, bInitialState, NULL);
+ m_fInitialized = true;
+}
+
+void CLREventStatic::CreateOSManualEvent(bool bInitialState)
+{
+ m_hEvent = CreateEventW(NULL, TRUE, bInitialState, NULL);
+ m_fInitialized = true;
+}
+
+void CLREventStatic::CreateOSAutoEvent(bool bInitialState)
+{
+ m_hEvent = CreateEventW(NULL, FALSE, bInitialState, NULL);
+ m_fInitialized = true;
+}
+
+void CLREventStatic::CloseEvent()
+{
+ if (m_fInitialized && m_hEvent != INVALID_HANDLE_VALUE)
+ {
+ CloseHandle(m_hEvent);
+ m_hEvent = INVALID_HANDLE_VALUE;
+ }
+}
+
+bool CLREventStatic::IsValid() const
+{
+ return m_fInitialized && m_hEvent != INVALID_HANDLE_VALUE;
+}
+
+bool CLREventStatic::Set()
+{
+ if (!m_fInitialized)
+ return false;
+ return !!SetEvent(m_hEvent);
+}
+
+bool CLREventStatic::Reset()
+{
+ if (!m_fInitialized)
+ return false;
+ return !!ResetEvent(m_hEvent);
+}
+
+uint32_t CLREventStatic::Wait(uint32_t dwMilliseconds, bool bAlertable)
+{
+ DWORD result = WAIT_FAILED;
+
+ if (m_fInitialized)
+ {
+ bool disablePreemptive = false;
+ Thread * pCurThread = GetThread();
+
+ if (NULL != pCurThread)
+ {
+ if (GCToEEInterface::IsPreemptiveGCDisabled(pCurThread))
+ {
+ GCToEEInterface::EnablePreemptiveGC(pCurThread);
+ disablePreemptive = true;
+ }
+ }
+
+ result = WaitForSingleObjectEx(m_hEvent, dwMilliseconds, bAlertable);
+
+ if (disablePreemptive)
+ {
+ GCToEEInterface::DisablePreemptiveGC(pCurThread);
+ }
+ }
+
+ return result;
+}
+
+__declspec(thread) Thread * pCurrentThread;
+
+Thread * GetThread()
+{
+ return pCurrentThread;
+}
+
+Thread * g_pThreadList = NULL;
+
+Thread * ThreadStore::GetThreadList(Thread * pThread)
+{
+ if (pThread == NULL)
+ return g_pThreadList;
+
+ return pThread->m_pNext;
+}
+
+void ThreadStore::AttachCurrentThread()
+{
+ // TODO: Locks
+
+ Thread * pThread = new Thread();
+ pThread->GetAllocContext()->init();
+ pCurrentThread = pThread;
+
+ pThread->m_pNext = g_pThreadList;
+ g_pThreadList = pThread;
+}
+
+void GCToEEInterface::SuspendEE(GCToEEInterface::SUSPEND_REASON reason)
+{
+ GCHeap::GetGCHeap()->SetGCInProgress(TRUE);
+
+ // TODO: Implement
+}
+
+void GCToEEInterface::RestartEE(bool bFinishedGC)
+{
+ // TODO: Implement
+
+ GCHeap::GetGCHeap()->SetGCInProgress(FALSE);
+}
+
+void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
+{
+ // TODO: Implement - Scan stack roots on given thread
+}
+
+void GCToEEInterface::GcStartWork(int condemned, int max_gen)
+{
+}
+
+void GCToEEInterface::AfterGcScanRoots(int condemned, int max_gen, ScanContext* sc)
+{
+}
+
+void GCToEEInterface::GcBeforeBGCSweepWork()
+{
+}
+
+void GCToEEInterface::GcDone(int condemned)
+{
+}
+
+bool GCToEEInterface::RefCountedHandleCallbacks(Object * pObject)
+{
+ return false;
+}
+
+bool GCToEEInterface::IsPreemptiveGCDisabled(Thread * pThread)
+{
+ return pThread->PreemptiveGCDisabled();
+}
+
+void GCToEEInterface::EnablePreemptiveGC(Thread * pThread)
+{
+ return pThread->EnablePreemptiveGC();
+}
+
+void GCToEEInterface::DisablePreemptiveGC(Thread * pThread)
+{
+ pThread->DisablePreemptiveGC();
+}
+
+void GCToEEInterface::SetGCSpecial(Thread * pThread)
+{
+ pThread->SetGCSpecial(true);
+}
+
+alloc_context * GCToEEInterface::GetAllocContext(Thread * pThread)
+{
+ return pThread->GetAllocContext();
+}
+
+bool GCToEEInterface::CatchAtSafePoint(Thread * pThread)
+{
+ return pThread->CatchAtSafePoint();
+}
+
+// does not acquire thread store lock
+void GCToEEInterface::AttachCurrentThread()
+{
+ ThreadStore::AttachCurrentThread();
+}
+
+void GCToEEInterface::GcEnumAllocContexts (enum_alloc_context_func* fn, void* param)
+{
+ Thread * pThread = NULL;
+ while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
+ {
+ fn(pThread->GetAllocContext(), param);
+ }
+}
+
+void GCToEEInterface::SyncBlockCacheWeakPtrScan(HANDLESCANPROC /*scanProc*/, uintptr_t /*lp1*/, uintptr_t /*lp2*/)
+{
+}
+
+void GCToEEInterface::SyncBlockCacheDemote(int /*max_gen*/)
+{
+}
+
+void GCToEEInterface::SyncBlockCachePromotionsGranted(int /*max_gen*/)
+{
+}
+
+void FinalizerThread::EnableFinalization()
+{
+ // Signal to finalizer thread that there are objects to finalize
+ // TODO: Implement for finalization
+}
+
+bool FinalizerThread::HaveExtraWorkForFinalizer()
+{
+ return false;
+}
+
+bool PalStartBackgroundGCThread(BackgroundCallback callback, void* pCallbackContext)
+{
+ // TODO: Implement for background GC
+ return false;
+}
+
+bool IsGCSpecialThread()
+{
+ // TODO: Implement for background GC
+ return false;
+}
+
+void StompWriteBarrierEphemeral()
+{
+}
+
+void StompWriteBarrierResize(bool /*bReqUpperBoundsCheck*/)
+{
+}
+
+VOID LogSpewAlways(const char * /*fmt*/, ...)
+{
+}
+
+uint32_t CLRConfig::GetConfigValue(ConfigDWORDInfo eType)
+{
+ switch (eType)
+ {
+ case UNSUPPORTED_BGCSpinCount:
+ return 140;
+
+ case UNSUPPORTED_BGCSpin:
+ return 2;
+
+ case UNSUPPORTED_GCLogEnabled:
+ case UNSUPPORTED_GCLogFile:
+ case UNSUPPORTED_GCLogFileSize:
+ case EXTERNAL_GCStressStart:
+ case INTERNAL_GCStressStartAtJit:
+ case INTERNAL_DbgDACSkipVerifyDlls:
+ return 0;
+
+ case Config_COUNT:
+ default:
+#ifdef _MSC_VER
+#pragma warning(suppress:4127) // Constant conditional expression in ASSERT below
+#endif
+ ASSERT(!"Unknown config value type");
+ return 0;
+ }
+}
+
+HRESULT CLRConfig::GetConfigValue(ConfigStringInfo /*eType*/, wchar_t * * outVal)
+{
+ *outVal = NULL;
+ return 0;
+}
diff --git a/src/gc/sample/gcenv.h b/src/gc/sample/gcenv.h
index 1e391ab80c..c09d012ec4 100644
--- a/src/gc/sample/gcenv.h
+++ b/src/gc/sample/gcenv.h
@@ -16,11 +16,20 @@
#define _ASSERTE(_expr) ASSERT(_expr)
#endif
+typedef wchar_t WCHAR;
+#define W(s) L##s
+
#include "gcenv.structs.h"
#include "gcenv.base.h"
+#include "gcenv.ee.h"
+#include "gcenv.os.h"
+#include "gcenv.interlocked.h"
+#include "gcenv.interlocked.inl"
#include "gcenv.object.h"
#include "gcenv.sync.h"
+#define MAX_LONGPATH 1024
+
//
// Thread
//
diff --git a/src/gc/env/gcenv.unix.cpp b/src/gc/sample/gcenv.unix.cpp
index c9186d5d43..33d9bd649c 100644
--- a/src/gc/env/gcenv.unix.cpp
+++ b/src/gc/sample/gcenv.unix.cpp
@@ -115,6 +115,7 @@ void GetProcessMemoryLoad(GCMemoryStatus* pGCMemStatus)
}
}
+#if 0
void CLREventStatic::CreateManualEvent(bool bInitialState)
{
// TODO: Implement
@@ -196,6 +197,12 @@ uint32_t CLREventStatic::Wait(uint32_t dwMilliseconds, bool bAlertable)
return result;
}
+#endif // 0
+
+void DestroyThread(Thread * pThread)
+{
+ // TODO: implement
+}
bool __SwitchToThread(uint32_t dwSleepMSec, uint32_t dwSwitchCount)
{
@@ -328,6 +335,7 @@ int32_t g_TrapReturningThreads;
bool g_fFinalizerRunOnShutDown;
+#if 0
#ifdef _MSC_VER
__declspec(thread)
#else
@@ -361,18 +369,68 @@ void ThreadStore::AttachCurrentThread(bool fAcquireThreadStoreLock)
pThread->m_pNext = g_pThreadList;
g_pThreadList = pThread;
}
-
+#endif // 0
void DestroyThread(Thread * pThread)
{
// TODO: Implement
}
+#if 0
+void GCToEEInterface::SuspendEE(GCToEEInterface::SUSPEND_REASON reason)
+{
+ GCHeap::GetGCHeap()->SetGCInProgress(TRUE);
+
+ // TODO: Implement
+}
+
+void GCToEEInterface::RestartEE(bool bFinishedGC)
+{
+ // TODO: Implement
+
+ GCHeap::GetGCHeap()->SetGCInProgress(FALSE);
+}
+
+void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
+{
+ // TODO: Implement - Scan stack roots
+}
+
+void GCToEEInterface::GcStartWork(int condemned, int max_gen)
+{
+}
+
+void GCToEEInterface::AfterGcScanRoots(int condemned, int max_gen, ScanContext* sc)
+{
+}
+
+void GCToEEInterface::GcBeforeBGCSweepWork()
+{
+}
+
+void GCToEEInterface::GcDone(int condemned)
+{
+}
+
void FinalizerThread::EnableFinalization()
{
// Signal to finalizer thread that there are objects to finalize
// TODO: Implement for finalization
}
+bool PalStartBackgroundGCThread(BackgroundCallback callback, void* pCallbackContext)
+{
+ // TODO: Implement for background GC
+ return false;
+}
+
+bool IsGCSpecialThread()
+{
+ // TODO: Implement for background GC
+ return false;
+}
+
+#endif // 0
+
bool PalHasCapability(PalCapability capability)
{
// TODO: Implement for background GC
@@ -424,14 +482,6 @@ VirtualUnlock(
}
-WINBASEAPI
-VOID
-WINAPI
-FlushProcessWriteBuffers()
-{
- // TODO: Implement
-}
-
const int tccSecondsToMillieSeconds = 1000;
const int tccSecondsToMicroSeconds = 1000000;
const int tccMillieSecondsToMicroSeconds = 1000; // 10^3
diff --git a/src/gc/sample/gcenv.windows.cpp b/src/gc/sample/gcenv.windows.cpp
new file mode 100644
index 0000000000..c9f0edb1f2
--- /dev/null
+++ b/src/gc/sample/gcenv.windows.cpp
@@ -0,0 +1,446 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//
+// Implementation of the GC environment
+//
+
+#include "common.h"
+
+#include "windows.h"
+
+#include "gcenv.h"
+#include "gc.h"
+
+static LARGE_INTEGER performanceFrequency;
+
+MethodTable * g_pFreeObjectMethodTable;
+
+int32_t g_TrapReturningThreads;
+
+bool g_fFinalizerRunOnShutDown;
+
+GCSystemInfo g_SystemInfo;
+
+// Initialize the interface implementation
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::Initialize()
+{
+ if (!::QueryPerformanceFrequency(&performanceFrequency))
+ {
+ return false;
+ }
+
+ SYSTEM_INFO systemInfo;
+ GetSystemInfo(&systemInfo);
+
+ g_SystemInfo.dwNumberOfProcessors = systemInfo.dwNumberOfProcessors;
+ g_SystemInfo.dwPageSize = systemInfo.dwPageSize;
+ g_SystemInfo.dwAllocationGranularity = systemInfo.dwAllocationGranularity;
+
+ return true;
+}
+
+// Shutdown the interface implementation
+void GCToOSInterface::Shutdown()
+{
+}
+
+// Get numeric id of the current thread if possible on the
+// current platform. It is intended for logging purposes only.
+// Return:
+// Numeric id of the current thread, or 0 if it cannot be obtained
+uint32_t GCToOSInterface::GetCurrentThreadIdForLogging()
+{
+ return ::GetCurrentThreadId();
+}
+
+// Get id of the process
+// Return:
+// Id of the current process
+uint32_t GCToOSInterface::GetCurrentProcessId()
+{
+ return ::GetCurrentProcessId();
+}
+
+// Set ideal affinity for the current thread
+// Parameters:
+// affinity - ideal processor affinity for the thread
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::SetCurrentThreadIdealAffinity(GCThreadAffinity* affinity)
+{
+ bool success = true;
+
+#if !defined(FEATURE_CORESYSTEM)
+ SetThreadIdealProcessor(GetCurrentThread(), (DWORD)affinity->Processor);
+#elif !defined(FEATURE_PAL)
+ PROCESSOR_NUMBER proc;
+
+ if (affinity->Group != -1)
+ {
+ proc.Group = (WORD)affinity->Group;
+ proc.Number = (BYTE)affinity->Processor;
+ proc.Reserved = 0;
+
+ success = !!SetThreadIdealProcessorEx(GetCurrentThread(), &proc, NULL);
+ }
+ else
+ {
+ if (GetThreadIdealProcessorEx(GetCurrentThread(), &proc))
+ {
+ proc.Number = affinity->Processor;
+ success = !!SetThreadIdealProcessorEx(GetCurrentThread(), &proc, NULL);
+ }
+ }
+#endif
+
+ return success;
+}
+
+// Get the number of the current processor
+uint32_t GCToOSInterface::GetCurrentProcessorNumber()
+{
+ _ASSERTE(GCToOSInterface::CanGetCurrentProcessorNumber());
+ return ::GetCurrentProcessorNumber();
+}
+
+// Check if the OS supports getting current processor number
+bool GCToOSInterface::CanGetCurrentProcessorNumber()
+{
+ return true;
+}
+
+// Flush write buffers of processors that are executing threads of the current process
+void GCToOSInterface::FlushProcessWriteBuffers()
+{
+ ::FlushProcessWriteBuffers();
+}
+
+// Break into a debugger
+void GCToOSInterface::DebugBreak()
+{
+ ::DebugBreak();
+}
+
+// Get number of logical processors
+uint32_t GCToOSInterface::GetLogicalCpuCount()
+{
+ return g_SystemInfo.dwNumberOfProcessors;
+}
+
+// Causes the calling thread to sleep for the specified number of milliseconds
+// Parameters:
+// sleepMSec - time to sleep before switching to another thread
+void GCToOSInterface::Sleep(uint32_t sleepMSec)
+{
+ ::Sleep(sleepMSec);
+}
+
+// Causes the calling thread to yield execution to another thread that is ready to run on the current processor.
+// Parameters:
+// switchCount - number of times the YieldThread was called in a loop
+void GCToOSInterface::YieldThread(uint32_t switchCount)
+{
+ SwitchToThread();
+}
+
+// Reserve virtual memory range.
+// Parameters:
+// address - starting virtual address, it can be NULL to let the function choose the starting address
+// size - size of the virtual memory range
+// alignment - requested memory alignment
+// flags - flags to control special settings like write watching
+// Return:
+// Starting virtual address of the reserved range
+void* GCToOSInterface::VirtualReserve(void* address, size_t size, size_t alignment, uint32_t flags)
+{
+ DWORD memFlags = (flags & VirtualReserveFlags::WriteWatch) ? (MEM_RESERVE | MEM_WRITE_WATCH) : MEM_RESERVE;
+ return ::VirtualAlloc(0, size, memFlags, PAGE_READWRITE);
+}
+
+// Release virtual memory range previously reserved using VirtualReserve
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualRelease(void* address, size_t size)
+{
+ UNREFERENCED_PARAMETER(size);
+ return !!::VirtualFree(address, 0, MEM_RELEASE);
+}
+
+// Commit virtual memory range. It must be part of a range reserved using VirtualReserve.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualCommit(void* address, size_t size)
+{
+ return ::VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE) != NULL;
+}
+
+// Decommit virtual memory range.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualDecommit(void* address, size_t size)
+{
+ return !!::VirtualFree(address, size, MEM_DECOMMIT);
+}
+
+// Reset virtual memory range. Indicates that data in the memory range specified by address and size is no
+// longer of interest, but it should not be decommitted.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// unlock - true if the memory range should also be unlocked
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualReset(void * address, size_t size, bool unlock)
+{
+ bool success = ::VirtualAlloc(address, size, MEM_RESET, PAGE_READWRITE) != NULL;
+#ifndef FEATURE_PAL
+ if (success && unlock)
+ {
+ // Remove the page range from the working set
+ ::VirtualUnlock(address, size);
+ }
+#endif // FEATURE_PAL
+
+ return success;
+}
+
+// Check if the OS supports write watching
+bool GCToOSInterface::SupportsWriteWatch()
+{
+ return false;
+}
+
+// Reset the write tracking state for the specified virtual memory range.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+void GCToOSInterface::ResetWriteWatch(void* address, size_t size)
+{
+}
+
+// Retrieve addresses of the pages that are written to in a region of virtual memory
+// Parameters:
+// resetState - true indicates to reset the write tracking state
+// address - starting virtual address
+// size - size of the virtual memory range
+// pageAddresses - buffer that receives an array of page addresses in the memory region
+// pageAddressesCount - on input, size of the lpAddresses array, in array elements
+// on output, the number of page addresses that are returned in the array.
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::GetWriteWatch(bool resetState, void* address, size_t size, void** pageAddresses, uintptr_t* pageAddressesCount)
+{
+ return false;
+}
+
+// Get size of the largest cache on the processor die
+// Parameters:
+// trueSize - true to return true cache size, false to return scaled up size based on
+// the processor architecture
+// Return:
+// Size of the cache
+size_t GCToOSInterface::GetLargestOnDieCacheSize(bool trueSize)
+{
+ // TODO: implement
+ return 0;
+}
+
+// Get affinity mask of the current process
+// Parameters:
+// processMask - affinity mask for the specified process
+// systemMask - affinity mask for the system
+// Return:
+// true if it has succeeded, false if it has failed
+// Remarks:
+// A process affinity mask is a bit vector in which each bit represents the processors that
+// a process is allowed to run on. A system affinity mask is a bit vector in which each bit
+// represents the processors that are configured into a system.
+// A process affinity mask is a subset of the system affinity mask. A process is only allowed
+// to run on the processors configured into a system. Therefore, the process affinity mask cannot
+// specify a 1 bit for a processor when the system affinity mask specifies a 0 bit for that processor.
+bool GCToOSInterface::GetCurrentProcessAffinityMask(uintptr_t* processMask, uintptr_t* systemMask)
+{
+ return false;
+}
+
+// Get number of processors assigned to the current process
+// Return:
+// The number of processors
+uint32_t GCToOSInterface::GetCurrentProcessCpuCount()
+{
+ return g_SystemInfo.dwNumberOfProcessors;
+}
+
+// Get global memory status
+// Parameters:
+// ms - pointer to the structure that will be filled in with the memory status
+void GCToOSInterface::GetMemoryStatus(GCMemoryStatus* ms)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ MEMORYSTATUSEX memStatus;
+
+ memStatus.dwLength = sizeof(MEMORYSTATUSEX);
+ BOOL fRet = GlobalMemoryStatusEx(&memStatus);
+ _ASSERTE (fRet);
+
+ // If the machine has more RAM than virtual address limit, let us cap it.
+ // Our GC can never use more than virtual address limit.
+ if (memStatus.ullAvailPhys > memStatus.ullTotalVirtual)
+ {
+ memStatus.ullAvailPhys = memStatus.ullAvailVirtual;
+ }
+
+ // Convert Windows struct to abstract struct
+ ms->dwMemoryLoad = memStatus.dwMemoryLoad ;
+ ms->ullTotalPhys = memStatus.ullTotalPhys ;
+ ms->ullAvailPhys = memStatus.ullAvailPhys ;
+ ms->ullTotalPageFile = memStatus.ullTotalPageFile ;
+ ms->ullAvailPageFile = memStatus.ullAvailPageFile ;
+ ms->ullTotalVirtual = memStatus.ullTotalVirtual ;
+ ms->ullAvailVirtual = memStatus.ullAvailVirtual ;
+}
+
+// Get a high precision performance counter
+// Return:
+// The counter value
+int64_t GCToOSInterface::QueryPerformanceCounter()
+{
+ LARGE_INTEGER ts;
+ if (!::QueryPerformanceCounter(&ts))
+ {
+ _ASSERTE(!"Fatal Error - cannot query performance counter.");
+ abort();
+ }
+
+ return ts.QuadPart;
+}
+
+// Get a frequency of the high precision performance counter
+// Return:
+// The counter frequency
+int64_t GCToOSInterface::QueryPerformanceFrequency()
+{
+ LARGE_INTEGER frequency;
+ if (!::QueryPerformanceFrequency(&frequency))
+ {
+ _ASSERTE(!"Fatal Error - cannot query performance counter.");
+ abort();
+ }
+
+ return frequency.QuadPart;
+}
+
+// Get a time stamp with a low precision
+// Return:
+// Time stamp in milliseconds
+uint32_t GCToOSInterface::GetLowPrecisionTimeStamp()
+{
+ return ::GetTickCount();
+}
+
+// Parameters of the GC thread stub
+struct GCThreadStubParam
+{
+ GCThreadFunction GCThreadFunction;
+ void* GCThreadParam;
+};
+
+// GC thread stub to convert GC thread function to an OS specific thread function
+static DWORD GCThreadStub(void* param)
+{
+ GCThreadStubParam *stubParam = (GCThreadStubParam*)param;
+ stubParam->GCThreadFunction(stubParam->GCThreadParam);
+
+ return 0;
+}
+
+// Create a new thread
+// Parameters:
+// function - the function to be executed by the thread
+// param - parameters of the thread
+// affinity - processor affinity of the thread
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::CreateThread(GCThreadFunction function, void* param, GCThreadAffinity* affinity)
+{
+ DWORD thread_id;
+ GCThreadStubParam stubParam;
+
+ stubParam.GCThreadFunction = function;
+ stubParam.GCThreadParam = param;
+
+ HANDLE gc_thread = ::CreateThread(NULL, 0, GCThreadStub, &stubParam, CREATE_SUSPENDED, &thread_id);
+
+ if (!gc_thread)
+ {
+ return false;
+ }
+
+ SetThreadPriority(gc_thread, /* THREAD_PRIORITY_ABOVE_NORMAL );*/ THREAD_PRIORITY_HIGHEST );
+
+ ResumeThread(gc_thread);
+
+ CloseHandle(gc_thread);
+
+ return true;
+}
+
+// Open a file
+// Parameters:
+// filename - name of the file to open
+// mode - mode to open the file in (like in the CRT fopen)
+// Return:
+// FILE* of the opened file
+FILE* GCToOSInterface::OpenFile(const WCHAR* filename, const WCHAR* mode)
+{
+ return _wfopen(filename, mode);
+}
+
+// Initialize the critical section
+void CLRCriticalSection::Initialize()
+{
+ ::InitializeCriticalSection(&m_cs);
+}
+
+// Destroy the critical section
+void CLRCriticalSection::Destroy()
+{
+ ::DeleteCriticalSection(&m_cs);
+}
+
+// Enter the critical section. Blocks until the section can be entered.
+void CLRCriticalSection::Enter()
+{
+ ::EnterCriticalSection(&m_cs);
+}
+
+// Leave the critical section
+void CLRCriticalSection::Leave()
+{
+ ::LeaveCriticalSection(&m_cs);
+}
+
+void DestroyThread(Thread * pThread)
+{
+ // TODO: implement
+}
diff --git a/src/pal/inc/pal.h b/src/pal/inc/pal.h
index cec19c290e..c139efcba8 100644
--- a/src/pal/inc/pal.h
+++ b/src/pal/inc/pal.h
@@ -3547,6 +3547,7 @@ SetErrorMode(
#define MEM_RESERVE 0x2000
#define MEM_DECOMMIT 0x4000
#define MEM_RELEASE 0x8000
+#define MEM_RESET 0x80000
#define MEM_FREE 0x10000
#define MEM_PRIVATE 0x20000
#define MEM_MAPPED 0x40000
diff --git a/src/pal/src/exception/seh.cpp b/src/pal/src/exception/seh.cpp
index 87bc677296..7acb3dce1e 100644
--- a/src/pal/src/exception/seh.cpp
+++ b/src/pal/src/exception/seh.cpp
@@ -77,20 +77,16 @@ Return value :
BOOL
SEHInitialize (CPalThread *pthrCurrent, DWORD flags)
{
- BOOL bRet = FALSE;
-
#if !HAVE_MACH_EXCEPTIONS
if (!SEHInitializeSignals())
{
ERROR("SEHInitializeSignals failed!\n");
SEHCleanup();
- goto SEHInitializeExit;
+ return FALSE;
}
#endif
- bRet = TRUE;
-SEHInitializeExit:
- return bRet;
+ return TRUE;
}
/*++
diff --git a/src/pal/src/misc/time.cpp b/src/pal/src/misc/time.cpp
index 939c86996a..17fe037372 100644
--- a/src/pal/src/misc/time.cpp
+++ b/src/pal/src/misc/time.cpp
@@ -202,6 +202,7 @@ QueryPerformanceCounter(
PERF_ENTRY(QueryPerformanceCounter);
ENTRY("QueryPerformanceCounter()\n");
+ do
#if HAVE_CLOCK_MONOTONIC
{
struct timespec ts;
@@ -209,7 +210,7 @@ QueryPerformanceCounter(
{
ASSERT("clock_gettime(CLOCK_MONOTONIC) failed; errno is %d (%s)\n", errno, strerror(errno));
retval = FALSE;
- goto EXIT;
+ break;
}
lpPerformanceCount->QuadPart =
(LONGLONG)ts.tv_sec * (LONGLONG)tccSecondsToNanoSeconds + (LONGLONG)ts.tv_nsec;
@@ -230,7 +231,7 @@ QueryPerformanceCounter(
{
ASSERT("time_base_to_time() failed; errno is %d (%s)\n", errno, strerror(errno));
retval = FALSE;
- goto EXIT;
+ break;
}
lpPerformanceCount->QuadPart =
(LONGLONG)tb.tb_high * (LONGLONG)tccSecondsToNanoSeconds + (LONGLONG)tb.tb_low;
@@ -242,13 +243,14 @@ QueryPerformanceCounter(
{
ASSERT("gettimeofday() failed; errno is %d (%s)\n", errno, strerror(errno));
retval = FALSE;
- goto EXIT;
+ break;
}
lpPerformanceCount->QuadPart =
(LONGLONG)tv.tv_sec * (LONGLONG)tccSecondsToMicroSeconds + (LONGLONG)tv.tv_usec;
}
#endif // HAVE_CLOCK_MONOTONIC
-EXIT:
+ while (false);
+
LOGEXIT("QueryPerformanceCounter\n");
PERF_EXIT(QueryPerformanceCounter);
return retval;
diff --git a/src/pal/src/thread/context.cpp b/src/pal/src/thread/context.cpp
index ebd4383a71..dfb1c4baf1 100644
--- a/src/pal/src/thread/context.cpp
+++ b/src/pal/src/thread/context.cpp
@@ -220,7 +220,6 @@ BOOL CONTEXT_GetRegisters(DWORD processId, LPCONTEXT lpContext)
bRet = TRUE;
#if HAVE_BSD_REGS_T
-EXIT :
if (regFd != -1)
{
close(regFd);
diff --git a/src/pal/tests/palsuite/eventprovider/CMakeLists.txt b/src/pal/tests/palsuite/eventprovider/CMakeLists.txt
index 32b59592ee..41289f92f2 100644
--- a/src/pal/tests/palsuite/eventprovider/CMakeLists.txt
+++ b/src/pal/tests/palsuite/eventprovider/CMakeLists.txt
@@ -10,7 +10,6 @@ set(SOURCES
include_directories(${COREPAL_SOURCE_DIR}/prebuilt/inc)
include_directories(${COREPAL_SOURCE_DIR}/inc/rt)
-
add_executable(eventprovidertest
${SOURCES}
)
@@ -18,6 +17,8 @@ set(EVENT_PROVIDER_DEPENDENCIES "")
set(EVENT_PROVIDER_LINKER_OTPTIONS "")
if(CMAKE_SYSTEM_NAME STREQUAL Linux)
+ add_definitions(-DFEATURE_EVENT_TRACE=1)
+
list(APPEND EVENT_PROVIDER_DEPENDENCIES
eventprovider
)
diff --git a/src/strongname/api/api.props b/src/strongname/api/api.props
index 2c4c278a69..cba28116b8 100644
--- a/src/strongname/api/api.props
+++ b/src/strongname/api/api.props
@@ -7,7 +7,8 @@
$(Clrbase)\src\inc;
$(Clrbase)\src\md\inc;
$(Clrbase)\src\md\compiler;
- $(Clrbase)\src\vm
+ $(Clrbase)\src\vm;
+ $(Clrbase)\src\gc\env
</UserIncludes>
<UserIncludes Condition="'$(StrongnameInVm)' == 'true'">
diff --git a/src/strongname/api/common.h b/src/strongname/api/common.h
index 45f14a8b2b..ea315e44ac 100644
--- a/src/strongname/api/common.h
+++ b/src/strongname/api/common.h
@@ -270,6 +270,9 @@ namespace Loader
#if STRONGNAME_IN_VM
// src/vm
+#include "gcenv.interlocked.h"
+#include "gcenv.interlocked.inl"
+
#include "util.hpp"
#include "ibclogger.h"
#include "eepolicy.h"
diff --git a/src/vm/CMakeLists.txt b/src/vm/CMakeLists.txt
index e6238ed56f..964c987532 100644
--- a/src/vm/CMakeLists.txt
+++ b/src/vm/CMakeLists.txt
@@ -195,7 +195,8 @@ set(VM_SOURCES_WKS
finalizerthread.cpp
frameworkexceptionloader.cpp
gccover.cpp
- gcenv.cpp
+ gcenv.ee.cpp
+ gcenv.os.cpp
gchelpers.cpp
genmeth.cpp
../gc/gceesvr.cpp
diff --git a/src/vm/appdomain.cpp b/src/vm/appdomain.cpp
index d89146dbfa..c9f6588cdd 100644
--- a/src/vm/appdomain.cpp
+++ b/src/vm/appdomain.cpp
@@ -14,7 +14,6 @@
#include "excep.h"
#include "eeconfig.h"
#include "gc.h"
-#include "gcenv.h"
#include "eventtrace.h"
#ifdef FEATURE_FUSION
#include "assemblysink.h"
diff --git a/src/vm/common.h b/src/vm/common.h
index e323333ef5..2d23f8c471 100644
--- a/src/vm/common.h
+++ b/src/vm/common.h
@@ -288,7 +288,6 @@ namespace Loader
} LoadFlag;
}
-
// src/inc
#include "utilcode.h"
#include "log.h"
@@ -297,6 +296,9 @@ namespace Loader
#include "lazycow.h"
// src/vm
+#include "gcenv.interlocked.h"
+#include "gcenv.interlocked.inl"
+
#include "util.hpp"
#include "ibclogger.h"
#include "eepolicy.h"
diff --git a/src/vm/crst.cpp b/src/vm/crst.cpp
index 72665493a4..9ecdee748d 100644
--- a/src/vm/crst.cpp
+++ b/src/vm/crst.cpp
@@ -653,9 +653,9 @@ void CrstBase::PostEnter()
}
_ASSERTE((m_entercount == 0 && m_holderthreadid.IsUnknown()) ||
- m_holderthreadid.IsSameThread() ||
+ m_holderthreadid.IsCurrentThread() ||
IsAtProcessExit());
- m_holderthreadid.SetThreadId();
+ m_holderthreadid.SetToCurrentThread();
m_entercount++;
if (m_entercount == 1)
@@ -715,7 +715,7 @@ void CrstBase::PreLeave()
_ASSERTE(m_entercount > 0);
m_entercount--;
if (!m_entercount) {
- m_holderthreadid.ResetThreadId();
+ m_holderthreadid.Clear();
// Delink it from the Thread's chain of OwnedChain
if (m_prev)
@@ -796,7 +796,7 @@ void CrstBase::DebugInit(CrstType crstType, CrstFlags flags)
m_crstType = crstType;
m_tag = GetCrstName(crstType);
m_crstlevel = GetCrstLevel(crstType);
- m_holderthreadid.ResetThreadId();
+ m_holderthreadid.Clear();
m_entercount = 0;
m_next = NULL;
m_prev = NULL;
@@ -868,7 +868,7 @@ void CrstBase::DebugDestroy()
}
FillMemory(&m_criticalsection, sizeof(m_criticalsection), 0xcc);
- m_holderthreadid.ResetThreadId();
+ m_holderthreadid.Clear();
m_entercount = 0xcccccccc;
m_next = (CrstBase*)POISONC;
@@ -914,7 +914,7 @@ BOOL CrstBase::IsSafeToTake()
(GCHeap::IsGCInProgress() && pThread == ThreadSuspend::GetSuspensionThread()));
END_GETTHREAD_ALLOWED;
- if (m_holderthreadid.IsSameThread())
+ if (m_holderthreadid.IsCurrentThread())
{
// If we already hold it, we can't violate level order.
// Check if client wanted to allow reentrancy.
@@ -949,7 +949,7 @@ BOOL CrstBase::IsSafeToTake()
for (CrstBase *pcrst = GetThreadsOwnedCrsts(); pcrst != NULL; pcrst = pcrst->m_next)
{
fSafe =
- !pcrst->m_holderthreadid.IsSameThread()
+ !pcrst->m_holderthreadid.IsCurrentThread()
|| (pcrst->m_crstlevel == CRSTUNORDERED)
|| (pcrst->m_crstlevel > m_crstlevel)
|| (pcrst->m_crstlevel == m_crstlevel && (m_dwFlags & CRST_UNSAFE_SAMELEVEL) != 0);
diff --git a/src/vm/crst.h b/src/vm/crst.h
index 010cb5f8fb..730327e20d 100644
--- a/src/vm/crst.h
+++ b/src/vm/crst.h
@@ -261,7 +261,7 @@ public:
#ifdef CROSSGEN_COMPILE
return TRUE;
#else
- return m_holderthreadid.IsSameThread();
+ return m_holderthreadid.IsCurrentThread();
#endif
}
diff --git a/src/vm/eehash.inl b/src/vm/eehash.inl
index 6a09b29344..9910b5525b 100644
--- a/src/vm/eehash.inl
+++ b/src/vm/eehash.inl
@@ -17,7 +17,7 @@ BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::OwnLock()
return TRUE;
if (m_pfnLockOwner == NULL) {
- return m_writerThreadId.IsSameThread();
+ return m_writerThreadId.IsCurrentThread();
}
else {
BOOL ret = m_pfnLockOwner(m_lockData);
@@ -211,7 +211,7 @@ BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::Init(DWORD dwNumBucke
}
if (m_pfnLockOwner == NULL) {
- m_writerThreadId.SetThreadId();
+ m_writerThreadId.SetToCurrentThread();
}
m_CheckThreadSafety = CheckThreadSafety;
#endif
diff --git a/src/vm/gcenv.cpp b/src/vm/gcenv.ee.cpp
index 99880b54a1..ab1d66f82b 100644
--- a/src/vm/gcenv.cpp
+++ b/src/vm/gcenv.ee.cpp
@@ -4,7 +4,7 @@
//
/*
- * GCENV.CPP
+ * GCENV.EE.CPP
*
* GCToEEInterface implementation
*
@@ -36,7 +36,7 @@ void GCToEEInterface::SuspendEE(SUSPEND_REASON reason)
ThreadSuspend::SuspendEE((ThreadSuspend::SUSPEND_REASON)reason);
}
-void GCToEEInterface::RestartEE(BOOL bFinishedGC)
+void GCToEEInterface::RestartEE(bool bFinishedGC)
{
WRAPPER_NO_CONTRACT;
@@ -428,7 +428,7 @@ StackWalkAction GcStackCrawlCallBack(CrawlFrame* pCF, VOID* pData)
return SWA_CONTINUE;
}
-VOID GCToEEInterface::SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, LPARAM lp1, LPARAM lp2)
+VOID GCToEEInterface::SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2)
{
CONTRACTL
{
@@ -726,3 +726,21 @@ void GCToEEInterface::GcEnumAllocContexts(enum_alloc_context_func* fn, void* par
fn(pThread->GetAllocContext(), param);
}
}
+
+bool GCToEEInterface::IsPreemptiveGCDisabled(Thread * pThread)
+{
+ WRAPPER_NO_CONTRACT;
+ return !!pThread->PreemptiveGCDisabled();
+}
+
+void GCToEEInterface::EnablePreemptiveGC(Thread * pThread)
+{
+ WRAPPER_NO_CONTRACT;
+ pThread->EnablePreemptiveGC();
+}
+
+void GCToEEInterface::DisablePreemptiveGC(Thread * pThread)
+{
+ WRAPPER_NO_CONTRACT;
+ pThread->DisablePreemptiveGC();
+}
diff --git a/src/vm/gcenv.ee.h b/src/vm/gcenv.ee.h
new file mode 100644
index 0000000000..2baf24ca36
--- /dev/null
+++ b/src/vm/gcenv.ee.h
@@ -0,0 +1,6 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+#include "../gc/env/gcenv.ee.h"
diff --git a/src/vm/gcenv.h b/src/vm/gcenv.h
index 73e65852a7..7ddbe5e46a 100644
--- a/src/vm/gcenv.h
+++ b/src/vm/gcenv.h
@@ -45,96 +45,14 @@
#include <mscoruefwrapper.h>
#endif // FEATURE_UEF_CHAINMANAGER
-
-struct ScanContext;
-class CrawlFrame;
-
-typedef void promote_func(PTR_PTR_Object, ScanContext*, uint32_t);
-
-typedef void enum_alloc_context_func(alloc_context*, void*);
-
-typedef struct
-{
- promote_func* f;
- ScanContext* sc;
- CrawlFrame * cf;
-} GCCONTEXT;
-
-
-class GCToEEInterface
-{
-public:
- //
- // Suspend/Resume callbacks
- //
- typedef enum
- {
- SUSPEND_FOR_GC = 1,
- SUSPEND_FOR_GC_PREP = 6
- } SUSPEND_REASON;
-
- static void SuspendEE(SUSPEND_REASON reason);
- static void RestartEE(BOOL bFinishedGC); //resume threads.
-
- //
- // The GC roots enumeration callback
- //
- static void GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc);
-
- //
- // Callbacks issues during GC that the execution engine can do its own bookeeping
- //
-
- // start of GC call back - single threaded
- static void GcStartWork(int condemned, int max_gen);
-
- //EE can perform post stack scanning action, while the
- // user threads are still suspended
- static void AfterGcScanRoots(int condemned, int max_gen, ScanContext* sc);
-
- // Called before BGC starts sweeping, the heap is walkable
- static void GcBeforeBGCSweepWork();
-
- // post-gc callback.
- static void GcDone(int condemned);
-
- // Promote refcounted handle callback
- static bool RefCountedHandleCallbacks(Object * pObject);
-
- // Sync block cache management
- static void SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, LPARAM lp1, LPARAM lp2);
- static void SyncBlockCacheDemote(int max_gen);
- static void SyncBlockCachePromotionsGranted(int max_gen);
-
- // Thread functions
- static bool IsPreemptiveGCDisabled(Thread * pThread)
- {
- WRAPPER_NO_CONTRACT;
- return !!pThread->PreemptiveGCDisabled();
- }
-
- static void EnablePreemptiveGC(Thread * pThread)
- {
- WRAPPER_NO_CONTRACT;
- pThread->EnablePreemptiveGC();
- }
-
- static void DisablePreemptiveGC(Thread * pThread)
- {
- WRAPPER_NO_CONTRACT;
- pThread->DisablePreemptiveGC();
- }
-
- static void SetGCSpecial(Thread * pThread);
- static alloc_context * GetAllocContext(Thread * pThread);
- static bool CatchAtSafePoint(Thread * pThread);
-
- static void GcEnumAllocContexts(enum_alloc_context_func* fn, void* param);
-};
-
#define GCMemoryStatus MEMORYSTATUSEX
-#define CLR_MUTEX_COOKIE MUTEX_COOKIE
+#include "util.hpp"
+
+#include "gcenv.ee.h"
+#include "gcenv.os.h"
+#include "gcenv.interlocked.h"
+#include "gcenv.interlocked.inl"
namespace ETW
{
diff --git a/src/vm/gcenv.interlocked.h b/src/vm/gcenv.interlocked.h
new file mode 100644
index 0000000000..39c6ad3078
--- /dev/null
+++ b/src/vm/gcenv.interlocked.h
@@ -0,0 +1,6 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+#include "../gc/env/gcenv.interlocked.h"
diff --git a/src/vm/gcenv.interlocked.inl b/src/vm/gcenv.interlocked.inl
new file mode 100644
index 0000000000..8d334168dc
--- /dev/null
+++ b/src/vm/gcenv.interlocked.inl
@@ -0,0 +1,6 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+#include "../gc/env/gcenv.interlocked.inl"
diff --git a/src/vm/gcenv.os.cpp b/src/vm/gcenv.os.cpp
new file mode 100644
index 0000000000..4fdd34e9a0
--- /dev/null
+++ b/src/vm/gcenv.os.cpp
@@ -0,0 +1,520 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*
+ * gcenv.os.cpp
+ *
+ * GCToOSInterface implementation
+ *
+
+ *
+ */
+
+#include "common.h"
+#include "gcenv.h"
+
+// Initialize the interface implementation
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::Initialize()
+{
+ LIMITED_METHOD_CONTRACT;
+ return true;
+}
+
+// Shutdown the interface implementation
+void GCToOSInterface::Shutdown()
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+// Get numeric id of the current thread if possible on the
+// current platform. It is intended for logging purposes only.
+// Return:
+// Numeric id of the current thread, or 0 if it cannot be obtained.
+uint32_t GCToOSInterface::GetCurrentThreadIdForLogging()
+{
+ LIMITED_METHOD_CONTRACT;
+ return ::GetCurrentThreadId();
+}
+
+// Get id of the process
+// Return:
+// Id of the current process
+uint32_t GCToOSInterface::GetCurrentProcessId()
+{
+ LIMITED_METHOD_CONTRACT;
+ return ::GetCurrentProcessId();
+}
+
+// Set ideal affinity for the current thread
+// Parameters:
+// affinity - ideal processor affinity for the thread
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::SetCurrentThreadIdealAffinity(GCThreadAffinity* affinity)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ bool success = true;
+
+#if !defined(FEATURE_CORESYSTEM)
+ SetThreadIdealProcessor(GetCurrentThread(), (DWORD)affinity->Processor);
+#elif !defined(FEATURE_PAL)
+ PROCESSOR_NUMBER proc;
+
+ if (affinity->Group != -1)
+ {
+ proc.Group = (WORD)affinity->Group;
+ proc.Number = (BYTE)affinity->Processor;
+ proc.Reserved = 0;
+
+ success = !!SetThreadIdealProcessorEx(GetCurrentThread(), &proc, NULL);
+ }
+ else
+ {
+ if (GetThreadIdealProcessorEx(GetCurrentThread(), &proc))
+ {
+ proc.Number = affinity->Processor;
+ success = !!SetThreadIdealProcessorEx(GetCurrentThread(), &proc, NULL);
+ }
+ }
+#endif
+
+ return success;
+}
+
+// Get the number of the current processor
+uint32_t GCToOSInterface::GetCurrentProcessorNumber()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(CanGetCurrentProcessorNumber());
+ return ::GetCurrentProcessorNumber();
+}
+
+// Check if the OS supports getting current processor number
+bool GCToOSInterface::CanGetCurrentProcessorNumber()
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef FEATURE_PAL
+ return PAL_HasGetCurrentProcessorNumber();
+#else
+ // this API exists on all Windows platforms that we support
+ return true;
+#endif
+}
+
+// Flush write buffers of processors that are executing threads of the current process
+void GCToOSInterface::FlushProcessWriteBuffers()
+{
+ LIMITED_METHOD_CONTRACT;
+ ::FlushProcessWriteBuffers();
+}
+
+// Break into a debugger
+void GCToOSInterface::DebugBreak()
+{
+ LIMITED_METHOD_CONTRACT;
+ ::DebugBreak();
+}
+
+// Get number of logical processors
+uint32_t GCToOSInterface::GetLogicalCpuCount()
+{
+ LIMITED_METHOD_CONTRACT;
+ return ::GetLogicalCpuCount();
+}
+
+// Causes the calling thread to sleep for the specified number of milliseconds
+// Parameters:
+// sleepMSec - time to sleep before switching to another thread
+void GCToOSInterface::Sleep(uint32_t sleepMSec)
+{
+ LIMITED_METHOD_CONTRACT;
+ __SwitchToThread(sleepMSec, 0);
+}
+
+// Causes the calling thread to yield execution to another thread that is ready to run on the current processor.
+// Parameters:
+// switchCount - number of times the YieldThread was called in a loop
+void GCToOSInterface::YieldThread(uint32_t switchCount)
+{
+ LIMITED_METHOD_CONTRACT;
+ __SwitchToThread(0, switchCount);
+}
+
+// Reserve virtual memory range.
+// Parameters:
+// address - starting virtual address, it can be NULL to let the function choose the starting address
+// size - size of the virtual memory range
+// alignment - requested memory alignment
+// flags - flags to control special settings like write watching
+// Return:
+// Starting virtual address of the reserved range
+void* GCToOSInterface::VirtualReserve(void* address, size_t size, size_t alignment, uint32_t flags)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD memFlags = (flags & VirtualReserveFlags::WriteWatch) ? (MEM_RESERVE | MEM_WRITE_WATCH) : MEM_RESERVE;
+ if (alignment == 0)
+ {
+ return ::ClrVirtualAlloc(0, size, memFlags, PAGE_READWRITE);
+ }
+ else
+ {
+ return ::ClrVirtualAllocAligned(0, size, memFlags, PAGE_READWRITE, alignment);
+ }
+}
+
+// Release virtual memory range previously reserved using VirtualReserve
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualRelease(void* address, size_t size)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ UNREFERENCED_PARAMETER(size);
+ return !!::ClrVirtualFree(address, 0, MEM_RELEASE);
+}
+
+// Commit virtual memory range. It must be part of a range reserved using VirtualReserve.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualCommit(void* address, size_t size)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return ::ClrVirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE) != NULL;
+}
+
+// Decommit virtual memory range.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualDecommit(void* address, size_t size)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return !!::ClrVirtualFree(address, size, MEM_DECOMMIT);
+}
+
+// Reset virtual memory range. Indicates that data in the memory range specified by address and size is no
+// longer of interest, but it should not be decommitted.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// unlock - true if the memory range should also be unlocked
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualReset(void * address, size_t size, bool unlock)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ bool success = ::ClrVirtualAlloc(address, size, MEM_RESET, PAGE_READWRITE) != NULL;
+#ifndef FEATURE_PAL
+ if (success && unlock)
+ {
+ // Remove the page range from the working set
+ ::VirtualUnlock(address, size);
+ }
+#endif // FEATURE_PAL
+
+ return success;
+}
+
+// Check if the OS supports write watching
+bool GCToOSInterface::SupportsWriteWatch()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ bool writeWatchSupported = false;
+
+ // check if the OS supports write-watch.
+ // Drawbridge does not support write-watch so we still need to do the runtime detection for them.
+ // Otherwise, all currently supported OSes do support write-watch.
+ void* mem = VirtualReserve (0, g_SystemInfo.dwAllocationGranularity, 0, VirtualReserveFlags::WriteWatch);
+ if (mem != NULL)
+ {
+ VirtualRelease (mem, g_SystemInfo.dwAllocationGranularity);
+ writeWatchSupported = true;
+ }
+
+ return writeWatchSupported;
+}
+
+// Reset the write tracking state for the specified virtual memory range.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+void GCToOSInterface::ResetWriteWatch(void* address, size_t size)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ ::ResetWriteWatch(address, size);
+}
+
+// Retrieve addresses of the pages that are written to in a region of virtual memory
+// Parameters:
+// resetState - true indicates to reset the write tracking state
+// address - starting virtual address
+// size - size of the virtual memory range
+// pageAddresses - buffer that receives an array of page addresses in the memory region
+// pageAddressesCount - on input, size of the lpAddresses array, in array elements
+// on output, the number of page addresses that are returned in the array.
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::GetWriteWatch(bool resetState, void* address, size_t size, void** pageAddresses, uintptr_t* pageAddressesCount)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ uint32_t flags = resetState ? 1 : 0;
+ ULONG granularity;
+
+ bool success = ::GetWriteWatch(flags, address, size, pageAddresses, (ULONG_PTR*)pageAddressesCount, &granularity) == 0;
+ _ASSERTE (granularity == OS_PAGE_SIZE);
+
+ return success;
+}
+
+// Get size of the largest cache on the processor die
+// Parameters:
+// trueSize - true to return true cache size, false to return scaled up size based on
+// the processor architecture
+// Return:
+// Size of the cache
+size_t GCToOSInterface::GetLargestOnDieCacheSize(bool trueSize)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return ::GetLargestOnDieCacheSize(trueSize);
+}
+
+// Get affinity mask of the current process
+// Parameters:
+// processMask - affinity mask for the specified process
+// systemMask - affinity mask for the system
+// Return:
+// true if it has succeeded, false if it has failed
+// Remarks:
+// A process affinity mask is a bit vector in which each bit represents the processors that
+// a process is allowed to run on. A system affinity mask is a bit vector in which each bit
+// represents the processors that are configured into a system.
+// A process affinity mask is a subset of the system affinity mask. A process is only allowed
+// to run on the processors configured into a system. Therefore, the process affinity mask cannot
+// specify a 1 bit for a processor when the system affinity mask specifies a 0 bit for that processor.
+bool GCToOSInterface::GetCurrentProcessAffinityMask(uintptr_t* processMask, uintptr_t* systemMask)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifndef FEATURE_CORECLR
+ return !!::GetProcessAffinityMask(GetCurrentProcess(), (PDWORD_PTR)processMask, (PDWORD_PTR)systemMask);
+#else
+ return false;
+#endif
+}
+
+// Get number of processors assigned to the current process
+// Return:
+// The number of processors
+uint32_t GCToOSInterface::GetCurrentProcessCpuCount()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return ::GetCurrentProcessCpuCount();
+}
+
+// Get global memory status
+// Parameters:
+// ms - pointer to the structure that will be filled in with the memory status
+void GCToOSInterface::GetMemoryStatus(GCMemoryStatus* ms)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ MEMORYSTATUSEX msEx;
+ msEx.dwLength = sizeof(MEMORYSTATUSEX);
+
+ ::GetProcessMemoryLoad(&msEx);
+
+ // Convert Windows struct to abstract struct
+ ms->dwMemoryLoad = msEx.dwMemoryLoad;
+ ms->ullTotalPhys = msEx.ullTotalPhys;
+ ms->ullAvailPhys = msEx.ullAvailPhys;
+ ms->ullTotalPageFile = msEx.ullTotalPageFile;
+ ms->ullAvailPageFile = msEx.ullAvailPageFile;
+ ms->ullTotalVirtual = msEx.ullTotalVirtual;
+ ms->ullAvailVirtual = msEx.ullAvailVirtual;
+}
+
+// Get a high precision performance counter
+// Return:
+// The counter value
+int64_t GCToOSInterface::QueryPerformanceCounter()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ LARGE_INTEGER ts;
+ if (!::QueryPerformanceCounter(&ts))
+ {
+ DebugBreak();
+ _ASSERTE(!"Fatal Error - cannot query performance counter.");
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); // TODO: fatal error
+ }
+
+ return ts.QuadPart;
+}
+
+// Get a frequency of the high precision performance counter
+// Return:
+// The counter frequency
+int64_t GCToOSInterface::QueryPerformanceFrequency()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ LARGE_INTEGER frequency;
+ if (!::QueryPerformanceFrequency(&frequency))
+ {
+ DebugBreak();
+ _ASSERTE(!"Fatal Error - cannot query performance counter.");
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); // TODO: fatal error
+ }
+
+ return frequency.QuadPart;
+}
+
+// Get a time stamp with a low precision
+// Return:
+// Time stamp in milliseconds
+uint32_t GCToOSInterface::GetLowPrecisionTimeStamp()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return ::GetTickCount();
+}
+
+// Parameters of the GC thread stub
+struct GCThreadStubParam
+{
+ GCThreadFunction GCThreadFunction;
+ void* GCThreadParam;
+};
+
+// GC thread stub to convert GC thread function to an OS specific thread function
+static DWORD GCThreadStub(void* param)
+{
+ WRAPPER_NO_CONTRACT;
+
+ GCThreadStubParam *stubParam = (GCThreadStubParam*)param;
+ GCThreadFunction function = stubParam->GCThreadFunction;
+ void* threadParam = stubParam->GCThreadParam;
+
+ delete stubParam;
+
+ function(threadParam);
+
+ return 0;
+}
+
+// Create a new thread
+// Parameters:
+// function - the function to be executed by the thread
+// param - parameters of the thread
+// affinity - processor affinity of the thread
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::CreateThread(GCThreadFunction function, void* param, GCThreadAffinity* affinity)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ uint32_t thread_id;
+
+ GCThreadStubParam* stubParam = new (nothrow) GCThreadStubParam();
+ stubParam->GCThreadFunction = function;
+ stubParam->GCThreadParam = param;
+
+ HANDLE gc_thread = Thread::CreateUtilityThread(Thread::StackSize_Medium, GCThreadStub, stubParam, CREATE_SUSPENDED, (DWORD*)&thread_id);
+
+ if (!gc_thread)
+ {
+ return false;
+ }
+
+ SetThreadPriority(gc_thread, /* THREAD_PRIORITY_ABOVE_NORMAL );*/ THREAD_PRIORITY_HIGHEST );
+
+#ifndef FEATURE_CORECLR
+ if (affinity->Group != -1)
+ {
+ _ASSERTE(affinity->Processor != -1);
+ GROUP_AFFINITY ga;
+ ga.Group = (WORD)affinity->Group;
+ ga.Reserved[0] = 0; // reserve must be filled with zero
+ ga.Reserved[1] = 0; // otherwise call may fail
+ ga.Reserved[2] = 0;
+ ga.Mask = 1 << affinity->Processor;
+
+ CPUGroupInfo::SetThreadGroupAffinity(gc_thread, &ga, NULL);
+ }
+ else if (affinity->Processor != -1)
+ {
+ SetThreadAffinityMask(gc_thread, 1 << affinity->Processor);
+ }
+#endif // !FEATURE_CORECLR
+
+ ResumeThread(gc_thread);
+ CloseHandle(gc_thread);
+
+ return true;
+}
+
+// Open a file
+// Parameters:
+// filename - name of the file to open
+// mode - mode to open the file in (like in the CRT fopen)
+// Return:
+// FILE* of the opened file
+FILE* GCToOSInterface::OpenFile(const WCHAR* filename, const WCHAR* mode)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return _wfopen(filename, mode);
+}
+
+// Initialize the critical section
+void CLRCriticalSection::Initialize()
+{
+ WRAPPER_NO_CONTRACT;
+ UnsafeInitializeCriticalSection(&m_cs);
+}
+
+// Destroy the critical section
+void CLRCriticalSection::Destroy()
+{
+ WRAPPER_NO_CONTRACT;
+ UnsafeDeleteCriticalSection(&m_cs);
+}
+
+// Enter the critical section. Blocks until the section can be entered.
+void CLRCriticalSection::Enter()
+{
+ WRAPPER_NO_CONTRACT;
+ UnsafeEnterCriticalSection(&m_cs);
+}
+
+// Leave the critical section
+void CLRCriticalSection::Leave()
+{
+ WRAPPER_NO_CONTRACT;
+ UnsafeLeaveCriticalSection(&m_cs);
+}
+
diff --git a/src/vm/gcenv.os.h b/src/vm/gcenv.os.h
new file mode 100644
index 0000000000..b6897aed8f
--- /dev/null
+++ b/src/vm/gcenv.os.h
@@ -0,0 +1,6 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+#include "../gc/env/gcenv.os.h"
diff --git a/src/vm/hash.cpp b/src/vm/hash.cpp
index 8a0ab28652..8e8408c9a2 100644
--- a/src/vm/hash.cpp
+++ b/src/vm/hash.cpp
@@ -298,7 +298,7 @@ void HashMap::Init(DWORD cbInitialSize, Compare* pCompare, BOOL fAsyncMode, Lock
m_pfnLockOwner = pLock->lockOwnerFunc;
}
if (m_pfnLockOwner == NULL) {
- m_writerThreadId.SetThreadId();
+ m_writerThreadId.SetToCurrentThread();
}
#endif // _DEBUG
}
@@ -1079,7 +1079,7 @@ BOOL HashMap::OwnLock()
DEBUG_ONLY_FUNCTION;
if (m_pfnLockOwner == NULL) {
- return m_writerThreadId.IsSameThread();
+ return m_writerThreadId.IsCurrentThread();
}
else {
BOOL ret = m_pfnLockOwner(m_lockData);
diff --git a/src/vm/spinlock.cpp b/src/vm/spinlock.cpp
index 2360d5cf0c..68b9150158 100644
--- a/src/vm/spinlock.cpp
+++ b/src/vm/spinlock.cpp
@@ -160,7 +160,7 @@ BOOL SpinLock::OwnedByCurrentThread()
}
CONTRACTL_END;
- return m_holdingThreadId.IsSameThread();
+ return m_holdingThreadId.IsCurrentThread();
}
#endif
@@ -222,7 +222,7 @@ void SpinLock::GetLock(Thread* pThread)
INCTHREADLOCKCOUNTTHREAD(pThread);
#ifdef _DEBUG
- m_holdingThreadId.SetThreadId();
+ m_holdingThreadId.SetToCurrentThread();
dbg_EnterLock();
#endif
}
@@ -289,7 +289,7 @@ void SpinLock::FreeLock(Thread* pThread)
#ifdef _DEBUG
_ASSERTE(OwnedByCurrentThread());
- m_holdingThreadId.ResetThreadId();
+ m_holdingThreadId.Clear();
dbg_LeaveLock();
#endif
diff --git a/src/vm/syncblk.cpp b/src/vm/syncblk.cpp
index 7163658ef3..9e8dec4b62 100644
--- a/src/vm/syncblk.cpp
+++ b/src/vm/syncblk.cpp
@@ -1348,7 +1348,7 @@ void SyncBlockCache::GCDeleteSyncBlock(SyncBlock *psb)
m_FreeBlockList = &psb->m_Link;
}
-void SyncBlockCache::GCWeakPtrScan(HANDLESCANPROC scanProc, LPARAM lp1, LPARAM lp2)
+void SyncBlockCache::GCWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2)
{
CONTRACTL
{
diff --git a/src/vm/syncblk.h b/src/vm/syncblk.h
index ad527b9338..60f0fa240d 100644
--- a/src/vm/syncblk.h
+++ b/src/vm/syncblk.h
@@ -1008,7 +1008,7 @@ class SyncBlockCache
// return sync block to cache or delete, called from GC
void GCDeleteSyncBlock(SyncBlock *sb);
- void GCWeakPtrScan(HANDLESCANPROC scanProc, LPARAM lp1, LPARAM lp2);
+ void GCWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2);
void GCDone(BOOL demoting, int max_gen);
diff --git a/src/vm/threads.cpp b/src/vm/threads.cpp
index a614ddbdff..22e3b88d79 100644
--- a/src/vm/threads.cpp
+++ b/src/vm/threads.cpp
@@ -680,8 +680,8 @@ DWORD Thread::StartThread()
DWORD dwRetVal = (DWORD) -1;
#ifdef _DEBUG
- _ASSERTE (m_Creater.IsSameThread());
- m_Creater.ResetThreadId();
+ _ASSERTE (m_Creater.IsCurrentThread());
+ m_Creater.Clear();
#endif
#ifdef FEATURE_INCLUDE_ALL_INTERFACES
HostComHolder<IHostTask> pHostTask(GetHostTaskWithAddRef());
@@ -1922,7 +1922,7 @@ Thread::Thread()
#ifdef _DEBUG
dbg_m_cSuspendedThreads = 0;
dbg_m_cSuspendedThreadsWithoutOSLock = 0;
- m_Creater.ResetThreadId();
+ m_Creater.Clear();
m_dwUnbreakableLockCount = 0;
#endif
@@ -3132,7 +3132,7 @@ BOOL Thread::CreateNewOSThread(SIZE_T sizeToCommitOrReserve, LPTHREAD_START_ROUT
FastInterlockIncrement(&ThreadStore::s_pThreadStore->m_PendingThreadCount);
#ifdef _DEBUG
- m_Creater.SetThreadId();
+ m_Creater.SetToCurrentThread();
#endif
return TRUE;
@@ -3188,7 +3188,7 @@ BOOL Thread::CreateNewHostTask(SIZE_T stackSize, LPTHREAD_START_ROUTINE start, v
FastInterlockIncrement(&ThreadStore::s_pThreadStore->m_PendingThreadCount);
#ifdef _DEBUG
- m_Creater.SetThreadId();
+ m_Creater.SetToCurrentThread();
#endif
return TRUE;
@@ -11202,7 +11202,7 @@ BOOL ThreadStore::HoldingThreadStore(Thread *pThread)
}
else
{
- return (s_pThreadStore->m_holderthreadid.IsSameThread());
+ return (s_pThreadStore->m_holderthreadid.IsCurrentThread());
}
}
diff --git a/src/vm/threadsuspend.cpp b/src/vm/threadsuspend.cpp
index 0d823ddda1..705ae835c7 100644
--- a/src/vm/threadsuspend.cpp
+++ b/src/vm/threadsuspend.cpp
@@ -480,7 +480,7 @@ DWORD Thread::ResumeThread()
DWORD res = ::ResumeThread(m_ThreadHandleForResume);
_ASSERTE (res != 0 && "Thread is not previously suspended");
#ifdef _DEBUG_IMPL
- _ASSERTE (!m_Creater.IsSameThread());
+ _ASSERTE (!m_Creater.IsCurrentThread());
if ((res != (DWORD)-1) && (res != 0))
{
Thread * pCurThread = GetThread();
@@ -3102,7 +3102,7 @@ void ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_REASON reason)
_ASSERTE(ThreadStore::s_pThreadStore->m_holderthreadid.IsUnknown());
- ThreadStore::s_pThreadStore->m_holderthreadid.SetThreadId();
+ ThreadStore::s_pThreadStore->m_holderthreadid.SetToCurrentThread();
LOG((LF_SYNC, INFO3, "Locked thread store\n"));
@@ -3147,12 +3147,12 @@ void ThreadSuspend::UnlockThreadStore(BOOL bThreadDestroyed, ThreadSuspend::SUSP
// If Thread object has been destroyed, we need to reset the ownership info in Crst.
_ASSERTE(!bThreadDestroyed || GetThread() == NULL);
if (bThreadDestroyed) {
- ThreadStore::s_pThreadStore->m_Crst.m_holderthreadid.SetThreadId();
+ ThreadStore::s_pThreadStore->m_Crst.m_holderthreadid.SetToCurrentThread();
}
#endif
ThreadStore::s_pThreadStore->m_HoldingThread = NULL;
- ThreadStore::s_pThreadStore->m_holderthreadid.ResetThreadId();
+ ThreadStore::s_pThreadStore->m_holderthreadid.Clear();
ThreadStore::s_pThreadStore->Leave();
Thread::EndThreadAffinity();
diff --git a/src/vm/util.hpp b/src/vm/util.hpp
index b444357124..62508bc92b 100644
--- a/src/vm/util.hpp
+++ b/src/vm/util.hpp
@@ -652,14 +652,14 @@ public:
}
#endif
- void SetThreadId()
+ void SetToCurrentThread()
{
WRAPPER_NO_CONTRACT;
m_FiberPtrId = ClrTeb::GetFiberPtrId();
}
- BOOL IsSameThread() const
+ bool IsCurrentThread() const
{
WRAPPER_NO_CONTRACT;
@@ -668,13 +668,13 @@ public:
#ifdef _DEBUG
- BOOL IsUnknown() const
+ bool IsUnknown() const
{
LIMITED_METHOD_CONTRACT;
return m_FiberPtrId == NULL;
}
#endif
- void ResetThreadId()
+ void Clear()
{
LIMITED_METHOD_CONTRACT;
m_FiberPtrId = NULL;
diff --git a/src/vm/wks/wks.targets b/src/vm/wks/wks.targets
index 5f2770b267..5e5f18bd28 100644
--- a/src/vm/wks/wks.targets
+++ b/src/vm/wks/wks.targets
@@ -103,7 +103,8 @@
<CppCompile Include="$(VmSourcesDir)\FusionSink.cpp" Condition="'$(FeatureFusion)' == 'true'"/>
<CppCompile Include="$(VmSourcesDir)\FusionBind.cpp" Condition="'$(FeatureFusion)' == 'true'"/>
<CppCompile Include="$(VmSourcesDir)\GCDecode.cpp" />
- <CppCompile Include="$(VmSourcesDir)\gcenv.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\gcenv.ee.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\gcenv.os.cpp" />
<CppCompile Include="$(VmSourcesDir)\gchelpers.cpp" />
<CppCompile Include="$(VmSourcesDir)\gchost.cpp" />
<CppCompile Include="$(VmSourcesDir)\genericdict.cpp" />