author Ben Pye <ben@curlybracket.co.uk> 2015-07-01 15:10:09 +0100
committer Ben Pye <ben@curlybracket.co.uk> 2015-07-24 16:45:35 +0100
commit 9cd8273572260317c6acc126333e5a6e56aaeb06 (patch)
tree f125e83a6908151322aa20940b63c64c621c1169
parent acca43b33dcd97d1dc5d92147a3047a3bdfaf1ce (diff)
Add ARM target for CoreCLR on Linux.
c_runtime/vprintf/test1 is disabled as casting NULL to va_list is against the C specification.
Fix SetFilePointer tests on 32-bit platforms.
Define _FILE_OFFSET_BITS=64 so that we have large file support on 32-bit platforms.
Implement context capture/restore for ARM.
Link libgcc_s before libunwind on ARM so C++ exceptions work.
Translate armasm to gas syntax.
Specify Thumb, VFPv3, ARMv7 for the ARM target.
Add ARM configuration to the mscorlib build.
Implement GetLogicalProcessorCacheSizeFromOS in the PAL.
Set UNWIND_CONTEXT_IS_UCONTEXT_T from a configure check.
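The va_list note above is worth a concrete illustration. A minimal hedged sketch (not part of the commit; names are illustrative): on x86/AMD64 System V, va_list happens to be a pointer or a one-element array type, so casting NULL compiles by accident, while on ARM AAPCS va_list is a struct wrapping a pointer, so the cast is ill-formed; the C standard gives the idiom no defined meaning on any platform.

    #include <stdarg.h>
    #include <stdio.h>

    // The only portable way to obtain a va_list is via va_start inside a
    // variadic function; there is no portable "null" va_list value.
    static void call_vprintf(const char *fmt, ...)
    {
        va_list ap;
        va_start(ap, fmt);
        vprintf(fmt, ap);
        va_end(ap);
    }

    int main(void)
    {
        call_vprintf("%d\n", 42);          // fine on every target
        // vprintf("%d\n", (va_list)NULL); // accepted on AMD64 by accident,
                                           // rejected outright on ARM
        return 0;
    }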
-rw-r--r--CMakeLists.txt78
-rw-r--r--build.cmd1
-rwxr-xr-xbuild.sh8
-rw-r--r--src/CMakeLists.txt9
-rw-r--r--src/ToolBox/SOS/Strike/CMakeLists.txt33
-rw-r--r--src/ToolBox/SOS/lldbplugin/CMakeLists.txt15
-rw-r--r--src/ToolBox/SOS/lldbplugin/debugclient.cpp21
-rw-r--r--src/binder/CMakeLists.txt8
-rw-r--r--src/classlibnative/CMakeLists.txt23
-rw-r--r--src/debug/daccess/CMakeLists.txt35
-rw-r--r--src/debug/daccess/daccess.cpp2
-rw-r--r--src/debug/di/CMakeLists.txt25
-rw-r--r--src/debug/di/arm/cordbregisterset.cpp1
-rw-r--r--src/debug/di/arm/primitives.cpp2
-rw-r--r--src/debug/di/shimremotedatatarget.cpp2
-rw-r--r--src/debug/ee/CMakeLists.txt34
-rw-r--r--src/debug/ee/arm/dbghelpers.S64
-rw-r--r--src/debug/ee/wks/CMakeLists.txt9
-rw-r--r--src/inc/clrnt.h8
-rw-r--r--src/inc/volatile.h11
-rw-r--r--src/inc/winwrap.h2
-rw-r--r--src/jit/CMakeLists.txt36
-rw-r--r--src/jit/codegenarm.cpp4
-rw-r--r--src/jit/codegenclassic.h2
-rw-r--r--src/jit/codegencommon.cpp3
-rw-r--r--src/jit/codegenlegacy.cpp52
-rw-r--r--src/jit/compiler.h6
-rw-r--r--src/jit/emit.h2
-rw-r--r--src/jit/emitarm.cpp45
-rw-r--r--src/jit/flowgraph.cpp2
-rw-r--r--src/jit/instr.cpp2
-rw-r--r--src/jit/regalloc.cpp103
-rw-r--r--src/jit/registerarm.h2
-rw-r--r--src/jit/registerfp.cpp4
-rw-r--r--src/jit/regset.cpp6
-rw-r--r--src/jit/unwind.h3
-rw-r--r--src/jit/unwindarm.cpp6
-rw-r--r--src/pal/inc/pal.h208
-rw-r--r--src/pal/inc/pal_mstypes.h12
-rw-r--r--src/pal/inc/rt/intsafe.h2
-rw-r--r--src/pal/inc/rt/ntimage.h1
-rw-r--r--src/pal/inc/rt/palrt.h25
-rw-r--r--src/pal/inc/unixasmmacros.inc305
-rw-r--r--src/pal/inc/unixasmmacrosamd64.inc305
-rw-r--r--src/pal/inc/unixasmmacrosarm.inc225
-rw-r--r--src/pal/src/CMakeLists.txt46
-rw-r--r--src/pal/src/arch/arm/context2.S217
-rw-r--r--src/pal/src/arch/arm/processor.cpp43
-rw-r--r--src/pal/src/configure.cmake19
-rw-r--r--src/pal/src/cruntime/misc.cpp2
-rw-r--r--src/pal/src/exception/seh-unwind.cpp51
-rw-r--r--src/pal/src/include/pal/context.h48
-rw-r--r--src/pal/src/misc/sysinfo.cpp19
-rw-r--r--src/pal/src/thread/context.cpp (renamed from src/pal/src/arch/i386/context.cpp)56
-rw-r--r--src/pal/tests/CMakeLists.txt17
-rw-r--r--src/pal/tests/palsuite/c_runtime/vprintf/CMakeLists.txt3
-rw-r--r--src/pal/tests/palsuite/paltestlist.txt1
-rw-r--r--src/pal/tests/palsuite/paltestlist_to_be_reviewed.txt1
-rw-r--r--src/unwinder/CMakeLists.txt37
-rw-r--r--src/unwinder/arm/unwinder_arm.cpp50
-rw-r--r--src/unwinder/dac/CMakeLists.txt21
-rw-r--r--src/utilcode/md5.cpp5
-rw-r--r--src/utilcode/perflog.cpp2
-rw-r--r--src/utilcode/util_nodependencies.cpp4
-rw-r--r--src/vm/CMakeLists.txt138
-rw-r--r--src/vm/arm/armsinglestepper.cpp48
-rw-r--r--src/vm/arm/asmconstants.h2
-rw-r--r--src/vm/arm/asmhelpers.S1361
-rw-r--r--src/vm/arm/cgencpu.h11
-rw-r--r--src/vm/arm/crthelpers.S60
-rw-r--r--src/vm/arm/ehhelpers.S146
-rw-r--r--src/vm/arm/gmscpu.h2
-rw-r--r--src/vm/arm/memcpy.S37
-rw-r--r--src/vm/arm/patchedcode.S72
-rw-r--r--src/vm/arm/stubs.cpp45
-rw-r--r--src/vm/arm/unixstubs.cpp39
-rw-r--r--src/vm/clrvarargs.cpp8
-rw-r--r--src/vm/codeman.cpp2
-rw-r--r--src/vm/crossgen/CMakeLists.txt16
-rw-r--r--src/vm/exceptionhandling.cpp18
-rw-r--r--src/vm/gcinfodecoder.cpp19
-rw-r--r--src/vm/jitinterface.cpp4
-rw-r--r--src/vm/stublink.cpp20
-rw-r--r--src/vm/stublink.h18
-rw-r--r--src/vm/util.cpp1
-rw-r--r--src/vm/wks/CMakeLists.txt6
-rw-r--r--src/zap/zapcode.cpp2
87 files changed, 3809 insertions, 660 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 81fd930488..0539dc7dcc 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -17,7 +17,18 @@ set(GENERATED_INCLUDE_DIR ${CMAKE_CURRENT_BINARY_DIR}/src/inc)
if(CMAKE_SYSTEM_NAME STREQUAL Linux)
set(CLR_CMAKE_PLATFORM_UNIX 1)
- set(CLR_CMAKE_PLATFORM_UNIX_TARGET_AMD64 1)
+ if(CMAKE_SYSTEM_PROCESSOR STREQUAL x86_64)
+ set(CLR_CMAKE_PLATFORM_UNIX_TARGET_AMD64 1)
+ elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL armv7l)
+ set(CLR_CMAKE_PLATFORM_UNIX_TARGET_ARM 1)
+ # Because we don't use CMAKE_C_COMPILER/CMAKE_CXX_COMPILER to select clang,
+ # we have to set the target triple by adding a compiler argument.
+ add_compile_options(-target armv7-linux-gnueabihf)
+ add_compile_options(-mthumb)
+ add_compile_options(-mfpu=vfpv3)
+ else()
+ message(FATAL_ERROR "Only AMD64 and ARM supported")
+ endif()
set(CLR_CMAKE_PLATFORM_LINUX 1)
endif(CMAKE_SYSTEM_NAME STREQUAL Linux)
@@ -46,6 +57,14 @@ if(CMAKE_SYSTEM_NAME STREQUAL NetBSD)
set(CLR_CMAKE_PLATFORM_NETBSD 1)
endif(CMAKE_SYSTEM_NAME STREQUAL NetBSD)
+if(CLR_CMAKE_PLATFORM_UNIX_TARGET_ARM)
+ set(CLR_CMAKE_PLATFORM_ARCH_ARM 1)
+elseif(CLR_CMAKE_PLATFORM_UNIX_TARGET_AMD64)
+ set(CLR_CMAKE_PLATFORM_ARCH_AMD64 1)
+elseif(WIN32)
+ set(CLR_CMAKE_PLATFORM_ARCH_AMD64 1)
+endif()
+
if(WIN32)
enable_language(ASM_MASM)
else()
@@ -370,15 +389,22 @@ if (CLR_CMAKE_PLATFORM_UNIX)
if(IS_64BIT_BUILD)
add_definitions(-DBIT64=1)
- add_definitions(-DFEATURE_PAL)
- else (IS_64BIT_BUILD)
- message(FATAL_ERROR "error: Detected non x86_64 target processor. Not supported!")
endif(IS_64BIT_BUILD)
+ add_definitions(-DFEATURE_PAL)
+
if(CLR_CMAKE_PLATFORM_LINUX)
add_definitions(-D__LINUX__=1)
- message("Detected Linux x86_64")
- add_definitions(-DLINUX64)
+ if(CLR_CMAKE_PLATFORM_UNIX_TARGET_AMD64)
+ message("Detected Linux x86_64")
+ add_definitions(-DLINUX64)
+ elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
+ message("Detected Linux ARM")
+ add_definitions(-DLINUX32)
+ else()
+ message(FATAL_ERROR "Only AMD64 and ARM supported")
+ endif()
+
endif(CLR_CMAKE_PLATFORM_LINUX)
if(CLR_CMAKE_PLATFORM_DARWIN)
message("Detected OSX x86_64")
@@ -407,6 +433,12 @@ if (IS_64BIT_BUILD EQUAL 1)
endif (CLR_CMAKE_PLATFORM_UNIX_TARGET_AMD64)
add_definitions(-D_TARGET_AMD64_=1)
add_definitions(-DDBG_TARGET_AMD64)
+elseif (CLR_CMAKE_PLATFORM_ARCH_ARM)
+ if (CLR_CMAKE_PLATFORM_UNIX_TARGET_ARM)
+ add_definitions(-DDBG_TARGET_ARM_UNIX)
+ endif (CLR_CMAKE_PLATFORM_UNIX_TARGET_ARM)
+ add_definitions(-D_TARGET_ARM_=1)
+ add_definitions(-DDBG_TARGET_ARM)
else ()
# TODO: Support this
message(FATAL_ERROR "Not Implemented!")
@@ -428,7 +460,11 @@ add_definitions(-DFEATURE_APPDOMAIN_RESOURCE_MONITORING)
add_definitions(-DFEATURE_ARRAYSTUB_AS_IL)
if (CLR_CMAKE_PLATFORM_UNIX)
add_definitions(-DFEATURE_STUBS_AS_IL)
- add_definitions(-DUNIX_AMD64_ABI)
+ if (CLR_CMAKE_PLATFORM_UNIX_TARGET_AMD64)
+ add_definitions(-DUNIX_AMD64_ABI)
+ elseif (CLR_CMAKE_PLATFORM_UNIX_TARGET_ARM)
+ add_definitions(-DUNIX_ARM_ABI)
+ endif()
endif(CLR_CMAKE_PLATFORM_UNIX)
add_definitions(-DFEATURE_ASYNC_IO)
add_definitions(-DFEATURE_BCL_FORMATTING)
@@ -519,16 +555,20 @@ if(CLR_CMAKE_PLATFORM_UNIX)
add_definitions(-DFEATURE_DBGIPC_TRANSPORT_VM)
endif(CLR_CMAKE_PLATFORM_UNIX)
-if (IS_64BIT_BUILD EQUAL 1)
-add_definitions(-D_AMD64_)
-add_definitions(-D_AMD64_SIMULATOR_)
-add_definitions(-D_AMD64_SIMULATOR_PERF_)
-add_definitions(-D_AMD64_WORKAROUND_)
-add_definitions(-D_WIN64)
-add_definitions(-DAMD64)
+if (CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ add_definitions(-D_AMD64_)
+ add_definitions(-D_AMD64_SIMULATOR_)
+ add_definitions(-D_AMD64_SIMULATOR_PERF_)
+ add_definitions(-D_AMD64_WORKAROUND_)
+ add_definitions(-D_WIN64)
+ add_definitions(-DAMD64)
+elseif (CLR_CMAKE_PLATFORM_ARCH_ARM)
+ add_definitions(-D_ARM_)
+ add_definitions(-DARM)
+ add_definitions(-D_WIN32)
else ()
-# TODO - Support this
-endif (IS_64BIT_BUILD EQUAL 1)
+ message(FATAL_ERROR "Only AMD64 and ARM supported")
+endif ()
add_definitions(-D_SKIP_IF_SIMULATOR_)
add_definitions(-D_SECURE_SCL=0)
@@ -542,7 +582,11 @@ add_definitions(-D_UNICODE)
if (IS_64BIT_BUILD EQUAL 1)
set(ARCH_SOURCES_DIR amd64)
else ()
- set(ARCH_SOURCES_DIR i386)
+ if (CLR_CMAKE_PLATFORM_ARCH_ARM)
+ set(ARCH_SOURCES_DIR arm)
+ else ()
+ set(ARCH_SOURCES_DIR i386)
+ endif ()
endif (IS_64BIT_BUILD EQUAL 1)
add_subdirectory(src)
diff --git a/build.cmd b/build.cmd
index 449c0beb0e..fb15d813b4 100644
--- a/build.cmd
+++ b/build.cmd
@@ -26,6 +26,7 @@ set __SkipTestBuild=
if "%1" == "" goto ArgsDone
if /i "%1" == "/?" goto Usage
if /i "%1" == "x64" (set __BuildArch=x64&&shift&goto Arg_Loop)
+if /i "%1" == "arm" (set __BuildArch=arm&&shift&goto Arg_Loop)
if /i "%1" == "debug" (set __BuildType=Debug&shift&goto Arg_Loop)
if /i "%1" == "release" (set __BuildType=Release&shift&goto Arg_Loop)
diff --git a/build.sh b/build.sh
index fe5577cba4..351d6a9e73 100755
--- a/build.sh
+++ b/build.sh
@@ -3,7 +3,7 @@
usage()
{
echo "Usage: $0 [BuildArch] [BuildType] [clean] [verbose] [clangx.y]"
- echo "BuildArch can be: x64"
+ echo "BuildArch can be: x64, ARM"
echo "BuildType can be: Debug, Release"
echo "clean - optional argument to force a clean build."
echo "verbose - optional argument to enable verbose build output."
@@ -92,7 +92,7 @@ echo "Commencing CoreCLR Repo build"
# Argument types supported by this script:
#
-# Build architecture - valid value is: x64.
+# Build architecture - valid values are: x64, arm.
# Build Type - valid values are: Debug, Release
#
# Set the default arguments for build
@@ -157,6 +157,10 @@ for i in "$@"
__BuildArch=x64
__MSBuildBuildArch=x64
;;
+ arm)
+ __BuildArch=arm
+ __MSBuildBuildArch=arm
+ ;;
debug)
__BuildType=Debug
;;
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 6cebd0669a..6b967e3107 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -3,7 +3,14 @@ include_directories("strongname/inc")
include_directories("inc/winrt")
include_directories("debug/inc")
-include_directories("debug/inc/amd64")
+if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ include_directories("debug/inc/amd64")
+elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
+ include_directories("debug/inc/arm")
+else()
+ message(FATAL_ERROR "Only ARM and AMD64 are supported")
+endif()
+
include_directories("debug/inc/dump")
include_directories("md/inc")
include_directories("classlibnative/bcltype")
diff --git a/src/ToolBox/SOS/Strike/CMakeLists.txt b/src/ToolBox/SOS/Strike/CMakeLists.txt
index 9e9ef54ca0..ce909d7989 100644
--- a/src/ToolBox/SOS/Strike/CMakeLists.txt
+++ b/src/ToolBox/SOS/Strike/CMakeLists.txt
@@ -1,7 +1,14 @@
-add_definitions(-DSOS_TARGET_AMD64=1)
-add_definitions(-D_TARGET_WIN64_=1)
-add_definitions(-DDBG_TARGET_64BIT)
-add_definitions(-DDBG_TARGET_WIN64=1)
+if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ add_definitions(-DSOS_TARGET_AMD64=1)
+ add_definitions(-D_TARGET_WIN64_=1)
+ add_definitions(-DDBG_TARGET_64BIT)
+ add_definitions(-DDBG_TARGET_WIN64=1)
+elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
+ add_definitions(-DSOS_TARGET_ARM=1)
+ add_definitions(-D_TARGET_WIN32_=1)
+ add_definitions(-DDBG_TARGET_32BIT)
+ add_definitions(-DDBG_TARGET_WIN32=1)
+endif()
add_definitions(-DSTRIKE)
remove_definitions(-DUNICODE)
@@ -38,12 +45,18 @@ if(WIN32)
WatchCmd.cpp
Native.rc
)
-
- set(SOS_SOURCES_AMD64
- disasmX86.cpp
- )
-
- list(APPEND SOS_SOURCES ${SOS_SOURCES_AMD64})
+
+ if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ set(SOS_SOURCES_ARCH
+ disasmX86.cpp
+ )
+ elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
+ set(SOS_SOURCES_ARCH
+ disasmARM.cpp
+ )
+ endif()
+
+ list(APPEND SOS_SOURCES ${SOS_SOURCES_ARCH})
add_definitions(-DFX_VER_INTERNALNAME_STR=SOS.dll)
diff --git a/src/ToolBox/SOS/lldbplugin/CMakeLists.txt b/src/ToolBox/SOS/lldbplugin/CMakeLists.txt
index 945c23acf8..798c91a142 100644
--- a/src/ToolBox/SOS/lldbplugin/CMakeLists.txt
+++ b/src/ToolBox/SOS/lldbplugin/CMakeLists.txt
@@ -2,10 +2,17 @@ project(sosplugin)
set(CMAKE_INCLUDE_CURRENT_DIR ON)
-add_definitions(-D_TARGET_AMD64_=1)
-add_definitions(-DDBG_TARGET_64BIT=1)
-add_definitions(-DDBG_TARGET_AMD64=1)
-add_definitions(-DDBG_TARGET_WIN64=1)
+if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ add_definitions(-D_TARGET_AMD64_=1)
+ add_definitions(-DDBG_TARGET_64BIT=1)
+ add_definitions(-DDBG_TARGET_AMD64=1)
+ add_definitions(-DDBG_TARGET_WIN64=1)
+elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
+ add_definitions(-D_TARGET_ARM_=1)
+ add_definitions(-DDBG_TARGET_32BIT=1)
+ add_definitions(-DDBG_TARGET_ARM=1)
+ add_definitions(-DDBG_TARGET_WIN32=1)
+endif()
set(ENABLE_LLDBPLUGIN ${CLR_CMAKE_PLATFORM_UNIX} CACHE BOOL "Enable building the SOS plugin for LLDB.")
set(REQUIRE_LLDBPLUGIN ${CLR_CMAKE_PLATFORM_LINUX} CACHE BOOL "Require building the SOS plugin for LLDB.")
diff --git a/src/ToolBox/SOS/lldbplugin/debugclient.cpp b/src/ToolBox/SOS/lldbplugin/debugclient.cpp
index 75d5cee594..f2a690b73b 100644
--- a/src/ToolBox/SOS/lldbplugin/debugclient.cpp
+++ b/src/ToolBox/SOS/lldbplugin/debugclient.cpp
@@ -849,6 +849,7 @@ DebugClient::GetThreadContextById(
dtcontext = (DT_CONTEXT*)context;
dtcontext->ContextFlags = contextFlags;
+#ifdef DBG_TARGET_AMD64
dtcontext->Rip = frame.GetPC();
dtcontext->Rsp = frame.GetSP();
dtcontext->Rbp = frame.GetFP();
@@ -875,6 +876,26 @@ DebugClient::GetThreadContextById(
dtcontext->SegEs = GetRegister(frame, "es");
dtcontext->SegFs = GetRegister(frame, "fs");
dtcontext->SegGs = GetRegister(frame, "gs");
+#elif DBG_TARGET_ARM
+ dtcontext->Pc = frame.GetPC();
+ dtcontext->Sp = frame.GetSP();
+ dtcontext->Lr = GetRegister(frame, "lr");
+ dtcontext->Cpsr = GetRegister(frame, "cpsr");
+
+ dtcontext->R0 = GetRegister(frame, "r0");
+ dtcontext->R1 = GetRegister(frame, "r1");
+ dtcontext->R2 = GetRegister(frame, "r2");
+ dtcontext->R3 = GetRegister(frame, "r3");
+ dtcontext->R4 = GetRegister(frame, "r4");
+ dtcontext->R5 = GetRegister(frame, "r5");
+ dtcontext->R6 = GetRegister(frame, "r6");
+ dtcontext->R7 = GetRegister(frame, "r7");
+ dtcontext->R8 = GetRegister(frame, "r8");
+ dtcontext->R9 = GetRegister(frame, "r9");
+ dtcontext->R10 = GetRegister(frame, "r10");
+ dtcontext->R11 = GetRegister(frame, "r11");
+ dtcontext->R12 = GetRegister(frame, "r12");
+#endif
hr = S_OK;
diff --git a/src/binder/CMakeLists.txt b/src/binder/CMakeLists.txt
index af38572b4c..b6e9da3f46 100644
--- a/src/binder/CMakeLists.txt
+++ b/src/binder/CMakeLists.txt
@@ -1,6 +1,12 @@
set(CMAKE_INCLUDE_CURRENT_DIR ON)
-include_directories(BEFORE "../vm/amd64")
+if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ include_directories(BEFORE "../vm/amd64")
+elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
+ include_directories(BEFORE "../vm/arm")
+else()
+ message(FATAL_ERROR "Only ARM and AMD64 are supported")
+endif()
include_directories(BEFORE "../vm")
include_directories(BEFORE "inc")
diff --git a/src/classlibnative/CMakeLists.txt b/src/classlibnative/CMakeLists.txt
index de4c2b20b3..00a6941920 100644
--- a/src/classlibnative/CMakeLists.txt
+++ b/src/classlibnative/CMakeLists.txt
@@ -1,13 +1,28 @@
include_directories(BEFORE "../vm")
include_directories("../inc")
-include_directories("../vm/amd64") # TODO: use the target cpu
+if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ include_directories(BEFORE "../vm/amd64")
+elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
+ include_directories(BEFORE "../vm/arm")
+else()
+ message(FATAL_ERROR "Only ARM and AMD64 are supported")
+endif()
include_directories("../debug/inc")
include_directories("../debug/inc/dump")
#TODO for x86
-add_definitions(-DDBG_TARGET_64BIT=1)
-add_definitions(-DDBG_TARGET_AMD64=1)
-add_definitions(-DDBG_TARGET_WIN64=1)
+
+if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ add_definitions(-DDBG_TARGET_64BIT=1)
+ add_definitions(-DDBG_TARGET_AMD64=1)
+ add_definitions(-DDBG_TARGET_WIN64=1)
+elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
+ add_definitions(-DDBG_TARGET_32BIT=1)
+ add_definitions(-DDBG_TARGET_ARM=1)
+ add_definitions(-DDBG_TARGET_WIN32=1)
+else()
+ message(FATAL_ERROR "Only ARM and AMD64 are supported")
+endif()
add_subdirectory(bcltype)
add_subdirectory(float)
diff --git a/src/debug/daccess/CMakeLists.txt b/src/debug/daccess/CMakeLists.txt
index c84fe625b9..0d93d75966 100644
--- a/src/debug/daccess/CMakeLists.txt
+++ b/src/debug/daccess/CMakeLists.txt
@@ -2,11 +2,22 @@
include(${CLR_DIR}/dac.cmake)
add_definitions(-DFEATURE_NO_HOST)
-add_definitions(-D_TARGET_AMD64_=1)
-add_definitions(-DDBG_TARGET_64BIT=1)
-add_definitions(-DDBG_TARGET_AMD64=1)
-add_definitions(-DDBG_TARGET_WIN64=1)
-add_definitions(-D_WIN64=1)
+
+if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ add_definitions(-D_TARGET_AMD64_=1)
+ add_definitions(-DDBG_TARGET_64BIT=1)
+ add_definitions(-DDBG_TARGET_AMD64=1)
+ add_definitions(-DDBG_TARGET_WIN64=1)
+ add_definitions(-D_WIN64=1)
+elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
+ add_definitions(-D_TARGET_ARM_=1)
+ add_definitions(-DDBG_TARGET_32BIT=1)
+ add_definitions(-DDBG_TARGET_ARM=1)
+ add_definitions(-DDBG_TARGET_WIN32=1)
+ add_definitions(-D_WIN32=1)
+else()
+ message(FATAL_ERROR "Only ARM and AMD64 are supported")
+endif()
include_directories(BEFORE ${VM_DIR})
include_directories(BEFORE ${VM_DIR}/${ARCH_SOURCES_DIR})
@@ -38,17 +49,25 @@ set(DACCESS_SOURCES
datatargetadapter.cpp
)
-if(IS_64BIT_BUILD EQUAL 1)
+if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
include_directories(amd64)
list(APPEND DACCESS_SOURCES
amd64/primitives.cpp
)
-else()
+elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
+ include_directories(arm)
+
+ list(APPEND DACCESS_SOURCES
+ arm/primitives.cpp
+ )
+elseif(CLR_CMAKE_PLATFORM_ARCH_I386)
+ include_directories(i386)
+
list(APPEND DACCESS_SOURCES
i386/primitives.cpp
)
-endif(IS_64BIT_BUILD EQUAL 1)
+endif()
convert_to_absolute_path(DACCESS_SOURCES ${DACCESS_SOURCES})
diff --git a/src/debug/daccess/daccess.cpp b/src/debug/daccess/daccess.cpp
index 9d21dcad2c..b737ecd912 100644
--- a/src/debug/daccess/daccess.cpp
+++ b/src/debug/daccess/daccess.cpp
@@ -5496,6 +5496,8 @@ ClrDataAccess::Initialize(void)
CorDebugPlatform hostPlatform = CORDB_PLATFORM_POSIX_X86;
#elif defined(DBG_TARGET_AMD64)
CorDebugPlatform hostPlatform = CORDB_PLATFORM_POSIX_AMD64;
+ #elif defined(DBG_TARGET_ARM)
+ CorDebugPlatform hostPlatform = CORDB_PLATFORM_POSIX_ARM;
#else
#error Unknown Processor.
#endif
diff --git a/src/debug/di/CMakeLists.txt b/src/debug/di/CMakeLists.txt
index cbf1065895..55b5a44657 100644
--- a/src/debug/di/CMakeLists.txt
+++ b/src/debug/di/CMakeLists.txt
@@ -1,4 +1,17 @@
-add_definitions(-DDBG_TARGET_AMD64=1 -DDBG_TARGET_WIN64=1 -DDBG_TARGET_64BIT=1 -D_TARGET_AMD64_=1)
+if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ add_definitions(-D_TARGET_WIN64_=1)
+ add_definitions(-DDBG_TARGET_64BIT)
+ add_definitions(-DDBG_TARGET_WIN64=1)
+ add_definitions(-DDBG_TARGET_AMD64=1)
+elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
+ add_definitions(-D_TARGET_WIN32_=1)
+ add_definitions(-DDBG_TARGET_32BIT)
+ add_definitions(-DDBG_TARGET_WIN32=1)
+ add_definitions(-DDBG_TARGET_ARM=1)
+else()
+ message(FATAL_ERROR "Only ARM and AMD64 are supported")
+endif()
+
add_definitions(-DFEATURE_METADATA_CUSTOM_DATA_SOURCE -DFEATURE_METADATA_DEBUGGEE_DATA_SOURCE -DFEATURE_NO_HOST -DFEATURE_METADATA_LOAD_TRUSTED_IMAGES)
set(CORDBDI_SOURCES
@@ -42,10 +55,12 @@ if(WIN32)
elseif(CLR_CMAKE_PLATFORM_UNIX)
add_compile_options(-fPIC)
- set(CORDBDI_SOURCES
- ${CORDBDI_SOURCES}
- amd64/floatconversion.S
- )
+ if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ set(CORDBDI_SOURCES
+ ${CORDBDI_SOURCES}
+ amd64/floatconversion.S
+ )
+ endif()
endif(WIN32)
diff --git a/src/debug/di/arm/cordbregisterset.cpp b/src/debug/di/arm/cordbregisterset.cpp
index 680b814bea..3a2002c846 100644
--- a/src/debug/di/arm/cordbregisterset.cpp
+++ b/src/debug/di/arm/cordbregisterset.cpp
@@ -8,7 +8,6 @@
//
//*****************************************************************************
-#include "stdafx.h"
#include "primitives.h"
HRESULT CordbRegisterSet::GetRegistersAvailable(ULONG64 *pAvailable)
diff --git a/src/debug/di/arm/primitives.cpp b/src/debug/di/arm/primitives.cpp
index 53ec1fc774..7ec727301f 100644
--- a/src/debug/di/arm/primitives.cpp
+++ b/src/debug/di/arm/primitives.cpp
@@ -5,6 +5,4 @@
//
-#include "stdafx.h"
-
#include "../../shared/arm/primitives.cpp"
diff --git a/src/debug/di/shimremotedatatarget.cpp b/src/debug/di/shimremotedatatarget.cpp
index c6036f6324..cd1304be40 100644
--- a/src/debug/di/shimremotedatatarget.cpp
+++ b/src/debug/di/shimremotedatatarget.cpp
@@ -215,6 +215,8 @@ ShimRemoteDataTarget::GetPlatform(
*pPlatform = CORDB_PLATFORM_POSIX_X86;
#elif defined(DBG_TARGET_AMD64)
*pPlatform = CORDB_PLATFORM_POSIX_AMD64;
+ #elif defined(DBG_TARGET_ARM)
+ *pPlatform = CORDB_PLATFORM_POSIX_ARM;
#else
#error Unknown Processor.
#endif
diff --git a/src/debug/ee/CMakeLists.txt b/src/debug/ee/CMakeLists.txt
index aa59a7a957..e37e2573f3 100644
--- a/src/debug/ee/CMakeLists.txt
+++ b/src/debug/ee/CMakeLists.txt
@@ -1,10 +1,21 @@
set(CMAKE_INCLUDE_CURRENT_DIR ON)
add_definitions(-DFEATURE_NO_HOST)
-add_definitions(-D_TARGET_AMD64_=1)
-add_definitions(-DDBG_TARGET_64BIT=1)
-add_definitions(-DDBG_TARGET_AMD64=1)
-add_definitions(-DDBG_TARGET_WIN64=1)
+
+if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ add_definitions(-D_TARGET_AMD64_=1)
+ add_definitions(-DDBG_TARGET_64BIT=1)
+ add_definitions(-DDBG_TARGET_AMD64=1)
+ add_definitions(-DDBG_TARGET_WIN64=1)
+elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
+ add_definitions(-D_TARGET_ARM_=1)
+ add_definitions(-DDBG_TARGET_32BIT=1)
+ add_definitions(-DDBG_TARGET_ARM=1)
+ add_definitions(-DDBG_TARGET_WIN32=1)
+else()
+ message(FATAL_ERROR "Only ARM and AMD64 are supported")
+endif()
+
include_directories(BEFORE ${VM_DIR})
include_directories(BEFORE ${VM_DIR}/${ARCH_SOURCES_DIR})
@@ -29,7 +40,6 @@ set(CORDBEE_SOURCES_WKS
shared.cpp
frameinfo.cpp
${ARCH_SOURCES_DIR}/primitives.cpp
- ${ARCH_SOURCES_DIR}/debuggerregdisplayhelper.cpp
)
set(CORDBEE_SOURCES_DAC
@@ -42,11 +52,19 @@ if(CLR_CMAKE_PLATFORM_UNIX)
)
endif(CLR_CMAKE_PLATFORM_UNIX)
-if (IS_64BIT_BUILD EQUAL 1)
+if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ list(APPEND CORDBEE_SOURCES_WKS
+ ${ARCH_SOURCES_DIR}/debuggerregdisplayhelper.cpp
+ )
list(APPEND CORDBEE_SOURCES_WKS amd64/amd64walker.cpp)
-else ()
+elseif(CLR_CMAKE_PLATFORM_ARCH_I386)
+ list(APPEND CORDBEE_SOURCES_WKS
+ ${ARCH_SOURCES_DIR}/debuggerregdisplayhelper.cpp
+ )
list(APPEND CORDBEE_SOURCES_WKS i386/x86walker.cpp)
-endif (IS_64BIT_BUILD EQUAL 1)
+elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
+ list(APPEND CORDBEE_SOURCES_WKS arm/armwalker.cpp)
+endif()
convert_to_absolute_path(CORDBEE_SOURCES_DAC ${CORDBEE_SOURCES_DAC})
convert_to_absolute_path(CORDBEE_SOURCES_WKS ${CORDBEE_SOURCES_WKS})
diff --git a/src/debug/ee/arm/dbghelpers.S b/src/debug/ee/arm/dbghelpers.S
new file mode 100644
index 0000000000..a71404a9c9
--- /dev/null
+++ b/src/debug/ee/arm/dbghelpers.S
@@ -0,0 +1,64 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+#include "unixasmmacros.inc"
+
+.syntax unified
+.thumb
+
+//
+// hijacking stub used to perform a func-eval, see Debugger::FuncEvalSetup() for use.
+//
+// on entry:
+// r0 : pointer to DebuggerEval object
+//
+
+NESTED_ENTRY FuncEvalHijack, _TEXT, FuncEvalHijackPersonalityRoutine
+
+ // NOTE: FuncEvalHijackPersonalityRoutine is dependent on the stack layout so if
+ // you change the prolog you will also need to update the personality routine.
+
+ // push arg to the stack so our personality routine can find it
+ // push lr to get good stacktrace in debugger
+ push {r0,lr}
+
+ CHECK_STACK_ALIGNMENT
+
+ // FuncEvalHijackWorker returns the address we should jump to.
+ bl C_FUNC(FuncEvalHijackWorker)
+
+ // effective NOP to terminate unwind
+ mov r2, r2
+
+ free_stack 8
+ bx r0
+
+NESTED_END FuncEvalHijack, _TEXT
+
+//
+// This is the general purpose hijacking stub. DacDbiInterfaceImpl::Hijack() will
+// set the registers with the appropriate parameters from out-of-process.
+//
+// on entry:
+// r0 : pointer to CONTEXT
+// r1 : pointer to EXCEPTION_RECORD
+// r2 : EHijackReason
+// r3 : void* pdata
+//
+
+NESTED_ENTRY ExceptionHijack, _TEXT, ExceptionHijackPersonalityRoutine
+
+ CHECK_STACK_ALIGNMENT
+
+ // make the call
+ bl C_FUNC(ExceptionHijackWorker)
+
+ // effective NOP to terminate unwind
+ mov r3, r3
+
+ // *** should never get here ***
+ EMIT_BREAKPOINT
+
+NESTED_END ExceptionHijackEnd, _TEXT
diff --git a/src/debug/ee/wks/CMakeLists.txt b/src/debug/ee/wks/CMakeLists.txt
index 9438b00b53..e2fd7b2bfd 100644
--- a/src/debug/ee/wks/CMakeLists.txt
+++ b/src/debug/ee/wks/CMakeLists.txt
@@ -26,6 +26,13 @@ add_library(cordbee_wks ${CORDBEE_SOURCES_WKS} ${CMAKE_CURRENT_BINARY_DIR}/dbghe
else ()
add_compile_options(-fPIC)
-add_library(cordbee_wks ${CORDBEE_SOURCES_WKS} ../amd64/dbghelpers.S)
+
+if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ add_library(cordbee_wks ${CORDBEE_SOURCES_WKS} ../amd64/dbghelpers.S)
+elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
+ add_library(cordbee_wks ${CORDBEE_SOURCES_WKS} ../arm/dbghelpers.S)
+else()
+ message(FATAL_ERROR "Only ARM and AMD64 are supported")
+endif()
endif (WIN32)
diff --git a/src/inc/clrnt.h b/src/inc/clrnt.h
index ed52e04a53..97d249ad88 100644
--- a/src/inc/clrnt.h
+++ b/src/inc/clrnt.h
@@ -937,6 +937,14 @@ typedef struct _DISPATCHER_CONTEXT {
#ifdef _TARGET_ARM_
#include "daccess.h"
+
+//
+// Define unwind information flags.
+//
+
+#define UNW_FLAG_NHANDLER 0x0 /* any handler */
+#define UNW_FLAG_EHANDLER 0x1 /* filter handler */
+#define UNW_FLAG_UHANDLER 0x2 /* unwind handler */
// This function returns the length of a function using the new unwind info on arm.
// Taken from minkernel\ntos\rtl\arm\ntrtlarm.h.
diff --git a/src/inc/volatile.h b/src/inc/volatile.h
index 9c742fb3d5..5310a65ff1 100644
--- a/src/inc/volatile.h
+++ b/src/inc/volatile.h
@@ -72,11 +72,15 @@
#error The Volatile type is currently only defined for Visual C++ and GNU C++
#endif
-#if defined(__GNUC__) && !defined(_X86_) && !defined(_AMD64_)
-#error The Volatile type is currently only defined for GCC when targeting x86 or AMD64 CPUs
+#if defined(__GNUC__) && !defined(_X86_) && !defined(_AMD64_) && !defined(_ARM_)
+#error The Volatile type is currently only defined for GCC when targeting x86, AMD64 or ARM CPUs
#endif
-#ifdef __GNUC__
+#if defined(__GNUC__)
+#if defined(_ARM_)
+// This is functionally equivalent to the MemoryBarrier() macro used on ARM on Windows.
+#define VOLATILE_MEMORY_BARRIER() asm volatile ("dmb sy" : : : "memory")
+#else
//
// For GCC, we prevent reordering by the compiler by inserting the following after a volatile
// load (to prevent subsequent operations from moving before the read), and before a volatile
@@ -89,6 +93,7 @@
// notice.
//
#define VOLATILE_MEMORY_BARRIER() asm volatile ("" : : : "memory")
+#endif // !_ARM_
#elif defined(_ARM_) && _ISO_VOLATILE
// ARM has a very weak memory model and very few tools to control that model. We're forced to perform a full
// memory barrier to preserve the volatile semantics. Technically this is only necessary on MP systems but we
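As context for the volatile.h hunk above: on ARM's weak memory model a compiler-only barrier is not enough, so a full dmb sy fence is issued, mirroring what MemoryBarrier() does on ARM Windows. A standalone sketch (GCC/Clang inline-asm syntax assumed; the helper name is invented):

    #if defined(__arm__)
    // Hardware fence: orders loads/stores across cores on ARM's weak model.
    #define VOLATILE_MEMORY_BARRIER() asm volatile ("dmb sy" : : : "memory")
    #else
    // Compiler-only fence: x86/AMD64 hardware ordering is already strong.
    #define VOLATILE_MEMORY_BARRIER() asm volatile ("" : : : "memory")
    #endif

    template <typename T>
    T VolatileLoadSketch(const T* p)
    {
        T value = *static_cast<const volatile T*>(p); // force a real load
        VOLATILE_MEMORY_BARRIER(); // later accesses cannot be hoisted above it
        return value;
    }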
diff --git a/src/inc/winwrap.h b/src/inc/winwrap.h
index 65a8e6c2d0..0f61f47bd2 100644
--- a/src/inc/winwrap.h
+++ b/src/inc/winwrap.h
@@ -838,7 +838,7 @@ InterlockedCompareExchangePointer (
#endif // _X86_ && _MSC_VER
-#ifdef _ARM_
+#if defined(_ARM_) & !defined(FEATURE_PAL)
//
// InterlockedCompareExchangeAcquire/InterlockedCompareExchangeRelease is not mapped in SDK to the correct intrinsics. Remove once
// the SDK definition is fixed (OS Bug #516255)
diff --git a/src/jit/CMakeLists.txt b/src/jit/CMakeLists.txt
index 69a3fa371c..2612331d32 100644
--- a/src/jit/CMakeLists.txt
+++ b/src/jit/CMakeLists.txt
@@ -12,7 +12,7 @@ if (IS_64BIT_BUILD EQUAL 1)
endif (IS_64BIT_BUILD EQUAL 1)
endif (WIN32)
-set( SOURCES
+set( JIT_SOURCES
alloc.cpp
bitset.cpp
block.cpp
@@ -59,15 +59,35 @@ set( SOURCES
loopcloning.cpp
lower.cpp
lsra.cpp
- emitxarch.cpp
- targetamd64.cpp
- lowerxarch.cpp
- codegenxarch.cpp
- simd.cpp
- simdcodegenxarch.cpp
- unwindamd64.cpp
)
+if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ set( ARCH_SOURCES
+ targetamd64.cpp
+ unwindamd64.cpp
+ emitxarch.cpp
+ lowerxarch.cpp
+ codegenxarch.cpp
+ simdcodegenxarch.cpp
+ simd.cpp
+ )
+elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
+ set( ARCH_SOURCES
+ emitarm.cpp
+ targetarm.cpp
+ lowerarm.cpp
+ codegenarm.cpp
+ unwindarm.cpp
+ )
+else()
+ message(FATAL_ERROR "Only ARM and AMD64 are supported")
+endif()
+
+set( SOURCES
+ ${JIT_SOURCES}
+ ${ARCH_SOURCES}
+ )
+
convert_to_absolute_path(SOURCES ${SOURCES})
if( WIN32 )
diff --git a/src/jit/codegenarm.cpp b/src/jit/codegenarm.cpp
index db35662f94..b3dc2d2534 100644
--- a/src/jit/codegenarm.cpp
+++ b/src/jit/codegenarm.cpp
@@ -778,7 +778,7 @@ void CodeGen::genCodeForBBlist()
}
}
}
-#endif _TARGET_AMD64_
+#endif //_TARGET_AMD64_
/* Do we need to generate a jump or return? */
@@ -1010,7 +1010,7 @@ void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size,
getEmitter()->emitIns_R_AI(INS_lea, EA_PTR_DSP_RELOC, reg, imm);
}
else
-#endif _TARGET_AMD64_
+#endif // _TARGET_AMD64_
{
getEmitter()->emitIns_R_I(INS_mov, size, reg, imm);
}
diff --git a/src/jit/codegenclassic.h b/src/jit/codegenclassic.h
index 04f49a3541..6919599cdf 100644
--- a/src/jit/codegenclassic.h
+++ b/src/jit/codegenclassic.h
@@ -703,4 +703,4 @@ protected :
#endif // LEGACY_BACKEND
-#endif _CODEGENCLASSIC_H_
+#endif // _CODEGENCLASSIC_H_
diff --git a/src/jit/codegencommon.cpp b/src/jit/codegencommon.cpp
index 6e8a403295..0828a160c9 100644
--- a/src/jit/codegencommon.cpp
+++ b/src/jit/codegencommon.cpp
@@ -10824,6 +10824,9 @@ CORINFO_CLASS_HANDLE Compiler::GetHfaClassHandle(GenTreePtr tree)
case GT_ASG:
assert(tree->gtOp.gtOp1->gtOper == GT_LCL_VAR || tree->gtOp.gtOp1->gtOper == GT_LCL_FLD);
return GetHfaClassHandle(tree->gtOp.gtOp1);
+
+ default:
+ unreached();
}
}
return NO_CLASS_HANDLE;
diff --git a/src/jit/codegenlegacy.cpp b/src/jit/codegenlegacy.cpp
index 18277f114f..e37322d3b4 100644
--- a/src/jit/codegenlegacy.cpp
+++ b/src/jit/codegenlegacy.cpp
@@ -15,7 +15,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#ifdef _MSC_VER
#pragma hdrstop
#endif
-#include "CodeGen.h"
+#include "codegen.h"
#ifdef LEGACY_BACKEND // This file is NOT used for the '!LEGACY_BACKEND' that uses the linear scan register allocator
@@ -655,6 +655,9 @@ void CodeGen::genComputeReg(GenTreePtr tree,
bool freeOnly)
{
noway_assert(tree->gtType != TYP_VOID);
+
+ regNumber reg;
+ regNumber rg2;
#if FEATURE_STACK_FP_X87
noway_assert(genActualType(tree->gtType) == TYP_INT ||
@@ -692,8 +695,7 @@ void CodeGen::genComputeReg(GenTreePtr tree,
if ((tree->OperGet() == GT_MUL) && (tree->gtFlags & GTF_MUL_64RSLT))
goto REG_OK;
- regNumber reg = tree->gtRegNum;
- regNumber rg2;
+ reg = tree->gtRegNum;
/* Did the value end up in an acceptable register? */
@@ -1396,6 +1398,12 @@ bool CodeGen::genMakeIndAddrMode(GenTreePtr addr,
GenTreePtr tmp;
int ixv = INT_MAX; // unset value
+
+ GenTreePtr scaledIndexVal;
+
+ regMaskTP newLiveMask;
+ regMaskTP rv1Mask;
+ regMaskTP rv2Mask;
/* Deferred address mode forming NYI for x86 */
@@ -1476,7 +1484,7 @@ bool CodeGen::genMakeIndAddrMode(GenTreePtr addr,
the scaled value */
scaledIndex = NULL;
- GenTreePtr scaledIndexVal = NULL;
+ scaledIndexVal = NULL;
if (operIsArrIndex && rv2 != NULL
&& (rv2->gtOper == GT_MUL || rv2->gtOper == GT_LSH)
@@ -1692,9 +1700,9 @@ bool CodeGen::genMakeIndAddrMode(GenTreePtr addr,
/* Generate the second operand first */
// Determine what registers go live between rv2 and rv1
- regMaskTP newLiveMask = genNewLiveRegMask(rv2, rv1);
+ newLiveMask = genNewLiveRegMask(rv2, rv1);
- regMaskTP rv2Mask = regMask & ~newLiveMask;
+ rv2Mask = regMask & ~newLiveMask;
rv2Mask &= ~rv1->gtRsvdRegs;
if (rv2Mask == RBM_NONE)
@@ -1731,9 +1739,9 @@ bool CodeGen::genMakeIndAddrMode(GenTreePtr addr,
/* Get the first operand into a register */
// Determine what registers go live between rv1 and rv2
- regMaskTP newLiveMask = genNewLiveRegMask(rv1, rv2);
+ newLiveMask = genNewLiveRegMask(rv1, rv2);
- regMaskTP rv1Mask = regMask & ~newLiveMask;
+ rv1Mask = regMask & ~newLiveMask;
rv1Mask &= ~rv2->gtRsvdRegs;
if (rv1Mask == RBM_NONE)
@@ -3094,7 +3102,7 @@ AGAIN:
// do not need an additional null-check
/* Do this only if the GTF_EXCEPT or GTF_IND_VOLATILE flag is set on the indir */
else if ((tree->gtFlags & GTF_IND_ARR_INDEX) == 0 &&
- (tree->gtFlags & GTF_EXCEPT | GTF_IND_VOLATILE))
+ ((tree->gtFlags & GTF_EXCEPT) | GTF_IND_VOLATILE))
{
/* Compare against any register to do null-check */
#if defined(_TARGET_XARCH_)
@@ -3966,6 +3974,12 @@ emitJumpKind CodeGen::genCondSetFlags(GenTreePtr cond)
regMaskTP addrReg2 = RBM_NONE;
emitJumpKind jumpKind = EJ_jmp; // We borrow EJ_jmp for the cases where we don't know yet
// which conditional instruction to use.
+
+ bool byteCmp;
+ bool shortCmp;
+
+ regMaskTP newLiveMask;
+ regNumber op1Reg;
/* Are we comparing against a constant? */
@@ -4337,8 +4351,8 @@ emitJumpKind CodeGen::genCondSetFlags(GenTreePtr cond)
// We reach here if op2 was not a GT_CNS_INT
//
- bool byteCmp; byteCmp = false;
- bool shortCmp; shortCmp = false;
+ byteCmp = false;
+ shortCmp = false;
if (op1Type == op2->gtType)
{
@@ -4445,7 +4459,7 @@ NO_SMALL_CMP:
assert(addrReg1 == 0);
// Determine what registers go live between op1 and op2
- regMaskTP newLiveMask = genNewLiveRegMask(op1, op2);
+ newLiveMask = genNewLiveRegMask(op1, op2);
// Setup regNeed with the set of register that we suggest for op1 to be in
//
@@ -4468,7 +4482,7 @@ NO_SMALL_CMP:
genComputeReg(op1, regNeed, RegSet::ANY_REG, RegSet::FREE_REG);
noway_assert(op1->gtFlags & GTF_REG_VAL);
- regNumber op1Reg; op1Reg = op1->gtRegNum;
+ op1Reg = op1->gtRegNum;
// Setup regNeed with the set of register that we require for op1 to be in
//
@@ -5374,7 +5388,7 @@ void CodeGen::genCodeForTreeLeaf_GT_JMP(GenTreePtr tree)
}
}
else
-#endif _TARGET_ARM_
+#endif //_TARGET_ARM_
{
var_types loadType = varDsc->TypeGet();
regNumber argReg = varDsc->lvArgReg; // incoming arg register
@@ -6601,6 +6615,8 @@ void CodeGen::genCodeForTreeSmpBinArithLogOp(GenTreePtr tree,
bEnoughRegs &&
genMakeIndAddrMode(tree, NULL, true, needReg, RegSet::FREE_REG, &regs, false))
{
+ emitAttr size;
+
/* Is the value now computed in some register? */
if (tree->gtFlags & GTF_REG_VAL)
@@ -6671,7 +6687,7 @@ void CodeGen::genCodeForTreeSmpBinArithLogOp(GenTreePtr tree,
// caused when op1 or op2 are enregistered variables.
reg = regSet.rsPickReg(needReg, bestReg);
- emitAttr size = emitActualTypeSize(treeType);
+ size = emitActualTypeSize(treeType);
/* Generate "lea reg, [addr-mode]" */
@@ -10648,7 +10664,7 @@ void CodeGen::genCodeForNumericCast(GenTreePtr tree,
reg = op1->gtRegNum;
#else // _TARGET_64BIT_
reg = genRegPairLo(op1->gtRegPair);
-#endif _TARGET_64BIT_
+#endif //_TARGET_64BIT_
genCodeForTree_DONE(tree, reg);
return;
@@ -12854,7 +12870,7 @@ void CodeGen::genCodeForBBlist()
genStackLevel = 0;
#if FEATURE_STACK_FP_X87
genResetFPstkLevel();
-#endif FEATURE_STACK_FP_X87
+#endif //FEATURE_STACK_FP_X87
#if !FEATURE_FIXED_OUT_ARGS
/* Check for inserted throw blocks and adjust genStackLevel */
@@ -15952,7 +15968,7 @@ void CodeGen::genEmitHelperCall(unsigned helper,
void * addr = NULL, **pAddr = NULL;
// Don't ask VM if it hasn't requested ELT hooks
-#if defined(_TARGET_ARM_) && defined(DEBUG)
+#if defined(_TARGET_ARM_) && defined(DEBUG) && defined(PROFILING_SUPPORTED)
if (!compiler->compProfilerHookNeeded &&
compiler->opts.compJitELTHookEnabled &&
(helper == CORINFO_HELP_PROF_FCN_ENTER ||
diff --git a/src/jit/compiler.h b/src/jit/compiler.h
index 1facb9f85f..e1b8e9bbd9 100644
--- a/src/jit/compiler.h
+++ b/src/jit/compiler.h
@@ -1411,8 +1411,8 @@ public:
var_types GetHfaType(GenTreePtr tree);
unsigned GetHfaSlots(GenTreePtr tree);
- inline var_types GetHfaType(CORINFO_CLASS_HANDLE hClass);
- inline unsigned GetHfaSlots(CORINFO_CLASS_HANDLE hClass);
+ var_types GetHfaType(CORINFO_CLASS_HANDLE hClass);
+ unsigned GetHfaSlots(CORINFO_CLASS_HANDLE hClass);
#endif // _TARGET_ARM_
@@ -6618,7 +6618,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#else
assert(!"getFPInstructionSet() is not implemented for target arch");
unreached();
- InstructionSet_NONE;
+ return InstructionSet_NONE;
#endif
}
diff --git a/src/jit/emit.h b/src/jit/emit.h
index 913fdcb692..fa53f2c5cc 100644
--- a/src/jit/emit.h
+++ b/src/jit/emit.h
@@ -932,7 +932,7 @@ protected:
}
void iiaSetInstrCount(int count)
{
- assert(abs(count < 10));
+ assert(abs(count) < 10);
iiaEncodedInstrCount = (count << iaut_SHIFT) | iaut_INST_COUNT;
}
diff --git a/src/jit/emitarm.cpp b/src/jit/emitarm.cpp
index cca259ab2e..2c87c55860 100644
--- a/src/jit/emitarm.cpp
+++ b/src/jit/emitarm.cpp
@@ -1256,6 +1256,11 @@ emitter::insSize emitter::emitInsSize(insFormat insFmt)
unsigned uval32 = (unsigned) val32;
unsigned imm8 = uval32 & 0xff;
unsigned encode = imm8 >> 7;
+ unsigned imm32a;
+ unsigned imm32b;
+ unsigned imm32c;
+ unsigned mask32;
+ unsigned temp;
/* encode = 0000x */
if (imm8 == uval32)
@@ -1263,7 +1268,7 @@ emitter::insSize emitter::emitInsSize(insFormat insFmt)
goto DONE;
}
- unsigned imm32a = (imm8 << 16) | imm8;
+ imm32a = (imm8 << 16) | imm8;
/* encode = 0001x */
if (imm32a == uval32)
{
@@ -1271,7 +1276,7 @@ emitter::insSize emitter::emitInsSize(insFormat insFmt)
goto DONE;
}
- unsigned imm32b = (imm32a << 8);
+ imm32b = (imm32a << 8);
/* encode = 0010x */
if (imm32b == uval32)
{
@@ -1279,7 +1284,7 @@ emitter::insSize emitter::emitInsSize(insFormat insFmt)
goto DONE;
}
- unsigned imm32c = (imm32a | imm32b);
+ imm32c = (imm32a | imm32b);
/* encode = 0011x */
if (imm32c == uval32)
{
@@ -1287,8 +1292,7 @@ emitter::insSize emitter::emitInsSize(insFormat insFmt)
goto DONE;
}
- unsigned mask32 = 0x00000ff;
- unsigned temp;
+ mask32 = 0x00000ff;
encode = 31; /* 11111 */
do {
@@ -1557,6 +1561,9 @@ COMMON_PUSH_POP:
assert(!"Instruction cannot be encoded");
}
break;
+
+ default:
+ unreached();
}
assert((fmt == IF_T1_B) ||
(fmt == IF_T1_L0) ||
@@ -1616,7 +1623,9 @@ void emitter::emitIns_R(instruction ins,
case INS_mvn:
emitIns_R_R_I(ins, attr, reg, reg, 0);
return;
-
+
+ default:
+ unreached();
}
assert((fmt == IF_T1_D1) ||
(fmt == IF_T2_E2));
@@ -1929,6 +1938,9 @@ void emitter::emitIns_R_I(instruction ins,
assert(!"Instruction cannot be encoded");
}
break;
+
+ default:
+ unreached();
}
assert((fmt == IF_T1_F ) ||
(fmt == IF_T1_J0) ||
@@ -2313,6 +2325,9 @@ void emitter::emitIns_R_I_I(instruction ins,
sf = INS_FLAGS_NOT_SET;
}
break;
+
+ default:
+ unreached();
}
assert(fmt == IF_T2_D1);
assert(sf != INS_FLAGS_DONT_CARE);
@@ -3145,6 +3160,9 @@ void emitter::emitIns_R_R_I_I(instruction ins,
fmt = IF_T2_D0;
sf = INS_FLAGS_NOT_SET;
break;
+
+ default:
+ unreached();
}
assert((fmt == IF_T2_D0));
assert(sf != INS_FLAGS_DONT_CARE);
@@ -3326,6 +3344,9 @@ COMMON_THUMB2_LDST:
assert(!"Instruction cannot be encoded");
}
break;
+
+ default:
+ unreached();
}
assert((fmt == IF_T2_C0) ||
(fmt == IF_T2_E0) ||
@@ -3385,6 +3406,8 @@ void emitter::emitIns_R_R_R_R(instruction ins,
case INS_mls:
fmt = IF_T2_F2;
break;
+ default:
+ unreached();
}
assert((fmt == IF_T2_F1) || (fmt == IF_T2_F2));
@@ -4246,6 +4269,9 @@ void emitter::emitIns_J(instruction ins,
case INS_ble:
fmt = IF_LARGEJMP; /* Assume the jump will be long */
break;
+
+ default:
+ unreached();
}
assert((fmt == IF_LARGEJMP) ||
(fmt == IF_T2_J2));
@@ -4390,6 +4416,8 @@ void emitter::emitIns_R_L (instruction ins,
case INS_movw:
fmt = IF_T2_N1;
break;
+ default:
+ unreached();
}
assert(fmt == IF_T2_N1);
@@ -6661,6 +6689,9 @@ static bool insAlwaysSetFlags(instruction ins)
case INS_tst:
result = true;
break;
+
+ default:
+ break;
}
return result;
}
@@ -6853,7 +6884,7 @@ void emitter::emitDispReg(regNumber reg, emitAttr attr, bool add
{
if (isFloatReg(reg))
{
- char *size = attr == EA_8BYTE ? "d" : "s";
+ const char *size = attr == EA_8BYTE ? "d" : "s";
printf("%s%s", size, emitFloatRegName(reg, attr)+1);
}
else
diff --git a/src/jit/flowgraph.cpp b/src/jit/flowgraph.cpp
index 62bf78758b..e7b7584ae8 100644
--- a/src/jit/flowgraph.cpp
+++ b/src/jit/flowgraph.cpp
@@ -12269,7 +12269,7 @@ void Compiler::fgCreateFunclets()
// Setup the root FuncInfoDsc and prepare to start associating
// FuncInfoDsc's with their corresponding EH region
- memset(funcInfo, 0, funcCnt * sizeof(FuncInfoDsc));
+ memset((void*)funcInfo, 0, funcCnt * sizeof(FuncInfoDsc));
assert(funcInfo[0].funKind == FUNC_ROOT);
funcIdx = 1;
diff --git a/src/jit/instr.cpp b/src/jit/instr.cpp
index b771f7cdea..8486175f10 100644
--- a/src/jit/instr.cpp
+++ b/src/jit/instr.cpp
@@ -496,7 +496,7 @@ void CodeGen::inst_RV_IV(instruction ins,
#ifndef LEGACY_BACKEND
// TODO-Cleanup: Add a comment about why this is unreached() for RyuJIT backend.
unreached();
-#else LEGACY_BACKEND
+#else //LEGACY_BACKEND
regNumber tmpReg = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(reg));
instGen_Set_Reg_To_Imm(size, tmpReg, val);
getEmitter()->emitIns_R_R(ins, size, reg, tmpReg, flags);
diff --git a/src/jit/regalloc.cpp b/src/jit/regalloc.cpp
index faa03d2bef..839f497f4a 100644
--- a/src/jit/regalloc.cpp
+++ b/src/jit/regalloc.cpp
@@ -1930,49 +1930,49 @@ PREDICT_REG_COMMON:
else if (rpHasVarIndexForPredict(predictReg))
{
/* Get the tracked local variable that has an lvVarIndex of tgtIndex1 */
+ {
+ unsigned tgtIndex1 = rpGetVarIndexForPredict(predictReg);
+ LclVarDsc * tgtVar = lvaTable + lvaTrackedToVarNum[tgtIndex1];
+ VarSetOps::MakeSingleton(this, tgtIndex1);
- unsigned tgtIndex1 = rpGetVarIndexForPredict(predictReg);
- LclVarDsc * tgtVar = lvaTable + lvaTrackedToVarNum[tgtIndex1];
- VarSetOps::MakeSingleton(this, tgtIndex1);
-
- noway_assert(tgtVar->lvVarIndex == tgtIndex1);
- noway_assert(tgtVar->lvRegNum != REG_STK); /* Must have been enregistered */
+ noway_assert(tgtVar->lvVarIndex == tgtIndex1);
+ noway_assert(tgtVar->lvRegNum != REG_STK); /* Must have been enregistered */
#ifndef _TARGET_AMD64_
- // On amd64 we have the occasional spec-allowed implicit conversion from TYP_I_IMPL to TYP_INT
- // so this assert is meaningless
- noway_assert((type != TYP_LONG) || (tgtVar->TypeGet() == TYP_LONG));
+ // On amd64 we have the occasional spec-allowed implicit conversion from TYP_I_IMPL to TYP_INT
+ // so this assert is meaningless
+ noway_assert((type != TYP_LONG) || (tgtVar->TypeGet() == TYP_LONG));
#endif // !_TARGET_AMD64_
-
- if (varDsc->lvTracked)
- {
- unsigned srcIndex; srcIndex = varDsc->lvVarIndex;
-
- // If this register has it's last use here then we will prefer
- // to color to the same register as tgtVar.
- if (lastUse)
- {
- /*
- * Add an entry in the lvaVarPref graph to indicate
- * that it would be worthwhile to color these two variables
- * into the same physical register.
- * This will help us avoid having an extra copy instruction
- */
- VarSetOps::AddElemD(this, lvaVarPref[srcIndex], tgtIndex1);
- VarSetOps::AddElemD(this, lvaVarPref[tgtIndex1], srcIndex);
- }
-
- // Add a variable interference from srcIndex to each of the last use variables
- if (!VarSetOps::IsEmpty(this, rpLastUseVars))
+
+ if (varDsc->lvTracked)
{
- rpRecordVarIntf(srcIndex, rpLastUseVars
- DEBUGARG( "src reg conflict"));
+ unsigned srcIndex; srcIndex = varDsc->lvVarIndex;
+
+ // If this register has it's last use here then we will prefer
+ // to color to the same register as tgtVar.
+ if (lastUse)
+ {
+ /*
+ * Add an entry in the lvaVarPref graph to indicate
+ * that it would be worthwhile to color these two variables
+ * into the same physical register.
+ * This will help us avoid having an extra copy instruction
+ */
+ VarSetOps::AddElemD(this, lvaVarPref[srcIndex], tgtIndex1);
+ VarSetOps::AddElemD(this, lvaVarPref[tgtIndex1], srcIndex);
+ }
+
+ // Add a variable interference from srcIndex to each of the last use variables
+ if (!VarSetOps::IsEmpty(this, rpLastUseVars))
+ {
+ rpRecordVarIntf(srcIndex, rpLastUseVars
+ DEBUGARG( "src reg conflict"));
+ }
}
- }
- rpAsgVarNum = tgtIndex1;
-
- /* We will rely on the target enregistered variable from the GT_ASG */
- varDsc = tgtVar;
-
+ rpAsgVarNum = tgtIndex1;
+
+ /* We will rely on the target enregistered variable from the GT_ASG */
+ varDsc = tgtVar;
+ }
GRAB_COUNT:
unsigned grabCount; grabCount = 0;
@@ -5405,6 +5405,16 @@ regMaskTP Compiler::rpPredictAssignRegVars(regMaskTP regAvail)
for (unsigned sortNum = 0; sortNum < lvaCount; sortNum++)
{
bool notWorthy = false;
+
+ unsigned varIndex;
+ bool isDouble;
+ regMaskTP regAvailForType;
+ var_types regType;
+ regMaskTP avoidReg;
+ unsigned customVarOrderSize;
+ regNumber customVarOrder[MAX_VAR_ORDER_SIZE];
+ bool firstHalf;
+ regNumber saveOtherReg;
varDsc = lvaRefSorted[sortNum];
@@ -5430,7 +5440,7 @@ regMaskTP Compiler::rpPredictAssignRegVars(regMaskTP regAvail)
goto CANT_REG;
/* Get hold of the index and the interference mask for the variable */
- unsigned varIndex = varDsc->lvVarIndex;
+ varIndex = varDsc->lvVarIndex;
// Remove 'varIndex' from unprocessedVars
VarSetOps::RemoveElemD(this, unprocessedVars, varIndex);
@@ -5479,9 +5489,6 @@ regMaskTP Compiler::rpPredictAssignRegVars(regMaskTP regAvail)
OK_TO_ENREGISTER:
- regMaskTP regAvailForType;
- var_types regType;
-
if (varTypeIsFloating(varDsc->TypeGet()))
{
regType = varDsc->TypeGet();
@@ -5494,7 +5501,7 @@ OK_TO_ENREGISTER:
}
#ifdef _TARGET_ARM_
- bool isDouble = (varDsc->TypeGet() == TYP_DOUBLE);
+ isDouble = (varDsc->TypeGet() == TYP_DOUBLE);
if (isDouble)
{
@@ -5516,7 +5523,6 @@ OK_TO_ENREGISTER:
}
// Set of registers to avoid when performing register allocation
- regMaskTP avoidReg;
avoidReg = RBM_NONE;
if (!varDsc->lvIsRegArg)
@@ -5568,13 +5574,12 @@ OK_TO_ENREGISTER:
// Now we will try to predict which register the variable
// could be enregistered in
- unsigned customVarOrderSize; customVarOrderSize = MAX_VAR_ORDER_SIZE;
- regNumber customVarOrder[MAX_VAR_ORDER_SIZE];
+ customVarOrderSize = MAX_VAR_ORDER_SIZE;
raSetRegVarOrder(regType, customVarOrder, &customVarOrderSize, varDsc->lvPrefReg, avoidReg);
- bool firstHalf; firstHalf = false;
- regNumber saveOtherReg; saveOtherReg = DUMMY_INIT(REG_NA);
+ firstHalf = false;
+ saveOtherReg = DUMMY_INIT(REG_NA);
for (regInx = 0;
regInx < customVarOrderSize;
@@ -5641,7 +5646,7 @@ OK_TO_ENREGISTER:
// otherwise we will spill this callee saved registers,
// because its uses when combined with the uses of
// other yet to be processed candidates exceed our threshold.
- totalRefCntWtd = totalRefCntWtd;
+ // totalRefCntWtd = totalRefCntWtd;
}
diff --git a/src/jit/registerarm.h b/src/jit/registerarm.h
index 1de25b289e..2eb4691699 100644
--- a/src/jit/registerarm.h
+++ b/src/jit/registerarm.h
@@ -32,7 +32,7 @@ REGDEF(LR, 14, 0x4000, "lr" )
REGDEF(PC, 15, 0x8000, "pc" )
#define FPBASE 16
-#define VFPMASK(x) (__int64(1) << (x+FPBASE))
+#define VFPMASK(x) (((__int64)1) << (x+FPBASE))
REGDEF(F0, 0+FPBASE, VFPMASK(0), "f0")
REGDEF(F1, 1+FPBASE, VFPMASK(1), "f1")
diff --git a/src/jit/registerfp.cpp b/src/jit/registerfp.cpp
index 39d619a1cb..b5a40b9d1f 100644
--- a/src/jit/registerfp.cpp
+++ b/src/jit/registerfp.cpp
@@ -479,6 +479,8 @@ void CodeGen::genFloatAssign(GenTree *tree)
genUpdateLife(op2);
goto CHK_VOLAT_UNALIGN;
+ default:
+ break;
}
// Is the op2 (RHS) more complex than op1 (LHS)?
@@ -730,6 +732,8 @@ void CodeGen::genLoadFloat(GenTreePtr tree, regNumber reg)
}
}
break;
+ default:
+ break;
}
if (unalignedLoad)
diff --git a/src/jit/regset.cpp b/src/jit/regset.cpp
index 0f4425f960..15e6eb7275 100644
--- a/src/jit/regset.cpp
+++ b/src/jit/regset.cpp
@@ -1831,6 +1831,8 @@ regNumber RegSet::rsPickReg(regMaskTP regMask,
regMaskTP regBest)
{
regNumber regNum;
+ regMaskTP spillMask;
+ regMaskTP canGrabMask;
#ifdef DEBUG
if (rsStressRegs() >= 1 )
@@ -1906,8 +1908,6 @@ TRY_ALL:
/* Now let's consider all available registers */
- regMaskTP spillMask;
-
/* Were we limited in our consideration? */
if (!regMask)
@@ -1936,7 +1936,7 @@ TRY_ALL:
/* Make sure we can spill some register. */
- regMaskTP canGrabMask = rsRegMaskCanGrab();
+ canGrabMask = rsRegMaskCanGrab();
if ((spillMask & canGrabMask) == 0)
spillMask = canGrabMask;
diff --git a/src/jit/unwind.h b/src/jit/unwind.h
index df5b1ca58f..a66226ab08 100644
--- a/src/jit/unwind.h
+++ b/src/jit/unwind.h
@@ -71,7 +71,8 @@ protected:
UnwindBase() { }
~UnwindBase() { }
-#ifdef DEBUG
+// TODO: How do we get the ability to access uwiComp without error on Clang?
+#if defined(DEBUG) && !defined(__GNUC__)
template<typename T>
T dspPtr(T p)
diff --git a/src/jit/unwindarm.cpp b/src/jit/unwindarm.cpp
index 55495f85d1..feef942fa7 100644
--- a/src/jit/unwindarm.cpp
+++ b/src/jit/unwindarm.cpp
@@ -414,9 +414,9 @@ void Compiler::unwindEmit(void* pHotCode, void* pColdCode)
void Compiler::unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode)
{
// Verify that the JIT enum is in sync with the JIT-EE interface enum
- static_assert_no_msg(FUNC_ROOT == CORJIT_FUNC_ROOT);
- static_assert_no_msg(FUNC_HANDLER == CORJIT_FUNC_HANDLER);
- static_assert_no_msg(FUNC_FILTER == CORJIT_FUNC_FILTER);
+ static_assert_no_msg(FUNC_ROOT == (FuncKind)CORJIT_FUNC_ROOT);
+ static_assert_no_msg(FUNC_HANDLER == (FuncKind)CORJIT_FUNC_HANDLER);
+ static_assert_no_msg(FUNC_FILTER == (FuncKind)CORJIT_FUNC_FILTER);
func->uwi.Allocate((CorJitFuncKind)func->funKind, pHotCode, pColdCode, true);
diff --git a/src/pal/inc/pal.h b/src/pal/inc/pal.h
index 547fab1a55..a942ed4f63 100644
--- a/src/pal/inc/pal.h
+++ b/src/pal/inc/pal.h
@@ -94,6 +94,8 @@ extern "C" {
#define _M_IA64 64100
#elif defined(__x86_64__) && !defined(_M_AMD64)
#define _M_AMD64 100
+#elif defined(__ARM_ARCH) && !defined(_M_ARM)
+#define _M_ARM 7
#endif
#if defined(_M_IX86) && !defined(_X86_)
@@ -114,6 +116,8 @@ extern "C" {
#define _IA64_
#elif defined(_M_AMD64) && !defined(_AMD64_)
#define _AMD64_
+#elif defined(_M_ARM) && !defined(_ARM_)
+#define _ARM_
#endif
#endif // !_MSC_VER
@@ -191,6 +195,14 @@ extern "C" {
#endif // _MSC_VER
+#ifndef FORCEINLINE
+#if _MSC_VER < 1200
+#define FORCEINLINE inline
+#else
+#define FORCEINLINE __forceinline
+#endif
+#endif
+
#ifdef _M_ALPHA
typedef struct {
@@ -2942,6 +2954,185 @@ typedef struct _KNONVOLATILE_CONTEXT_POINTERS {
} KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS;
+#elif defined(_ARM_)
+
+#define CONTEXT_ARM 0x00200000L
+
+// end_wx86
+
+#define CONTEXT_CONTROL (CONTEXT_ARM | 0x1L)
+#define CONTEXT_INTEGER (CONTEXT_ARM | 0x2L)
+#define CONTEXT_FLOATING_POINT (CONTEXT_ARM | 0x4L)
+#define CONTEXT_DEBUG_REGISTERS (CONTEXT_ARM | 0x8L)
+
+#define CONTEXT_FULL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT)
+
+#define CONTEXT_ALL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS)
+
+#define CONTEXT_EXCEPTION_ACTIVE 0x8000000L
+#define CONTEXT_SERVICE_ACTIVE 0x10000000L
+#define CONTEXT_EXCEPTION_REQUEST 0x40000000L
+#define CONTEXT_EXCEPTION_REPORTING 0x80000000L
+
+//
+// This flag is set by the unwinder if it has unwound to a call
+// site, and cleared whenever it unwinds through a trap frame.
+// It is used by language-specific exception handlers to help
+// differentiate exception scopes during dispatching.
+//
+
+#define CONTEXT_UNWOUND_TO_CALL 0x20000000
+
+//
+// Specify the number of breakpoints and watchpoints that the OS
+// will track. Architecturally, ARM supports up to 16. In practice,
+// however, almost no one implements more than 4 of each.
+//
+
+#define ARM_MAX_BREAKPOINTS 8
+#define ARM_MAX_WATCHPOINTS 1
+
+typedef struct _NEON128 {
+ ULONGLONG Low;
+ LONGLONG High;
+} NEON128, *PNEON128;
+
+//
+// Context Frame
+//
+// This frame has several purposes: 1) it is used as an argument to
+// NtContinue, 2) it is used to construct a call frame for APC delivery,
+// and 3) it is used in the user level thread creation routines.
+//
+//
+// The flags field within this record controls the contents of a CONTEXT
+// record.
+//
+// If the context record is used as an input parameter, then for each
+// portion of the context record controlled by a flag whose value is
+// set, it is assumed that that portion of the context record contains
+// valid context. If the context record is being used to modify a thread's
+// context, then only that portion of the thread's context is modified.
+//
+// If the context record is used as an output parameter to capture the
+// context of a thread, then only those portions of the thread's context
+// corresponding to set flags will be returned.
+//
+// CONTEXT_CONTROL specifies Sp, Lr, Pc, and Cpsr
+//
+// CONTEXT_INTEGER specifies R0-R12
+//
+// CONTEXT_FLOATING_POINT specifies Q0-Q15 / D0-D31 / S0-S31
+//
+// CONTEXT_DEBUG_REGISTERS specifies up to 16 of DBGBVR, DBGBCR, DBGWVR,
+// DBGWCR.
+//
+
+typedef struct DECLSPEC_ALIGN(8) _CONTEXT {
+
+ //
+ // Control flags.
+ //
+
+ DWORD ContextFlags;
+
+ //
+ // Integer registers
+ //
+
+ DWORD R0;
+ DWORD R1;
+ DWORD R2;
+ DWORD R3;
+ DWORD R4;
+ DWORD R5;
+ DWORD R6;
+ DWORD R7;
+ DWORD R8;
+ DWORD R9;
+ DWORD R10;
+ DWORD R11;
+ DWORD R12;
+
+ //
+ // Control Registers
+ //
+
+ DWORD Sp;
+ DWORD Lr;
+ DWORD Pc;
+ DWORD Cpsr;
+
+ //
+ // Floating Point/NEON Registers
+ //
+
+ DWORD Fpscr;
+ DWORD Padding;
+ union {
+ NEON128 Q[16];
+ ULONGLONG D[32];
+ DWORD S[32];
+ };
+
+ //
+ // Debug registers
+ //
+
+ DWORD Bvr[ARM_MAX_BREAKPOINTS];
+ DWORD Bcr[ARM_MAX_BREAKPOINTS];
+ DWORD Wvr[ARM_MAX_WATCHPOINTS];
+ DWORD Wcr[ARM_MAX_WATCHPOINTS];
+
+ DWORD Padding2[2];
+
+} CONTEXT, *PCONTEXT, *LPCONTEXT;
+
+//
+// Nonvolatile context pointer record.
+//
+
+typedef struct _KNONVOLATILE_CONTEXT_POINTERS {
+
+ PDWORD R4;
+ PDWORD R5;
+ PDWORD R6;
+ PDWORD R7;
+ PDWORD R8;
+ PDWORD R9;
+ PDWORD R10;
+ PDWORD R11;
+ PDWORD Lr;
+
+ PULONGLONG D8;
+ PULONGLONG D9;
+ PULONGLONG D10;
+ PULONGLONG D11;
+ PULONGLONG D12;
+ PULONGLONG D13;
+ PULONGLONG D14;
+ PULONGLONG D15;
+
+} KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS;
+
+typedef struct _IMAGE_ARM_RUNTIME_FUNCTION_ENTRY {
+ DWORD BeginAddress;
+ union {
+ DWORD UnwindData;
+ struct {
+ DWORD Flag : 2;
+ DWORD FunctionLength : 11;
+ DWORD Ret : 2;
+ DWORD H : 1;
+ DWORD Reg : 3;
+ DWORD R : 1;
+ DWORD L : 1;
+ DWORD C : 1;
+ DWORD StackAdjust : 10;
+ };
+ };
+} IMAGE_ARM_RUNTIME_FUNCTION_ENTRY, * PIMAGE_ARM_RUNTIME_FUNCTION_ENTRY;
+
#else
#error Unknown architecture for defining CONTEXT.
#endif
@@ -3041,6 +3232,13 @@ DWORD
PALAPI
PAL_GetLogicalCpuCountFromOS();
+PALIMPORT
+size_t
+PALAPI
+PAL_GetLogicalProcessorCacheSizeFromOS();
+
+#define GetLogicalProcessorCacheSizeFromOS PAL_GetLogicalProcessorCacheSizeFromOS
+
#ifdef PLATFORM_UNIX
#if defined(__FreeBSD__) && defined(_X86_)
@@ -3059,7 +3257,9 @@ PAL_GetLogicalCpuCountFromOS();
#define PAL_CS_NATIVE_DATA_SIZE 120
#elif defined(__LINUX__) && defined(__x86_64__)
#define PAL_CS_NATIVE_DATA_SIZE 96
+#elif defined(__LINUX__) && defined(_ARM_)
+#define PAL_CS_NATIVE_DATA_SIZE 80
#else
#error PAL_CS_NATIVE_DATA_SIZE is not defined for this architecture
#endif
@@ -3378,7 +3579,6 @@ VirtualProtect(
IN DWORD flNewProtect,
OUT PDWORD lpflOldProtect);
-#if defined(_AMD64_)
typedef struct _MEMORYSTATUSEX {
DWORD dwLength;
DWORD dwMemoryLoad;
@@ -3397,8 +3597,6 @@ PALAPI
GlobalMemoryStatusEx(
IN OUT LPMEMORYSTATUSEX lpBuffer);
-#endif // _AMD64_
-
typedef struct _MEMORY_BASIC_INFORMATION {
PVOID BaseAddress;
PVOID AllocationBase_PAL_Undefined;
@@ -5535,11 +5733,11 @@ PALIMPORT double __cdecl _copysign(double, double);
#ifdef __cplusplus
extern "C++" {
-#if defined(BIT64) && !defined(PAL_STDCPP_COMPAT)
+#if !defined(PAL_STDCPP_COMPAT)
inline __int64 abs(__int64 _X) {
return llabs(_X);
}
-#endif // defined(BIT64) && !defined(PAL_STDCPP_COMPAT)
+#endif // !defined(PAL_STDCPP_COMPAT)
}
#endif
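The long comment block in the new ARM CONTEXT above boils down to flag-gated validity: a register group is meaningful only when its ContextFlags bit is set, both on input and on output. A small sketch (hypothetical helper, building on the definitions above):

    // Returns Pc only when the control group (Sp, Lr, Pc, Cpsr) was captured.
    static DWORD ReadPcIfControlValid(const CONTEXT& ctx)
    {
        if ((ctx.ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL)
            return ctx.Pc;
        return 0; // control registers not requested; Pc holds garbage
    }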
diff --git a/src/pal/inc/pal_mstypes.h b/src/pal/inc/pal_mstypes.h
index e6ec83a098..3b8065f442 100644
--- a/src/pal/inc/pal_mstypes.h
+++ b/src/pal/inc/pal_mstypes.h
@@ -563,9 +563,14 @@ typedef LONG_PTR SSIZE_T, *PSSIZE_T;
#endif
#ifndef PAL_STDCPP_COMPAT
-#if defined(__APPLE_CC__) || defined(__LINUX__)
+#if defined(__APPLE_CC__) || defined(__LINUX__)
+#ifdef BIT64
typedef unsigned long size_t;
typedef long ptrdiff_t;
+#else // !BIT64
+typedef unsigned int size_t;
+typedef int ptrdiff_t;
+#endif // !BIT64
#else
typedef ULONG_PTR size_t;
typedef LONG_PTR ptrdiff_t;
@@ -591,8 +596,13 @@ typedef char16_t WCHAR;
typedef wchar_t WCHAR;
#if defined(__LINUX__)
+#ifdef BIT64
typedef long int intptr_t;
typedef unsigned long int uintptr_t;
+#else // !BIT64
+typedef int intptr_t;
+typedef unsigned int uintptr_t;
+#endif // !BIT64
#else
typedef INT_PTR intptr_t;
typedef UINT_PTR uintptr_t;
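A quick sanity check of what the BIT64 split above is meant to guarantee, namely that these typedefs stay pointer-sized on 32-bit ARM as well as on 64-bit targets (a sketch against the platform headers, not PAL-specific code):

    #include <stddef.h>
    #include <stdint.h>

    static_assert(sizeof(size_t) == sizeof(void*), "size_t tracks pointer width");
    static_assert(sizeof(intptr_t) == sizeof(void*), "intptr_t tracks pointer width");
    static_assert(sizeof(uintptr_t) == sizeof(void*), "uintptr_t tracks pointer width");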
diff --git a/src/pal/inc/rt/intsafe.h b/src/pal/inc/rt/intsafe.h
index 75c8371227..b54cc64244 100644
--- a/src/pal/inc/rt/intsafe.h
+++ b/src/pal/inc/rt/intsafe.h
@@ -99,7 +99,7 @@ typedef LONG HRESULT;
#if defined(MIDL_PASS) || defined(RC_INVOKED) || defined(_M_CEE_PURE) \
|| defined(_68K_) || defined(_MPPC_) || defined(_PPC_) \
- || defined(_M_IA64) || defined(_M_AMD64)
+ || defined(_M_IA64) || defined(_M_AMD64) || defined(__ARM_ARCH)
#ifndef UInt32x32To64
#define UInt32x32To64(a, b) ((unsigned __int64)((ULONG)(a)) * (unsigned __int64)((ULONG)(b)))
diff --git a/src/pal/inc/rt/ntimage.h b/src/pal/inc/rt/ntimage.h
index 66201f0c92..77a19a9b9f 100644
--- a/src/pal/inc/rt/ntimage.h
+++ b/src/pal/inc/rt/ntimage.h
@@ -430,6 +430,7 @@ typedef PIMAGE_NT_HEADERS32 PIMAGE_NT_HEADERS;
// IMAGE_LIBRARY_THREAD_TERM 0x0008 // Reserved.
#define IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE 0x0040 // DLL can move
#define IMAGE_DLLCHARACTERISTICS_NX_COMPAT 0x0100 // Image ix NX compatible
+#define IMAGE_DLLCHARACTERISTICS_NO_SEH 0x0400 // Image does not use SEH. No SE handler may reside in this image
#define IMAGE_DLLCHARACTERISTICS_NO_BIND 0x0800 // Do not bind this image.
#define IMAGE_DLLCHARACTERISTICS_APPCONTAINER 0x1000 // Image should execute in an AppContainer
#define IMAGE_DLLCHARACTERISTICS_WDM_DRIVER 0x2000 // Driver uses WDM model
diff --git a/src/pal/inc/rt/palrt.h b/src/pal/inc/rt/palrt.h
index 0468d17b08..994aa6c0fa 100644
--- a/src/pal/inc/rt/palrt.h
+++ b/src/pal/inc/rt/palrt.h
@@ -208,6 +208,7 @@ inline void *__cdecl operator new(size_t, void *_P)
#define NTAPI __stdcall
#define WINAPI __stdcall
#define CALLBACK __stdcall
+#define NTSYSAPI
#define _WINNT_
@@ -1616,7 +1617,27 @@ EXCEPTION_DISPOSITION
PCONTEXT ContextRecord,
PVOID DispatcherContext
);
-
+
+#if defined(_ARM_)
+
+typedef struct _DISPATCHER_CONTEXT {
+ DWORD ControlPc;
+ DWORD ImageBase;
+ PRUNTIME_FUNCTION FunctionEntry;
+ DWORD EstablisherFrame;
+ DWORD TargetPc;
+ PCONTEXT ContextRecord;
+ PEXCEPTION_ROUTINE LanguageHandler;
+ PVOID HandlerData;
+ PUNWIND_HISTORY_TABLE HistoryTable;
+ DWORD ScopeIndex;
+ BOOLEAN ControlPcIsUnwound;
+ PBYTE NonVolatileRegisters;
+ DWORD Reserved;
+} DISPATCHER_CONTEXT, *PDISPATCHER_CONTEXT;
+
+#else
+
typedef struct _DISPATCHER_CONTEXT {
ULONG64 ControlPc;
ULONG64 ImageBase;
@@ -1629,6 +1650,8 @@ typedef struct _DISPATCHER_CONTEXT {
PUNWIND_HISTORY_TABLE HistoryTable;
} DISPATCHER_CONTEXT, *PDISPATCHER_CONTEXT;
+#endif
+
// #endif // !defined(_TARGET_MAC64)
typedef DISPATCHER_CONTEXT *PDISPATCHER_CONTEXT;
diff --git a/src/pal/inc/unixasmmacros.inc b/src/pal/inc/unixasmmacros.inc
index ee139c451e..45a2642592 100644
--- a/src/pal/inc/unixasmmacros.inc
+++ b/src/pal/inc/unixasmmacros.inc
@@ -5,25 +5,6 @@
#define INVALIDGCVALUE -0x33333333 // 0CCCCCCCDh - the assembler considers it to be a signed integer constant
-.macro NOP_3_BYTE
- nop dword ptr [rax]
-.endm
-
-.macro NOP_2_BYTE
- xchg ax, ax
-.endm
-
-.macro REPRET
- .byte 0xf3
- .byte 0xc3
-.endm
-
-.macro TAILJMP_RAX
- .byte 0x48
- .byte 0xFF
- .byte 0xE0
-.endm
-
#if defined(__APPLE__)
#define C_FUNC(name) _##name
#define EXTERNAL_C_FUNC(name) C_FUNC(name)
@@ -40,51 +21,10 @@
#define C_PLTFUNC(name) name@PLT
#endif
-.macro PATCH_LABEL Name
- .global C_FUNC(\Name)
-C_FUNC(\Name):
-.endm
-
-.macro LEAF_ENTRY Name, Section
- .global C_FUNC(\Name)
-#if defined(__APPLE__)
- .text
-#else
- .type \Name, %function
-#endif
-C_FUNC(\Name):
- .cfi_startproc
-.endm
-
-.macro LEAF_END_MARKED Name, Section
-C_FUNC(\Name\()_End):
- .global C_FUNC(\Name\()_End)
-#if !defined(__APPLE__)
- .size \Name, .-\Name
-#endif
- .cfi_endproc
-.endm
-
.macro LEAF_END Name, Section
LEAF_END_MARKED \Name, \Section
.endm
-.macro PREPARE_EXTERNAL_VAR Name, HelperReg
- mov \HelperReg, [rip + C_FUNC(\Name)@GOTPCREL]
-.endm
-
-.macro push_nonvol_reg Register
- push \Register
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset \Register, 0
-.endm
-
-.macro pop_nonvol_reg Register
- pop \Register
- .cfi_adjust_cfa_offset -8
- .cfi_restore \Register
-.endm
-
.macro NESTED_ENTRY Name, Section, Handler
LEAF_ENTRY \Name, \Section
.ifnc \Handler, NoHandler
@@ -112,243 +52,12 @@ C_FUNC(\Name\()_End):
.macro END_PROLOGUE
.endm
-.macro alloc_stack Size
-.att_syntax
- lea -\Size(%rsp), %rsp
-.intel_syntax noprefix
- .cfi_adjust_cfa_offset \Size
-.endm
-
-.macro free_stack Size
-.att_syntax
- lea \Size(%rsp), %rsp
-.intel_syntax noprefix
- .cfi_adjust_cfa_offset -\Size
-.endm
-
-.macro set_cfa_register Reg, Offset
- .cfi_def_cfa_register \Reg
- .cfi_def_cfa_offset \Offset
-.endm
-
-.macro save_reg_postrsp Reg, Offset
- __Offset = \Offset
- mov qword ptr [rsp + __Offset], \Reg
- .cfi_rel_offset \Reg, __Offset
-.endm
-
-.macro restore_reg Reg, Offset
- __Offset = \Offset
- mov \Reg, [rsp + __Offset]
- .cfi_restore \Reg
-.endm
-
-.macro save_xmm128_postrsp Reg, Offset
- __Offset = \Offset
- movdqa [rsp + __Offset], \Reg
- // NOTE: We cannot use ".cfi_rel_offset \Reg, __Offset" here,
- // the xmm registers are not supported by the libunwind
-.endm
-
-.macro restore_xmm128 Reg, ofs
- __Offset = \ofs
- movdqa \Reg, [rsp + __Offset]
- // NOTE: We cannot use ".cfi_restore \Reg" here,
- // the xmm registers are not supported by the libunwind
-
-.endm
-
-.macro POP_CALLEE_SAVED_REGISTERS
-
- pop_nonvol_reg r12
- pop_nonvol_reg r13
- pop_nonvol_reg r14
- pop_nonvol_reg r15
- pop_nonvol_reg rbx
- pop_nonvol_reg rbp
-
-.endm
-
-.macro push_register Reg
- push \Reg
- .cfi_adjust_cfa_offset 8
-.endm
-
-.macro push_eflags
- pushfq
- .cfi_adjust_cfa_offset 8
-.endm
-
-.macro push_argument_register Reg
- push_register \Reg
-.endm
-
-.macro PUSH_ARGUMENT_REGISTERS
-
- push_argument_register r9
- push_argument_register r8
- push_argument_register rcx
- push_argument_register rdx
- push_argument_register rsi
- push_argument_register rdi
-
-.endm
-
-.macro pop_register Reg
- pop \Reg
- .cfi_adjust_cfa_offset -8
+.macro SETALIAS New, Old
+ .equiv \New, \Old
.endm
-.macro pop_eflags
- popfq
- .cfi_adjust_cfa_offset -8
-.endm
-
-.macro pop_argument_register Reg
- pop_register \Reg
-.endm
-
-.macro POP_ARGUMENT_REGISTERS
-
- pop_argument_register rdi
- pop_argument_register rsi
- pop_argument_register rdx
- pop_argument_register rcx
- pop_argument_register r8
- pop_argument_register r9
-
-.endm
-
-.macro SAVE_FLOAT_ARGUMENT_REGISTERS ofs
-
- save_xmm128_postrsp xmm0, \ofs
- save_xmm128_postrsp xmm1, \ofs + 0x10
- save_xmm128_postrsp xmm2, \ofs + 0x20
- save_xmm128_postrsp xmm3, \ofs + 0x30
- save_xmm128_postrsp xmm4, \ofs + 0x40
- save_xmm128_postrsp xmm5, \ofs + 0x50
- save_xmm128_postrsp xmm6, \ofs + 0x60
- save_xmm128_postrsp xmm7, \ofs + 0x70
-
-.endm
-
-.macro RESTORE_FLOAT_ARGUMENT_REGISTERS ofs
-
- restore_xmm128 xmm0, \ofs
- restore_xmm128 xmm1, \ofs + 0x10
- restore_xmm128 xmm2, \ofs + 0x20
- restore_xmm128 xmm3, \ofs + 0x30
- restore_xmm128 xmm4, \ofs + 0x40
- restore_xmm128 xmm5, \ofs + 0x50
- restore_xmm128 xmm6, \ofs + 0x60
- restore_xmm128 xmm7, \ofs + 0x70
-
-.endm
-
-// Stack layout:
-//
-// (stack parameters)
-// ...
-// return address
-// CalleeSavedRegisters::rbp
-// CalleeSavedRegisters::rbx
-// CalleeSavedRegisters::r15
-// CalleeSavedRegisters::r14
-// CalleeSavedRegisters::r13
-// CalleeSavedRegisters::r12
-// ArgumentRegisters::r9
-// ArgumentRegisters::r8
-// ArgumentRegisters::rcx
-// ArgumentRegisters::rdx
-// ArgumentRegisters::rsi
-// ArgumentRegisters::rdi <- __PWTB_StackAlloc, __PWTB_TransitionBlock
-// padding to align xmm save area
-// xmm7
-// xmm6
-// xmm5
-// xmm4
-// xmm3
-// xmm2
-// xmm1
-// xmm0 <- __PWTB_FloatArgumentRegisters
-// extra locals + padding to qword align
-.macro PROLOG_WITH_TRANSITION_BLOCK extraLocals = 0, stackAllocOnEntry = 0, stackAllocSpill1, stackAllocSpill2, stackAllocSpill3
-
- __PWTB_FloatArgumentRegisters = \extraLocals
-
- .if ((__PWTB_FloatArgumentRegisters % 16) != 0)
- __PWTB_FloatArgumentRegisters = __PWTB_FloatArgumentRegisters + 8
- .endif
-
- __PWTB_StackAlloc = __PWTB_FloatArgumentRegisters + 8 * 16 + 8 // 8 floating point registers
- __PWTB_TransitionBlock = __PWTB_StackAlloc
-
- .if \stackAllocOnEntry >= 4*8
- .error "Max supported stackAllocOnEntry is 3*8"
- .endif
-
- .if \stackAllocOnEntry > 0
- .cfi_adjust_cfa_offset \stackAllocOnEntry
- .endif
-
- // PUSH_CALLEE_SAVED_REGISTERS expanded here
-
- .if \stackAllocOnEntry < 8
- push_nonvol_reg rbp
- mov rbp, rsp
- .endif
-
- .if \stackAllocOnEntry < 2*8
- push_nonvol_reg rbx
- .endif
-
- .if \stackAllocOnEntry < 3*8
- push_nonvol_reg r15
- .endif
-
- push_nonvol_reg r14
- push_nonvol_reg r13
- push_nonvol_reg r12
-
- // ArgumentRegisters
- PUSH_ARGUMENT_REGISTERS
-
- .if \stackAllocOnEntry >= 3*8
- mov \stackAllocSpill3, [rsp + 0x48]
- save_reg_postrsp r15, 0x48
- .endif
-
- .if \stackAllocOnEntry >= 2*8
- mov \stackAllocSpill2, [rsp + 0x50]
- save_reg_postrsp rbx, 0x50
- .endif
-
- .if \stackAllocOnEntry >= 8
- mov \stackAllocSpill1, [rsp + 0x58]
- save_reg_postrsp rbp, 0x58
- lea rbp, [rsp + 0x58]
- .endif
-
- alloc_stack __PWTB_StackAlloc
- SAVE_FLOAT_ARGUMENT_REGISTERS __PWTB_FloatArgumentRegisters
-
- END_PROLOGUE
-
-.endm
-
-.macro EPILOG_WITH_TRANSITION_BLOCK_RETURN
-
- add rsp, __PWTB_StackAlloc
- POP_CALLEE_SAVED_REGISTERS
- ret
-
-.endm
-
-.macro EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
-
- RESTORE_FLOAT_ARGUMENT_REGISTERS __PWTB_FloatArgumentRegisters
- free_stack __PWTB_StackAlloc
- POP_ARGUMENT_REGISTERS
- POP_CALLEE_SAVED_REGISTERS
-
-.endm
+#if defined(_AMD64_)
+#include "unixasmmacrosamd64.inc"
+#elif defined(_ARM_)
+#include "unixasmmacrosarm.inc"
+#endif
\ No newline at end of file
diff --git a/src/pal/inc/unixasmmacrosamd64.inc b/src/pal/inc/unixasmmacrosamd64.inc
new file mode 100644
index 0000000000..4689ad1c90
--- /dev/null
+++ b/src/pal/inc/unixasmmacrosamd64.inc
@@ -0,0 +1,305 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+.macro PATCH_LABEL Name
+ .global C_FUNC(\Name)
+C_FUNC(\Name):
+.endm
+
+.macro LEAF_ENTRY Name, Section
+ .global C_FUNC(\Name)
+#if defined(__APPLE__)
+ .text
+#else
+ .type \Name, %function
+#endif
+C_FUNC(\Name):
+ .cfi_startproc
+.endm
+
+.macro LEAF_END_MARKED Name, Section
+C_FUNC(\Name\()_End):
+ .global C_FUNC(\Name\()_End)
+#if !defined(__APPLE__)
+ .size \Name, .-\Name
+#endif
+ .cfi_endproc
+.endm
+
+.macro NOP_3_BYTE
+ nop dword ptr [rax]
+.endm
+
+.macro NOP_2_BYTE
+ xchg ax, ax
+.endm
+
+.macro REPRET
+ .byte 0xf3
+ .byte 0xc3
+.endm
+
+.macro TAILJMP_RAX
+ .byte 0x48
+ .byte 0xFF
+ .byte 0xE0
+.endm
+
+.macro PREPARE_EXTERNAL_VAR Name, HelperReg
+ mov \HelperReg, [rip + C_FUNC(\Name)@GOTPCREL]
+.endm
+
+.macro push_nonvol_reg Register
+ push \Register
+ .cfi_adjust_cfa_offset 8
+ .cfi_rel_offset \Register, 0
+.endm
+
+.macro pop_nonvol_reg Register
+ pop \Register
+ .cfi_adjust_cfa_offset -8
+ .cfi_restore \Register
+.endm
+
+.macro alloc_stack Size
+.att_syntax
+ lea -\Size(%rsp), %rsp
+.intel_syntax noprefix
+ .cfi_adjust_cfa_offset \Size
+.endm
+
+.macro free_stack Size
+.att_syntax
+ lea \Size(%rsp), %rsp
+.intel_syntax noprefix
+ .cfi_adjust_cfa_offset -\Size
+.endm
+
+.macro set_cfa_register Reg, Offset
+ .cfi_def_cfa_register \Reg
+ .cfi_def_cfa_offset \Offset
+.endm
+
+.macro save_reg_postrsp Reg, Offset
+ __Offset = \Offset
+ mov qword ptr [rsp + __Offset], \Reg
+ .cfi_rel_offset \Reg, __Offset
+.endm
+
+.macro restore_reg Reg, Offset
+ __Offset = \Offset
+ mov \Reg, [rsp + __Offset]
+ .cfi_restore \Reg
+.endm
+
+.macro save_xmm128_postrsp Reg, Offset
+ __Offset = \Offset
+ movdqa [rsp + __Offset], \Reg
+ // NOTE: We cannot use ".cfi_rel_offset \Reg, __Offset" here,
+ // the xmm registers are not supported by the libunwind
+.endm
+
+.macro restore_xmm128 Reg, ofs
+ __Offset = \ofs
+ movdqa \Reg, [rsp + __Offset]
+ // NOTE: We cannot use ".cfi_restore \Reg" here,
+ // the xmm registers are not supported by the libunwind
+
+.endm
+
+.macro POP_CALLEE_SAVED_REGISTERS
+
+ pop_nonvol_reg r12
+ pop_nonvol_reg r13
+ pop_nonvol_reg r14
+ pop_nonvol_reg r15
+ pop_nonvol_reg rbx
+ pop_nonvol_reg rbp
+
+.endm
+
+.macro push_register Reg
+ push \Reg
+ .cfi_adjust_cfa_offset 8
+.endm
+
+.macro push_eflags
+ pushfq
+ .cfi_adjust_cfa_offset 8
+.endm
+
+.macro push_argument_register Reg
+ push_register \Reg
+.endm
+
+.macro PUSH_ARGUMENT_REGISTERS
+
+ push_argument_register r9
+ push_argument_register r8
+ push_argument_register rcx
+ push_argument_register rdx
+ push_argument_register rsi
+ push_argument_register rdi
+
+.endm
+
+.macro pop_register Reg
+ pop \Reg
+ .cfi_adjust_cfa_offset -8
+.endm
+
+.macro pop_eflags
+ popfq
+ .cfi_adjust_cfa_offset -8
+.endm
+
+.macro pop_argument_register Reg
+ pop_register \Reg
+.endm
+
+.macro POP_ARGUMENT_REGISTERS
+
+ pop_argument_register rdi
+ pop_argument_register rsi
+ pop_argument_register rdx
+ pop_argument_register rcx
+ pop_argument_register r8
+ pop_argument_register r9
+
+.endm
+
+.macro SAVE_FLOAT_ARGUMENT_REGISTERS ofs
+
+ save_xmm128_postrsp xmm0, \ofs
+ save_xmm128_postrsp xmm1, \ofs + 0x10
+ save_xmm128_postrsp xmm2, \ofs + 0x20
+ save_xmm128_postrsp xmm3, \ofs + 0x30
+ save_xmm128_postrsp xmm4, \ofs + 0x40
+ save_xmm128_postrsp xmm5, \ofs + 0x50
+ save_xmm128_postrsp xmm6, \ofs + 0x60
+ save_xmm128_postrsp xmm7, \ofs + 0x70
+
+.endm
+
+.macro RESTORE_FLOAT_ARGUMENT_REGISTERS ofs
+
+ restore_xmm128 xmm0, \ofs
+ restore_xmm128 xmm1, \ofs + 0x10
+ restore_xmm128 xmm2, \ofs + 0x20
+ restore_xmm128 xmm3, \ofs + 0x30
+ restore_xmm128 xmm4, \ofs + 0x40
+ restore_xmm128 xmm5, \ofs + 0x50
+ restore_xmm128 xmm6, \ofs + 0x60
+ restore_xmm128 xmm7, \ofs + 0x70
+
+.endm
+
+// Stack layout:
+//
+// (stack parameters)
+// ...
+// return address
+// CalleeSavedRegisters::rbp
+// CalleeSavedRegisters::rbx
+// CalleeSavedRegisters::r15
+// CalleeSavedRegisters::r14
+// CalleeSavedRegisters::r13
+// CalleeSavedRegisters::r12
+// ArgumentRegisters::r9
+// ArgumentRegisters::r8
+// ArgumentRegisters::rcx
+// ArgumentRegisters::rdx
+// ArgumentRegisters::rsi
+// ArgumentRegisters::rdi <- __PWTB_StackAlloc, __PWTB_TransitionBlock
+// padding to align xmm save area
+// xmm7
+// xmm6
+// xmm5
+// xmm4
+// xmm3
+// xmm2
+// xmm1
+// xmm0 <- __PWTB_FloatArgumentRegisters
+// extra locals + padding to qword align
+.macro PROLOG_WITH_TRANSITION_BLOCK extraLocals = 0, stackAllocOnEntry = 0, stackAllocSpill1, stackAllocSpill2, stackAllocSpill3
+
+ __PWTB_FloatArgumentRegisters = \extraLocals
+
+ .if ((__PWTB_FloatArgumentRegisters % 16) != 0)
+ __PWTB_FloatArgumentRegisters = __PWTB_FloatArgumentRegisters + 8
+ .endif
+
+ __PWTB_StackAlloc = __PWTB_FloatArgumentRegisters + 8 * 16 + 8 // 8 floating point registers
+ __PWTB_TransitionBlock = __PWTB_StackAlloc
+
+ .if \stackAllocOnEntry >= 4*8
+ .error "Max supported stackAllocOnEntry is 3*8"
+ .endif
+
+ .if \stackAllocOnEntry > 0
+ .cfi_adjust_cfa_offset \stackAllocOnEntry
+ .endif
+
+ // PUSH_CALLEE_SAVED_REGISTERS expanded here
+
+ .if \stackAllocOnEntry < 8
+ push_nonvol_reg rbp
+ mov rbp, rsp
+ .endif
+
+ .if \stackAllocOnEntry < 2*8
+ push_nonvol_reg rbx
+ .endif
+
+ .if \stackAllocOnEntry < 3*8
+ push_nonvol_reg r15
+ .endif
+
+ push_nonvol_reg r14
+ push_nonvol_reg r13
+ push_nonvol_reg r12
+
+ // ArgumentRegisters
+ PUSH_ARGUMENT_REGISTERS
+
+ .if \stackAllocOnEntry >= 3*8
+ mov \stackAllocSpill3, [rsp + 0x48]
+ save_reg_postrsp r15, 0x48
+ .endif
+
+ .if \stackAllocOnEntry >= 2*8
+ mov \stackAllocSpill2, [rsp + 0x50]
+ save_reg_postrsp rbx, 0x50
+ .endif
+
+ .if \stackAllocOnEntry >= 8
+ mov \stackAllocSpill1, [rsp + 0x58]
+ save_reg_postrsp rbp, 0x58
+ lea rbp, [rsp + 0x58]
+ .endif
+
+ alloc_stack __PWTB_StackAlloc
+ SAVE_FLOAT_ARGUMENT_REGISTERS __PWTB_FloatArgumentRegisters
+
+ END_PROLOGUE
+
+.endm
+
+.macro EPILOG_WITH_TRANSITION_BLOCK_RETURN
+
+ add rsp, __PWTB_StackAlloc
+ POP_CALLEE_SAVED_REGISTERS
+ ret
+
+.endm
+
+.macro EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+
+ RESTORE_FLOAT_ARGUMENT_REGISTERS __PWTB_FloatArgumentRegisters
+ free_stack __PWTB_StackAlloc
+ POP_ARGUMENT_REGISTERS
+ POP_CALLEE_SAVED_REGISTERS
+
+.endm
\ No newline at end of file
diff --git a/src/pal/inc/unixasmmacrosarm.inc b/src/pal/inc/unixasmmacrosarm.inc
new file mode 100644
index 0000000000..40daa7a71c
--- /dev/null
+++ b/src/pal/inc/unixasmmacrosarm.inc
@@ -0,0 +1,225 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+.macro PATCH_LABEL Name
+ .thumb_func
+ .global C_FUNC(\Name)
+C_FUNC(\Name):
+.endm
+
+.macro LEAF_ENTRY Name, Section
+ .thumb_func
+ .global C_FUNC(\Name)
+ .type \Name, %function
+C_FUNC(\Name):
+ .cfi_startproc
+.endm
+
+.macro LEAF_END_MARKED Name, Section
+ .thumb_func
+ .global C_FUNC(\Name\()_End)
+C_FUNC(\Name\()_End):
+ .size \Name, .-\Name
+ .cfi_endproc
+.endm
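+
+// Usage sketch (illustrative only; 'AddHelper' is a hypothetical name):
+//
+//     LEAF_ENTRY AddHelper, _TEXT
+//         add r0, r0, r1
+//         bx lr
+//     LEAF_END AddHelper, _TEXT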
+
+.macro PREPARE_EXTERNAL_VAR Name, HelperReg
+ ldr \HelperReg, [pc, #C_FUNC(\Name)@GOTPCREL]
+.endm
+
+.macro push_nonvol_reg Register
+ push {\Register}
+ .cfi_adjust_cfa_offset 4
+ .cfi_rel_offset \Register, 0
+.endm
+
+.macro pop_nonvol_reg Register
+ pop {\Register}
+ .cfi_adjust_cfa_offset -4
+ .cfi_restore \Register
+.endm
+
+.macro alloc_stack Size
+ sub sp, sp, \Size
+ .cfi_adjust_cfa_offset \Size
+.endm
+
+.macro free_stack Size
+ add sp, sp, \Size
+ .cfi_adjust_cfa_offset -\Size
+.endm
+
+.macro set_cfa_register Reg, Offset
+ .cfi_def_cfa_register \Reg
+ .cfi_def_cfa_offset \Offset
+.endm
+
+.macro save_reg_postsp Reg, Offset
+ str \Reg, [sp, #\Offset]
+    .cfi_rel_offset \Reg, \Offset
+.endm
+
+.macro restore_reg Reg, Offset
+ ldr \Reg, [sp, #\Offset]
+ .cfi_restore \Reg
+.endm
+
+.macro POP_CALLEE_SAVED_REGISTERS
+ pop {r4-r11, lr}
+ .cfi_adjust_cfa_offset -(4*9)
+ .cfi_restore r4
+ .cfi_restore r5
+ .cfi_restore r6
+ .cfi_restore r7
+ .cfi_restore r8
+ .cfi_restore r9
+ .cfi_restore r10
+ .cfi_restore r11
+ .cfi_restore lr
+.endm
+
+.macro PUSH_CALLEE_SAVED_REGISTERS
+ push {r4-r11, lr}
+ .cfi_adjust_cfa_offset (4*9)
+.endm
+
+.macro push_register Reg
+ push {\Reg}
+ .cfi_adjust_cfa_offset 4
+.endm
+
+.macro push_argument_register Reg
+ push_register \Reg
+.endm
+
+.macro PUSH_ARGUMENT_REGISTERS
+ push {r0-r3}
+ .cfi_adjust_cfa_offset (4*4)
+.endm
+
+.macro pop_register Reg
+ pop {\Reg}
+ .cfi_adjust_cfa_offset -4
+.endm
+
+.macro pop_argument_register Reg
+ pop_register \Reg
+.endm
+
+.macro POP_ARGUMENT_REGISTERS
+ pop {r0-r3}
+ .cfi_adjust_cfa_offset -(4*4)
+.endm
+
+// Stack layout:
+//
+// (stack parameters)
+// ...
+// ArgumentRegisters::r3
+// ArgumentRegisters::r2
+// ArgumentRegisters::r1
+// ArgumentRegisters::r0
+// CalleeSavedRegisters::lr
+// CalleeSavedRegisters::r11
+// CalleeSavedRegisters::r10
+// CalleeSavedRegisters::r9
+// CalleeSavedRegisters::r8
+// CalleeSavedRegisters::r7
+// CalleeSavedRegisters::r6
+// CalleeSavedRegisters::r5
+// CalleeSavedRegisters::r4 <- __PWTB_StackAlloc, __PWTB_TransitionBlock
+// padding to align float save area
+// d7
+// d6
+// d5
+// d4
+// d3
+// d2
+// d1
+// d0 <- __PWTB_FloatArgumentRegisters
+.macro PROLOG_WITH_TRANSITION_BLOCK extraLocals = 0, saveFpArgs = 0, pushArgRegs = 0
+
+ __PWTB_FloatArgumentRegisters = \extraLocals
+ __PWTB_SaveFPArgs = \saveFpArgs
+
+ .if (__PWTB_SaveFPArgs == 1)
+ .if ((__PWTB_FloatArgumentRegisters % 8) != 0)
+ __PWTB_FloatArgumentRegisters = __PWTB_FloatArgumentRegisters + 4
+ .endif
+
+ __PWTB_TransitionBlock = __PWTB_FloatArgumentRegisters + 8 * 8 + 8 // 8 floating point registers
+ .else
+ .if ((__PWTB_FloatArgumentRegisters % 8) == 0)
+ __PWTB_FloatArgumentRegisters = __PWTB_FloatArgumentRegisters + 4
+ .endif
+
+ __PWTB_TransitionBlock = __PWTB_FloatArgumentRegisters
+ .endif
+
+ __PWTB_StackAlloc = __PWTB_TransitionBlock
+
+ .ifnc \pushArgRegs, DoNotPushArgRegs
+ PUSH_ARGUMENT_REGISTERS
+ .endif
+
+ PUSH_CALLEE_SAVED_REGISTERS
+
+ alloc_stack __PWTB_StackAlloc
+
+ .if (__PWTB_SaveFPArgs == 1)
+ add r6, sp, #(__PWTB_FloatArgumentRegisters)
+ vstm r6, {d0-d7}
+ .endif
+
+ CHECK_STACK_ALIGNMENT
+
+ END_PROLOGUE
+
+.endm
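+
+// Note: __PWTB_StackAlloc, __PWTB_TransitionBlock and
+// __PWTB_FloatArgumentRegisters stay defined after expansion; the epilog
+// macros below consume them, so the prolog and epilog must be paired in the
+// same function.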
+
+.macro EPILOG_WITH_TRANSITION_BLOCK_RETURN
+
+ free_stack __PWTB_StackAlloc
+ POP_CALLEE_SAVED_REGISTERS
+ free_stack 16
+ bx lr
+
+.endm
+
+.macro EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+
+ .if (__PWTB_SaveFPArgs == 1)
+ add r6, sp, #(__PWTB_FloatArgumentRegisters)
+ vldm r6, {d0-d7}
+ .endif
+
+ free_stack __PWTB_StackAlloc
+ POP_CALLEE_SAVED_REGISTERS
+ POP_ARGUMENT_REGISTERS
+
+.endm
+
+.macro EMIT_BREAKPOINT
+ .inst.w 0xde01
+.endm
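+
+// EMIT_BREAKPOINT expands to a Thumb undefined-instruction encoding used as
+// a breakpoint; CHECK_STACK_ALIGNMENT below relies on it to halt on a
+// misaligned stack.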
+
+//-----------------------------------------------------------------------------
+// Macro used to check (in debug builds only) whether the stack is 64-bit aligned (a requirement before calling
+// out into C++/OS code). Invoke this directly after your prolog (if the stack frame size is fixed) or directly
+// before a call (if you have a frame pointer and a dynamic stack). A breakpoint will be invoked if the stack
+// is misaligned.
+//
+.macro CHECK_STACK_ALIGNMENT
+
+#ifdef _DEBUG
+ push {r0}
+ add r0, sp, #4
+ tst r0, #7
+ pop {r0}
+ beq 0f
+ EMIT_BREAKPOINT
+0:
+#endif
+.endm
\ No newline at end of file
diff --git a/src/pal/src/CMakeLists.txt b/src/pal/src/CMakeLists.txt
index 890fc4c46c..07aa78cce4 100644
--- a/src/pal/src/CMakeLists.txt
+++ b/src/pal/src/CMakeLists.txt
@@ -24,6 +24,16 @@ endif()
# Compile options
+if(CMAKE_SYSTEM_PROCESSOR STREQUAL x86_64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL amd64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL AMD64)
+ set(PAL_CMAKE_PLATFORM_ARCH_AMD64 1)
+ add_definitions(-D_AMD64_)
+elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL armv7l)
+ set(PAL_CMAKE_PLATFORM_ARCH_ARM 1)
+ add_definitions(-D_ARM_)
+else()
+  message(FATAL_ERROR "Only ARM and AMD64 are supported")
+endif()
+
if(CMAKE_SYSTEM_NAME STREQUAL Darwin)
add_definitions(-D_TARGET_MAC64)
set(PLATFORM_SOURCES
@@ -43,16 +53,32 @@ add_definitions(-DLP64COMPATIBLE=1)
add_definitions(-DFEATURE_PAL=1)
add_definitions(-DCORECLR=1)
add_definitions(-DPIC=1)
-add_definitions(-DBIT64=1)
-add_definitions(-D_WIN64=1)
+add_definitions(-D_FILE_OFFSET_BITS=64)
+if(PAL_CMAKE_PLATFORM_ARCH_AMD64)
+ add_definitions(-DBIT64=1)
+ add_definitions(-D_WIN64=1)
+elseif(PAL_CMAKE_PLATFORM_ARCH_ARM)
+ add_definitions(-DBIT32=1)
+ add_definitions(-D_WIN32=1)
+endif()
+
add_compile_options(-fno-builtin)
add_compile_options(-fPIC)
+if(PAL_CMAKE_PLATFORM_ARCH_AMD64)
+ set(ARCH_SOURCES
+ arch/i386/context2.S
+ arch/i386/processor.cpp
+ )
+elseif(PAL_CMAKE_PLATFORM_ARCH_ARM)
+ set(ARCH_SOURCES
+ arch/arm/context2.S
+ arch/arm/processor.cpp
+ )
+endif()
+
set(SOURCES
- arch/i386/context.cpp
- arch/i386/context2.S
- arch/i386/processor.cpp
cruntime/file.cpp
cruntime/filecrt.cpp
cruntime/finite.cpp
@@ -151,6 +177,7 @@ set(SOURCES
synchmgr/synchcontrollers.cpp
synchmgr/synchmanager.cpp
synchmgr/wait.cpp
+ thread/context.cpp
thread/process.cpp
thread/thread.cpp
thread/threadsusp.cpp
@@ -160,10 +187,19 @@ set(SOURCES
add_library(coreclrpal
STATIC
${SOURCES}
+ ${ARCH_SOURCES}
${PLATFORM_SOURCES}
)
if(CMAKE_SYSTEM_NAME STREQUAL Linux)
+  # On ARM, linking libunwind ahead of libgcc_s breaks C++ exceptions (a
+  # libunwind issue), so make sure gcc_s is linked first.
+ if(PAL_CMAKE_PLATFORM_ARCH_ARM)
+ target_link_libraries(coreclrpal
+ gcc_s
+ )
+ endif()
+
target_link_libraries(coreclrpal
pthread
rt
diff --git a/src/pal/src/arch/arm/context2.S b/src/pal/src/arch/arm/context2.S
new file mode 100644
index 0000000000..88aee3b321
--- /dev/null
+++ b/src/pal/src/arch/arm/context2.S
@@ -0,0 +1,217 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// Implementation of _CONTEXT_CaptureContext for the ARM platform.
+// This function is processor dependent. It is used by exception handling,
+// and always applies to the current thread.
+//
+
+#include "unixasmmacros.inc"
+
+.syntax unified
+.thumb
+
+#define CONTEXT_ARM 0x00200000
+
+#define CONTEXT_CONTROL 1 // Sp, Lr, Pc, Cpsr
+#define CONTEXT_INTEGER 2 // R0-R12
+#define CONTEXT_SEGMENTS 4 //
+#define CONTEXT_FLOATING_POINT 8
+#define CONTEXT_DEBUG_REGISTERS 16 //
+
+#define CONTEXT_FULL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT)
+
+#define CONTEXT_ContextFlags 0
+#define CONTEXT_R0 CONTEXT_ContextFlags+4
+#define CONTEXT_R1 CONTEXT_R0+4
+#define CONTEXT_R2 CONTEXT_R1+4
+#define CONTEXT_R3 CONTEXT_R2+4
+#define CONTEXT_R4 CONTEXT_R3+4
+#define CONTEXT_R5 CONTEXT_R4+4
+#define CONTEXT_R6 CONTEXT_R5+4
+#define CONTEXT_R7 CONTEXT_R6+4
+#define CONTEXT_R8 CONTEXT_R7+4
+#define CONTEXT_R9 CONTEXT_R8+4
+#define CONTEXT_R10 CONTEXT_R9+4
+#define CONTEXT_R11 CONTEXT_R10+4
+#define CONTEXT_R12 CONTEXT_R11+4
+#define CONTEXT_Sp CONTEXT_R12+4
+#define CONTEXT_Lr CONTEXT_Sp+4
+#define CONTEXT_Pc CONTEXT_Lr+4
+#define CONTEXT_Cpsr CONTEXT_Pc+4
+#define CONTEXT_Fpscr CONTEXT_Cpsr+4
+#define CONTEXT_Padding CONTEXT_Fpscr+4
+#define CONTEXT_D0 CONTEXT_Padding+4
+#define CONTEXT_D1 CONTEXT_D0+8
+#define CONTEXT_D2 CONTEXT_D1+8
+#define CONTEXT_D3 CONTEXT_D2+8
+#define CONTEXT_D4 CONTEXT_D3+8
+#define CONTEXT_D5 CONTEXT_D4+8
+#define CONTEXT_D6 CONTEXT_D5+8
+#define CONTEXT_D7 CONTEXT_D6+8
+#define CONTEXT_D8 CONTEXT_D7+8
+#define CONTEXT_D9 CONTEXT_D8+8
+#define CONTEXT_D10 CONTEXT_D9+8
+#define CONTEXT_D11 CONTEXT_D10+8
+#define CONTEXT_D12 CONTEXT_D11+8
+#define CONTEXT_D13 CONTEXT_D12+8
+#define CONTEXT_D14 CONTEXT_D13+8
+#define CONTEXT_D15 CONTEXT_D14+8
+#define CONTEXT_D16 CONTEXT_D15+8
+#define CONTEXT_D17 CONTEXT_D16+8
+#define CONTEXT_D18 CONTEXT_D17+8
+#define CONTEXT_D19 CONTEXT_D18+8
+#define CONTEXT_D20 CONTEXT_D19+8
+#define CONTEXT_D21 CONTEXT_D20+8
+#define CONTEXT_D22 CONTEXT_D21+8
+#define CONTEXT_D23 CONTEXT_D22+8
+#define CONTEXT_D24 CONTEXT_D23+8
+#define CONTEXT_D25 CONTEXT_D24+8
+#define CONTEXT_D26 CONTEXT_D25+8
+#define CONTEXT_D27 CONTEXT_D26+8
+#define CONTEXT_D28 CONTEXT_D27+8
+#define CONTEXT_D29 CONTEXT_D28+8
+#define CONTEXT_D30 CONTEXT_D29+8
+#define CONTEXT_D31 CONTEXT_D30+8
+
+// Incoming:
+// r0: Context*
+//
+LEAF_ENTRY CONTEXT_CaptureContext, _TEXT
+ // Ensure we save these registers
+ push {r4-r11}
+ // Save processor flags before calling any of the following 'test' instructions
+ // because they will modify state of some flags
+ push {r1}
+ mrs r1, apsr // Get APSR - equivalent to eflags
+ push {r1} // Save APSR
+ END_PROLOGUE
+
+ push {r2}
+ ldr r2, [r0, #(CONTEXT_ContextFlags)]
+ tst r2, #(CONTEXT_INTEGER)
+ pop {r2}
+
+ // Add 4 to stack so we point at R1, pop, then sub 8 to point at APSR
+ add sp, sp, #4
+ pop {r1}
+ sub sp, sp, #8
+
+ itttt ne
+ strne r0, [r0, #(CONTEXT_R0)]
+ addne r0, CONTEXT_R1
+ stmiane r0, {r1-r12}
+ subne r0, CONTEXT_R1
+
+ ldr r2, [r0, #(CONTEXT_ContextFlags)]
+ tst r2, #(CONTEXT_CONTROL)
+
+ ittt ne
+ addne sp, sp, #(10*4) // This needs to put the stack in the same state as it started
+ strne sp, [r0, #(CONTEXT_Sp)]
+ subne sp, sp, #(10*4)
+
+ itt ne
+ strne lr, [r0, #(CONTEXT_Lr)]
+ strne lr, [r0, #(CONTEXT_Pc)]
+
+ // Get the APSR pushed onto the stack at the start
+ pop {r1}
+ it ne
+ strne r1, [r0, #(CONTEXT_Cpsr)]
+
+ ldr r2, [r0, #(CONTEXT_ContextFlags)]
+ tst r2, #(CONTEXT_FLOATING_POINT)
+
+ itt ne
+ vmrsne r3, fpscr
+ strne r3, [r0, #(CONTEXT_Fpscr)]
+
+ itttt ne
+ addne r0, CONTEXT_D0
+ vstmiane r0!, {d0-d15}
+    vstmiane r0, {d16-d31}
+    subne r0, CONTEXT_D16
+
+ // Make sure sp is restored
+ add sp, sp, #4
+
+ // Restore callee saved registers
+ pop {r4-r11}
+ bx lr
+LEAF_END CONTEXT_CaptureContext, _TEXT
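+
+// Call sketch (illustrative): capturing the current thread's state from C.
+//
+//     CONTEXT ctx;
+//     ctx.ContextFlags = CONTEXT_FULL;
+//     CONTEXT_CaptureContext(&ctx);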
+
+// Incoming:
+// R0: Context*
+//
+LEAF_ENTRY RtlCaptureContext, _TEXT
+ push {r1}
+ mov r1, #0
+ orr r1, r1, #CONTEXT_ARM
+ orr r1, r1, #CONTEXT_INTEGER
+ orr r1, r1, #CONTEXT_CONTROL
+ orr r1, r1, #CONTEXT_FLOATING_POINT
+ str r1, [r0, #(CONTEXT_ContextFlags)]
+ pop {r1}
+ b C_FUNC(CONTEXT_CaptureContext)
+LEAF_END RtlCaptureContext, _TEXT
+
+// Incoming:
+// r0: Context*
+// r1: Exception*
+//
+LEAF_ENTRY RtlRestoreContext, _TEXT
+ END_PROLOGUE
+
+ ldr r2, [r0, #(CONTEXT_ContextFlags)]
+ tst r2, #(CONTEXT_FLOATING_POINT)
+
+ itttt ne
+ addne r0, CONTEXT_D0
+ vldmiane r0!, {d0-d15}
+ vldmiane r0, {d16-d31}
+ subne r0, CONTEXT_D16
+
+ itt ne
+ ldrne r3, [r0, #(CONTEXT_Fpscr)]
+    vmsrne fpscr, r3
+
+ ldr r2, [r0, #(CONTEXT_ContextFlags)]
+ tst r2, #(CONTEXT_CONTROL)
+
+ it eq
+ beq LOCAL_LABEL(No_Restore_CONTEXT_CONTROL)
+
+ ldr r2, [r0, #(CONTEXT_ContextFlags)]
+ tst r2, #(CONTEXT_INTEGER)
+
+ it eq
+ beq LOCAL_LABEL(No_Restore_CONTEXT_INTEGER)
+
+    ldr r2, [r0, #(CONTEXT_Cpsr)]
+ msr APSR, r2
+
+ add r0, CONTEXT_R0
+ ldmia r0, {r0-r12, sp, lr, pc}
+
+LOCAL_LABEL(No_Restore_CONTEXT_INTEGER):
+
+ ldr r2, [r0, #(CONTEXT_Cpsr)]
+ msr APSR, r2
+
+ add r0, CONTEXT_Sp
+ ldmia r0, {sp, lr, pc}
+
+LOCAL_LABEL(No_Restore_CONTEXT_CONTROL):
+ ldr r2, [r0, #(CONTEXT_ContextFlags)]
+ tst r2, #(CONTEXT_INTEGER)
+
+ itt ne
+ addne r0, CONTEXT_R0
+ ldmiane r0, {r0-r12}
+
+ sub sp, sp, #4
+ bx lr
+LEAF_END RtlRestoreContext, _TEXT
\ No newline at end of file
diff --git a/src/pal/src/arch/arm/processor.cpp b/src/pal/src/arch/arm/processor.cpp
new file mode 100644
index 0000000000..b7973486e0
--- /dev/null
+++ b/src/pal/src/arch/arm/processor.cpp
@@ -0,0 +1,43 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*++
+
+
+
+Module Name:
+
+ processor.cpp
+
+Abstract:
+
+ Implementation of processor related functions for the ARM
+ platform. These functions are processor dependent.
+
+
+
+--*/
+
+#include "pal/palinternal.h"
+
+/*++
+Function:
+YieldProcessor
+
+The YieldProcessor function signals to the processor to give resources
+to threads that are waiting for them. This macro is only effective on
+processors that support technology allowing multiple threads running
+on a single processor, such as Intel's Hyper-Threading technology.
+
+--*/
+void
+PALAPI
+YieldProcessor(
+ VOID)
+{
+    // ARM defines a 'yield' hint instruction, but it is effectively a no-op
+    // on current hardware, so there is nothing useful to do here.
+ return;
+}
+
diff --git a/src/pal/src/configure.cmake b/src/pal/src/configure.cmake
index e5d2dc0589..6d1f141d49 100644
--- a/src/pal/src/configure.cmake
+++ b/src/pal/src/configure.cmake
@@ -12,6 +12,9 @@ if(CMAKE_SYSTEM_NAME STREQUAL FreeBSD)
elseif(NOT CMAKE_SYSTEM_NAME STREQUAL Darwin)
set(CMAKE_REQUIRED_DEFINITIONS "-D_DEFAULT_SOURCE -D_POSIX_C_SOURCE=200809L")
endif()
+
+list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_FILE_OFFSET_BITS=64)
+
check_include_files(ieeefp.h HAVE_IEEEFP_H)
check_include_files(alloca.h HAVE_ALLOCA_H)
check_include_files(sys/vmparam.h HAVE_SYS_VMPARAM_H)
@@ -848,6 +851,19 @@ set(CMAKE_REQUIRED_DEFINITIONS)
set(SYNCHMGR_SUSPENSION_SAFE_CONDITION_SIGNALING 1)
set(ERROR_FUNC_FOR_GLOB_HAS_FIXED_PARAMS 1)
+check_cxx_source_compiles("
+#include <libunwind.h>
+#include <ucontext.h>
+
+int main(int argc, char **argv)
+{
+ unw_context_t libUnwindContext;
+ ucontext_t uContext;
+
+ libUnwindContext = uContext;
+ return 0;
+}" UNWIND_CONTEXT_IS_UCONTEXT_T)
+
if(CMAKE_SYSTEM_NAME STREQUAL Darwin)
set(HAVE_COREFOUNDATION 1)
set(HAVE__NSGETENVIRON 1)
@@ -862,7 +878,6 @@ if(CMAKE_SYSTEM_NAME STREQUAL Darwin)
set(KO_KR_LOCALE_NAME ko_KR.eucKR)
set(ZH_TW_LOCALE_NAME zh_TG.BIG5)
set(HAS_FTRUNCATE_LENGTH_ISSUE 1)
- set(UNWIND_CONTEXT_IS_UCONTEXT_T 0)
elseif(CMAKE_SYSTEM_NAME STREQUAL FreeBSD)
if(NOT HAVE_LIBUNWIND_H)
unset(HAVE_LIBUNWIND_H CACHE)
@@ -879,7 +894,6 @@ elseif(CMAKE_SYSTEM_NAME STREQUAL FreeBSD)
set(KO_KR_LOCALE_NAME ko_KR_LOCALE_NOT_FOUND)
set(ZH_TW_LOCALE_NAME zh_TW_LOCALE_NOT_FOUND)
set(HAS_FTRUNCATE_LENGTH_ISSUE 0)
- set(UNWIND_CONTEXT_IS_UCONTEXT_T 1)
if(EXISTS "/lib/libc.so.7")
set(FREEBSD_LIBC "/lib/libc.so.7")
@@ -903,7 +917,6 @@ else() # Anything else is Linux
set(KO_KR_LOCALE_NAME ko_KR_LOCALE_NOT_FOUND)
set(ZH_TW_LOCALE_NAME zh_TW_LOCALE_NOT_FOUND)
set(HAS_FTRUNCATE_LENGTH_ISSUE 0)
- set(UNWIND_CONTEXT_IS_UCONTEXT_T 1)
endif(CMAKE_SYSTEM_NAME STREQUAL Darwin)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/config.h.in ${CMAKE_CURRENT_BINARY_DIR}/config.h)
diff --git a/src/pal/src/cruntime/misc.cpp b/src/pal/src/cruntime/misc.cpp
index 09bce67d91..2e90c58ae6 100644
--- a/src/pal/src/cruntime/misc.cpp
+++ b/src/pal/src/cruntime/misc.cpp
@@ -35,7 +35,9 @@ Abstract:
#if HAVE_CRT_EXTERNS_H
#include <crt_externs.h>
#endif // HAVE_CRT_EXTERNS_H
+#if defined(_AMD64_) || defined(_X86_)
#include <xmmintrin.h>
+#endif // defined(_AMD64_) || defined(_X86_)
SET_DEFAULT_DEBUG_CHANNEL(CRT);
diff --git a/src/pal/src/exception/seh-unwind.cpp b/src/pal/src/exception/seh-unwind.cpp
index 2e77814b9b..9dd025c51d 100644
--- a/src/pal/src/exception/seh-unwind.cpp
+++ b/src/pal/src/exception/seh-unwind.cpp
@@ -49,10 +49,10 @@ Abstract:
ASSIGN_REG(R12) \
ASSIGN_REG(R13) \
ASSIGN_REG(R14) \
- ASSIGN_REG(R15)
-#else // _AMD64_
+ ASSIGN_REG(R15)
+#else
#error unsupported architecture
-#endif // _AMD64_
+#endif
static void WinContextToUnwindContext(CONTEXT *winContext, unw_context_t *unwContext)
{
@@ -72,8 +72,18 @@ static void WinContextToUnwindCursor(CONTEXT *winContext, unw_cursor_t *cursor)
unw_set_reg(cursor, UNW_X86_64_R13, winContext->R13);
unw_set_reg(cursor, UNW_X86_64_R14, winContext->R14);
unw_set_reg(cursor, UNW_X86_64_R15, winContext->R15);
-#else
-#error unsupported architecture
+#elif defined(_ARM_)
+ unw_set_reg(cursor, UNW_REG_IP, winContext->Pc);
+ unw_set_reg(cursor, UNW_REG_SP, winContext->Sp);
+ unw_set_reg(cursor, UNW_ARM_R14, winContext->Lr);
+ unw_set_reg(cursor, UNW_ARM_R4, winContext->R4);
+ unw_set_reg(cursor, UNW_ARM_R5, winContext->R5);
+ unw_set_reg(cursor, UNW_ARM_R6, winContext->R6);
+ unw_set_reg(cursor, UNW_ARM_R7, winContext->R7);
+ unw_set_reg(cursor, UNW_ARM_R8, winContext->R8);
+ unw_set_reg(cursor, UNW_ARM_R9, winContext->R9);
+ unw_set_reg(cursor, UNW_ARM_R10, winContext->R10);
+ unw_set_reg(cursor, UNW_ARM_R11, winContext->R11);
#endif
}
#endif
@@ -89,12 +99,24 @@ static void UnwindContextToWinContext(unw_cursor_t *cursor, CONTEXT *winContext)
unw_get_reg(cursor, UNW_X86_64_R13, (unw_word_t *) &winContext->R13);
unw_get_reg(cursor, UNW_X86_64_R14, (unw_word_t *) &winContext->R14);
unw_get_reg(cursor, UNW_X86_64_R15, (unw_word_t *) &winContext->R15);
+#elif defined(_ARM_)
+ unw_get_reg(cursor, UNW_REG_IP, (unw_word_t *) &winContext->Pc);
+ unw_get_reg(cursor, UNW_REG_SP, (unw_word_t *) &winContext->Sp);
+ unw_get_reg(cursor, UNW_ARM_R14, (unw_word_t *) &winContext->Lr);
+ unw_get_reg(cursor, UNW_ARM_R4, (unw_word_t *) &winContext->R4);
+ unw_get_reg(cursor, UNW_ARM_R5, (unw_word_t *) &winContext->R5);
+ unw_get_reg(cursor, UNW_ARM_R6, (unw_word_t *) &winContext->R6);
+ unw_get_reg(cursor, UNW_ARM_R7, (unw_word_t *) &winContext->R7);
+ unw_get_reg(cursor, UNW_ARM_R8, (unw_word_t *) &winContext->R8);
+ unw_get_reg(cursor, UNW_ARM_R9, (unw_word_t *) &winContext->R9);
+ unw_get_reg(cursor, UNW_ARM_R10, (unw_word_t *) &winContext->R10);
+ unw_get_reg(cursor, UNW_ARM_R11, (unw_word_t *) &winContext->R11);
#else
#error unsupported architecture
#endif
}
-static void GetContextPointer(unw_cursor_t *cursor, unw_context_t *unwContext, int reg, PDWORD64 *contextPointer)
+static void GetContextPointer(unw_cursor_t *cursor, unw_context_t *unwContext, int reg, SIZE_T **contextPointer)
{
#if defined(__APPLE__)
// Returning NULL indicates that we don't have context pointers available
@@ -104,10 +126,10 @@ static void GetContextPointer(unw_cursor_t *cursor, unw_context_t *unwContext, i
unw_get_save_loc(cursor, reg, &saveLoc);
if (saveLoc.type == UNW_SLT_MEMORY)
{
- PDWORD64 pLoc = (PDWORD64)saveLoc.u.addr;
+ SIZE_T *pLoc = (SIZE_T *)saveLoc.u.addr;
// Filter out fake save locations that point to unwContext
- if ((pLoc < (PDWORD64)unwContext) || ((PDWORD64)(unwContext + 1) <= pLoc))
- *contextPointer = (PDWORD64)saveLoc.u.addr;
+ if ((pLoc < (SIZE_T *)unwContext) || ((SIZE_T *)(unwContext + 1) <= pLoc))
+ *contextPointer = (SIZE_T *)saveLoc.u.addr;
}
#endif
}
@@ -121,6 +143,15 @@ static void GetContextPointers(unw_cursor_t *cursor, unw_context_t *unwContext,
GetContextPointer(cursor, unwContext, UNW_X86_64_R13, &contextPointers->R13);
GetContextPointer(cursor, unwContext, UNW_X86_64_R14, &contextPointers->R14);
GetContextPointer(cursor, unwContext, UNW_X86_64_R15, &contextPointers->R15);
+#elif defined(_ARM_)
+ GetContextPointer(cursor, unwContext, UNW_ARM_R4, &contextPointers->R4);
+ GetContextPointer(cursor, unwContext, UNW_ARM_R5, &contextPointers->R5);
+ GetContextPointer(cursor, unwContext, UNW_ARM_R6, &contextPointers->R6);
+ GetContextPointer(cursor, unwContext, UNW_ARM_R7, &contextPointers->R7);
+ GetContextPointer(cursor, unwContext, UNW_ARM_R8, &contextPointers->R8);
+ GetContextPointer(cursor, unwContext, UNW_ARM_R9, &contextPointers->R9);
+ GetContextPointer(cursor, unwContext, UNW_ARM_R10, &contextPointers->R10);
+ GetContextPointer(cursor, unwContext, UNW_ARM_R11, &contextPointers->R11);
#else
#error unsupported architecture
#endif
@@ -226,6 +257,8 @@ static void RtlpRaiseException(EXCEPTION_RECORD *ExceptionRecord)
ExceptionRecord->ExceptionAddress = (void *) ContextRecord.Eip;
#elif defined(_AMD64_)
ExceptionRecord->ExceptionAddress = (void *) ContextRecord.Rip;
+#elif defined(_ARM_)
+ ExceptionRecord->ExceptionAddress = (void *) ContextRecord.Pc;
#else
#error unsupported architecture
#endif
diff --git a/src/pal/src/include/pal/context.h b/src/pal/src/include/pal/context.h
index 0a801f3988..b1dc76c420 100644
--- a/src/pal/src/include/pal/context.h
+++ b/src/pal/src/include/pal/context.h
@@ -141,6 +141,28 @@ typedef ucontext_t native_context_t;
#else // BIT64
+#if defined(_ARM_)
+
+#define MCREG_R0(mc) ((mc).arm_r0)
+#define MCREG_R1(mc) ((mc).arm_r1)
+#define MCREG_R2(mc) ((mc).arm_r2)
+#define MCREG_R3(mc) ((mc).arm_r3)
+#define MCREG_R4(mc) ((mc).arm_r4)
+#define MCREG_R5(mc) ((mc).arm_r5)
+#define MCREG_R6(mc) ((mc).arm_r6)
+#define MCREG_R7(mc) ((mc).arm_r7)
+#define MCREG_R8(mc) ((mc).arm_r8)
+#define MCREG_R9(mc) ((mc).arm_r9)
+#define MCREG_R10(mc) ((mc).arm_r10)
+#define MCREG_R11(mc) ((mc).arm_fp)
+#define MCREG_R12(mc) ((mc).arm_ip)
+#define MCREG_Sp(mc) ((mc).arm_sp)
+#define MCREG_Lr(mc) ((mc).arm_lr)
+#define MCREG_Pc(mc) ((mc).arm_pc)
+#define MCREG_Cpsr(mc) ((mc).arm_cpsr)
+
+#elif defined(_X86_)
+
#define MCREG_Ebx(mc) ((mc).mc_ebx)
#define MCREG_Ecx(mc) ((mc).mc_ecx)
#define MCREG_Edx(mc) ((mc).mc_edx)
@@ -154,6 +176,10 @@ typedef ucontext_t native_context_t;
#define MCREG_Esp(mc) ((mc).mc_esp)
#define MCREG_SegSs(mc) ((mc).mc_ss)
+#else
+#error "Unsupported arch"
+#endif
+
#endif // BIT64
#endif // HAVE_GREGSET_T
@@ -184,6 +210,25 @@ typedef ucontext_t native_context_t;
#else // BIT64
+#if defined(_ARM_)
+#define PTREG_R0(ptreg) ((ptreg).uregs[0])
+#define PTREG_R1(ptreg) ((ptreg).uregs[1])
+#define PTREG_R2(ptreg) ((ptreg).uregs[2])
+#define PTREG_R3(ptreg) ((ptreg).uregs[3])
+#define PTREG_R4(ptreg) ((ptreg).uregs[4])
+#define PTREG_R5(ptreg) ((ptreg).uregs[5])
+#define PTREG_R6(ptreg) ((ptreg).uregs[6])
+#define PTREG_R7(ptreg) ((ptreg).uregs[7])
+#define PTREG_R8(ptreg) ((ptreg).uregs[8])
+#define PTREG_R9(ptreg) ((ptreg).uregs[9])
+#define PTREG_R10(ptreg) ((ptreg).uregs[10])
+#define PTREG_R11(ptreg) ((ptreg).uregs[11])
+#define PTREG_R12(ptreg) ((ptreg).uregs[12])
+#define PTREG_Sp(ptreg) ((ptreg).uregs[13])
+#define PTREG_Lr(ptreg) ((ptreg).uregs[14])
+#define PTREG_Pc(ptreg) ((ptreg).uregs[15])
+#define PTREG_Cpsr(ptreg) ((ptreg).uregs[16])
+#elif defined(_X86_)
#define PTREG_Ebx(ptreg) ((ptreg).ebx)
#define PTREG_Ecx(ptreg) ((ptreg).ecx)
#define PTREG_Edx(ptreg) ((ptreg).edx)
@@ -195,6 +240,9 @@ typedef ucontext_t native_context_t;
#define PTREG_SegCs(ptreg) ((ptreg).xcs)
#define PTREG_SegSs(ptreg) ((ptreg).xss)
#define PTREG_Esp(ptreg) ((ptreg).esp)
+#else
+#error "Unsupported arch"
+#endif
#endif // BIT64
diff --git a/src/pal/src/misc/sysinfo.cpp b/src/pal/src/misc/sysinfo.cpp
index aece529f36..9b4e2b8f64 100644
--- a/src/pal/src/misc/sysinfo.cpp
+++ b/src/pal/src/misc/sysinfo.cpp
@@ -195,7 +195,6 @@ GetSystemInfo(
PERF_EXIT(GetSystemInfo);
}
-#if defined(_AMD64_)
/*++
Function:
GlobalMemoryStatusEx
@@ -325,4 +324,20 @@ PAL_GetLogicalCpuCountFromOS()
return numLogicalCores;
}
-#endif // defined(_AMD64_)
+size_t
+PALAPI
+PAL_GetLogicalProcessorCacheSizeFromOS()
+{
+ size_t cacheSize = 0;
+
+#if HAVE_SYSCONF && defined(__LINUX__)
+    // sysconf returns -1 for cache levels the kernel does not report; only
+    // accept positive sizes so the error value cannot win the max().
+    long levelSize;
+
+    if ((levelSize = sysconf(_SC_LEVEL1_DCACHE_SIZE)) > 0) cacheSize = max(cacheSize, (size_t)levelSize);
+    if ((levelSize = sysconf(_SC_LEVEL1_ICACHE_SIZE)) > 0) cacheSize = max(cacheSize, (size_t)levelSize);
+    if ((levelSize = sysconf(_SC_LEVEL2_CACHE_SIZE)) > 0) cacheSize = max(cacheSize, (size_t)levelSize);
+    if ((levelSize = sysconf(_SC_LEVEL3_CACHE_SIZE)) > 0) cacheSize = max(cacheSize, (size_t)levelSize);
+    if ((levelSize = sysconf(_SC_LEVEL4_CACHE_SIZE)) > 0) cacheSize = max(cacheSize, (size_t)levelSize);
+#endif
+
+ return cacheSize;
+}
+
diff --git a/src/pal/src/arch/i386/context.cpp b/src/pal/src/thread/context.cpp
index 14a96bb9b0..3742b5ddf1 100644
--- a/src/pal/src/arch/i386/context.cpp
+++ b/src/pal/src/thread/context.cpp
@@ -13,8 +13,8 @@ Module Name:
Abstract:
- Implementation of GetThreadContext/SetThreadContext/DebugBreak functions for
- the Intel x86 platform. These functions are processor dependent.
+ Implementation of GetThreadContext/SetThreadContext/DebugBreak.
+ There are a lot of architecture specifics here.
@@ -39,6 +39,8 @@ extern void CONTEXT_CaptureContext(LPCONTEXT lpContext);
#define CONTEXT_ALL_FLOATING (CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined(_AMD64_)
#define CONTEXT_ALL_FLOATING CONTEXT_FLOATING_POINT
+#elif defined(_ARM_)
+#define CONTEXT_ALL_FLOATING CONTEXT_FLOATING_POINT
#else
#error Unexpected architecture.
#endif
@@ -53,7 +55,7 @@ extern void CONTEXT_CaptureContext(LPCONTEXT lpContext);
#include <asm/ptrace.h>
#endif // HAVE_PT_REGS
-#ifdef BIT64
+#ifdef _AMD64_
#define ASSIGN_CONTROL_REGS \
ASSIGN_REG(Rbp) \
ASSIGN_REG(Rip) \
@@ -77,7 +79,7 @@ extern void CONTEXT_CaptureContext(LPCONTEXT lpContext);
ASSIGN_REG(R14) \
ASSIGN_REG(R15) \
-#else // BIT64
+#elif defined(_X86_)
#define ASSIGN_CONTROL_REGS \
ASSIGN_REG(Ebp) \
ASSIGN_REG(Eip) \
@@ -94,7 +96,28 @@ extern void CONTEXT_CaptureContext(LPCONTEXT lpContext);
ASSIGN_REG(Ecx) \
ASSIGN_REG(Eax) \
-#endif //BIT64
+#elif defined(_ARM_)
+#define ASSIGN_CONTROL_REGS \
+ ASSIGN_REG(Sp) \
+ ASSIGN_REG(Lr) \
+ ASSIGN_REG(Pc) \
+ ASSIGN_REG(Cpsr) \
+
+#define ASSIGN_INTEGER_REGS \
+ ASSIGN_REG(R0) \
+ ASSIGN_REG(R1) \
+ ASSIGN_REG(R2) \
+ ASSIGN_REG(R3) \
+ ASSIGN_REG(R4) \
+ ASSIGN_REG(R5) \
+ ASSIGN_REG(R6) \
+ ASSIGN_REG(R7) \
+ ASSIGN_REG(R8) \
+ ASSIGN_REG(R9) \
+ ASSIGN_REG(R10) \
+ ASSIGN_REG(R11) \
+ ASSIGN_REG(R12)
+#endif
#define ASSIGN_ALL_REGS \
ASSIGN_CONTROL_REGS \
@@ -390,6 +413,7 @@ void CONTEXTToNativeContext(CONST CONTEXT *lpContext, native_context_t *native)
if ((lpContext->ContextFlags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT)
{
+#ifdef _AMD64_
FPREG_ControlWord(native) = lpContext->FltSave.ControlWord;
FPREG_StatusWord(native) = lpContext->FltSave.StatusWord;
FPREG_TagWord(native) = lpContext->FltSave.TagWord;
@@ -409,6 +433,7 @@ void CONTEXTToNativeContext(CONST CONTEXT *lpContext, native_context_t *native)
{
FPREG_Xmm(native, i) = ((M128U*)lpContext->FltSave.XmmRegisters)[i];
}
+#endif
}
}
@@ -447,6 +472,7 @@ void CONTEXTFromNativeContext(const native_context_t *native, LPCONTEXT lpContex
if ((contextFlags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT)
{
+#ifdef _AMD64_
lpContext->FltSave.ControlWord = FPREG_ControlWord(native);
lpContext->FltSave.StatusWord = FPREG_StatusWord(native);
lpContext->FltSave.TagWord = FPREG_TagWord(native);
@@ -466,6 +492,7 @@ void CONTEXTFromNativeContext(const native_context_t *native, LPCONTEXT lpContex
{
((M128U*)lpContext->FltSave.XmmRegisters)[i] = FPREG_Xmm(native, i);
}
+#endif
}
}
@@ -484,11 +511,13 @@ Return value :
--*/
LPVOID CONTEXTGetPC(const native_context_t *context)
{
-#ifdef BIT64
+#ifdef _AMD64_
return (LPVOID)MCREG_Rip(context->uc_mcontext);
-#else
+#elif defined(_X86_)
return (LPVOID) MCREG_Eip(context->uc_mcontext);
-#endif // BIT64
+#elif defined(_ARM_)
+ return (LPVOID) MCREG_Pc(context->uc_mcontext);
+#endif
}
/*++
@@ -738,7 +767,7 @@ DWORD CONTEXTGetExceptionCodeForSignal(const siginfo_t *siginfo,
#include <mach/message.h>
#include <mach/thread_act.h>
-#include "../../exception/machexception.h"
+#include "../exception/machexception.h"
/*++
Function:
@@ -1204,7 +1233,12 @@ See MSDN doc.
VOID
DBG_DebugBreak()
{
+#if defined(_AMD64_) || defined(_X86_)
__asm__ __volatile__("int $3");
+#elif defined(_ARM_)
+    // This assumes Thumb: 0xde01 is an undefined-instruction encoding that
+    // traps, standing in for a breakpoint.
+ __asm__ __volatile__(".inst.w 0xde01");
+#endif
}
@@ -1220,6 +1254,8 @@ DBG_FlushInstructionCache(
IN LPCVOID lpBaseAddress,
IN SIZE_T dwSize)
{
- // Intel x86 hardware has cache coherency, so nothing needs to be done.
+ // Intrinsic should do the right thing across all platforms
+ __builtin___clear_cache((char *)lpBaseAddress, (char *)((INT_PTR)lpBaseAddress + dwSize));
+
return TRUE;
}
diff --git a/src/pal/tests/CMakeLists.txt b/src/pal/tests/CMakeLists.txt
index b9ad7ed4f9..d3b91d2ac2 100644
--- a/src/pal/tests/CMakeLists.txt
+++ b/src/pal/tests/CMakeLists.txt
@@ -1,5 +1,11 @@
cmake_minimum_required(VERSION 2.8.12.2)
+if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ set(PAL_CMAKE_PLATFORM_ARCH_AMD64 1)
+elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
+ set(PAL_CMAKE_PLATFORM_ARCH_ARM 1)
+endif()
+
# Compile options
add_definitions(-DPLATFORM_UNIX=1)
add_definitions(-D__LINUX__=1)
@@ -7,8 +13,15 @@ add_definitions(-DLP64COMPATIBLE=1)
add_definitions(-DFEATURE_PAL=1)
add_definitions(-DCORECLR=1)
add_definitions(-DPIC=1)
-add_definitions(-DBIT64=1)
-add_definitions(-D_WIN64=1)
+if(PAL_CMAKE_PLATFORM_ARCH_AMD64)
+ add_definitions(-DBIT64=1)
+ add_definitions(-D_WIN64=1)
+elseif(PAL_CMAKE_PLATFORM_ARCH_ARM)
+ add_definitions(-DBIT32=1)
+ add_definitions(-D_WIN32=1)
+else()
+  message(FATAL_ERROR "Only ARM and AMD64 are supported")
+endif()
add_compile_options(-Wno-empty-body)
diff --git a/src/pal/tests/palsuite/c_runtime/vprintf/CMakeLists.txt b/src/pal/tests/palsuite/c_runtime/vprintf/CMakeLists.txt
index cafb9536b0..c7e7647ea9 100644
--- a/src/pal/tests/palsuite/c_runtime/vprintf/CMakeLists.txt
+++ b/src/pal/tests/palsuite/c_runtime/vprintf/CMakeLists.txt
@@ -1,6 +1,7 @@
cmake_minimum_required(VERSION 2.8.12.2)
-add_subdirectory(test1)
+# This test fails to build on ARM
+#add_subdirectory(test1)
add_subdirectory(test10)
add_subdirectory(test11)
add_subdirectory(test12)
diff --git a/src/pal/tests/palsuite/paltestlist.txt b/src/pal/tests/palsuite/paltestlist.txt
index 978ba7b8a6..fba4c77172 100644
--- a/src/pal/tests/palsuite/paltestlist.txt
+++ b/src/pal/tests/palsuite/paltestlist.txt
@@ -245,7 +245,6 @@ c_runtime/vfprintf/test6/paltest_vfprintf_test6
c_runtime/vfprintf/test7/paltest_vfprintf_test7
c_runtime/vfprintf/test8/paltest_vfprintf_test8
c_runtime/vfprintf/test9/paltest_vfprintf_test9
-c_runtime/vprintf/test1/paltest_vprintf_test1
c_runtime/vprintf/test10/paltest_vprintf_test10
c_runtime/vprintf/test11/paltest_vprintf_test11
c_runtime/vprintf/test12/paltest_vprintf_test12
diff --git a/src/pal/tests/palsuite/paltestlist_to_be_reviewed.txt b/src/pal/tests/palsuite/paltestlist_to_be_reviewed.txt
index 64b87d518a..a6729839a2 100644
--- a/src/pal/tests/palsuite/paltestlist_to_be_reviewed.txt
+++ b/src/pal/tests/palsuite/paltestlist_to_be_reviewed.txt
@@ -18,6 +18,7 @@ c_runtime/iswprint/test1/paltest_iswprint_test1
c_runtime/swprintf/test2/paltest_swprintf_test2
c_runtime/swprintf/test7/paltest_swprintf_test7
c_runtime/ungetc/test2/paltest_ungetc_test2
+c_runtime/vprintf/test1/paltest_vprintf_test1
c_runtime/vswprintf/test2/paltest_vswprintf_test2
c_runtime/vswprintf/test7/paltest_vswprintf_test7
c_runtime/wprintf/test2/paltest_wprintf_test2
diff --git a/src/unwinder/CMakeLists.txt b/src/unwinder/CMakeLists.txt
index 3ff57eabcd..27e613cd3c 100644
--- a/src/unwinder/CMakeLists.txt
+++ b/src/unwinder/CMakeLists.txt
@@ -7,29 +7,34 @@ include_directories(${CLR_DIR}/src/gc)
include_directories(${CLR_DIR}/src/gcdump)
include_directories(${CLR_DIR}/src/debug/daccess)
-if(CLR_CMAKE_PLATFORM_UNIX)
- add_compile_options(-fPIC)
-endif(CLR_CMAKE_PLATFORM_UNIX)
+set(UNWINDER_SOURCES
+ unwinder.cpp
+)
-
-if(IS_64BIT_BUILD EQUAL 1)
+if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
include_directories(amd64)
+
+ list(APPEND UNWINDER_SOURCES
+ amd64/unwinder_amd64.cpp
+ )
+elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
+ include_directories(arm)
+
+ list(APPEND UNWINDER_SOURCES
+ arm/unwinder_arm.cpp
+ )
+else()
+  message(FATAL_ERROR "Only ARM and AMD64 are supported")
+endif()
+
+convert_to_absolute_path(UNWINDER_SOURCES ${UNWINDER_SOURCES})
if(CLR_CMAKE_PLATFORM_UNIX)
add_compile_options(-fPIC)
endif(CLR_CMAKE_PLATFORM_UNIX)
- set(UNWINDER_SOURCES
- unwinder.cpp
- amd64/unwinder_amd64.cpp
- )
-
- convert_to_absolute_path(UNWINDER_SOURCES ${UNWINDER_SOURCES})
-
- add_subdirectory(dac)
if(CLR_CMAKE_PLATFORM_UNIX)
add_subdirectory(wks)
-endif(CLR_CMAKE_PLATFORM_UNIX)
-
-endif(IS_64BIT_BUILD EQUAL 1)
+endif(CLR_CMAKE_PLATFORM_UNIX)
+add_subdirectory(dac)
\ No newline at end of file
diff --git a/src/unwinder/arm/unwinder_arm.cpp b/src/unwinder/arm/unwinder_arm.cpp
index c2463f58f7..a07d9e74a1 100644
--- a/src/unwinder/arm/unwinder_arm.cpp
+++ b/src/unwinder/arm/unwinder_arm.cpp
@@ -65,22 +65,22 @@
static const UINT16 ConditionTable[16] =
{
- ZSET_MASK, // EQ: Z
- ~ZSET_MASK, // NE: !Z
- CSET_MASK, // CS: C
- ~CSET_MASK, // CC: !C
- NSET_MASK, // MI: N
- ~NSET_MASK, // PL: !N
- VSET_MASK, // VS: V
- ~VSET_MASK, // VC: !V
- CSET_MASK & ~ZSET_MASK, // HI: C & !Z
- ~CSET_MASK | ZSET_MASK, // LO: !C | Z
- NEQUALV_MASK, // GE: N == V
- ~NEQUALV_MASK, // LT: N != V
- NEQUALV_MASK & ~ZSET_MASK, // GT: (N == V) & !Z
- ~NEQUALV_MASK | ZSET_MASK, // LE: (N != V) | Z
- 0xffff, // AL: always
- 0x0000 // NV: never
+ (UINT16)(ZSET_MASK), // EQ: Z
+ (UINT16)(~ZSET_MASK), // NE: !Z
+ (UINT16)(CSET_MASK), // CS: C
+ (UINT16)(~CSET_MASK), // CC: !C
+ (UINT16)(NSET_MASK), // MI: N
+ (UINT16)(~NSET_MASK), // PL: !N
+ (UINT16)(VSET_MASK), // VS: V
+ (UINT16)(~VSET_MASK), // VC: !V
+ (UINT16)(CSET_MASK & ~ZSET_MASK), // HI: C & !Z
+ (UINT16)(~CSET_MASK | ZSET_MASK), // LO: !C | Z
+ (UINT16)(NEQUALV_MASK), // GE: N == V
+ (UINT16)(~NEQUALV_MASK), // LT: N != V
+ (UINT16)(NEQUALV_MASK & ~ZSET_MASK), // GT: (N == V) & !Z
+ (UINT16)(~NEQUALV_MASK | ZSET_MASK), // LE: (N != V) | Z
+ (UINT16)(0xffff), // AL: always
+ (UINT16)(0x0000) // NV: never
};
@@ -1467,3 +1467,21 @@ BOOL DacUnwindStackFrame(T_CONTEXT *pContext, T_KNONVOLATILE_CONTEXT_POINTERS* p
return res;
}
+
+#if defined(FEATURE_PAL)
+PEXCEPTION_ROUTINE RtlVirtualUnwind(
+ __in ULONG HandlerType,
+ __in ULONG ImageBase,
+ __in ULONG ControlPc,
+ __in PRUNTIME_FUNCTION FunctionEntry,
+ __in OUT PCONTEXT ContextRecord,
+ __out PVOID *HandlerData,
+ __out PULONG EstablisherFrame,
+ __inout_opt PKNONVOLATILE_CONTEXT_POINTERS ContextPointers
+ )
+{
+ PORTABILITY_ASSERT("Implement for PAL");
+
+ return NULL;
+}
+#endif
diff --git a/src/unwinder/dac/CMakeLists.txt b/src/unwinder/dac/CMakeLists.txt
index e82046197c..12163af12a 100644
--- a/src/unwinder/dac/CMakeLists.txt
+++ b/src/unwinder/dac/CMakeLists.txt
@@ -1,10 +1,21 @@
include(${CLR_DIR}/dac.cmake)
add_definitions(-DFEATURE_NO_HOST)
-add_definitions(-D_TARGET_AMD64_=1)
-add_definitions(-DDBG_TARGET_64BIT=1)
-add_definitions(-DDBG_TARGET_AMD64=1)
-add_definitions(-DDBG_TARGET_WIN64=1)
-add_definitions(-D_WIN64=1)
+
+if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ add_definitions(-D_TARGET_AMD64_=1)
+ add_definitions(-DDBG_TARGET_64BIT=1)
+ add_definitions(-DDBG_TARGET_AMD64=1)
+ add_definitions(-DDBG_TARGET_WIN64=1)
+ add_definitions(-D_WIN64=1)
+elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
+ add_definitions(-D_TARGET_ARM_=1)
+ add_definitions(-DDBG_TARGET_32BIT=1)
+ add_definitions(-DDBG_TARGET_ARM=1)
+ add_definitions(-DDBG_TARGET_WIN32=1)
+ add_definitions(-D_WIN32=1)
+else()
+  message(FATAL_ERROR "Only ARM and AMD64 are supported")
+endif()
add_library(unwinder_dac ${UNWINDER_SOURCES})
diff --git a/src/utilcode/md5.cpp b/src/utilcode/md5.cpp
index 5b316122ee..039b00198f 100644
--- a/src/utilcode/md5.cpp
+++ b/src/utilcode/md5.cpp
@@ -133,7 +133,12 @@ void MD5::GetHashValue(MD5HASHDATA* phash)
//
// but our compiler has an intrinsic!
+ #if defined(_ARM_) && defined(PLATFORM_UNIX)
+ #define ROL(x, n) (((x) << (n)) | ((x) >> (32-(n))))
+ #define ROTATE_LEFT(x,n) (x) = ROL(x,n)
+ #else
#define ROTATE_LEFT(x,n) (x) = _lrotl(x,n)
+ #endif
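+
+    // The shift/or idiom above is recognized by the compiler and lowered to
+    // a single rotate instruction, matching the _lrotl intrinsic used on the
+    // other platforms.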
////////////////////////////////////////////////////////////////
//
diff --git a/src/utilcode/perflog.cpp b/src/utilcode/perflog.cpp
index 14e4737d13..1ee98d460e 100644
--- a/src/utilcode/perflog.cpp
+++ b/src/utilcode/perflog.cpp
@@ -115,7 +115,7 @@ void PerfLog::PerfLogInitialize()
// the file here for writing and close in PerfLogDone().
m_hPerfLogFileHandle = WszCreateFile (
#ifdef PLATFORM_UNIX
- L"/tmp/PerfData.dat",
+ W("/tmp/PerfData.dat"),
#else
W("C:\\PerfData.dat"),
#endif
diff --git a/src/utilcode/util_nodependencies.cpp b/src/utilcode/util_nodependencies.cpp
index f29e1df50d..135e1737c5 100644
--- a/src/utilcode/util_nodependencies.cpp
+++ b/src/utilcode/util_nodependencies.cpp
@@ -211,6 +211,9 @@ CHECK_SUPPORTED:
// Returns TRUE if we are running on a 64-bit OS in WoW, FALSE otherwise.
BOOL RunningInWow64()
{
+ #ifdef PLATFORM_UNIX
+ return FALSE;
+ #else
static int s_Wow64Process;
if (s_Wow64Process == 0)
@@ -224,6 +227,7 @@ BOOL RunningInWow64()
}
return (s_Wow64Process == 1) ? TRUE : FALSE;
+ #endif
}
#endif
diff --git a/src/vm/CMakeLists.txt b/src/vm/CMakeLists.txt
index 788c6aeeaa..fcc16acf78 100644
--- a/src/vm/CMakeLists.txt
+++ b/src/vm/CMakeLists.txt
@@ -3,17 +3,28 @@ set(CMAKE_INCLUDE_CURRENT_DIR ON)
# WINTODO: Conditionalize the next check
# AMD64 specific sources subdirectory
set(AMD64_SOURCES_DIR amd64)
+set(ARM_SOURCES_DIR arm)
# Needed due to the cmunged files being in the binary folders, the set(CMAKE_INCLUDE_CURRENT_DIR ON) is not enough
include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR})
include_directories(${CLR_DIR}/src/gc)
-include_directories(${AMD64_SOURCES_DIR})
-add_definitions(-D_TARGET_AMD64_=1)
-add_definitions(-DDBG_TARGET_64BIT=1)
-add_definitions(-DDBG_TARGET_AMD64=1)
-add_definitions(-DDBG_TARGET_WIN64=1)
+if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ add_definitions(-D_TARGET_AMD64_=1)
+ add_definitions(-DDBG_TARGET_64BIT=1)
+ add_definitions(-DDBG_TARGET_AMD64=1)
+ add_definitions(-DDBG_TARGET_WIN64=1)
+ include_directories(${AMD64_SOURCES_DIR})
+elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
+ add_definitions(-D_TARGET_ARM_=1)
+ add_definitions(-DDBG_TARGET_32BIT=1)
+ add_definitions(-DDBG_TARGET_ARM=1)
+ add_definitions(-DDBG_TARGET_WIN32=1)
+ include_directories(${ARM_SOURCES_DIR})
+else()
+  message(FATAL_ERROR "Only ARM and AMD64 are supported")
+endif()
add_definitions(-DFEATURE_LEAVE_RUNTIME_HOLDER=1)
@@ -302,7 +313,7 @@ list(APPEND VM_SOURCES_DAC
)
# AMD64 specific asm sources
-set(VM_SOURCES_WKS_AMD64_ASM
+set(VM_SOURCES_WKS_ARCH_ASM
${AMD64_SOURCES_DIR}/AsmHelpers.asm
${AMD64_SOURCES_DIR}/CallDescrWorkerAMD64.asm
${AMD64_SOURCES_DIR}/ComCallPreStub.asm
@@ -326,64 +337,95 @@ set(VM_SOURCES_WKS_AMD64_ASM
)
else()
-set(VM_SOURCES_WKS_AMD64_ASM
- ${AMD64_SOURCES_DIR}/calldescrworkeramd64.S
- ${AMD64_SOURCES_DIR}/crthelpers.S
- ${AMD64_SOURCES_DIR}/externalmethodfixupthunk.S
- ${AMD64_SOURCES_DIR}/getstate.S
- ${AMD64_SOURCES_DIR}/jithelpers_fast.S
- ${AMD64_SOURCES_DIR}/jithelpers_fastwritebarriers.S
- ${AMD64_SOURCES_DIR}/jithelpers_slow.S
- ${AMD64_SOURCES_DIR}/theprestubamd64.S
- ${AMD64_SOURCES_DIR}/unixasmhelpers.S
- ${AMD64_SOURCES_DIR}/umthunkstub.S
- ${AMD64_SOURCES_DIR}/virtualcallstubamd64.S
-)
-endif(WIN32)
-
-# AMD64 specific cpp sources
-
-set(VM_SOURCES_DAC_AND_WKS_AMD64
- ${AMD64_SOURCES_DIR}/cgenamd64.cpp
- ${AMD64_SOURCES_DIR}/excepamd64.cpp
- ${AMD64_SOURCES_DIR}/gmsamd64.cpp
- ${AMD64_SOURCES_DIR}/stublinkeramd64.cpp
-)
-set(VM_SOURCES_WKS_AMD64
-#The following files need to be ported to Linux
- ${AMD64_SOURCES_DIR}/jithelpersamd64.cpp
- ${AMD64_SOURCES_DIR}/jitinterfaceamd64.cpp
- ${AMD64_SOURCES_DIR}/profiler.cpp
- jitinterfacegen.cpp
-)
+ if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ set(VM_SOURCES_WKS_ARCH_ASM
+ ${AMD64_SOURCES_DIR}/calldescrworkeramd64.S
+ ${AMD64_SOURCES_DIR}/crthelpers.S
+ ${AMD64_SOURCES_DIR}/externalmethodfixupthunk.S
+ ${AMD64_SOURCES_DIR}/getstate.S
+ ${AMD64_SOURCES_DIR}/jithelpers_fast.S
+ ${AMD64_SOURCES_DIR}/jithelpers_fastwritebarriers.S
+ ${AMD64_SOURCES_DIR}/jithelpers_slow.S
+ ${AMD64_SOURCES_DIR}/theprestubamd64.S
+ ${AMD64_SOURCES_DIR}/unixasmhelpers.S
+ ${AMD64_SOURCES_DIR}/umthunkstub.S
+ ${AMD64_SOURCES_DIR}/virtualcallstubamd64.S
+ )
+ elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
+ set(VM_SOURCES_WKS_ARCH_ASM
+ ${ARM_SOURCES_DIR}/asmhelpers.S
+ ${ARM_SOURCES_DIR}/crthelpers.S
+ ${ARM_SOURCES_DIR}/ehhelpers.S
+ ${ARM_SOURCES_DIR}/memcpy.S
+ ${ARM_SOURCES_DIR}/patchedcode.S
+ )
+ endif()
+
+endif(WIN32)
-if(CLR_CMAKE_PLATFORM_UNIX)
-list(APPEND VM_SOURCES_WKS_AMD64
- ${AMD64_SOURCES_DIR}/unixstubs.cpp
-)
-
-endif(CLR_CMAKE_PLATFORM_UNIX)
+if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ set(VM_SOURCES_DAC_AND_WKS_ARCH
+ ${AMD64_SOURCES_DIR}/cgenamd64.cpp
+ ${AMD64_SOURCES_DIR}/excepamd64.cpp
+ ${AMD64_SOURCES_DIR}/gmsamd64.cpp
+ ${AMD64_SOURCES_DIR}/stublinkeramd64.cpp
+ )
+
+ set(VM_SOURCES_WKS_ARCH
+ ${AMD64_SOURCES_DIR}/jithelpersamd64.cpp
+ ${AMD64_SOURCES_DIR}/jitinterfaceamd64.cpp
+ ${AMD64_SOURCES_DIR}/profiler.cpp
+ jitinterfacegen.cpp
+ )
+
+ if(CLR_CMAKE_PLATFORM_UNIX)
+
+ list(APPEND VM_SOURCES_WKS_ARCH
+ ${AMD64_SOURCES_DIR}/unixstubs.cpp
+ )
+
+ endif(CLR_CMAKE_PLATFORM_UNIX)
+elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
+ set(VM_SOURCES_DAC_AND_WKS_ARCH
+ ${ARM_SOURCES_DIR}/exceparm.cpp
+ ${ARM_SOURCES_DIR}/stubs.cpp
+ ${ARM_SOURCES_DIR}/armsinglestepper.cpp
+ )
+
+ set(VM_SOURCES_WKS_ARCH
+ ${ARM_SOURCES_DIR}/jithelpersarm.cpp
+ ${ARM_SOURCES_DIR}/profiler.cpp
+ )
+
+ if(CLR_CMAKE_PLATFORM_UNIX)
+
+ list(APPEND VM_SOURCES_WKS_ARCH
+ ${ARM_SOURCES_DIR}/unixstubs.cpp
+ )
+
+ endif(CLR_CMAKE_PLATFORM_UNIX)
+endif()
-set(VM_SOURCES_DAC_AMD64
+set(VM_SOURCES_DAC_ARCH
gcinfodecoder.cpp
dbggcinfodecoder.cpp
exceptionhandling.cpp
)
list(APPEND VM_SOURCES_WKS
- ${VM_SOURCES_WKS_AMD64}
- ${VM_SOURCES_DAC_AND_WKS_AMD64}
+ ${VM_SOURCES_WKS_ARCH}
+ ${VM_SOURCES_DAC_AND_WKS_ARCH}
)
list(APPEND VM_SOURCES_DAC
- ${VM_SOURCES_DAC_AMD64}
- ${VM_SOURCES_DAC_AND_WKS_AMD64}
+ ${VM_SOURCES_DAC_ARCH}
+ ${VM_SOURCES_DAC_AND_WKS_ARCH}
)
convert_to_absolute_path(VM_SOURCES_WKS ${VM_SOURCES_WKS})
-convert_to_absolute_path(VM_SOURCES_WKS_AMD64_ASM ${VM_SOURCES_WKS_AMD64_ASM})
+convert_to_absolute_path(VM_SOURCES_WKS_ARCH_ASM ${VM_SOURCES_WKS_ARCH_ASM})
convert_to_absolute_path(VM_SOURCES_DAC ${VM_SOURCES_DAC})
add_subdirectory(dac)
diff --git a/src/vm/arm/armsinglestepper.cpp b/src/vm/arm/armsinglestepper.cpp
index a5b1d68112..46df245243 100644
--- a/src/vm/arm/armsinglestepper.cpp
+++ b/src/vm/arm/armsinglestepper.cpp
@@ -90,7 +90,7 @@ void ITState::Set(T_CONTEXT *pCtx)
//
ArmSingleStepper::ArmSingleStepper()
: m_originalPc(0), m_targetPc(0), m_rgCode(0), m_state(Disabled),
- m_fEmulatedITInstruction(false), m_fRedirectedPc(false), m_fBypass(false), m_fEmulate(false), m_fSkipIT(false)
+ m_fEmulatedITInstruction(false), m_fRedirectedPc(false), m_fEmulate(false), m_fBypass(false), m_fSkipIT(false)
{
m_opcodes[0] = 0;
m_opcodes[1] = 0;
@@ -98,14 +98,14 @@ ArmSingleStepper::ArmSingleStepper()
ArmSingleStepper::~ArmSingleStepper()
{
-#ifndef DACCESS_COMPILE
+#if !defined(DACCESS_COMPILE) && !defined(FEATURE_PAL)
DeleteExecutable(m_rgCode);
#endif
}
void ArmSingleStepper::Init()
{
-#ifndef DACCESS_COMPILE
+#if !defined(DACCESS_COMPILE) && !defined(FEATURE_PAL)
if (m_rgCode == NULL)
{
m_rgCode = new (executable) WORD[kMaxCodeBuffer];
@@ -543,34 +543,50 @@ void ArmSingleStepper::SetReg(T_CONTEXT *pCtx, DWORD reg, DWORD value)
// fault.
bool ArmSingleStepper::GetMem(DWORD *pdwResult, DWORD_PTR pAddress, DWORD cbSize, bool fSignExtend)
{
- __try
+ struct Param
{
- switch (cbSize)
+ DWORD *pdwResult;
+ DWORD_PTR pAddress;
+ DWORD cbSize;
+ bool fSignExtend;
+ bool bReturnValue;
+ } param;
+
+ param.pdwResult = pdwResult;
+ param.pAddress = pAddress;
+ param.cbSize = cbSize;
+ param.fSignExtend = fSignExtend;
+ param.bReturnValue = true;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ switch (pParam->cbSize)
{
case 1:
- *pdwResult = *(BYTE*)pAddress;
- if (fSignExtend && (*pdwResult & 0x00000080))
- *pdwResult |= 0xffffff00;
+ *pParam->pdwResult = *(BYTE*)pParam->pAddress;
+ if (pParam->fSignExtend && (*pParam->pdwResult & 0x00000080))
+ *pParam->pdwResult |= 0xffffff00;
break;
case 2:
- *pdwResult = *(WORD*)pAddress;
- if (fSignExtend && (*pdwResult & 0x00008000))
- *pdwResult |= 0xffff0000;
+ *pParam->pdwResult = *(WORD*)pParam->pAddress;
+ if (pParam->fSignExtend && (*pParam->pdwResult & 0x00008000))
+ *pParam->pdwResult |= 0xffff0000;
break;
case 4:
- *pdwResult = *(DWORD*)pAddress;
+ *pParam->pdwResult = *(DWORD*)pParam->pAddress;
break;
default:
UNREACHABLE();
- return false;
+ pParam->bReturnValue = false;
}
}
- __except(EXCEPTION_EXECUTE_HANDLER)
+ PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
{
- return false;
+ param.bReturnValue = false;
}
+ PAL_ENDTRY;
- return true;
+ return param.bReturnValue;
}
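Editor's note: stripped of the PAL_TRY fault handling (whose body cannot safely touch the enclosing frame's locals, hence the explicit Param struct above), the read-and-sign-extend logic amounts to this portable C++ sketch. ReadMem is a hypothetical name for illustration only:

    #include <cstdint>
    #include <cstring>

    // Read cbSize bytes from pAddress and optionally sign-extend to 32 bits.
    // The real GetMem wraps this in PAL_TRY so a faulting address is
    // reported as failure instead of crashing.
    bool ReadMem(uint32_t* pdwResult, const void* pAddress, uint32_t cbSize,
                 bool fSignExtend)
    {
        switch (cbSize)
        {
        case 1: { uint8_t v;  memcpy(&v, pAddress, 1);
                  *pdwResult = fSignExtend ? (uint32_t)(int32_t)(int8_t)v  : v; break; }
        case 2: { uint16_t v; memcpy(&v, pAddress, 2);
                  *pdwResult = fSignExtend ? (uint32_t)(int32_t)(int16_t)v : v; break; }
        case 4:   memcpy(pdwResult, pAddress, 4); break;
        default:  return false;
        }
        return true;
    }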
// Wrapper around GetMem above that will automatically return from TryEmulate() indicating the instruction
diff --git a/src/vm/arm/asmconstants.h b/src/vm/arm/asmconstants.h
index 8682f85d98..f121bdc224 100644
--- a/src/vm/arm/asmconstants.h
+++ b/src/vm/arm/asmconstants.h
@@ -12,7 +12,7 @@
// #error this file should only be used on an ARM platform
// #endif // _ARM_
-#include "..\..\inc\switches.h"
+#include "../../inc/switches.h"
//-----------------------------------------------------------------------------
diff --git a/src/vm/arm/asmhelpers.S b/src/vm/arm/asmhelpers.S
new file mode 100644
index 0000000000..4e6e46b211
--- /dev/null
+++ b/src/vm/arm/asmhelpers.S
@@ -0,0 +1,1361 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+// ==++==
+//
+
+//
+// ==--==
+#include "unixasmmacros.inc"
+#include "asmconstants.h"
+
+.syntax unified
+.thumb
+
+// LPVOID __stdcall GetCurrentIP(void)
+ LEAF_ENTRY GetCurrentIP, _TEXT
+ mov r0, lr
+ bx lr
+ LEAF_END GetCurrentIP, _TEXT
+
+// LPVOID __stdcall GetCurrentSP(void)
+ LEAF_ENTRY GetCurrentSP, _TEXT
+ mov r0, sp
+ bx lr
+ LEAF_END GetCurrentSP, _TEXT
+
+//-----------------------------------------------------------------------------
+// This helper routine enregisters the appropriate arguments and makes the
+// actual call.
+//-----------------------------------------------------------------------------
+//void CallDescrWorkerInternal(CallDescrData * pCallDescrData)
+ NESTED_ENTRY CallDescrWorkerInternal,_TEXT,NoHandler
+ push {r4,r5,r7,lr}
+ mov r7, sp
+
+ mov r5,r0 // save pCallDescrData in r5
+
+ ldr r1, [r5,#CallDescrData__numStackSlots]
+ cbz r1, LOCAL_LABEL(Ldonestack)
+
+ // Add frame padding to ensure frame size is a multiple of 8 (a requirement of the OS ABI).
+ // We push four registers (above) and numStackSlots arguments (below). If this comes to an odd number
+ // of slots we must pad with another. This simplifies to "if the low bit of numStackSlots is set,
+ // extend the stack another four bytes".
+ lsls r2, r1, #2
+ and r3, r2, #4
+ sub sp, sp, r3
+
+ // This loop copies numStackSlots words
+ // from [pSrcEnd-4,pSrcEnd-8,...] to [sp-4,sp-8,...]
+ ldr r0, [r5,#CallDescrData__pSrc]
+ add r0,r0,r2
+LOCAL_LABEL(Lstackloop):
+ ldr r2, [r0,#-4]!
+ str r2, [sp,#-4]!
+ subs r1, r1, #1
+ bne LOCAL_LABEL(Lstackloop)
+LOCAL_LABEL(Ldonestack):
+
+ // If FP arguments are supplied in registers (r3 != NULL) then initialize all of them from the pointer
+ // given in r3. Do not use "it" since it faults in floating point even when the instruction is not executed.
+ ldr r3, [r5,#CallDescrData__pFloatArgumentRegisters]
+ cbz r3, LOCAL_LABEL(LNoFloatingPoint)
+ vldm r3, {s0-s15}
+LOCAL_LABEL(LNoFloatingPoint):
+
+ // Copy [pArgumentRegisters, ..., pArgumentRegisters + 12]
+ // into r0, ..., r3
+
+ ldr r4, [r5,#CallDescrData__pArgumentRegisters]
+ ldm r4, {r0-r3}
+
+ CHECK_STACK_ALIGNMENT
+
+ // call pTarget
+ // Note that remoting expects the target in r4.
+ ldr r4, [r5,#CallDescrData__pTarget]
+ blx r4
+
+ ldr r3, [r5,#CallDescrData__fpReturnSize]
+
+ // Save FP return value if appropriate
+ cbz r3, LOCAL_LABEL(LFloatingPointReturnDone)
+
+ // Float return case
+ // Do not use "it" since it faults in floating point even when the instruction is not executed.
+ cmp r3, #4
+ bne LOCAL_LABEL(LNoFloatReturn)
+ vmov r0, s0
+ b LOCAL_LABEL(LFloatingPointReturnDone)
+LOCAL_LABEL(LNoFloatReturn):
+
+ // Double return case
+ // Do not use "it" since it faults in floating point even when the instruction is not executed.
+ cmp r3, #8
+ bne LOCAL_LABEL(LNoDoubleReturn)
+ vmov r0, r1, s0, s1
+ b LOCAL_LABEL(LFloatingPointReturnDone)
+LOCAL_LABEL(LNoDoubleReturn):
+
+ add r2, r5, #CallDescrData__returnValue
+
+ cmp r3, #16
+ bne LOCAL_LABEL(LNoFloatHFAReturn)
+ vstm r2, {s0-s3}
+ b LOCAL_LABEL(LReturnDone)
+LOCAL_LABEL(LNoFloatHFAReturn):
+
+ cmp r3, #32
+ bne LOCAL_LABEL(LNoDoubleHFAReturn)
+ vstm r2, {d0-d3}
+ b LOCAL_LABEL(LReturnDone)
+LOCAL_LABEL(LNoDoubleHFAReturn):
+
+ EMIT_BREAKPOINT // Unreachable
+
+LOCAL_LABEL(LFloatingPointReturnDone):
+
+ // Save return value into retbuf
+ str r0, [r5, #(CallDescrData__returnValue + 0)]
+ str r1, [r5, #(CallDescrData__returnValue + 4)]
+
+LOCAL_LABEL(LReturnDone):
+
+#ifdef _DEBUG
+ // trash the floating point registers to ensure that the HFA return values
+ // won't survive by accident
+ vldm sp, {d0-d3}
+#endif
+
+ mov sp, r7
+ pop {r4,r5,r7,pc}
+
+ NESTED_END CallDescrWorkerInternal,_TEXT
+
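Editor's note: the alignment trick in the prolog above is worth spelling out. Four registers are pushed (a multiple of 8 bytes), so the frame stays 8-byte aligned exactly when the number of 4-byte argument slots is even; the lsls/and pair therefore yields a pad of 4 bytes for odd slot counts and 0 otherwise. A minimal C++ equivalent (sketch):

    #include <cassert>
    #include <cstddef>

    // Pad (in bytes) needed to keep the stack 8-byte aligned after pushing
    // numStackSlots 4-byte slots; mirrors "lsls r2,r1,#2 / and r3,r2,#4".
    size_t StackPad(size_t numStackSlots)
    {
        return (numStackSlots * 4) & 4;
    }

    int main()
    {
        assert(StackPad(3) == 4);   // odd slot count: pad one extra slot
        assert(StackPad(4) == 0);   // even slot count: already aligned
        return 0;
    }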
+
+//-----------------------------------------------------------------------------
+// This helper routine is where returns for irregular tail calls end up
+// so they can dynamically pop their stack arguments.
+//-----------------------------------------------------------------------------
+//
+// Stack Layout (stack grows up, 0 at the top, offsets relative to frame pointer, r7):
+//
+// sp -> callee stack arguments
+// :
+// :
+// -0Ch gsCookie
+// TailCallHelperFrame ->
+// -08h __VFN_table
+// -04h m_Next
+// r7 ->
+// +00h m_calleeSavedRegisters.r4
+// +04h .r5
+// +08h .r6
+// +0Ch .r7
+// +10h .r8
+// +14h .r9
+// +18h .r10
+// r11->
+// +1Ch .r11
+// +20h .r14 -or- m_ReturnAddress
+//
+// r6 -> GetThread()
+// r5 -> r6->m_pFrame (old Frame chain head)
+// r11 is used to preserve the ETW call stack
+
+ NESTED_ENTRY TailCallHelperStub, _TEXT, NoHandler
+ //
+ // This prolog is never executed, but we keep it here for reference
+ // and for the unwind data it generates
+ //
+
+ // Spill callee saved registers and return address.
+ push {r4-r11,lr}
+
+ mov r7, sp
+
+ //
+ // This is the code that would have to run to setup this frame
+ // like the C++ helper does before calling RtlRestoreContext
+ //
+ // Allocate space for the rest of the frame and GSCookie.
+ // PROLOG_STACK_ALLOC 0x0C
+ //
+ // Set r11 for frame chain
+ //add r11, r7, 0x1C
+ //
+ // Set the vtable for TailCallFrame
+ //bl TCF_GETMETHODFRAMEVPTR
+ //str r0, [r7, #-8]
+ //
+ // Initialize the GSCookie within the Frame
+ //ldr r0, =s_gsCookie
+ //str r0, [r7, #-0x0C]
+ //
+ // Link the TailCallFrame into the Frame chain
+ // and initialize r5 & r6 for unlinking later
+ //CALL_GETTHREAD
+ //mov r6, r0
+ //ldr r5, [r6, #Thread__m_pFrame]
+ //str r5, [r7, #-4]
+ //sub r0, r7, 8
+ //str r0, [r6, #Thread__m_pFrame]
+ //
+ // None of the previous stuff is ever executed,
+ // but we keep it here for reference
+ //
+
+ //
+ // Here's the pretend call (make it real so the unwinder
+ // doesn't think we're in the prolog)
+ //
+ bl C_FUNC(TailCallHelperStub)
+ //
+ // with the real return address pointing to this real epilog
+ //
+C_FUNC(JIT_TailCallHelperStub_ReturnAddress):
+.global C_FUNC(JIT_TailCallHelperStub_ReturnAddress)
+
+ //
+ // Our epilog (which also unlinks the StubHelperFrame)
+ // Be careful not to trash the return registers
+ //
+
+#ifdef _DEBUG
+ ldr r3, =s_gsCookie
+ ldr r3, [r3]
+ ldr r2, [r7, #-0x0C]
+ cmp r2, r3
+ beq LOCAL_LABEL(GoodGSCookie)
+ bl C_FUNC(DoJITFailFast)
+LOCAL_LABEL(GoodGSCookie):
+#endif // _DEBUG
+
+ //
+ // unlink the TailCallFrame
+ //
+ str r5, [r6, #Thread__m_pFrame]
+
+ //
+ // epilog
+ //
+ mov sp, r7
+ pop {r4-r11,lr}
+ bx lr
+
+ NESTED_END TailCallHelperStub, _TEXT
+
+// ------------------------------------------------------------------
+
+// void LazyMachStateCaptureState(struct LazyMachState *pState)
+ LEAF_ENTRY LazyMachStateCaptureState, _TEXT
+
+ // marks that this is not yet valid
+ mov r1, #0
+ str r1, [r0, #MachState__isValid]
+
+ str lr, [r0, #LazyMachState_captureIp]
+ str sp, [r0, #LazyMachState_captureSp]
+
+ add r1, r0, #LazyMachState_captureR4_R11
+ stm r1, {r4-r11}
+
+ mov pc, lr
+
+ LEAF_END LazyMachStateCaptureState, _TEXT
+
+// void SinglecastDelegateInvokeStub(Delegate *pThis)
+ LEAF_ENTRY SinglecastDelegateInvokeStub, _TEXT
+ cmp r0, #0
+ beq LOCAL_LABEL(LNullThis)
+
+ ldr r12, [r0, #DelegateObject___methodPtr]
+ ldr r0, [r0, #DelegateObject___target]
+
+ bx r12
+
+LOCAL_LABEL(LNullThis):
+ mov r0, #CORINFO_NullReferenceException_ASM
+ b C_FUNC(JIT_InternalThrow)
+
+ LEAF_END SinglecastDelegateInvokeStub, _TEXT
+
+//
+// r12 = UMEntryThunk*
+//
+ NESTED_ENTRY TheUMEntryPrestub,_TEXT,NoHandler
+
+ push {r0-r4,lr}
+ vpush {d0-d7}
+
+ CHECK_STACK_ALIGNMENT
+
+ mov r0, r12
+ bl C_FUNC(TheUMEntryPrestubWorker)
+
+ // Record real target address in r12.
+ mov r12, r0
+
+ // Epilog
+ vpop {d0-d7}
+ pop {r0-r4,lr}
+ bx r12
+
+ NESTED_END TheUMEntryPrestub,_TEXT
+
+//
+// r12 = UMEntryThunk*
+//
+ NESTED_ENTRY UMThunkStub,_TEXT,NoHandler
+ push {r4,r5,r7,r11,lr}
+ push {r0-r3,r12}
+ mov r7, sp
+
+ //GBLA UMThunkStub_HiddenArg // offset of saved UMEntryThunk *
+ //GBLA UMThunkStub_StackArgs // offset of original stack args (total size of UMThunkStub frame)
+UMThunkStub_HiddenArg = 4*4
+UMThunkStub_StackArgs = 10*4
+
+ CHECK_STACK_ALIGNMENT
+
+ bl C_FUNC(GetThread)
+ cbz r0, LOCAL_LABEL(UMThunkStub_DoThreadSetup)
+
+LOCAL_LABEL(UMThunkStub_HaveThread):
+ mov r5, r0 // r5 = Thread *
+
+ ldr r2, =g_TrapReturningThreads
+
+ mov r4, 1
+ str r4, [r5, #Thread__m_fPreemptiveGCDisabled]
+
+ ldr r3, [r2]
+ cbnz r3, LOCAL_LABEL(UMThunkStub_DoTrapReturningThreads)
+
+LOCAL_LABEL(UMThunkStub_InCooperativeMode):
+ ldr r12, [r7, #UMThunkStub_HiddenArg]
+
+ ldr r0, [r5, #Thread__m_pDomain]
+ ldr r1, [r12, #UMEntryThunk__m_dwDomainId]
+ ldr r0, [r0, #AppDomain__m_dwId]
+ ldr r3, [r12, #UMEntryThunk__m_pUMThunkMarshInfo]
+ cmp r0, r1
+ bne LOCAL_LABEL(UMThunkStub_WrongAppDomain)
+
+ ldr r2, [r3, #UMThunkMarshInfo__m_cbActualArgSize]
+ cbz r2, LOCAL_LABEL(UMThunkStub_ArgumentsSetup)
+
+ add r0, r7, #UMThunkStub_StackArgs // Source pointer
+ add r0, r0, r2
+ lsr r1, r2, #2 // Count of stack slots to copy
+
+ and r2, r2, #4 // Align the stack
+ sub sp, sp, r2
+
+LOCAL_LABEL(UMThunkStub_StackLoop):
+ ldr r2, [r0,#-4]!
+ str r2, [sp,#-4]!
+ subs r1, r1, #1
+ bne LOCAL_LABEL(UMThunkStub_StackLoop)
+
+LOCAL_LABEL(UMThunkStub_ArgumentsSetup):
+ ldr r4, [r3, #UMThunkMarshInfo__m_pILStub]
+
+ // reload argument registers
+ ldm r7, {r0-r3}
+
+ CHECK_STACK_ALIGNMENT
+
+ blx r4
+
+LOCAL_LABEL(UMThunkStub_PostCall):
+ mov r4, 0
+ str r4, [r5, #Thread__m_fPreemptiveGCDisabled]
+
+ mov sp, r7
+ add sp, sp, #(4 * 5)
+ pop {r4,r5,r7,r11,pc}
+
+LOCAL_LABEL(UMThunkStub_DoThreadSetup):
+ sub sp, #SIZEOF__FloatArgumentRegisters
+ vstm sp, {d0-d7}
+ bl C_FUNC(CreateThreadBlockThrow)
+ vldm sp, {d0-d7}
+ add sp, #SIZEOF__FloatArgumentRegisters
+ b LOCAL_LABEL(UMThunkStub_HaveThread)
+
+LOCAL_LABEL(UMThunkStub_DoTrapReturningThreads):
+ sub sp, #SIZEOF__FloatArgumentRegisters
+ vstm sp, {d0-d7}
+ mov r0, r5 // Thread* pThread
+ ldr r1, [r7, #UMThunkStub_HiddenArg] // UMEntryThunk* pUMEntry
+ bl C_FUNC(UMThunkStubRareDisableWorker)
+ vldm sp, {d0-d7}
+ add sp, #SIZEOF__FloatArgumentRegisters
+ b LOCAL_LABEL(UMThunkStub_InCooperativeMode)
+
+LOCAL_LABEL(UMThunkStub_WrongAppDomain):
+ sub sp, #SIZEOF__FloatArgumentRegisters
+ vstm sp, {d0-d7}
+
+ ldr r0, [r7, #UMThunkStub_HiddenArg] // UMEntryThunk* pUMEntry
+ mov r2, r7 // void * pArgs
+ // remaining arguments are unused
+ bl C_FUNC(UM2MDoADCallBack)
+
+ // Restore non-FP return value.
+ ldr r0, [r7, #0]
+ ldr r1, [r7, #4]
+
+ // Restore FP return value or HFA.
+ vldm sp, {d0-d3}
+ b LOCAL_LABEL(UMThunkStub_PostCall)
+
+ NESTED_END UMThunkStub,_TEXT
+
+// UM2MThunk_WrapperHelper(void *pThunkArgs, // r0
+// int cbStackArgs, // r1 (unused)
+// void *pAddr, // r2 (unused)
+// UMEntryThunk *pEntryThunk, // r3
+// Thread *pThread) // [sp, #0]
+
+ NESTED_ENTRY UM2MThunk_WrapperHelper, _TEXT, NoHandler
+
+ push {r4-r7,r11,lr}
+ mov r7, sp
+
+ CHECK_STACK_ALIGNMENT
+
+ mov r12, r3 // r12 = UMEntryThunk *
+
+ //
+ // Note that layout of the arguments is given by UMThunkStub frame
+ //
+ mov r5, r0 // r5 = pArgs
+
+ ldr r3, [r12, #UMEntryThunk__m_pUMThunkMarshInfo]
+
+ ldr r2, [r3, #UMThunkMarshInfo__m_cbActualArgSize]
+ cbz r2, LOCAL_LABEL(UM2MThunk_WrapperHelper_ArgumentsSetup)
+
+ add r0, r5, #UMThunkStub_StackArgs // Source pointer
+ add r0, r0, r2
+ lsr r1, r2, #2 // Count of stack slots to copy
+
+ and r2, r2, #4 // Align the stack
+ sub sp, sp, r2
+
+LOCAL_LABEL(UM2MThunk_WrapperHelper_StackLoop):
+ ldr r2, [r0,#-4]!
+ str r2, [sp,#-4]!
+ subs r1, r1, #1
+ bne LOCAL_LABEL(UM2MThunk_WrapperHelper_StackLoop)
+
+LOCAL_LABEL(UM2MThunk_WrapperHelper_ArgumentsSetup):
+ ldr r4, [r3, #UMThunkMarshInfo__m_pILStub]
+
+ // reload floating point registers
+ sub r6, r5, #SIZEOF__FloatArgumentRegisters
+ vldm r6, {d0-d7}
+
+ // reload argument registers
+ ldm r5, {r0-r3}
+
+ CHECK_STACK_ALIGNMENT
+
+ blx r4
+
+ // Save non-floating point return
+ str r0, [r5, #0]
+ str r1, [r5, #4]
+
+ // Save FP return value or HFA.
+ vstm r6, {d0-d3}
+
+#ifdef _DEBUG
+ // trash the floating point registers to ensure that the HFA return values
+ // won't survive by accident
+ vldm sp, {d0-d3}
+#endif
+
+ mov sp, r7
+ pop {r4-r7,r11,pc}
+
+ NESTED_END UM2MThunk_WrapperHelper, _TEXT
+
+// ------------------------------------------------------------------
+
+ NESTED_ENTRY ThePreStub, _TEXT, NoHandler
+
+ PROLOG_WITH_TRANSITION_BLOCK
+
+ add r0, sp, #__PWTB_TransitionBlock // pTransitionBlock
+ mov r1, r12 // pMethodDesc
+
+ bl C_FUNC(PreStubWorker)
+
+ mov r12, r0
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+ bx r12
+
+ NESTED_END ThePreStub, _TEXT
+
+// ------------------------------------------------------------------
+// This method does nothing. It's just a fixed function for the debugger to put a breakpoint on.
+ LEAF_ENTRY ThePreStubPatch, _TEXT
+ nop
+ThePreStubPatchLabel:
+ .global ThePreStubPatchLabel
+ bx lr
+ LEAF_END ThePreStubPatch, _TEXT
+
+// ------------------------------------------------------------------
+// The call in ndirect import precode points to this function.
+ NESTED_ENTRY NDirectImportThunk, _TEXT, NoHandler
+
+ push {r0-r4,lr} // Spill general argument registers, return address and
+ // arbitrary register to keep stack aligned
+ vpush {d0-d7} // Spill floating point argument registers
+
+ CHECK_STACK_ALIGNMENT
+
+ mov r0, r12
+ bl C_FUNC(NDirectImportWorker)
+ mov r12, r0
+
+ vpop {d0-d7}
+ pop {r0-r4,lr}
+
+ // If we got back from NDirectImportWorker, the MD has been successfully
+ // linked. Proceed to execute the original DLL call.
+ bx r12
+
+ NESTED_END NDirectImportThunk, _TEXT
+
+// ------------------------------------------------------------------
+// The call in fixup precode initially points to this function.
+// The purpose of this function is to load the MethodDesc and forward the call to the prestub.
+ NESTED_ENTRY PrecodeFixupThunk, _TEXT, NoHandler
+
+ // r12 = FixupPrecode *
+
+ push {r0-r1}
+
+ // Inline computation done by FixupPrecode::GetMethodDesc()
+ ldrb r0, [r12, #3] // m_PrecodeChunkIndex
+ ldrb r1, [r12, #2] // m_MethodDescChunkIndex
+
+ add r12,r12,r0,lsl #3
+ add r0,r12,r0,lsl #2
+ ldr r0, [r0,#8]
+ add r12,r0,r1,lsl #2
+
+ pop {r0-r1}
+ b C_FUNC(ThePreStub)
+
+ NESTED_END PrecodeFixupThunk, _TEXT
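Editor's note: the six instructions above inline FixupPrecode::GetMethodDesc(). Rendered as C++, the address arithmetic reads as follows (a sketch with simplified types; offsets taken literally from the asm, field names per its comments):

    #include <cstdint>

    // Mirrors the inline address arithmetic in PrecodeFixupThunk.
    // Precode layout (simplified): byte 2 = m_MethodDescChunkIndex,
    // byte 3 = m_PrecodeChunkIndex.
    uintptr_t GetMethodDescFromFixupPrecode(uintptr_t precode)
    {
        uint8_t chunkIndex = *(const uint8_t*)(precode + 3);
        uint8_t mdIndex    = *(const uint8_t*)(precode + 2);
        uintptr_t p    = precode + ((uintptr_t)chunkIndex << 3); // add r12,r12,r0,lsl #3
        uintptr_t slot = p + ((uintptr_t)chunkIndex << 2);       // add r0,r12,r0,lsl #2
        uintptr_t base = *(const uintptr_t*)(slot + 8);          // ldr r0,[r0,#8]
        return base + ((uintptr_t)mdIndex << 2);                 // add r12,r0,r1,lsl #2
    }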
+
+// ------------------------------------------------------------------
+// void ResolveWorkerAsmStub(r0, r1, r2, r3, r4:IndirectionCellAndFlags, r12:DispatchToken)
+//
+// The stub dispatch thunk which transfers control to VSD_ResolveWorker.
+ NESTED_ENTRY ResolveWorkerAsmStub, _TEXT, NoHandler
+
+ PROLOG_WITH_TRANSITION_BLOCK
+
+ add r0, sp, #__PWTB_TransitionBlock // pTransitionBlock
+ mov r2, r12 // token
+
+ // indirection cell in r4 - should be consistent with REG_ARM_STUB_SPECIAL
+ bic r1, r4, #3 // indirection cell
+ and r3, r4, #3 // flags
+
+ bl C_FUNC(VSD_ResolveWorker)
+
+ mov r12, r0
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+ bx r12
+
+ NESTED_END ResolveWorkerAsmStub, _TEXT
+
+// ------------------------------------------------------------------
+// void ResolveWorkerChainLookupAsmStub(r0, r1, r2, r3, r4:IndirectionCellAndFlags, r12:DispatchToken)
+ NESTED_ENTRY ResolveWorkerChainLookupAsmStub, _TEXT, NoHandler
+
+ // ARMSTUB TODO: implement chained lookup
+ b C_FUNC(ResolveWorkerAsmStub)
+
+ NESTED_END ResolveWorkerChainLookupAsmStub, _TEXT
+
+ //
+ // If a preserved register was pushed onto the stack between
+ // the managed caller and the H_M_F, _R4_R11 will point to its
+ // location on the stack; the GC will already have updated that
+ // copy, and it will be popped back into the appropriate register
+ // when the appropriate epilog is run.
+ //
+ // Otherwise, the register is preserved across all the code
+ // in this HCALL or FCALL, so we need to update those registers
+ // here because the GC will have updated our copies in the
+ // frame.
+ //
+ // So, if _R4_R11 points into the MachState, we need to update
+ // the register here. That's what this macro does.
+ //
+
+ .macro RestoreRegMS regIndex, reg
+
+ // Incoming:
+ //
+ // R0 = address of MachState
+ //
+ // $regIndex: Index of the register (R4-R11). For R4, index is 4.
+ // For R5, index is 5, and so on.
+ //
+ // $reg: Register name (e.g. R4, R5, etc)
+ //
+ // Get the address of the specified captured register from machine state
+ add r2, r0, #(MachState__captureR4_R11 + ((\regIndex-4)*4))
+
+ // Get the address of the specified preserved register from machine state
+ ldr r3, [r0, #(MachState___R4_R11 + ((\regIndex-4)*4))]
+
+ cmp r2, r3
+ bne 0f
+ ldr \reg, [r2]
+0:
+
+ .endm
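Editor's note: stated in C++, the macro restores a register only when the preserved-register pointer still targets the MachState's own capture slot; otherwise the live copy sits elsewhere on the stack and a real epilog will restore it. A sketch, with the struct reduced to the two arrays the offsets address:

    #include <cstdint>

    struct MachStateSketch
    {
        uint32_t  captureR4_R11[8];   // registers captured at H_M_F setup
        uint32_t* _R4_R11[8];         // where each preserved reg currently lives
    };

    // If the pointer for register idx (4..11) still targets our own capture
    // slot, the GC updated the capture copy, so load it back into reg.
    void RestoreRegMS(const MachStateSketch& ms, int idx, uint32_t& reg)
    {
        const uint32_t* capture = &ms.captureR4_R11[idx - 4];
        if (ms._R4_R11[idx - 4] == capture)
            reg = *capture;
    }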
+
+// EXTERN_C int __fastcall HelperMethodFrameRestoreState(
+// INDEBUG_COMMA(HelperMethodFrame *pFrame)
+// MachState *pState
+// )
+ LEAF_ENTRY HelperMethodFrameRestoreState, _TEXT
+
+#ifdef _DEBUG
+ mov r0, r1
+#endif
+
+ // If machine state is invalid, then simply exit
+ ldr r1, [r0, #MachState__isValid]
+ cmp r1, #0
+ beq LOCAL_LABEL(Done)
+
+ RestoreRegMS 4, R4
+ RestoreRegMS 5, R5
+ RestoreRegMS 6, R6
+ RestoreRegMS 7, R7
+ RestoreRegMS 8, R8
+ RestoreRegMS 9, R9
+ RestoreRegMS 10, R10
+ RestoreRegMS 11, R11
+LOCAL_LABEL(Done):
+ // It's imperative that the return value of HelperMethodFrameRestoreState is zero
+ // as it is used in the state machine to loop until it becomes zero.
+ // Refer to HELPER_METHOD_FRAME_END macro for details.
+ mov r0,#0
+ bx lr
+
+ LEAF_END HelperMethodFrameRestoreState, _TEXT
+
+#if 0
+// ------------------------------------------------------------------
+// Macro to generate Redirection Stubs
+//
+// $reason : reason for redirection
+// Eg. GCThreadControl
+// NOTE: If you edit this macro, make sure you update GetCONTEXTFromRedirectedStubStackFrame.
+// This function is used by both the personality routine and the debugger to retrieve the original CONTEXT.
+ .macro GenerateRedirectedHandledJITCaseStub reason
+
+ NESTED_ENTRY RedirectedHandledJITCaseFor\reason\()_Stub, _TEXT, NoHandler
+
+ push {r7,lr} // return address
+ alloc_stack 4 // stack slot to save the CONTEXT *
+ mov r7, sp
+
+ //REDIRECTSTUB_SP_OFFSET_CONTEXT is defined in asmconstants.h
+ //If CONTEXT is not saved at 0 offset from SP it must be changed as well.
+ //ASSERT REDIRECTSTUB_SP_OFFSET_CONTEXT == 0
+
+ // Runtime check for 8-byte alignment. This check is necessary as this function can be
+ // entered before complete execution of the prolog of another function.
+ and r0, r7, #4
+ sub sp, sp, r0
+
+ // stack must be 8 byte aligned
+ CHECK_STACK_ALIGNMENT
+
+ //
+ // Save a copy of the redirect CONTEXT*.
+ // This is needed for the debugger to unwind the stack.
+ //
+ bl GetCurrentSavedRedirectContext
+ str r0, [r7]
+
+ //
+ // Fetch the interrupted pc and save it as our return address.
+ //
+ ldr r1, [r0, #CONTEXT_Pc]
+ str r1, [r7, #8]
+
+ //
+ // Call target, which will do whatever we needed to do in the context
+ // of the target thread, and will RtlRestoreContext when it is done.
+ //
+ bl _RedirectedHandledJITCaseFor\reason\()_Stub@Thread@@CAXXZ
+
+ EMIT_BREAKPOINT // Unreachable
+
+// Put a label here to tell the debugger where the end of this function is.
+RedirectedHandledJITCaseFor\reason\()_StubEnd:
+ .global RedirectedHandledJITCaseFor\reason\()_StubEnd
+
+ NESTED_END RedirectedHandledJITCaseFor\reason\()_Stub, _TEXT
+
+ .endm
+
+// ------------------------------------------------------------------
+// Redirection Stub for GC in fully interruptible method
+ GenerateRedirectedHandledJITCaseStub GCThreadControl
+// ------------------------------------------------------------------
+ GenerateRedirectedHandledJITCaseStub DbgThreadControl
+// ------------------------------------------------------------------
+ GenerateRedirectedHandledJITCaseStub UserSuspend
+// ------------------------------------------------------------------
+ GenerateRedirectedHandledJITCaseStub YieldTask
+
+#ifdef _DEBUG
+// ------------------------------------------------------------------
+// Redirection Stub for GC Stress
+ GenerateRedirectedHandledJITCaseStub GCStress
+#endif
+
+#endif
+
+// ------------------------------------------------------------------
+// Functions to probe for stack space
+// Input reg r4 = amount of stack to probe for
+// value of reg r4 is preserved on exit from function
+// r12 is trashed
+// The below two functions were copied from vctools\crt\crtw32\startup\arm\chkstk.asm
+
+ NESTED_ENTRY checkStack, _TEXT, NoHandler
+ subs r12,sp,r4
+ mrc p15,#0,r4,c13,c0,#2 // get TEB *
+ ldr r4,[r4,#8] // get Stack limit
+ bcc LOCAL_LABEL(checkStack_neg) // if r12 is less than 0 set it to 0
+LOCAL_LABEL(checkStack_label1):
+ cmp r12, r4
+ bcc C_FUNC(stackProbe) // must probe to extend guardpage if r12 is beyond stackLimit
+ sub r4, sp, r12 // restore value of r4
+ bx lr
+LOCAL_LABEL(checkStack_neg):
+ mov r12, #0
+ b LOCAL_LABEL(checkStack_label1)
+ NESTED_END checkStack, _TEXT
+
+ NESTED_ENTRY stackProbe, _TEXT, NoHandler
+ push {r5,r6}
+ mov r6, r12
+ bfc r6, #0, #0xc // align down (4K)
+LOCAL_LABEL(stackProbe_loop):
+ sub r4,r4,#0x1000 // dec stack Limit by 4K as page size is 4K
+ ldr r5,[r4] // try to read ... this should move the guard page
+ cmp r4,r6
+ bne LOCAL_LABEL(stackProbe_loop)
+ pop {r5,r6}
+ sub r4,sp,r12
+ bx lr
+ NESTED_END stackProbe, _TEXT
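Editor's note: taken together, checkStack and stackProbe clamp the requested extent at zero and, when it reaches below the current stack limit, touch each intervening 4K page so the OS can move the guard page down one page at a time. An equivalent C++ sketch, with the TEB stack-limit read replaced by a plain parameter:

    #include <cstdint>

    // Touch every 4K page between the current stack limit and the requested
    // stack extent so the guard page advances one page at a time.
    void ProbeStack(volatile char* limit, uintptr_t sp, uintptr_t bytesNeeded)
    {
        uintptr_t target = (sp >= bytesNeeded) ? sp - bytesNeeded : 0;
        target &= ~(uintptr_t)0xFFF;            // bfc: align down to 4K
        while ((uintptr_t)limit > target)
        {
            limit -= 0x1000;                    // one page at a time
            (void)*limit;                       // the read moves the guard page
        }
    }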
+
+//------------------------------------------------
+// VirtualMethodFixupStub
+//
+// In NGEN images, virtual slots inherited from cross-module dependencies
+// point to a jump thunk that calls into the following function that will
+// call into a VM helper. The VM helper is responsible for patching up
+// thunk, upon executing the precode, so that all subsequent calls go directly
+// to the actual method body.
+//
+// This is done lazily for performance reasons.
+//
+// On entry:
+//
+// R0 = "this" pointer
+// R12 = Address of thunk + 4
+
+ NESTED_ENTRY VirtualMethodFixupStub, _TEXT, NoHandler
+
+ // Save arguments and return address
+ push {r0-r3, lr}
+
+ // Align stack
+ alloc_stack SIZEOF__FloatArgumentRegisters + 4
+ vstm sp, {d0-d7}
+
+
+ CHECK_STACK_ALIGNMENT
+
+ // R12 contains an address that is 4 bytes ahead of
+ // where the thunk starts. Refer to ZapImportVirtualThunk::Save
+ // for details on this.
+ //
+ // Move the correct thunk start address in R1
+ sub r1, r12, #4
+
+ // Call the helper in the VM to perform the actual fixup
+ // and tell us where to tail call. R0 already contains
+ // the this pointer.
+ bl C_FUNC(VirtualMethodFixupWorker)
+
+ // On return, R0 contains the target to tailcall to
+ mov r12, r0
+
+ // pop the stack and restore original register state
+ vldm sp, {d0-d7}
+ free_stack SIZEOF__FloatArgumentRegisters + 4
+ pop {r0-r3, lr}
+
+ PATCH_LABEL VirtualMethodFixupPatchLabel
+
+ // and tailcall to the actual method
+ bx r12
+
+ NESTED_END VirtualMethodFixupStub, _TEXT
+
+//------------------------------------------------
+// ExternalMethodFixupStub
+//
+// In NGEN images, calls to cross-module external methods initially
+// point to a jump thunk that calls into the following function that will
+// call into a VM helper. The VM helper is responsible for patching up the
+// thunk, upon executing the precode, so that all subsequent calls go directly
+// to the actual method body.
+//
+// This is done lazily for performance reasons.
+//
+// On entry:
+//
+// R12 = Address of thunk + 4
+
+ NESTED_ENTRY ExternalMethodFixupStub, _TEXT, NoHandler
+
+ PROLOG_WITH_TRANSITION_BLOCK
+
+ add r0, sp, #__PWTB_TransitionBlock // pTransitionBlock
+
+ // Adjust (read comment above for details) and pass the address of the thunk
+ sub r1, r12, #4 // pThunk
+
+ mov r2, #0 // sectionIndex
+ mov r3, #0 // pModule
+ bl C_FUNC(ExternalMethodFixupWorker)
+
+ // mov the address we patched to in R12 so that we can tail call to it
+ mov r12, r0
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+ PATCH_LABEL ExternalMethodFixupPatchLabel
+ bx r12
+
+ NESTED_END ExternalMethodFixupStub, _TEXT
+
+//------------------------------------------------
+// StubDispatchFixupStub
+//
+// In NGEN images, calls to interface methods initially
+// point to a jump thunk that calls into the following function that will
+// call into a VM helper. The VM helper is responsible for patching up the
+// thunk with actual stub dispatch stub.
+//
+// On entry:
+//
+// R4 = Address of indirection cell
+
+ NESTED_ENTRY StubDispatchFixupStub, _TEXT, NoHandler
+
+ PROLOG_WITH_TRANSITION_BLOCK
+
+ // address of StubDispatchFrame
+ add r0, sp, #__PWTB_TransitionBlock // pTransitionBlock
+ mov r1, r4 // siteAddrForRegisterIndirect
+ mov r2, #0 // sectionIndex
+ mov r3, #0 // pModule
+
+ bl C_FUNC(StubDispatchFixupWorker)
+
+ // mov the address we patched to in R12 so that we can tail call to it
+ mov r12, r0
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+ PATCH_LABEL StubDispatchFixupPatchLabel
+ bx r12
+
+ NESTED_END StubDispatchFixupStub, _TEXT
+
+//------------------------------------------------
+// JIT_RareDisableHelper
+//
+// The JIT expects this helper to preserve registers used for return values
+//
+ NESTED_ENTRY JIT_RareDisableHelper, _TEXT, NoHandler
+
+ push {r0-r1, r11, lr} // save integer return value
+ vpush {d0-d3} // floating point return value
+
+ CHECK_STACK_ALIGNMENT
+
+ bl C_FUNC(JIT_RareDisableHelperWorker)
+
+ vpop {d0-d3}
+ pop {r0-r1, r11, pc}
+
+ NESTED_END JIT_RareDisableHelper, _TEXT
+
+
+#ifdef FEATURE_CORECLR
+//
+// JIT Static access helpers for single appdomain case
+//
+
+// ------------------------------------------------------------------
+// void* JIT_GetSharedNonGCStaticBase(SIZE_T moduleDomainID, DWORD dwClassDomainID)
+
+ LEAF_ENTRY JIT_GetSharedNonGCStaticBase_SingleAppDomain, _TEXT
+
+ // If class is not initialized, bail to C++ helper
+ add r2, r0, #DomainLocalModule__m_pDataBlob
+ ldrb r2, [r2, r1]
+ tst r2, #1
+ beq LOCAL_LABEL(CallCppHelper1)
+
+ bx lr
+
+LOCAL_LABEL(CallCppHelper1):
+ // Tail call JIT_GetSharedNonGCStaticBase_Helper
+ b C_FUNC(JIT_GetSharedNonGCStaticBase_Helper)
+ LEAF_END JIT_GetSharedNonGCStaticBase_SingleAppDomain, _TEXT
+
+
+// ------------------------------------------------------------------
+// void* JIT_GetSharedNonGCStaticBaseNoCtor(SIZE_T moduleDomainID, DWORD dwClassDomainID)
+
+ LEAF_ENTRY JIT_GetSharedNonGCStaticBaseNoCtor_SingleAppDomain, _TEXT
+
+ bx lr
+ LEAF_END JIT_GetSharedNonGCStaticBaseNoCtor_SingleAppDomain, _TEXT
+
+
+// ------------------------------------------------------------------
+// void* JIT_GetSharedGCStaticBase(SIZE_T moduleDomainID, DWORD dwClassDomainID)
+
+ LEAF_ENTRY JIT_GetSharedGCStaticBase_SingleAppDomain, _TEXT
+
+ // If class is not initialized, bail to C++ helper
+ add r2, r0, #DomainLocalModule__m_pDataBlob
+ ldrb r2, [r2, r1]
+ tst r2, #1
+ beq LOCAL_LABEL(CallCppHelper3)
+
+ ldr r0, [r0, #DomainLocalModule__m_pGCStatics]
+ bx lr
+
+LOCAL_LABEL(CallCppHelper3):
+ // Tail call Jit_GetSharedGCStaticBase_Helper
+ b C_FUNC(JIT_GetSharedGCStaticBase_Helper)
+ LEAF_END JIT_GetSharedGCStaticBase_SingleAppDomain, _TEXT
+
+
+// ------------------------------------------------------------------
+// void* JIT_GetSharedGCStaticBaseNoCtor(SIZE_T moduleDomainID, DWORD dwClassDomainID)
+
+ LEAF_ENTRY JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain, _TEXT
+
+ ldr r0, [r0, #DomainLocalModule__m_pGCStatics]
+ bx lr
+ LEAF_END JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain, _TEXT
+
+#endif
+
+// ------------------------------------------------------------------
+// __declspec(naked) void F_CALL_CONV JIT_Stelem_Ref(PtrArray* array, unsigned idx, Object* val)
+ LEAF_ENTRY JIT_Stelem_Ref, _TEXT
+
+ // We retain arguments as they were passed and use r0 == array, r1 == idx, r2 == val
+
+ // check for null array
+ cbz r0, LOCAL_LABEL(ThrowNullReferenceException)
+
+ // idx bounds check
+ ldr r3,[r0,#ArrayBase__m_NumComponents]
+ cmp r3,r1
+ bls LOCAL_LABEL(ThrowIndexOutOfRangeException)
+
+ // fast path to null assignment (doesn't need any write-barriers)
+ cbz r2, LOCAL_LABEL(AssigningNull)
+
+ // Verify the array-type and val-type matches before writing
+ ldr r12, [r0] // r12 = array MT
+ ldr r3, [r2] // r3 = val->GetMethodTable()
+ ldr r12, [r12, #MethodTable__m_ElementType] // array->GetArrayElementTypeHandle()
+ cmp r3, r12
+ beq C_FUNC(JIT_Stelem_DoWrite)
+
+ // Types didn't match, but allow writing into an array of objects
+ ldr r3, =g_pObjectClass
+ ldr r3, [r3] // r3 = *g_pObjectClass
+ cmp r3, r12 // array type matches with Object*
+ beq C_FUNC(JIT_Stelem_DoWrite)
+
+ // array type and val type do not exactly match. Raise frame and do detailed match
+ b C_FUNC(JIT_Stelem_Ref_NotExactMatch)
+
+LOCAL_LABEL(AssigningNull):
+ // Assigning null doesn't need write barrier
+ adds r0, r1, LSL #2 // r0 = r0 + (r1 x 4) = array->m_array[idx]
+ str r2, [r0, #PtrArray__m_Array] // array->m_array[idx] = val
+ bx lr
+
+LOCAL_LABEL(ThrowNullReferenceException):
+ // Tail call JIT_InternalThrow(NullReferenceException)
+ ldr r0, =CORINFO_NullReferenceException_ASM
+ b C_FUNC(JIT_InternalThrow)
+
+LOCAL_LABEL(ThrowIndexOutOfRangeException):
+ // Tail call JIT_InternalThrow(IndexOutOfRangeException)
+ ldr r0, =CORINFO_IndexOutOfRangeException_ASM
+ b C_FUNC(JIT_InternalThrow)
+
+ LEAF_END JIT_Stelem_Ref, _TEXT
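Editor's note: the fast-path checks above map one-to-one onto this C++ sketch. Object and array layouts are simplified, the write barrier is reduced to a plain store, and g_pObjectClass is passed in as a parameter rather than read from a global:

    #include <cstdint>

    struct MethodTableSketch { MethodTableSketch* elementType; };
    struct ObjectSketch      { MethodTableSketch* mt; };
    struct PtrArraySketch
    {
        MethodTableSketch* mt;
        uint32_t           numComponents;   // ArrayBase__m_NumComponents
        ObjectSketch*      elements[1];     // PtrArray__m_Array
    };

    enum StelemResult { Ok, NullArray, OutOfRange, NeedFullCheck };

    StelemResult StelemRefFastPath(PtrArraySketch* arr, uint32_t idx,
                                   ObjectSketch* val,
                                   MethodTableSketch* objectClass)
    {
        if (!arr)                      return NullArray;       // cbz r0
        if (idx >= arr->numComponents) return OutOfRange;      // cmp r3,r1 / bls
        if (!val) { arr->elements[idx] = nullptr; return Ok; } // null store: no barrier
        MethodTableSketch* elemType = arr->mt->elementType;
        if (val->mt == elemType || elemType == objectClass)
        {
            arr->elements[idx] = val;  // the real code funnels through JIT_WriteBarrier
            return Ok;
        }
        return NeedFullCheck;          // JIT_Stelem_Ref_NotExactMatch path
    }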
+
+// ------------------------------------------------------------------
+// __declspec(naked) void F_CALL_CONV JIT_Stelem_Ref_NotExactMatch(PtrArray* array,
+// unsigned idx, Object* val)
+// r12 = array->GetArrayElementTypeHandle()
+//
+ NESTED_ENTRY JIT_Stelem_Ref_NotExactMatch, _TEXT, NoHandler
+ push {lr}
+ push {r0-r2}
+
+ CHECK_STACK_ALIGNMENT
+
+ // allow the store if val can be cast to the array element type
+ // call ObjIsInstanceOfNoGC(val, array->GetArrayElementTypeHandle())
+ mov r1, r12 // array->GetArrayElementTypeHandle()
+ mov r0, r2
+ bl C_FUNC(ObjIsInstanceOfNoGC)
+ cmp r0, TypeHandle_CanCast
+ beq LOCAL_LABEL(DoWrite) // ObjIsInstance returned TypeHandle::CanCast
+
+ // check via raising frame
+LOCAL_LABEL(NeedFrame):
+ mov r1, sp // r1 = &array
+ adds r0, sp, #8 // r0 = &val
+ bl C_FUNC(ArrayStoreCheck) // ArrayStoreCheck(&val, &array)
+
+LOCAL_LABEL(DoWrite):
+ pop {r0-r2}
+ pop {lr}
+ b C_FUNC(JIT_Stelem_DoWrite)
+
+ NESTED_END JIT_Stelem_Ref_NotExactMatch, _TEXT
+
+// ------------------------------------------------------------------
+// __declspec(naked) void F_CALL_CONV JIT_Stelem_DoWrite(PtrArray* array, unsigned idx, Object* val)
+ LEAF_ENTRY JIT_Stelem_DoWrite, _TEXT
+
+ // Set up args for JIT_WriteBarrier: r0 = &array->m_array[idx], r1 = val
+ adds r0, #PtrArray__m_Array // r0 = &array->m_array
+ adds r0, r1, LSL #2
+ mov r1, r2 // r1 = val
+
+ // Branch to the write barrier (which is already correctly overwritten with
+ // single or multi-proc code based on the current CPU)
+ b C_FUNC(JIT_WriteBarrier)
+
+ LEAF_END JIT_Stelem_DoWrite, _TEXT
+
+#define __wbScratch r3
+#define pShadow r7
+
+ .macro START_WRITE_BARRIER name
+ __\name\()__g_lowest_address_offset = 0xffff
+ __\name\()__g_highest_address_offset = 0xffff
+ __\name\()__g_ephemeral_low_offset = 0xffff
+ __\name\()__g_ephemeral_high_offset = 0xffff
+ __\name\()__g_card_table_offset = 0xffff
+ .endm
+
+ .macro LOAD_GC_GLOBAL name, regName, globalName
+\name\()__\globalName\()_offset:
+ __\name\()__\globalName\()_offset = (\name\()__\globalName\()_offset - \name)
+ movw \regName, #0
+ movt \regName, #0
+ .endm
+
+ .macro UPDATE_GC_SHADOW name, ptrReg, valReg
+ // TODO: implement (debugging helper)
+ .endm
+
+ .macro UPDATE_CARD_TABLE name, ptrReg, valReg, mp, postGrow, tmpReg
+
+ LOAD_GC_GLOBAL \name, __wbScratch, g_ephemeral_low
+ cmp \valReg, __wbScratch
+ blo 0f
+
+ .if(\postGrow)
+ LOAD_GC_GLOBAL \name, __wbScratch, g_ephemeral_high
+ cmp \valReg, __wbScratch
+ bhs 0f
+ .endif
+
+ LOAD_GC_GLOBAL \name, __wbScratch, g_card_table
+ add __wbScratch, __wbScratch, \ptrReg, lsr #10
+
+ .if(\mp)
+ ldrb \tmpReg, [__wbScratch]
+ cmp \tmpReg, #0xff
+ itt ne
+ movne \tmpReg, 0xff
+ strbne \tmpReg, [__wbScratch]
+ .else
+ mov \tmpReg, #0xff
+ strb \tmpReg, [__wbScratch]
+ .endif
+
+0:
+ .endm
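Editor's note: UPDATE_CARD_TABLE is the classic generational filter: only a store of an ephemeral (young) object dirties a card, and each card byte covers 1 KB of heap, hence the lsr #10. In C++ terms (a sketch; the asm's card-table base is pre-biased so the shifted address indexes it directly):

    #include <cstdint>

    // Mark the card covering dst if ref points into the ephemeral range.
    // checkHigh corresponds to the post-grow variants; mp corresponds to the
    // read-before-write that avoids dirtying shared cache lines on MP systems.
    void UpdateCardTable(uint8_t* cardTable,
                         uintptr_t dst, uintptr_t ref,
                         uintptr_t ephemeralLow, uintptr_t ephemeralHigh,
                         bool mp, bool checkHigh)
    {
        if (ref < ephemeralLow)                return;
        if (checkHigh && ref >= ephemeralHigh) return;
        uint8_t* card = cardTable + (dst >> 10);
        if (!mp || *card != 0xff)
            *card = 0xff;
    }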
+
+ .macro CHECK_GC_HEAP_RANGE name, ptrReg, label
+ LOAD_GC_GLOBAL \name, __wbScratch, g_lowest_address
+ cmp \ptrReg, __wbScratch
+ blo \label
+ LOAD_GC_GLOBAL \name, __wbScratch, g_highest_address
+ cmp \ptrReg, __wbScratch
+ bhs \label
+ .endm
+
+ .macro JIT_WRITEBARRIER name, mp, post
+ LEAF_ENTRY \name, _TEXT
+ START_WRITE_BARRIER \name
+ .if(\mp)
+ dmb
+ .endif
+
+ str r1, [r0]
+ UPDATE_GC_SHADOW \name, r0, r1
+ UPDATE_CARD_TABLE \name, r0, r1, \mp, \post, r0
+ bx lr
+ LEAF_END \name, _TEXT
+ .endm
+
+ .macro JIT_CHECKEDWRITEBARRIER_SP name, post
+ LEAF_ENTRY \name, _TEXT
+ START_WRITE_BARRIER \name
+ str r1, [r0]
+ CHECK_GC_HEAP_RANGE \name, r0, 1f
+ UPDATE_GC_SHADOW \name, r0, r1
+ UPDATE_CARD_TABLE \name, r0, r1, 0, \post, r0
+1:
+ bx lr
+ LEAF_END \name, _TEXT
+ .endm
+
+ .macro JIT_CHECKEDWRITEBARRIER_MP name, post
+ LEAF_ENTRY \name, _TEXT
+ START_WRITE_BARRIER \name
+ dmb
+ str r1, [r0]
+ CHECK_GC_HEAP_RANGE \name, r0, 1f
+ UPDATE_GC_SHADOW \name, r0, r1
+ UPDATE_CARD_TABLE \name, r0, r1, 1, \post, r0
+ bx lr
+1:
+ str r1, [r0]
+ bx lr
+ LEAF_END \name, _TEXT
+ .endm
+
+ .macro JIT_BYREFWRITEBARRIER name, mp, post
+ LEAF_ENTRY \name, _TEXT
+ START_WRITE_BARRIER \name
+ .if(\mp)
+ dmb
+ .endif
+
+ ldr r2, [r1]
+ str r2, [r0]
+ CHECK_GC_HEAP_RANGE \name, r0, 1f
+ UPDATE_GC_SHADOW \name, r0, r2
+ UPDATE_CARD_TABLE \name, r0, r2, \mp, \post, r2
+1:
+ add r0, #4
+ add r1, #4
+ bx lr
+ LEAF_END \name, _TEXT
+ .endm
+
+ .macro JIT_WRITEBARRIER_DESCRIPTOR name
+ .word \name
+ .word \name\()_End
+ .word __\name\()__g_lowest_address_offset
+ .word __\name\()__g_highest_address_offset
+ .word __\name\()__g_ephemeral_low_offset
+ .word __\name\()__g_ephemeral_high_offset
+ .word __\name\()__g_card_table_offset
+ .endm
+
+ // There are 4 versions of each write barrier: a 2x2 combination of multi-proc/single-proc and pre-/post-grow versions
+ JIT_WRITEBARRIER JIT_WriteBarrier_SP_Pre, 0, 0
+ JIT_WRITEBARRIER JIT_WriteBarrier_SP_Post, 0, 1
+ JIT_WRITEBARRIER JIT_WriteBarrier_MP_Pre, 1, 0
+ JIT_WRITEBARRIER JIT_WriteBarrier_MP_Post, 1, 1
+
+ JIT_CHECKEDWRITEBARRIER_SP JIT_CheckedWriteBarrier_SP_Pre, 0
+ JIT_CHECKEDWRITEBARRIER_SP JIT_CheckedWriteBarrier_SP_Post, 1
+ JIT_CHECKEDWRITEBARRIER_MP JIT_CheckedWriteBarrier_MP_Pre, 0
+ JIT_CHECKEDWRITEBARRIER_MP JIT_CheckedWriteBarrier_MP_Post, 1
+
+ JIT_BYREFWRITEBARRIER JIT_ByRefWriteBarrier_SP_Pre, 0, 0
+ JIT_BYREFWRITEBARRIER JIT_ByRefWriteBarrier_SP_Post, 0, 1
+ JIT_BYREFWRITEBARRIER JIT_ByRefWriteBarrier_MP_Pre, 1, 0
+ JIT_BYREFWRITEBARRIER JIT_ByRefWriteBarrier_MP_Post, 1, 1
+
+// .section .clrwb, "d"
+g_rgWriteBarrierDescriptors:
+
+ JIT_WRITEBARRIER_DESCRIPTOR JIT_WriteBarrier_SP_Pre
+ JIT_WRITEBARRIER_DESCRIPTOR JIT_WriteBarrier_SP_Post
+ JIT_WRITEBARRIER_DESCRIPTOR JIT_WriteBarrier_MP_Pre
+ JIT_WRITEBARRIER_DESCRIPTOR JIT_WriteBarrier_MP_Post
+
+ JIT_WRITEBARRIER_DESCRIPTOR JIT_CheckedWriteBarrier_SP_Pre
+ JIT_WRITEBARRIER_DESCRIPTOR JIT_CheckedWriteBarrier_SP_Post
+ JIT_WRITEBARRIER_DESCRIPTOR JIT_CheckedWriteBarrier_MP_Pre
+ JIT_WRITEBARRIER_DESCRIPTOR JIT_CheckedWriteBarrier_MP_Post
+
+ JIT_WRITEBARRIER_DESCRIPTOR JIT_ByRefWriteBarrier_SP_Pre
+ JIT_WRITEBARRIER_DESCRIPTOR JIT_ByRefWriteBarrier_SP_Post
+ JIT_WRITEBARRIER_DESCRIPTOR JIT_ByRefWriteBarrier_MP_Pre
+ JIT_WRITEBARRIER_DESCRIPTOR JIT_ByRefWriteBarrier_MP_Post
+
+ // Sentinel value
+ .word 0
+
+// .text
+
+ .global g_rgWriteBarrierDescriptors
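Editor's note: each descriptor gives the runtime a barrier's body extent plus the offsets of its five patchable movw/movt literal loads, so the GC globals can be re-patched when they move. The .word sequence corresponds to a struct of roughly this shape (illustrative C++; on 32-bit ARM each field is one .word, and a zero in the first field terminates the table):

    #include <cstdint>

    // Layout mirrored from the .word sequence emitted by
    // JIT_WRITEBARRIER_DESCRIPTOR.
    struct WriteBarrierDescriptorSketch
    {
        void*    start;                 // barrier entry point
        void*    end;                   // one past the barrier body
        uint32_t lowestAddressOffset;   // offsets of the movw/movt pairs
        uint32_t highestAddressOffset;  //   that load each GC global
        uint32_t ephemeralLowOffset;
        uint32_t ephemeralHighOffset;
        uint32_t cardTableOffset;
    };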
+
+#ifdef FEATURE_READYTORUN
+
+ NESTED_ENTRY DelayLoad_MethodCall_FakeProlog, _TEXT, NoHandler
+
+ // Match what the lazy thunk has pushed. The actual method arguments will be spilled later.
+ push {r1-r3}
+
+ // This is where execution really starts.
+DelayLoad_MethodCall:
+ .global DelayLoad_MethodCall
+
+ push {r0}
+
+ PROLOG_WITH_TRANSITION_BLOCK 0x0, 1, DoNotPushArgRegs
+
+ // Load the helper arguments
+ ldr r5, [sp,#(__PWTB_TransitionBlock+10*4)] // pModule
+ ldr r6, [sp,#(__PWTB_TransitionBlock+11*4)] // sectionIndex
+ ldr r7, [sp,#(__PWTB_TransitionBlock+12*4)] // indirection
+
+ // Spill the actual method arguments
+ str r1, [sp,#(__PWTB_TransitionBlock+10*4)]
+ str r2, [sp,#(__PWTB_TransitionBlock+11*4)]
+ str r3, [sp,#(__PWTB_TransitionBlock+12*4)]
+
+ add r0, sp, #__PWTB_TransitionBlock // pTransitionBlock
+
+ mov r1, r7 // pIndirection
+ mov r2, r6 // sectionIndex
+ mov r3, r5 // pModule
+
+ bl C_FUNC(ExternalMethodFixupWorker)
+
+ // mov the address we patched to in R12 so that we can tail call to it
+ mov r12, r0
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+
+ // Share the patch label
+ b C_FUNC(ExternalMethodFixupPatchLabel)
+
+ NESTED_END DelayLoad_MethodCall_FakeProlog, _TEXT
+
+
+ .macro DynamicHelper frameFlags, suffix
+
+__FakePrologName="DelayLoad_Helper\suffix\()_FakeProlog"
+
+ NESTED_ENTRY DelayLoad_Helper\suffix\()_FakeProlog, _TEXT, NoHandler
+
+ // Match what the lazy thunk has pushed. The actual method arguments will be spilled later.
+ push {r1-r3}
+
+ // This is where execution really starts.
+DelayLoad_Helper\suffix:
+ .global DelayLoad_Helper\suffix
+
+ push {r0}
+
+ PROLOG_WITH_TRANSITION_BLOCK 0x4, 1, DoNotPushArgRegs
+
+ // Load the helper arguments
+ ldr r5, [sp,#(__PWTB_TransitionBlock+10*4)] // pModule
+ ldr r6, [sp,#(__PWTB_TransitionBlock+11*4)] // sectionIndex
+ ldr r7, [sp,#(__PWTB_TransitionBlock+12*4)] // indirection
+
+ // Spill the actual method arguments
+ str r1, [sp,#(__PWTB_TransitionBlock+10*4)]
+ str r2, [sp,#(__PWTB_TransitionBlock+11*4)]
+ str r3, [sp,#(__PWTB_TransitionBlock+12*4)]
+
+ add r0, sp, #__PWTB_TransitionBlock // pTransitionBlock
+
+ mov r1, r7 // pIndirection
+ mov r2, r6 // sectionIndex
+ mov r3, r5 // pModule
+
+ mov r4, \frameFlags
+ str r4, [sp,#0]
+
+ bl C_FUNC(DynamicHelperWorker)
+
+ cbnz r0, 0f
+ ldr r0, [sp,#(__PWTB_TransitionBlock+9*4)] // The result is stored in the argument area of the transition block
+
+ EPILOG_WITH_TRANSITION_BLOCK_RETURN
+
+0:
+ mov r12, r0
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+ bx r12
+
+ NESTED_END DelayLoad_Helper\suffix\()_FakeProlog, _TEXT
+
+ .endm
+
+ DynamicHelper DynamicHelperFrameFlags_Default
+ DynamicHelper DynamicHelperFrameFlags_ObjectArg, _Obj
+ DynamicHelper DynamicHelperFrameFlags_ObjectArg | DynamicHelperFrameFlags_ObjectArg2, _ObjObj
+
+#endif // FEATURE_READYTORUN
+
+LEAF_ENTRY StartUnwindingNativeFrames, _TEXT
+ // TODO: Implement
+ bx lr
+LEAF_END StartUnwindingNativeFrames, _TEXT
diff --git a/src/vm/arm/cgencpu.h b/src/vm/arm/cgencpu.h
index 64619fd19f..362d86ebad 100644
--- a/src/vm/arm/cgencpu.h
+++ b/src/vm/arm/cgencpu.h
@@ -747,7 +747,7 @@ public:
else
{
_ASSERTE(reg1 != ThumbReg(15) && reg2 != ThumbReg(15));
- Emit16((WORD)(0x4500 | reg2 << 3 | reg1 & 0x7 | (reg1 & 0x8 ? 0x80 : 0x0)));
+ Emit16((WORD)(0x4500 | reg2 << 3 | (reg1 & 0x7) | (reg1 & 0x8 ? 0x80 : 0x0)));
}
}
@@ -931,7 +931,7 @@ inline BOOL IsUnmanagedValueTypeReturnedByRef(UINT sizeofvaluetype)
return (sizeofvaluetype > 4);
}
-DECLSPEC_ALIGN(4) struct UMEntryThunkCode
+struct DECLSPEC_ALIGN(4) UMEntryThunkCode
{
WORD m_code[4];
@@ -1002,6 +1002,7 @@ inline BOOL ClrFlushInstructionCache(LPCVOID pCodeAddr, size_t sizeOfCode)
#endif
}
+#ifndef FEATURE_IMPLICIT_TLS
//
// JIT HELPER ALIASING FOR PORTABILITY.
//
@@ -1013,7 +1014,11 @@ inline BOOL ClrFlushInstructionCache(LPCVOID pCodeAddr, size_t sizeOfCode)
#define JIT_GetSharedGCStaticBaseNoCtor JIT_GetSharedGCStaticBaseNoCtor_InlineGetAppDomain
#define JIT_GetSharedNonGCStaticBaseNoCtor JIT_GetSharedNonGCStaticBaseNoCtor_InlineGetAppDomain
+#endif
+
+#ifndef FEATURE_PAL
#define JIT_Stelem_Ref JIT_Stelem_Ref
+#endif
//------------------------------------------------------------------------
//
@@ -1329,6 +1334,6 @@ inline size_t GetARMInstructionLength(PBYTE pInstr)
return GetARMInstructionLength(*(WORD*)pInstr);
}
-EXTERN_C void FCallMemcpy(byte* dest, byte* src, int len);
+EXTERN_C void FCallMemcpy(BYTE* dest, BYTE* src, int len);
#endif // __cgencpu_h__
diff --git a/src/vm/arm/crthelpers.S b/src/vm/arm/crthelpers.S
new file mode 100644
index 0000000000..7ccf03a0ce
--- /dev/null
+++ b/src/vm/arm/crthelpers.S
@@ -0,0 +1,60 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+// ==++==
+//
+
+//
+// ==--==
+// ***********************************************************************
+// File: crthelpers.S
+//
+// ***********************************************************************
+
+#include "unixasmmacros.inc"
+#include "asmconstants.h"
+
+.syntax unified
+.thumb
+
+// JIT_MemSet/JIT_MemCpy
+//
+// It is IMPORTANT that the exception handling code is able to find these guys
+// on the stack, but to keep them from being tailcalled by VC++ we need to turn
+// off optimization and it ends up being a wasteful implementation.
+//
+// Hence these assembly helpers.
+//
+//EXTERN_C void __stdcall JIT_MemSet(void* _dest, int c, size_t count)
+LEAF_ENTRY JIT_MemSet, _TEXT
+
+ cmp r2, #0
+ it eq
+ bxeq lr
+
+ ldr r3, [r0]
+
+ b C_PLTFUNC(memset)
+
+LEAF_END_MARKED JIT_MemSet, _TEXT
+
+
+//EXTERN_C void __stdcall JIT_MemCpy(void* _dest, const void *_src, size_t count)
+LEAF_ENTRY JIT_MemCpy, _TEXT
+//
+// It only requires 4 byte alignment
+// and doesn't return a value
+
+ cmp r2, #0
+ it eq
+ bxeq lr
+
+ ldr r3, [r0]
+ ldr r3, [r1]
+
+ b C_PLTFUNC(memcpy)
+
+LEAF_END_MARKED JIT_MemCpy, _TEXT
+
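Editor's note: the seemingly dead ldr r3, [r0] / ldr r3, [r1] loads are the point of these helpers: they probe the buffers up front so a bad pointer faults inside the named JIT helper, where the exception handling code expects to find it, before tail-calling the libc routine. The intent, as a C++ sketch:

    #include <cstddef>
    #include <cstring>

    // Probe the buffers first so any access violation is raised here (a frame
    // the runtime's exception handling can identify) rather than inside memcpy.
    void* JitMemCpySketch(void* dst, const void* src, size_t count)
    {
        if (count == 0) return dst;
        (void)*(volatile const char*)dst;   // ldr r3, [r0]
        (void)*(volatile const char*)src;   // ldr r3, [r1]
        return memcpy(dst, src, count);
    }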
diff --git a/src/vm/arm/ehhelpers.S b/src/vm/arm/ehhelpers.S
new file mode 100644
index 0000000000..aaa464e243
--- /dev/null
+++ b/src/vm/arm/ehhelpers.S
@@ -0,0 +1,146 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+// ==++==
+//
+
+//
+// ==--==
+
+#include "unixasmmacros.inc"
+#include "asmconstants.h"
+
+.syntax unified
+.thumb
+
+//
+// WARNING!! These functions immediately ruin thread unwindability. This is
+// WARNING!! OK as long as there is a mechanism for saving the thread context
+// WARNING!! prior to running these functions as well as a mechanism for
+// WARNING!! restoring the context prior to any stackwalk. This means that
+// WARNING!! we need to ensure that no GC can occur while the stack is
+// WARNING!! unwalkable. This further means that we cannot allow any exception
+// WARNING!! to occur when the stack is unwalkable
+//
+
+ // GSCookie + alignment padding
+OFFSET_OF_FRAME=(4 + SIZEOF__GSCookie)
+
+ .macro GenerateRedirectedStubWithFrame STUB, TARGET
+
+ //
+ // This is the primary function to which execution will be redirected to.
+ //
+ NESTED_ENTRY \STUB, _TEXT, NoHandler
+
+ //
+ // IN: lr: original IP before redirect
+ //
+
+ push {r4,r7,lr}
+ alloc_stack OFFSET_OF_FRAME + SIZEOF__FaultingExceptionFrame
+
+ // At this point, the stack may be misaligned if the thread abort was asynchronously
+ // triggered in the prolog or epilog of the managed method. For such a case, we must
+ // align the stack before calling into the VM.
+ //
+ // Runtime check for 8-byte alignment.
+ mov r7, sp
+ and r0, r7, #4
+ sub sp, sp, r0
+
+ // Save pointer to FEF for GetFrameFromRedirectedStubStackFrame
+ add r4, sp, #OFFSET_OF_FRAME
+
+ // Prepare to initialize to NULL
+ mov r1,#0
+ str r1, [r4] // Initialize vtbl (it is not strictly necessary)
+ str r1, [r4, #FaultingExceptionFrame__m_fFilterExecuted] // Initialize BOOL for personality routine
+
+ mov r0, r4 // move the ptr to FEF in R0
+
+ // stack must be 8 byte aligned
+ CHECK_STACK_ALIGNMENT
+
+ bl C_FUNC(\TARGET)
+
+ // Target should not return.
+ EMIT_BREAKPOINT
+
+ NESTED_END \STUB, _TEXT
+
+ .endm
+
+// ------------------------------------------------------------------
+//
+// Helpers for async (NullRef, AccessViolation) exceptions
+//
+
+ NESTED_ENTRY NakedThrowHelper2,_TEXT,FixContextHandler
+ push {r0, lr}
+
+ // On entry:
+ //
+ // R0 = Address of FaultingExceptionFrame
+ bl C_FUNC(LinkFrameAndThrow)
+
+ // Target should not return.
+ EMIT_BREAKPOINT
+
+ NESTED_END NakedThrowHelper2, _TEXT
+
+
+ GenerateRedirectedStubWithFrame NakedThrowHelper, NakedThrowHelper2
+
+// ------------------------------------------------------------------
+
+ // This helper enables us to call into a funclet after applying the non-volatiles
+ NESTED_ENTRY CallEHFunclet, _TEXT, NoHandler
+
+ push {r4-r11, lr}
+ alloc_stack 4
+
+ // On entry:
+ //
+ // R0 = throwable
+ // R1 = PC to invoke
+ // R2 = address of R4 register in CONTEXT record, used to restore the non-volatile registers of CrawlFrame
+ // R3 = address of the location where the SP of funclet's caller (i.e. this helper) should be saved.
+ //
+ // Save the SP of this function
+ str sp, [r3]
+ // apply the non-volatiles corresponding to the CrawlFrame
+ ldm r2, {r4-r11}
+ // Invoke the funclet
+ blx r1
+
+ free_stack 4
+ pop {r4-r11, pc}
+
+ NESTED_END CallEHFunclet, _TEXT
+
+ // This helper enables us to call into a filter funclet by passing it the CallerSP to lookup the
+ // frame pointer for accessing the locals in the parent method.
+ NESTED_ENTRY CallEHFilterFunclet, _TEXT, NoHandler
+
+ push {lr}
+ alloc_stack 4
+
+ // On entry:
+ //
+ // R0 = throwable
+ // R1 = SP of the caller of the method/funclet containing the filter
+ // R2 = PC to invoke
+ // R3 = address of the location where the SP of funclet's caller (i.e. this helper) should be saved.
+ //
+ // Save the SP of this function
+ str sp, [r3]
+ // Invoke the filter funclet
+ blx r2
+
+ free_stack 4
+ pop {pc}
+
+ NESTED_END CallEHFilterFunclet, _TEXT
diff --git a/src/vm/arm/gmscpu.h b/src/vm/arm/gmscpu.h
index 583b00acf0..bc41a008a0 100644
--- a/src/vm/arm/gmscpu.h
+++ b/src/vm/arm/gmscpu.h
@@ -69,7 +69,6 @@ protected:
until later. Note that we don't reuse slots, because we want
this to be threadsafe without locks */
-typedef DPTR(LazyMachState) PTR_LazyMachState;
struct LazyMachState : public MachState {
// compute the machine state of the processor as it will exist just
// after the return after at most'funCallDepth' number of functions.
@@ -158,6 +157,7 @@ inline void LazyMachState::setLazyStateFromUnwind(MachState* copy)
#endif // !DACCESS_COMPILE
}
+typedef DPTR(LazyMachState) PTR_LazyMachState;
// Do the initial capture of the machine state. This is meant to be
// as light weight as possible, as we may never need the state that
diff --git a/src/vm/arm/memcpy.S b/src/vm/arm/memcpy.S
new file mode 100644
index 0000000000..f5b2341c75
--- /dev/null
+++ b/src/vm/arm/memcpy.S
@@ -0,0 +1,37 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+#include "unixasmmacros.inc"
+#include "asmconstants.h"
+
+.syntax unified
+.thumb
+
+//
+// void *memcpy(void *dst, const void *src, size_t length)
+//
+// Copy a block of memory in a forward direction.
+//
+
+ LEAF_ENTRY FCallMemcpy, _TEXT
+ cmp r2, #0
+
+ beq LOCAL_LABEL(GC_POLL)
+
+ ldr r3, [r0]
+ ldr r3, [r1]
+
+ push {lr}
+ blx C_FUNC(memcpy)
+ pop {lr}
+
+LOCAL_LABEL(GC_POLL):
+ ldr r0, =g_TrapReturningThreads
+ ldr r0, [r0]
+ cmp r0, #0
+ it ne
+ bne C_FUNC(FCallMemCpy_GCPoll)
+ bx lr
+ LEAF_END FCallMemcpy, _TEXT
diff --git a/src/vm/arm/patchedcode.S b/src/vm/arm/patchedcode.S
new file mode 100644
index 0000000000..97b202ccbd
--- /dev/null
+++ b/src/vm/arm/patchedcode.S
@@ -0,0 +1,72 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+// ==++==
+//
+
+//
+// ==--==
+#include "unixasmmacros.inc"
+#include "asmconstants.h"
+
+.syntax unified
+.thumb
+
+// ------------------------------------------------------------------
+// Start of the writeable code region
+ LEAF_ENTRY JIT_PatchedCodeStart, _TEXT
+ bx lr
+ LEAF_END JIT_PatchedCodeStart, _TEXT
+
+// ------------------------------------------------------------------
+// Optimized TLS getters
+
+ LEAF_ENTRY GetTLSDummy, _TEXT
+ mov r0, #0
+ bx lr
+ LEAF_END GetTLSDummy, _TEXT
+
+ .align 4
+ LEAF_ENTRY ClrFlsGetBlock, _TEXT
+ // This will be overwritten at runtime with optimized ClrFlsGetBlock implementation
+ b C_FUNC(GetTLSDummy)
+ // Just allocate space that will be filled in at runtime
+ .space (TLS_GETTER_MAX_SIZE_ASM - 2)
+ LEAF_END ClrFlsGetBlock, _TEXT
+
+// ------------------------------------------------------------------
+// GC write barrier support.
+//
+// GC Write barriers are defined in asmhelpers.asm. The following functions are used to define
+// patchable locations where the write barriers are copied over at runtime
+
+ LEAF_ENTRY JIT_PatchedWriteBarrierStart, _TEXT
+ LEAF_END JIT_PatchedWriteBarrierStart, _TEXT
+
+ // These write barriers are overwritten on the fly
+ // See ValidateWriteBarriers on how the sizes of these should be calculated
+ .align 4
+ LEAF_ENTRY JIT_WriteBarrier, _TEXT
+ .space (0x84)
+ LEAF_END_MARKED JIT_WriteBarrier, _TEXT
+
+ .align 4
+ LEAF_ENTRY JIT_CheckedWriteBarrier, _TEXT
+ .space (0x9C)
+ LEAF_END_MARKED JIT_CheckedWriteBarrier, _TEXT
+
+ .align 4
+ LEAF_ENTRY JIT_ByRefWriteBarrier, _TEXT
+ .space (0xA0)
+ LEAF_END_MARKED JIT_ByRefWriteBarrier , _TEXT
+
+ LEAF_ENTRY JIT_PatchedWriteBarrierLast, _TEXT
+ LEAF_END JIT_PatchedWriteBarrierLast, _TEXT
+
+// ------------------------------------------------------------------
+// End of the writeable code region
+ LEAF_ENTRY JIT_PatchedCodeLast, _TEXT
+ bx lr
+ LEAF_END JIT_PatchedCodeLast, _TEXT
diff --git a/src/vm/arm/stubs.cpp b/src/vm/arm/stubs.cpp
index ba7d0a977f..91a3d7097d 100644
--- a/src/vm/arm/stubs.cpp
+++ b/src/vm/arm/stubs.cpp
@@ -367,14 +367,14 @@ void ValidateWriteBarriers()
#endif // _DEBUG
#define UPDATE_WB(_proc,_grow) \
- CopyWriteBarrier((PCODE)JIT_WriteBarrier, (PCODE)JIT_WriteBarrier_##_proc##_##_grow##, (PCODE)JIT_WriteBarrier_##_proc##_##_grow##_End); \
- wbMapping[WriteBarrierIndex].from = (PBYTE)JIT_WriteBarrier_##_proc##_##_grow##; \
+ CopyWriteBarrier((PCODE)JIT_WriteBarrier, (PCODE)JIT_WriteBarrier_ ## _proc ## _ ## _grow , (PCODE)JIT_WriteBarrier_ ## _proc ## _ ## _grow ## _End); \
+ wbMapping[WriteBarrierIndex].from = (PBYTE)JIT_WriteBarrier_ ## _proc ## _ ## _grow ; \
\
- CopyWriteBarrier((PCODE)JIT_CheckedWriteBarrier, (PCODE)JIT_CheckedWriteBarrier_##_proc##_##_grow##, (PCODE)JIT_CheckedWriteBarrier_##_proc##_##_grow##_End); \
- wbMapping[CheckedWriteBarrierIndex].from = (PBYTE)JIT_CheckedWriteBarrier_##_proc##_##_grow##; \
+ CopyWriteBarrier((PCODE)JIT_CheckedWriteBarrier, (PCODE)JIT_CheckedWriteBarrier_ ## _proc ## _ ## _grow , (PCODE)JIT_CheckedWriteBarrier_ ## _proc ## _ ## _grow ## _End); \
+ wbMapping[CheckedWriteBarrierIndex].from = (PBYTE)JIT_CheckedWriteBarrier_ ## _proc ## _ ## _grow ; \
\
- CopyWriteBarrier((PCODE)JIT_ByRefWriteBarrier, (PCODE)JIT_ByRefWriteBarrier_##_proc##_##_grow##, (PCODE)JIT_ByRefWriteBarrier_##_proc##_##_grow##_End); \
- wbMapping[ByRefWriteBarrierIndex].from = (PBYTE)JIT_ByRefWriteBarrier_##_proc##_##_grow##; \
+ CopyWriteBarrier((PCODE)JIT_ByRefWriteBarrier, (PCODE)JIT_ByRefWriteBarrier_ ## _proc ## _ ## _grow , (PCODE)JIT_ByRefWriteBarrier_ ## _proc ## _ ## _grow ## _End); \
+ wbMapping[ByRefWriteBarrierIndex].from = (PBYTE)JIT_ByRefWriteBarrier_ ## _proc ## _ ## _grow ; \
// Update the instructions in our various write barrier implementations that refer directly to the values
// of GC globals such as g_lowest_address and g_card_table. We don't particularly care which values have
@@ -1379,7 +1379,12 @@ Stub *GenerateInitPInvokeFrameHelper()
ThumbReg regThread = ThumbReg(5);
ThumbReg regScratch = ThumbReg(6);
+#ifdef FEATURE_IMPLICIT_TLS
+ TLSACCESSMODE mode = TLSACCESS_GENERIC;
+#else
TLSACCESSMODE mode = GetTLSAccessMode(GetThreadTLSIndex());
+#endif
+
if (mode == TLSACCESS_GENERIC)
{
@@ -1453,6 +1458,7 @@ Stub *GenerateInitPInvokeFrameHelper()
void StubLinkerCPU::ThumbEmitGetThread(TLSACCESSMODE mode, ThumbReg dest)
{
+#ifndef FEATURE_IMPLICIT_TLS
DWORD idxThread = GetThreadTLSIndex();
if (mode != TLSACCESS_GENERIC)
@@ -1493,6 +1499,16 @@ void StubLinkerCPU::ThumbEmitGetThread(TLSACCESSMODE mode, ThumbReg dest)
ThumbEmitMovRegReg(dest, ThumbReg(0));
}
}
+#else
+ ThumbEmitMovConstant(ThumbReg(0), (TADDR)GetThread);
+
+ ThumbEmitCallRegister(ThumbReg(0));
+
+ if (dest != ThumbReg(0))
+ {
+ ThumbEmitMovRegReg(dest, ThumbReg(0));
+ }
+#endif
}
#endif // CROSSGEN_COMPILE
@@ -2219,7 +2235,7 @@ void UpdateRegDisplayFromCalleeSavedRegisters(REGDISPLAY * pRD, CalleeSavedRegis
pRD->pCurrentContextPointers->Lr = NULL;
}
-
+#ifndef CROSSGEN_COMPILE
void TransitionFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
{
pRD->IsCallerContextValid = FALSE;
@@ -2289,6 +2305,7 @@ void TailCallFrame::InitFromContext(T_CONTEXT * pContext)
}
#endif // !DACCESS_COMPILE
+#endif // !CROSSGEN_COMPILE
void FaultingExceptionFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
{
@@ -2516,6 +2533,7 @@ EXTERN_C void JIT_NewArr1OBJ_MP_InlineGetThread__PatchTLSOffset();
extern "C" void STDCALL JIT_PatchedCodeStart();
extern "C" void STDCALL JIT_PatchedCodeLast();
+#ifndef FEATURE_IMPLICIT_TLS
static const LPVOID InlineGetThreadLocations[] = {
(PVOID)JIT_TrialAllocSFastMP_InlineGetThread__PatchTLSOffset,
(PVOID)JIT_BoxFastMP_InlineGetThread__PatchTLSOffset,
@@ -2523,6 +2541,7 @@ static const LPVOID InlineGetThreadLocations[] = {
(PVOID)JIT_NewArr1VC_MP_InlineGetThread__PatchTLSOffset,
(PVOID)JIT_NewArr1OBJ_MP_InlineGetThread__PatchTLSOffset,
};
+#endif
//EXTERN_C Object* JIT_TrialAllocSFastMP(CORINFO_CLASS_HANDLE typeHnd_);
Object* JIT_TrialAllocSFastMP(CORINFO_CLASS_HANDLE typeHnd_);
@@ -2550,7 +2569,7 @@ static const LPVOID InlineGetAppDomainLocations[] = {
(PVOID)JIT_GetSharedGCStaticBaseNoCtor__PatchTLSLabel
};
-
+#ifndef FEATURE_IMPLICIT_TLS
void FixupInlineGetters(DWORD tlsSlot, const LPVOID * pLocations, int nLocations)
{
STANDARD_VM_CONTRACT;
@@ -2576,12 +2595,13 @@ void FixupInlineGetters(DWORD tlsSlot, const LPVOID * pLocations, int nLocations
*((WORD*)(pInlineGetter + 6)) |= (WORD)offset;
}
}
-
-
+#endif
void InitJITHelpers1()
{
STANDARD_VM_CONTRACT;
+
+#ifndef FEATURE_IMPLICIT_TLS
if (gThreadTLSIndex < TLS_MINIMUM_AVAILABLE)
{
@@ -2654,6 +2674,7 @@ void InitJITHelpers1()
SetJitHelperFunction(CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR, JIT_GetSharedGCStaticBaseNoCtor_Portable);
SetJitHelperFunction(CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR,JIT_GetSharedNonGCStaticBaseNoCtor_Portable);
}
+#endif
}
extern "C" Object *SetAppDomainInObject(Object *pObject)
@@ -2998,7 +3019,11 @@ void StubLinkerCPU::EmitStubLinkFrame(TADDR pFrameVptr, int offsetOfFrame, int o
// str r6, [r4 + #offsetof(MulticastFrame, m_Next)]
// str r4, [r5 + #offsetof(Thread, m_pFrame)]
+#ifdef FEATURE_IMPLICIT_TLS
+ TLSACCESSMODE mode = TLSACCESS_GENERIC;
+#else
TLSACCESSMODE mode = GetTLSAccessMode(GetThreadTLSIndex());
+#endif
ThumbEmitGetThread(mode, ThumbReg(5));
if (mode == TLSACCESS_GENERIC)
{
diff --git a/src/vm/arm/unixstubs.cpp b/src/vm/arm/unixstubs.cpp
new file mode 100644
index 0000000000..6369c8b12e
--- /dev/null
+++ b/src/vm/arm/unixstubs.cpp
@@ -0,0 +1,39 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+#include "common.h"
+
+extern "C"
+{
+ void RedirectForThrowControl()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void GenericPInvokeCalliHelper()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void PInvokeStubForHostInner(DWORD dwStackSize, LPVOID pStackFrame, LPVOID pTarget)
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void VarargPInvokeStub()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void VarargPInvokeStub_RetBuffArg()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void RedirectForThreadAbort()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+};
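The new file is nothing but PORTABILITY_ASSERT stubs: entry points the ARM code references but that have not been ported yet, kept as loud runtime failures instead of silent link errors. The pattern in a hedged standalone form, with assert/abort standing in for the real PORTABILITY_ASSERT macro, whose definition lives elsewhere in the tree:

    #include <cassert>
    #include <cstdlib>

    // Hypothetical analogue of one stub above: links successfully, but fails
    // fast with a recognizable message if the path is ever exercised.
    extern "C" void SomeUnportedHelperSketch()
    {
        assert(!"SomeUnportedHelperSketch: Implement for PAL");  // fires in debug builds
        abort();                                                 // hard stop in release builds
    }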
diff --git a/src/vm/clrvarargs.cpp b/src/vm/clrvarargs.cpp
index 8411f6be1b..581cef0146 100644
--- a/src/vm/clrvarargs.cpp
+++ b/src/vm/clrvarargs.cpp
@@ -33,10 +33,14 @@ void VARARGS::MarshalToManagedVaList(va_list va, VARARGS *dataout)
{
WRAPPER_NO_CONTRACT
+#ifndef PLATFORM_UNIX
_ASSERTE(dataout != NULL);
dataout->SigPtr = SigPointer(NULL, 0);
dataout->ArgCookie = NULL;
dataout->ArgPtr = (BYTE*)va;
+#else
+ PORTABILITY_ASSERT("Implement for Unix");
+#endif
}
////////////////////////////////////////////////////////////////////////////////
@@ -46,6 +50,7 @@ void
VARARGS::MarshalToUnmanagedVaList(
va_list va, DWORD cbVaListSize, const VARARGS * data)
{
+#ifndef PLATFORM_UNIX
BYTE * pdstbuffer = (BYTE *)va;
int remainingArgs = data->RemainingArgs;
@@ -112,4 +117,7 @@ VARARGS::MarshalToUnmanagedVaList(
COMPlusThrow(kNotSupportedException);
}
}
+#else
+ PORTABILITY_ASSERT("Implement for Unix");
+#endif
} // VARARGS::MarshalToUnmanagedVaList
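Both marshaling routines are stubbed out on Unix because the Windows implementation treats va_list as a raw byte cursor (dataout->ArgPtr = (BYTE*)va), which the C standard does not guarantee; on the ARM AAPCS, va_list is an opaque struct wrapping a pointer, so the cast is meaningless there. For contrast, a minimal sketch of the portable way to walk a va_list:

    #include <cstdarg>

    // Portable traversal: duplicate with va_copy and pull arguments with
    // va_arg, instead of treating the va_list as a flat byte buffer.
    static int SumIntsSketch(int count, va_list va)
    {
        va_list copy;
        va_copy(copy, va);
        int total = 0;
        for (int i = 0; i < count; i++)
            total += va_arg(copy, int);
        va_end(copy);
        return total;
    }

    extern "C" int SumIntsVarargSketch(int count, ...)
    {
        va_list va;
        va_start(va, count);
        int total = SumIntsSketch(count, va);
        va_end(va);
        return total;
    }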
diff --git a/src/vm/codeman.cpp b/src/vm/codeman.cpp
index 922dbcb7b8..e00dff78ee 100644
--- a/src/vm/codeman.cpp
+++ b/src/vm/codeman.cpp
@@ -5114,7 +5114,7 @@ BOOL NativeImageJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection,
}
//Get the function entry that corresponds to the real method desc.
- _ASSERTE(RelativePc >= RUNTIME_FUNCTION__BeginAddress(FunctionEntry));
+ _ASSERTE((RelativePc >= RUNTIME_FUNCTION__BeginAddress(FunctionEntry)));
if (pCodeInfo)
{
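The added parentheses look redundant but guard against a preprocessor hazard: when an assert-style macro forwards its argument to another macro, any top-level comma produced by expanding the argument splits it into two arguments. Since the RUNTIME_FUNCTION__* helpers are defined per architecture, the extra parentheses keep the whole expression a single argument however they expand; the same defensive pattern recurs in jitinterface.cpp and stublink.cpp below. A hypothetical illustration, not the CoreCLR definitions:

    #define CHECK_IMPL(cond) ((void)(cond))
    #define CHECK(cond)      CHECK_IMPL(cond)
    #define LOW_AND_MASK(x)  x, 0x3   // hypothetical expansion with a bare comma

    // CHECK(v >= LOW_AND_MASK(w));   // error: CHECK_IMPL receives two arguments
    // CHECK((v >= LOW_AND_MASK(w))); // OK: parentheses keep it one argument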
diff --git a/src/vm/crossgen/CMakeLists.txt b/src/vm/crossgen/CMakeLists.txt
index f21f149229..e3dd990695 100644
--- a/src/vm/crossgen/CMakeLists.txt
+++ b/src/vm/crossgen/CMakeLists.txt
@@ -98,13 +98,25 @@ set(VM_CROSSGEN_SOURCES
../dbggcinfodecoder.cpp
../gcinfodecoder.cpp
../sha1.cpp
- ../amd64/stublinkeramd64.cpp
../crossgencompile.cpp
)
include_directories(BEFORE ..)
include_directories(${CLR_DIR}/src/gc)
-include_directories(../amd64)
+
+if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
+ include_directories(../amd64)
+ list(APPEND VM_CROSSGEN_SOURCES
+ ../amd64/stublinkeramd64.cpp
+ )
+elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
+ include_directories(../arm)
+ list(APPEND VM_CROSSGEN_SOURCES
+ ../arm/stubs.cpp
+ )
+else()
+ message(FATAL_ERROR "Only ARM and AMD64 are supported")
+endif()
if (WIN32)
list(APPEND VM_CROSSGEN_SOURCES
diff --git a/src/vm/exceptionhandling.cpp b/src/vm/exceptionhandling.cpp
index c22b40c0d2..d2b13a7315 100644
--- a/src/vm/exceptionhandling.cpp
+++ b/src/vm/exceptionhandling.cpp
@@ -4388,7 +4388,7 @@ VOID UnwindManagedExceptionPass2(EXCEPTION_RECORD* exceptionRecord, CONTEXT* unw
CONTEXT contextStorage;
DISPATCHER_CONTEXT dispatcherContext;
EECodeInfo codeInfo;
- ULONG64 establisherFrame = NULL;
+ UINT_PTR establisherFrame = NULL;
PVOID handlerData;
ULONG64 stackHighAddress = (ULONG64)PAL_GetStackBase();
ULONG64 stackLowAddress = (ULONG64)PAL_GetStackLimit();
@@ -4404,7 +4404,7 @@ VOID UnwindManagedExceptionPass2(EXCEPTION_RECORD* exceptionRecord, CONTEXT* unw
do
{
- controlPc = currentFrameContext->Rip;
+ controlPc = GetIP(currentFrameContext);
codeInfo.Init(controlPc);
dispatcherContext.FunctionEntry = codeInfo.GetFunctionEntry();
@@ -4471,13 +4471,13 @@ VOID UnwindManagedExceptionPass2(EXCEPTION_RECORD* exceptionRecord, CONTEXT* unw
}
// Check whether we are crossing managed-to-native boundary
- if (!ExecutionManager::IsManagedCode(currentFrameContext->Rip))
+ if (!ExecutionManager::IsManagedCode(GetIP(currentFrameContext)))
{
// Return back to the UnwindManagedExceptionPass1 and let it unwind the native frames
return;
}
- } while (IsSpInStackLimits(currentFrameContext->Rsp, stackLowAddress, stackHighAddress) &&
+ } while (IsSpInStackLimits(GetSP(currentFrameContext), stackLowAddress, stackHighAddress) &&
(establisherFrame != targetFrameSp));
_ASSERTE(!"UnwindManagedExceptionPass2: Unwinding failed. Reached the end of the stack");
@@ -4505,7 +4505,7 @@ VOID DECLSPEC_NORETURN UnwindManagedExceptionPass1(PAL_SEHException& ex)
DISPATCHER_CONTEXT dispatcherContext;
EECodeInfo codeInfo;
UINT_PTR controlPc;
- ULONG64 establisherFrame = NULL;
+ UINT_PTR establisherFrame = NULL;
PVOID handlerData;
ULONG64 stackHighAddress = (ULONG64)PAL_GetStackBase();
ULONG64 stackLowAddress = (ULONG64)PAL_GetStackLimit();
@@ -4516,7 +4516,7 @@ VOID DECLSPEC_NORETURN UnwindManagedExceptionPass1(PAL_SEHException& ex)
unwindStartContext = frameContext;
- if (!ExecutionManager::IsManagedCode(ex.ContextRecord.Rip))
+ if (!ExecutionManager::IsManagedCode(GetIP(&ex.ContextRecord)))
{
// This is the first time we see the managed exception, set its context to the managed frame that has caused
// the exception to be thrown
@@ -4570,7 +4570,7 @@ VOID DECLSPEC_NORETURN UnwindManagedExceptionPass1(PAL_SEHException& ex)
if (disposition == ExceptionContinueSearch)
{
// Exception handler not found. Try the parent frame.
- controlPc = frameContext.Rip;
+ controlPc = GetIP(&frameContext);
}
else if (disposition == ExceptionStackUnwind)
{
@@ -4628,7 +4628,7 @@ VOID DECLSPEC_NORETURN UnwindManagedExceptionPass1(PAL_SEHException& ex)
// Pop all frames that are below the block of native frames and that would be
// in the unwound part of the stack when UnwindManagedExceptionPass1 is resumed
// at the next managed frame.
- UnwindFrameChain(GetThread(), (VOID*)frameContext.Rsp);
+ UnwindFrameChain(GetThread(), (VOID*)GetSP(&frameContext));
// We are going to reclaim the stack range that was scanned by the exception tracker
// until now. We need to reset the explicit frames range so that if GC fires before
@@ -4644,7 +4644,7 @@ VOID DECLSPEC_NORETURN UnwindManagedExceptionPass1(PAL_SEHException& ex)
UNREACHABLE();
}
- } while (IsSpInStackLimits(frameContext.Rsp, stackLowAddress, stackHighAddress));
+ } while (IsSpInStackLimits(GetSP(&frameContext), stackLowAddress, stackHighAddress));
_ASSERTE(!"UnwindManagedExceptionPass1: Failed to find a handler. Reached the end of the stack");
EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
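Throughout both unwind passes, direct Rip/Rsp accesses are replaced with GetIP/GetSP accessors, and establisherFrame shrinks from ULONG64 to UINT_PTR, so the same source compiles on 32-bit ARM where the context registers are Pc and Sp. A hedged sketch of what such per-architecture accessors look like (the real GetIP/GetSP are defined elsewhere in the tree):

    // Hypothetical accessors in the spirit of GetIP/GetSP used above.
    inline UINT_PTR SketchGetIP(const T_CONTEXT* pContext)
    {
    #if defined(_TARGET_AMD64_)
        return (UINT_PTR)pContext->Rip;
    #elif defined(_TARGET_ARM_)
        return (UINT_PTR)pContext->Pc;
    #else
    #error "unsupported target in this sketch"
    #endif
    }

    inline UINT_PTR SketchGetSP(const T_CONTEXT* pContext)
    {
    #if defined(_TARGET_AMD64_)
        return (UINT_PTR)pContext->Rsp;
    #elif defined(_TARGET_ARM_)
        return (UINT_PTR)pContext->Sp;
    #else
    #error "unsupported target in this sketch"
    #endif
    }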
diff --git a/src/vm/gcinfodecoder.cpp b/src/vm/gcinfodecoder.cpp
index 2d886d4ad4..b231924e0f 100644
--- a/src/vm/gcinfodecoder.cpp
+++ b/src/vm/gcinfodecoder.cpp
@@ -1615,6 +1615,25 @@ OBJECTREF* GcInfoDecoder::GetRegisterSlot(
}
+#ifdef FEATURE_PAL
+OBJECTREF* GcInfoDecoder::GetCapturedRegister(
+ int regNum,
+ PREGDISPLAY pRD
+ )
+{
+ _ASSERTE(regNum >= 0 && regNum <= 14);
+ _ASSERTE(regNum != 13); // sp
+
+ // The fields of CONTEXT are in the same order as
+ // the processor encoding numbers.
+
+ ULONG *pR0;
+ pR0 = &pRD->pCurrentContext->R0;
+
+ return (OBJECTREF*)(pR0 + regNum);
+}
+#endif // FEATURE_PAL
+
bool GcInfoDecoder::IsScratchRegister(int regNum, PREGDISPLAY pRD)
{
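GetCapturedRegister above leans on a layout contract: the ARM CONTEXT declares R0 through R12, followed by Sp and Lr, as consecutive ULONG fields in register-encoding order, so pR0 + regNum lands on the requested register. A compile-time check can make that assumption explicit; a hedged sketch, assuming the PAL's CONTEXT field names and the surrounding headers:

    #include <cstddef>

    // Sanity checks for the pointer arithmetic in GetCapturedRegister.
    static_assert(offsetof(T_CONTEXT, R1) == offsetof(T_CONTEXT, R0) + sizeof(ULONG),
                  "R0..R12 must be contiguous for &R0 + regNum to be valid");
    static_assert(offsetof(T_CONTEXT, Lr) == offsetof(T_CONTEXT, R0) + 14 * sizeof(ULONG),
                  "Sp/Lr must follow R12 so encodings 13/14 map to the right fields");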
diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
index e9fc25182e..6ac959bb32 100644
--- a/src/vm/jitinterface.cpp
+++ b/src/vm/jitinterface.cpp
@@ -11088,8 +11088,8 @@ void CEEJitInfo::allocUnwindInfo (
for (ULONG iUnwindInfo = 0; iUnwindInfo < m_usedUnwindInfos - 1; iUnwindInfo++)
{
PRUNTIME_FUNCTION pOtherFunction = m_CodeHeader->GetUnwindInfo(iUnwindInfo);
- _ASSERTE( RUNTIME_FUNCTION__BeginAddress(pOtherFunction) >= RUNTIME_FUNCTION__EndAddress(pRuntimeFunction, baseAddress)
- || RUNTIME_FUNCTION__EndAddress(pOtherFunction, baseAddress) <= RUNTIME_FUNCTION__BeginAddress(pRuntimeFunction));
+ _ASSERTE(( RUNTIME_FUNCTION__BeginAddress(pOtherFunction) >= RUNTIME_FUNCTION__EndAddress(pRuntimeFunction, baseAddress)
+ || RUNTIME_FUNCTION__EndAddress(pOtherFunction, baseAddress) <= RUNTIME_FUNCTION__BeginAddress(pRuntimeFunction)));
}
}
#endif // _DEBUG
diff --git a/src/vm/stublink.cpp b/src/vm/stublink.cpp
index 678a57669b..9dfe20fd1c 100644
--- a/src/vm/stublink.cpp
+++ b/src/vm/stublink.cpp
@@ -172,8 +172,8 @@ FindStubFunctionEntry (
RUNTIME_FUNCTION__BeginAddress(pCurFunction),
RUNTIME_FUNCTION__EndAddress(pCurFunction, (TADDR)pStubHeapSegment->pbBaseAddress)));
- CONSISTENCY_CHECK(RUNTIME_FUNCTION__EndAddress(pCurFunction, (TADDR)pStubHeapSegment->pbBaseAddress) > RUNTIME_FUNCTION__BeginAddress(pCurFunction));
- CONSISTENCY_CHECK(!pPrevFunction || RUNTIME_FUNCTION__EndAddress(pPrevFunction, (TADDR)pStubHeapSegment->pbBaseAddress) <= RUNTIME_FUNCTION__BeginAddress(pCurFunction));
+ CONSISTENCY_CHECK((RUNTIME_FUNCTION__EndAddress(pCurFunction, (TADDR)pStubHeapSegment->pbBaseAddress) > RUNTIME_FUNCTION__BeginAddress(pCurFunction)));
+ CONSISTENCY_CHECK((!pPrevFunction || RUNTIME_FUNCTION__EndAddress(pPrevFunction, (TADDR)pStubHeapSegment->pbBaseAddress) <= RUNTIME_FUNCTION__BeginAddress(pCurFunction)));
// The entry points are in increasing address order. They're
// also contiguous, so after we're sure it's after the start of
@@ -181,7 +181,7 @@ FindStubFunctionEntry (
// the end address.
if (RelativeAddress < RUNTIME_FUNCTION__EndAddress(pCurFunction, (TADDR)pStubHeapSegment->pbBaseAddress))
{
- CONSISTENCY_CHECK(RelativeAddress >= RUNTIME_FUNCTION__BeginAddress(pCurFunction));
+ CONSISTENCY_CHECK((RelativeAddress >= RUNTIME_FUNCTION__BeginAddress(pCurFunction)));
return pCurFunction;
}
@@ -215,7 +215,7 @@ void UnregisterUnwindInfoInLoaderHeapCallback (PVOID pvAllocationBase, SIZE_T cb
StubUnwindInfoHeapSegment *pStubHeapSegment;
for (StubUnwindInfoHeapSegment **ppPrevStubHeapSegment = &g_StubHeapSegments;
- pStubHeapSegment = *ppPrevStubHeapSegment; )
+ (pStubHeapSegment = *ppPrevStubHeapSegment); )
{
LOG((LF_STUBS, LL_INFO10000, " have unwind info for address %p size %p\n", pStubHeapSegment->pbBaseAddress, pStubHeapSegment->cbSegment));
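Parenthesizing (pStubHeapSegment = *ppPrevStubHeapSegment) does not change behavior; it tells GCC and Clang that the assignment-as-loop-condition is intentional, silencing the -Wparentheses warning that flags a probable "=" vs "==" typo. The idiom in isolation, as a hypothetical pointer-to-pointer list walk mirroring the loop above:

    struct NodeSketch { NodeSketch* next; };

    void VisitAllSketch(NodeSketch** pphead)
    {
        NodeSketch* p;
        // Extra parentheses around the assignment mark it as deliberate.
        for (NodeSketch** pp = pphead; (p = *pp); pp = &p->next)
        {
            // ... visit p ...
        }
    }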
@@ -354,6 +354,12 @@ StubLinker::StubLinker()
m_pPatchLabel = NULL;
m_stackSize = 0;
m_fDataOnly = FALSE;
+#ifdef _TARGET_ARM_
+ m_fProlog = FALSE;
+ m_cCalleeSavedRegs = 0;
+ m_cbStackFrame = 0;
+ m_fPushArgRegs = FALSE;
+#endif
#ifdef STUBLINKER_GENERATES_UNWIND_INFO
#ifdef _DEBUG
m_pUnwindInfoCheckLabel = NULL;
@@ -363,12 +369,6 @@ StubLinker::StubLinker()
m_nUnwindSlots = 0;
m_fHaveFramePointer = FALSE;
#endif
-#ifdef _TARGET_ARM_
- m_fProlog = FALSE;
- m_cCalleeSavedRegs = 0;
- m_cbStackFrame = 0;
- m_fPushArgRegs = FALSE;
-#endif
#ifdef _TARGET_ARM64_
m_fProlog = FALSE;
m_cIntRegArgs = 0;
diff --git a/src/vm/stublink.h b/src/vm/stublink.h
index ec3b102a54..8456432fdc 100644
--- a/src/vm/stublink.h
+++ b/src/vm/stublink.h
@@ -312,8 +312,17 @@ public:
// labels, and
// internals.
BOOL m_fDataOnly; // the stub contains only data - does not need FlushInstructionCache
+
+#ifdef _TARGET_ARM_
+protected:
+ BOOL m_fProlog; // True if DescribeProlog has been called
+ UINT m_cCalleeSavedRegs; // Count of callee saved registers (0 == none, 1 == r4, 2 ==
+ // r4-r5 etc. up to 8 == r4-r11)
+ UINT m_cbStackFrame; // Count of bytes in the stack frame (excl of saved regs)
+ BOOL m_fPushArgRegs; // If true, r0-r3 are saved before callee saved regs
+#endif // _TARGET_ARM_
-#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
#ifdef _DEBUG
CodeLabel *m_pUnwindInfoCheckLabel; // subfunction to call to unwind info check helper.
@@ -342,13 +351,6 @@ public:
#define MAX_UNWIND_CODE_WORDS 5 /* maximum number of 32-bit words to store unwind codes */
// Cache information about the stack frame set up in the prolog and use it in the generation of the
// epilog.
-protected:
- BOOL m_fProlog; // True if DescribeProlog has been called
- UINT m_cCalleeSavedRegs; // Count of callee saved registers (0 == none, 1 == r4, 2 ==
- // r4-r5 etc. up to 8 == r4-r11)
- UINT m_cbStackFrame; // Count of bytes in the stack frame (excl of saved regs)
- BOOL m_fPushArgRegs; // If true, r0-r3 are saved before callee saved regs
-
private:
// Reserve fixed size block that's big enough to fit any unwind info we can have
static const int c_nUnwindInfoSize = sizeof(RUNTIME_FUNCTION) + sizeof(DWORD) + MAX_UNWIND_CODE_WORDS *4;
diff --git a/src/vm/util.cpp b/src/vm/util.cpp
index faeb2002d7..46e7f4c7bc 100644
--- a/src/vm/util.cpp
+++ b/src/vm/util.cpp
@@ -2443,7 +2443,6 @@ size_t GetLargestOnDieCacheSize(BOOL bTrueSize)
return maxSize;
#else
-
size_t cache_size = GetLogicalProcessorCacheSizeFromOS() ; // Returns the size of the highest level processor cache
return cache_size;
diff --git a/src/vm/wks/CMakeLists.txt b/src/vm/wks/CMakeLists.txt
index bf810d60b7..b4c091cd5f 100644
--- a/src/vm/wks/CMakeLists.txt
+++ b/src/vm/wks/CMakeLists.txt
@@ -6,7 +6,7 @@ if (WIN32)
set_source_files_properties(../microsoft.comservices_i.c PROPERTIES COMPILE_FLAGS "/Y-")
endif (WIN32)
-add_library(cee_wks ${VM_SOURCES_WKS} ${VM_SOURCES_WKS_AMD64_ASM})
+add_library(cee_wks ${VM_SOURCES_WKS} ${VM_SOURCES_WKS_ARCH_ASM})
if (WIN32)
# Get the current list of definitions
@@ -29,10 +29,10 @@ endforeach()
# Add defines for the ASM. Unfortunately setting it on the target is ignored for asm by the cmake, so we have
# to set it on the sources.
-set_property(SOURCE ${VM_SOURCES_WKS_AMD64_ASM} PROPERTY COMPILE_DEFINITIONS ${ASM_DEFINITIONS})
+set_property(SOURCE ${VM_SOURCES_WKS_ARCH_ASM} PROPERTY COMPILE_DEFINITIONS ${ASM_DEFINITIONS})
foreach(CONFIG IN LISTS CMAKE_CONFIGURATION_TYPES)
string(TOUPPER ${CONFIG} CONFIG)
- set_property(SOURCE ${VM_SOURCES_WKS_AMD64_ASM} PROPERTY COMPILE_DEFINITIONS_${CONFIG} ${ASM_DEFINITIONS_${CONFIG}})
+ set_property(SOURCE ${VM_SOURCES_WKS_ARCH_ASM} PROPERTY COMPILE_DEFINITIONS_${CONFIG} ${ASM_DEFINITIONS_${CONFIG}})
endforeach()
# Convert AsmConstants.h into AsmConstants.inc
diff --git a/src/zap/zapcode.cpp b/src/zap/zapcode.cpp
index e7fd69b01c..a4c537dd62 100644
--- a/src/zap/zapcode.cpp
+++ b/src/zap/zapcode.cpp
@@ -1067,7 +1067,7 @@ public:
ZapCodeBlob * pZapCodeBlob = new (pMemory) ZapCodeBlobConst<alignment>(cbSize);
if (pData != NULL)
- memcpy(pZapCodeBlob + 1, pData, cbSize);
+ memcpy((void*)(pZapCodeBlob + 1), pData, cbSize);
return pZapCodeBlob;
}
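The final change casts the memcpy destination to void*. pZapCodeBlob + 1 is a pointer to a class in the ZapNode hierarchy, which carries virtual functions, and Clang's -Wdynamic-class-memaccess warns about raw memcpy through such a pointer since it could clobber a vtable; the cast states that the bytes placed after the object header are plain payload. A hypothetical reduction of the pattern:

    #include <cstddef>
    #include <cstring>

    struct DynSketch
    {
        virtual ~DynSketch() {}
        // payload bytes follow the object in the same over-sized allocation
    };

    void CopyPayloadSketch(DynSketch* blob, const void* data, size_t cb)
    {
        // memcpy(blob + 1, data, cb);       // clang: -Wdynamic-class-memaccess
        memcpy((void*)(blob + 1), data, cb); // cast: destination is raw payload
    }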