summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/.nuget/Microsoft.NETCore.Runtime.CoreCLR/runtime.Linux.Microsoft.NETCore.Runtime.CoreCLR.props3
-rw-r--r--src/CMakeLists.txt6
-rw-r--r--src/ToolBox/SOS/NETCore/project.json9
-rw-r--r--src/ToolBox/SOS/Strike/util.h79
-rw-r--r--src/coreclr/hosts/inc/coreclrhost.h5
-rw-r--r--src/coreclr/hosts/unixcoreruncommon/coreruncommon.cpp12
-rw-r--r--src/debug/CMakeLists.txt2
-rw-r--r--src/debug/createdump/.gitmirrorall1
-rw-r--r--src/debug/createdump/CMakeLists.txt33
-rw-r--r--src/debug/createdump/crashinfo.cpp630
-rw-r--r--src/debug/createdump/crashinfo.h71
-rw-r--r--src/debug/createdump/createdump.cpp88
-rw-r--r--src/debug/createdump/createdump.h56
-rw-r--r--src/debug/createdump/datatarget.cpp263
-rw-r--r--src/debug/createdump/datatarget.h90
-rw-r--r--src/debug/createdump/dumpwriter.cpp486
-rw-r--r--src/debug/createdump/dumpwriter.h74
-rw-r--r--src/debug/createdump/memoryregion.h97
-rw-r--r--src/debug/createdump/threadinfo.cpp154
-rw-r--r--src/debug/createdump/threadinfo.h41
-rw-r--r--src/debug/daccess/dacfn.cpp2
-rw-r--r--src/debug/daccess/enummem.cpp103
-rw-r--r--src/debug/daccess/request_svr.cpp16
-rw-r--r--src/debug/inc/dump/dumpcommon.h29
-rw-r--r--src/dlls/mscordac/mscordac_unixexports.src4
-rw-r--r--src/dlls/mscoree/mscorwks_ntdef.src1
-rw-r--r--src/dlls/mscoree/mscorwks_unixexports.src1
-rw-r--r--src/dlls/mscoree/unixinterface.cpp41
-rw-r--r--src/gc/gchandletable.cpp76
-rw-r--r--src/gc/gchandletableimpl.h28
-rw-r--r--src/gc/gcinterface.h28
-rw-r--r--src/gc/handletable.cpp18
-rw-r--r--src/gc/handletable.h12
-rw-r--r--src/gc/objecthandle.cpp45
-rw-r--r--src/gc/objecthandle.h228
-rw-r--r--src/gc/sample/GCSample.cpp2
-rw-r--r--src/inc/arrayholder.h80
-rw-r--r--src/inc/corhost.h5
-rw-r--r--src/inc/eetwain.h5
-rw-r--r--src/jit/CMakeLists.txt6
-rw-r--r--src/jit/block.h1
-rw-r--r--src/jit/codegenarm.cpp1132
-rw-r--r--src/jit/codegenarm64.cpp1552
-rw-r--r--src/jit/codegenarmarch.cpp1687
-rw-r--r--src/jit/codegenxarch.cpp8
-rw-r--r--src/jit/compiler.h12
-rw-r--r--src/jit/decomposelongs.cpp173
-rw-r--r--src/jit/emitarm.cpp36
-rw-r--r--src/jit/emitarm.h8
-rw-r--r--src/jit/emitarm64.cpp8
-rw-r--r--src/jit/flowgraph.cpp12
-rw-r--r--src/jit/gcencode.cpp3
-rw-r--r--src/jit/importer.cpp12
-rw-r--r--src/jit/jit.settings.targets6
-rw-r--r--src/jit/lowerarm.cpp267
-rw-r--r--src/jit/lowerarm64.cpp284
-rw-r--r--src/jit/lowerarmarch.cpp346
-rw-r--r--src/jit/lsraarm.cpp752
-rw-r--r--src/jit/lsraarm64.cpp721
-rw-r--r--src/jit/lsraarmarch.cpp868
-rw-r--r--src/mscorlib/Resources/Strings.resx44
-rw-r--r--src/mscorlib/System.Private.CoreLib.csproj27
-rw-r--r--src/mscorlib/shared/Microsoft/Win32/SafeHandles/SafeFileHandle.Windows.cs3
-rw-r--r--src/mscorlib/shared/System.Private.CoreLib.Shared.projitems34
-rw-r--r--src/mscorlib/shared/System/ComponentModel/DefaultValueAttribute.cs228
-rw-r--r--src/mscorlib/shared/System/IO/EndOfStreamException.cs4
-rw-r--r--src/mscorlib/shared/System/IO/FileAccess.cs3
-rw-r--r--src/mscorlib/shared/System/IO/FileMode.cs3
-rw-r--r--src/mscorlib/shared/System/IO/FileOptions.cs3
-rw-r--r--src/mscorlib/shared/System/IO/FileShare.cs3
-rw-r--r--src/mscorlib/shared/System/IO/FileStream.cs3
-rw-r--r--src/mscorlib/shared/System/IO/Path.cs3
-rw-r--r--src/mscorlib/shared/System/Progress.cs3
-rw-r--r--src/mscorlib/shared/System/Runtime/CompilerServices/IsConst.cs10
-rw-r--r--src/mscorlib/shared/System/Runtime/CompilerServices/SpecialNameAttribute.cs12
-rw-r--r--src/mscorlib/shared/System/Runtime/CompilerServices/StrongBox.cs59
-rw-r--r--src/mscorlib/shared/System/Runtime/Serialization/IDeserializationCallback.cs11
-rw-r--r--src/mscorlib/shared/System/Runtime/Serialization/OnDeserializedAttribute.cs11
-rw-r--r--src/mscorlib/shared/System/Runtime/Serialization/OnDeserializingAttribute.cs11
-rw-r--r--src/mscorlib/shared/System/Runtime/Serialization/OnSerializedAttribute.cs11
-rw-r--r--src/mscorlib/shared/System/Runtime/Serialization/OnSerializingAttribute.cs11
-rw-r--r--src/mscorlib/shared/System/Runtime/Serialization/OptionalFieldAttribute.cs25
-rw-r--r--src/mscorlib/shared/System/Runtime/Serialization/SerializationException.cs (renamed from src/mscorlib/src/System/Runtime/Serialization/SerializationException.cs)22
-rw-r--r--src/mscorlib/shared/System/Runtime/Serialization/SerializationInfoEnumerator.cs127
-rw-r--r--src/mscorlib/shared/System/Runtime/Serialization/StreamingContext.cs53
-rw-r--r--src/mscorlib/shared/System/Runtime/Versioning/NonVersionableAttribute.cs (renamed from src/mscorlib/src/System/Runtime/Versioning/NonVersionableAttribute.cs)0
-rw-r--r--src/mscorlib/shared/System/Threading/AbandonedMutexException.cs (renamed from src/mscorlib/src/System/Threading/AbandonedMutexException.cs)39
-rw-r--r--src/mscorlib/shared/System/Threading/ApartmentState.cs (renamed from src/mscorlib/src/System/Threading/ApartmentState.cs)11
-rw-r--r--src/mscorlib/shared/System/Threading/AsyncLocal.cs (renamed from src/mscorlib/src/System/Threading/AsyncLocal.cs)3
-rw-r--r--src/mscorlib/shared/System/Threading/AutoResetEvent.cs (renamed from src/mscorlib/src/System/Threading/AutoResetEvent.cs)13
-rw-r--r--src/mscorlib/shared/System/Threading/EventResetMode.cs (renamed from src/mscorlib/src/System/Threading/EventResetMode.cs)5
-rw-r--r--src/mscorlib/shared/System/Threading/ExecutionContext.cs (renamed from src/mscorlib/src/System/Threading/ExecutionContext.cs)27
-rw-r--r--src/mscorlib/shared/System/Threading/LazyThreadSafetyMode.cs44
-rw-r--r--src/mscorlib/shared/System/Threading/LockRecursionException.cs (renamed from src/mscorlib/src/System/Threading/LockRecursionException.cs)28
-rw-r--r--src/mscorlib/shared/System/Threading/ManualResetEvent.cs (renamed from src/mscorlib/src/System/Threading/ManualResetEvent.cs)13
-rw-r--r--src/mscorlib/shared/System/Threading/ParameterizedThreadStart.cs (renamed from src/mscorlib/src/System/Threading/ParameterizedThreadStart.cs)6
-rw-r--r--src/mscorlib/shared/System/Threading/SemaphoreFullException.cs (renamed from src/mscorlib/src/System/Threading/SemaphoreFullException.cs)3
-rw-r--r--src/mscorlib/shared/System/Threading/SendOrPostCallback.cs (renamed from src/mscorlib/src/System/Threading/SendOrPostCallback.cs)8
-rw-r--r--src/mscorlib/shared/System/Threading/SynchronizationLockException.cs (renamed from src/mscorlib/src/System/Threading/SynchronizationLockException.cs)5
-rw-r--r--src/mscorlib/shared/System/Threading/ThreadPriority.cs (renamed from src/mscorlib/src/System/Threading/ThreadPriority.cs)13
-rw-r--r--src/mscorlib/shared/System/Threading/ThreadStart.cs (renamed from src/mscorlib/src/System/Threading/ThreadStart.cs)6
-rw-r--r--src/mscorlib/shared/System/Threading/ThreadStartException.cs (renamed from src/mscorlib/src/System/Threading/ThreadStartException.cs)11
-rw-r--r--src/mscorlib/shared/System/Threading/ThreadState.cs (renamed from src/mscorlib/src/System/Threading/ThreadState.cs)11
-rw-r--r--src/mscorlib/shared/System/Threading/ThreadStateException.cs (renamed from src/mscorlib/src/System/Threading/ThreadStateException.cs)5
-rw-r--r--src/mscorlib/shared/System/Threading/TimeoutHelper.cs54
-rw-r--r--src/mscorlib/shared/System/Threading/WaitHandleCannotBeOpenedException.cs (renamed from src/mscorlib/src/System/Threading/WaitHandleCannotBeOpenedException.cs)7
-rw-r--r--src/mscorlib/src/System/Collections/Generic/Dictionary.cs48
-rw-r--r--src/mscorlib/src/System/DelegateSerializationHolder.cs22
-rw-r--r--src/mscorlib/src/System/Reflection/AssemblyName.cs32
-rw-r--r--src/mscorlib/src/System/Runtime/Serialization/IDeserializationCallback.cs25
-rw-r--r--src/mscorlib/src/System/Runtime/Serialization/SerializationAttributes.cs61
-rw-r--r--src/mscorlib/src/System/Runtime/Serialization/SerializationInfoEnumerator.cs176
-rw-r--r--src/mscorlib/src/System/Runtime/Serialization/StreamingContext.cs84
-rw-r--r--src/mscorlib/src/System/Threading/LazyInitializer.cs31
-rw-r--r--src/mscorlib/src/System/Threading/ReaderWriterLockSlim.cs1311
-rw-r--r--src/mscorlib/src/System/Threading/SpinWait.cs48
-rw-r--r--src/pal/CMakeLists.txt10
-rw-r--r--src/pal/inc/pal.h7
-rw-r--r--src/pal/prebuilt/inc/mscoree.h16
-rw-r--r--src/pal/src/config.h.in1
-rw-r--r--src/pal/src/configure.cmake1
-rw-r--r--src/pal/src/include/pal/process.h19
-rw-r--r--src/pal/src/init/pal.cpp6
-rw-r--r--src/pal/src/thread/process.cpp177
-rwxr-xr-xsrc/pal/tools/gen-buildsys-clang.sh10
-rw-r--r--src/vm/CMakeLists.txt2
-rw-r--r--src/vm/amd64/JitHelpers_SingleAppDomain.asm64
-rw-r--r--src/vm/amd64/jithelpers_singleappdomain.S49
-rw-r--r--src/vm/appdomain.cpp73
-rw-r--r--src/vm/appdomain.hpp44
-rw-r--r--src/vm/arm64/asmconstants.h6
-rw-r--r--src/vm/arm64/asmhelpers.S57
-rw-r--r--src/vm/arm64/asmhelpers.asm60
-rw-r--r--src/vm/arm64/stubs.cpp15
-rw-r--r--src/vm/assembly.cpp5
-rw-r--r--src/vm/assemblyname.cpp37
-rw-r--r--src/vm/assemblyname.hpp1
-rw-r--r--src/vm/corhost.cpp41
-rw-r--r--src/vm/ecalllist.h1
-rw-r--r--src/vm/eetwain.cpp48
-rw-r--r--src/vm/exceptionhandling.cpp21
-rw-r--r--src/vm/exstate.cpp6
-rw-r--r--src/vm/exstate.h2
-rw-r--r--src/vm/gchandletableutilities.h221
-rw-r--r--src/vm/gcheaputilities.cpp3
-rw-r--r--src/vm/i386/asmhelpers.S37
-rw-r--r--src/vm/i386/excepx86.cpp7
-rw-r--r--src/vm/jitinterfacegen.cpp15
-rw-r--r--src/vm/marshalnative.cpp18
-rw-r--r--src/vm/object.h4
-rw-r--r--src/vm/safehandle.cpp4
-rw-r--r--src/vm/stackwalk.cpp7
-rw-r--r--src/vm/threads.cpp3
153 files changed, 8626 insertions, 6164 deletions
diff --git a/src/.nuget/Microsoft.NETCore.Runtime.CoreCLR/runtime.Linux.Microsoft.NETCore.Runtime.CoreCLR.props b/src/.nuget/Microsoft.NETCore.Runtime.CoreCLR/runtime.Linux.Microsoft.NETCore.Runtime.CoreCLR.props
index db8b03173b..4ad2538826 100644
--- a/src/.nuget/Microsoft.NETCore.Runtime.CoreCLR/runtime.Linux.Microsoft.NETCore.Runtime.CoreCLR.props
+++ b/src/.nuget/Microsoft.NETCore.Runtime.CoreCLR/runtime.Linux.Microsoft.NETCore.Runtime.CoreCLR.props
@@ -4,8 +4,7 @@
<_PlatformDoesNotSupportNiFiles Condition="'$(Platform)' == 'arm'">true</_PlatformDoesNotSupportNiFiles>
<_PlatformDoesNotSupportNiFiles Condition="'$(Platform)' == 'armel'">true</_PlatformDoesNotSupportNiFiles>
<_PlatformDoesNotSupportNiFiles Condition="'$(Platform)' == 'x86'">true</_PlatformDoesNotSupportNiFiles>
- <_PlatformDoesNotSupportEventTrace Condition="'$(Platform)' == 'arm'">true</_PlatformDoesNotSupportEventTrace>
- <_PlatformDoesNotSupportEventTrace Condition="'$(Platform)' == 'armel'">true</_PlatformDoesNotSupportEventTrace>
+ <_PlatformDoesNotSupportEventTrace Condition="'$(_runtimeOSFamily)' == 'tizen'">true</_PlatformDoesNotSupportEventTrace>
<_PlatformDoesNotSupportEventTrace Condition="'$(Platform)' == 'x86'">true</_PlatformDoesNotSupportEventTrace>
</PropertyGroup>
<ItemGroup>
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index d13e8f9a85..c2e0260ef1 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -96,6 +96,12 @@ function(add_executable_clr)
endfunction()
if(CLR_CMAKE_PLATFORM_UNIX)
+ if(CLR_CMAKE_PLATFORM_UBUNTU_LINUX)
+ if(CLR_CMAKE_PLATFORM_UNIX_AMD64)
+ add_subdirectory(debug/createdump)
+ endif(CLR_CMAKE_PLATFORM_UNIX_AMD64)
+ endif(CLR_CMAKE_PLATFORM_UBUNTU_LINUX)
+
add_subdirectory(ToolBox/SOS/Strike)
# Include the dummy c++ include files
diff --git a/src/ToolBox/SOS/NETCore/project.json b/src/ToolBox/SOS/NETCore/project.json
index 6b2061a577..a92b173569 100644
--- a/src/ToolBox/SOS/NETCore/project.json
+++ b/src/ToolBox/SOS/NETCore/project.json
@@ -11,14 +11,5 @@
"portable-net45+win8"
]
}
- },
- "runtimes": {
- "win7-x86": {},
- "win7-x64": {},
- "ubuntu.14.04-x64": {},
- "osx.10.10-x64": {},
- "centos.7-x64": {},
- "rhel.7-x64": {},
- "debian.8-x64": {}
}
}
diff --git a/src/ToolBox/SOS/Strike/util.h b/src/ToolBox/SOS/Strike/util.h
index 4612acc299..6d0e79622c 100644
--- a/src/ToolBox/SOS/Strike/util.h
+++ b/src/ToolBox/SOS/Strike/util.h
@@ -1429,84 +1429,7 @@ SafeReadMemory (TO_TADDR(src), &(dst), sizeof(dst), NULL)
extern "C" PDEBUG_DATA_SPACES g_ExtData;
-template <class T>
-class ArrayHolder
-{
-public:
- ArrayHolder(T *ptr)
- : mPtr(ptr)
- {
- }
-
- ~ArrayHolder()
- {
- Clear();
- }
-
- ArrayHolder(const ArrayHolder &rhs)
- {
- mPtr = const_cast<ArrayHolder *>(&rhs)->Detach();
- }
-
- ArrayHolder &operator=(T *ptr)
- {
- Clear();
- mPtr = ptr;
- return *this;
- }
-
- const T &operator[](int i) const
- {
- return mPtr[i];
- }
-
- T &operator[](int i)
- {
- return mPtr[i];
- }
-
- operator const T *() const
- {
- return mPtr;
- }
-
- operator T *()
- {
- return mPtr;
- }
-
- T **operator&()
- {
- return &mPtr;
- }
-
- T *GetPtr()
- {
- return mPtr;
- }
-
- T *Detach()
- {
- T *ret = mPtr;
- mPtr = NULL;
- return ret;
- }
-
-private:
- void Clear()
- {
- if (mPtr)
- {
- delete [] mPtr;
- mPtr = NULL;
- }
- }
-
-private:
- T *mPtr;
-};
-
-
+#include <arrayholder.h>
// This class acts a smart pointer which calls the Release method on any object
// you place in it when the ToRelease class falls out of scope. You may use it
diff --git a/src/coreclr/hosts/inc/coreclrhost.h b/src/coreclr/hosts/inc/coreclrhost.h
index f0d7952aa6..dd11cb6a51 100644
--- a/src/coreclr/hosts/inc/coreclrhost.h
+++ b/src/coreclr/hosts/inc/coreclrhost.h
@@ -29,6 +29,11 @@ CORECLR_HOSTING_API(coreclr_shutdown,
void* hostHandle,
unsigned int domainId);
+CORECLR_HOSTING_API(coreclr_shutdown_2,
+ void* hostHandle,
+ unsigned int domainId,
+ int* latchedExitCode);
+
CORECLR_HOSTING_API(coreclr_create_delegate,
void* hostHandle,
unsigned int domainId,
diff --git a/src/coreclr/hosts/unixcoreruncommon/coreruncommon.cpp b/src/coreclr/hosts/unixcoreruncommon/coreruncommon.cpp
index d40fb424e6..52ffda8bb5 100644
--- a/src/coreclr/hosts/unixcoreruncommon/coreruncommon.cpp
+++ b/src/coreclr/hosts/unixcoreruncommon/coreruncommon.cpp
@@ -321,7 +321,7 @@ int ExecuteManagedAssembly(
{
coreclr_initialize_ptr initializeCoreCLR = (coreclr_initialize_ptr)dlsym(coreclrLib, "coreclr_initialize");
coreclr_execute_assembly_ptr executeAssembly = (coreclr_execute_assembly_ptr)dlsym(coreclrLib, "coreclr_execute_assembly");
- coreclr_shutdown_ptr shutdownCoreCLR = (coreclr_shutdown_ptr)dlsym(coreclrLib, "coreclr_shutdown");
+ coreclr_shutdown_2_ptr shutdownCoreCLR = (coreclr_shutdown_2_ptr)dlsym(coreclrLib, "coreclr_shutdown_2");
if (initializeCoreCLR == nullptr)
{
@@ -333,7 +333,7 @@ int ExecuteManagedAssembly(
}
else if (shutdownCoreCLR == nullptr)
{
- fprintf(stderr, "Function coreclr_shutdown not found in the libcoreclr.so\n");
+ fprintf(stderr, "Function coreclr_shutdown_2 not found in the libcoreclr.so\n");
}
else
{
@@ -416,12 +416,18 @@ int ExecuteManagedAssembly(
exitCode = -1;
}
- st = shutdownCoreCLR(hostHandle, domainId);
+ int latchedExitCode = 0;
+ st = shutdownCoreCLR(hostHandle, domainId, &latchedExitCode);
if (!SUCCEEDED(st))
{
fprintf(stderr, "coreclr_shutdown failed - status: 0x%08x\n", st);
exitCode = -1;
}
+
+ if (exitCode != -1)
+ {
+ exitCode = latchedExitCode;
+ }
}
}
diff --git a/src/debug/CMakeLists.txt b/src/debug/CMakeLists.txt
index 1940aa9c79..bcfc257b9d 100644
--- a/src/debug/CMakeLists.txt
+++ b/src/debug/CMakeLists.txt
@@ -3,4 +3,4 @@ add_subdirectory(dbgutil)
add_subdirectory(ildbsymlib)
add_subdirectory(ee)
add_subdirectory(di)
-add_subdirectory(shim)
+add_subdirectory(shim) \ No newline at end of file
diff --git a/src/debug/createdump/.gitmirrorall b/src/debug/createdump/.gitmirrorall
new file mode 100644
index 0000000000..9ee5c57b99
--- /dev/null
+++ b/src/debug/createdump/.gitmirrorall
@@ -0,0 +1 @@
+This folder will be mirrored by the Git-TFS Mirror recursively. \ No newline at end of file
diff --git a/src/debug/createdump/CMakeLists.txt b/src/debug/createdump/CMakeLists.txt
new file mode 100644
index 0000000000..081a308345
--- /dev/null
+++ b/src/debug/createdump/CMakeLists.txt
@@ -0,0 +1,33 @@
+project(createdump)
+
+set(CMAKE_INCLUDE_CURRENT_DIR ON)
+
+remove_definitions(-DUNICODE)
+remove_definitions(-D_UNICODE)
+
+include_directories(BEFORE ${VM_DIR})
+
+add_definitions(-DPAL_STDCPP_COMPAT=1)
+
+add_compile_options(-fPIC)
+
+set(CREATEDUMP_SOURCES
+ createdump.cpp
+ crashinfo.cpp
+ threadinfo.cpp
+ datatarget.cpp
+ dumpwriter.cpp
+)
+
+_add_executable(createdump
+ ${CREATEDUMP_SOURCES}
+)
+
+target_link_libraries(createdump
+ # share the PAL in the dac module
+ mscordaccore
+)
+
+add_dependencies(createdump mscordaccore)
+
+install_clr(createdump)
diff --git a/src/debug/createdump/crashinfo.cpp b/src/debug/createdump/crashinfo.cpp
new file mode 100644
index 0000000000..8f72542e96
--- /dev/null
+++ b/src/debug/createdump/crashinfo.cpp
@@ -0,0 +1,630 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include "createdump.h"
+
+CrashInfo::CrashInfo(pid_t pid, DataTarget& dataTarget) :
+ m_ref(1),
+ m_pid(pid),
+ m_ppid(-1),
+ m_name(nullptr),
+ m_dataTarget(dataTarget)
+{
+ dataTarget.AddRef();
+ m_auxvValues.fill(0);
+}
+
+CrashInfo::~CrashInfo()
+{
+ if (m_name != nullptr)
+ {
+ free(m_name);
+ }
+ // Clean up the threads
+ for (ThreadInfo* thread : m_threads)
+ {
+ delete thread;
+ }
+ m_threads.clear();
+
+ // Module and other mappings have a file name to clean up.
+ for (const MemoryRegion& region : m_moduleMappings)
+ {
+ const_cast<MemoryRegion&>(region).Cleanup();
+ }
+ m_moduleMappings.clear();
+ for (const MemoryRegion& region : m_otherMappings)
+ {
+ const_cast<MemoryRegion&>(region).Cleanup();
+ }
+ m_otherMappings.clear();
+ m_dataTarget.Release();
+}
+
+STDMETHODIMP
+CrashInfo::QueryInterface(
+ ___in REFIID InterfaceId,
+ ___out PVOID* Interface)
+{
+ if (InterfaceId == IID_IUnknown ||
+ InterfaceId == IID_ICLRDataEnumMemoryRegionsCallback)
+ {
+ *Interface = (ICLRDataEnumMemoryRegionsCallback*)this;
+ AddRef();
+ return S_OK;
+ }
+ else
+ {
+ *Interface = NULL;
+ return E_NOINTERFACE;
+ }
+}
+
+STDMETHODIMP_(ULONG)
+CrashInfo::AddRef()
+{
+ LONG ref = InterlockedIncrement(&m_ref);
+ return ref;
+}
+
+STDMETHODIMP_(ULONG)
+CrashInfo::Release()
+{
+ LONG ref = InterlockedDecrement(&m_ref);
+ if (ref == 0)
+ {
+ delete this;
+ }
+ return ref;
+}
+
+HRESULT STDMETHODCALLTYPE
+CrashInfo::EnumMemoryRegion(
+ /* [in] */ CLRDATA_ADDRESS address,
+ /* [in] */ ULONG32 size)
+{
+ InsertMemoryRegion(address, size);
+ return S_OK;
+}
+
+bool
+CrashInfo::EnumerateAndSuspendThreads()
+{
+ char taskPath[128];
+ snprintf(taskPath, sizeof(taskPath), "/proc/%d/task", m_pid);
+
+ DIR* taskDir = opendir(taskPath);
+ if (taskDir == nullptr)
+ {
+ fprintf(stderr, "opendir(%s) FAILED %s\n", taskPath, strerror(errno));
+ return false;
+ }
+
+ struct dirent* entry;
+ while ((entry = readdir(taskDir)) != nullptr)
+ {
+ pid_t tid = static_cast<pid_t>(strtol(entry->d_name, nullptr, 10));
+ if (tid != 0)
+ {
+ // Reference: http://stackoverflow.com/questions/18577956/how-to-use-ptrace-to-get-a-consistent-view-of-multiple-threads
+ if (ptrace(PTRACE_ATTACH, tid, nullptr, nullptr) != -1)
+ {
+ int waitStatus;
+ waitpid(tid, &waitStatus, __WALL);
+
+ // Add to the list of (suspended) threads
+ ThreadInfo* thread = new ThreadInfo(tid);
+ m_threads.push_back(thread);
+ }
+ else
+ {
+ fprintf(stderr, "ptrace(ATTACH, %d) FAILED %s\n", tid, strerror(errno));
+ }
+ }
+ }
+
+ closedir(taskDir);
+ return true;
+}
+
+bool
+CrashInfo::GatherCrashInfo(const char* pszExePath, MINIDUMP_TYPE minidumpType)
+{
+ // Get the process info
+ if (!GetStatus(m_pid, &m_ppid, &m_tgid, &m_name))
+ {
+ return false;
+ }
+ // Get the info about the threads (registers, etc.)
+ for (ThreadInfo* thread : m_threads)
+ {
+ if (!thread->Initialize())
+ {
+ return false;
+ }
+ }
+ // Get the auxv data
+ if (!GetAuxvEntries())
+ {
+ return false;
+ }
+ // Get shared module debug info
+ if (!GetDSOInfo())
+ {
+ return false;
+ }
+ // Gather all the module memory mappings (from /dev/$pid/maps)
+ if (!EnumerateModuleMappings())
+ {
+ return false;
+ }
+ // Gather all the useful memory regions from the DAC
+ if (!EnumerateMemoryRegionsWithDAC(pszExePath, minidumpType))
+ {
+ return false;
+ }
+ // Add the thread's stack and some code memory to core
+ for (ThreadInfo* thread : m_threads)
+ {
+ uint64_t start;
+ size_t size;
+
+ // Add the thread's stack and some of the code
+ thread->GetThreadStack(*this, &start, &size);
+ InsertMemoryRegion(start, size);
+
+ thread->GetThreadCode(&start, &size);
+ InsertMemoryRegion(start, size);
+ }
+ // Join all adjacent memory regions
+ CombineMemoryRegions();
+ return true;
+}
+
+void
+CrashInfo::ResumeThreads()
+{
+ for (ThreadInfo* thread : m_threads)
+ {
+ thread->ResumeThread();
+ }
+}
+
+bool
+CrashInfo::GetAuxvEntries()
+{
+ char auxvPath[128];
+ snprintf(auxvPath, sizeof(auxvPath), "/proc/%d/auxv", m_pid);
+
+ int fd = open(auxvPath, O_RDONLY, 0);
+ if (fd == -1)
+ {
+ fprintf(stderr, "open(%s) FAILED %s\n", auxvPath, strerror(errno));
+ return false;
+ }
+ bool result = false;
+ elf_aux_entry auxvEntry;
+
+ while (read(fd, &auxvEntry, sizeof(elf_aux_entry)) == sizeof(elf_aux_entry))
+ {
+ m_auxvEntries.push_back(auxvEntry);
+ if (auxvEntry.a_type == AT_NULL)
+ {
+ break;
+ }
+ if (auxvEntry.a_type < AT_MAX)
+ {
+ m_auxvValues[auxvEntry.a_type] = auxvEntry.a_un.a_val;
+ TRACE("AUXV: %lu = %016lx\n", auxvEntry.a_type, auxvEntry.a_un.a_val);
+ result = true;
+ }
+ }
+
+ close(fd);
+ return result;
+}
+
+bool
+CrashInfo::EnumerateModuleMappings()
+{
+ // Here we read /proc/<pid>/maps file in order to parse it and figure out what it says
+ // about a library we are looking for. This file looks something like this:
+ //
+ // [address] [perms] [offset] [dev] [inode] [pathname] - HEADER is not preset in an actual file
+ //
+ // 35b1800000-35b1820000 r-xp 00000000 08:02 135522 /usr/lib64/ld-2.15.so
+ // 35b1a1f000-35b1a20000 r--p 0001f000 08:02 135522 /usr/lib64/ld-2.15.so
+ // 35b1a20000-35b1a21000 rw-p 00020000 08:02 135522 /usr/lib64/ld-2.15.so
+ // 35b1a21000-35b1a22000 rw-p 00000000 00:00 0 [heap]
+ // 35b1c00000-35b1dac000 r-xp 00000000 08:02 135870 /usr/lib64/libc-2.15.so
+ // 35b1dac000-35b1fac000 ---p 001ac000 08:02 135870 /usr/lib64/libc-2.15.so
+ // 35b1fac000-35b1fb0000 r--p 001ac000 08:02 135870 /usr/lib64/libc-2.15.so
+ // 35b1fb0000-35b1fb2000 rw-p 001b0000 08:02 135870 /usr/lib64/libc-2.15.so
+ char* line = NULL;
+ size_t lineLen = 0;
+ int count = 0;
+ ssize_t read;
+
+ // Making something like: /proc/123/maps
+ char mapPath[128];
+ int chars = snprintf(mapPath, sizeof(mapPath), "/proc/%d/maps", m_pid);
+ assert(chars > 0 && chars <= sizeof(mapPath));
+
+ FILE* mapsFile = fopen(mapPath, "r");
+ if (mapsFile == NULL)
+ {
+ fprintf(stderr, "fopen(%s) FAILED %s\n", mapPath, strerror(errno));
+ return false;
+ }
+ // linuxGateAddress is the beginning of the kernel's mapping of
+ // linux-gate.so in the process. It doesn't actually show up in the
+ // maps list as a filename, but it can be found using the AT_SYSINFO_EHDR
+ // aux vector entry, which gives the information necessary to special
+ // case its entry when creating the list of mappings.
+ // See http://www.trilithium.com/johan/2005/08/linux-gate/ for more
+ // information.
+ const void* linuxGateAddress = (const void*)m_auxvValues[AT_SYSINFO_EHDR];
+
+ // Reading maps file line by line
+ while ((read = getline(&line, &lineLen, mapsFile)) != -1)
+ {
+ uint64_t start, end, offset;
+ char* permissions = nullptr;
+ char* moduleName = nullptr;
+
+ int c = 0;
+ if ((c = sscanf(line, "%lx-%lx %m[-rwxsp] %lx %*[:0-9a-f] %*d %ms\n", &start, &end, &permissions, &offset, &moduleName)) == 5)
+ {
+ if (linuxGateAddress != nullptr && reinterpret_cast<void*>(start) == linuxGateAddress)
+ {
+ InsertMemoryRegion(start, end - start);
+ free(moduleName);
+ }
+ else {
+ uint32_t permissionFlags = 0;
+ if (strchr(permissions, 'r')) {
+ permissionFlags |= PF_R;
+ }
+ if (strchr(permissions, 'w')) {
+ permissionFlags |= PF_W;
+ }
+ if (strchr(permissions, 'x')) {
+ permissionFlags |= PF_X;
+ }
+ MemoryRegion memoryRegion(permissionFlags, start, end, offset, moduleName);
+
+ if (moduleName != nullptr && *moduleName == '/') {
+ m_moduleMappings.insert(memoryRegion);
+ }
+ else {
+ m_otherMappings.insert(memoryRegion);
+ }
+ }
+ free(permissions);
+ }
+ }
+
+ if (g_diagnostics)
+ {
+ TRACE("Module mappings:\n");
+ for (const MemoryRegion& region : m_moduleMappings)
+ {
+ region.Print();
+ }
+ TRACE("Other mappings:\n");
+ for (const MemoryRegion& region : m_otherMappings)
+ {
+ region.Print();
+ }
+ }
+
+ free(line); // We didn't allocate line, but as per contract of getline we should free it
+ fclose(mapsFile);
+
+ return true;
+}
+
+bool
+CrashInfo::EnumerateMemoryRegionsWithDAC(const char *pszExePath, MINIDUMP_TYPE minidumpType)
+{
+ PFN_CLRDataCreateInstance pfnCLRDataCreateInstance = nullptr;
+ ICLRDataEnumMemoryRegions *clrDataEnumRegions = nullptr;
+ HMODULE hdac = nullptr;
+ HRESULT hr = S_OK;
+ bool result = false;
+
+ // We assume that the DAC is in the same location as this createdump exe
+ ArrayHolder<char> dacPath = new char[MAX_LONGPATH];
+ strcpy_s(dacPath, MAX_LONGPATH, pszExePath);
+ char *last = strrchr(dacPath, '/');
+ if (last != nullptr)
+ {
+ *(last + 1) = '\0';
+ }
+ else
+ {
+ dacPath[0] = '\0';
+ }
+ strcat_s(dacPath, MAX_LONGPATH, MAKEDLLNAME_A("mscordaccore"));
+
+ // Load and initialize the DAC
+ hdac = LoadLibraryA(dacPath);
+ if (hdac == nullptr)
+ {
+ fprintf(stderr, "LoadLibraryA(%s) FAILED %d\n", (char*)dacPath, GetLastError());
+ goto exit;
+ }
+ pfnCLRDataCreateInstance = (PFN_CLRDataCreateInstance)GetProcAddress(hdac, "CLRDataCreateInstance");
+ if (pfnCLRDataCreateInstance == nullptr)
+ {
+ fprintf(stderr, "GetProcAddress(CLRDataCreateInstance) FAILED %d\n", GetLastError());
+ goto exit;
+ }
+ hr = pfnCLRDataCreateInstance(__uuidof(ICLRDataEnumMemoryRegions), &m_dataTarget, (void**)&clrDataEnumRegions);
+ if (FAILED(hr))
+ {
+ fprintf(stderr, "CLRDataCreateInstance(ICLRDataEnumMemoryRegions) FAILED %08x\n", hr);
+ goto exit;
+ }
+ // Calls CrashInfo::EnumMemoryRegion for each memory region found by the DAC
+ hr = clrDataEnumRegions->EnumMemoryRegions(this, minidumpType, CLRDATA_ENUM_MEM_DEFAULT);
+ if (FAILED(hr))
+ {
+ fprintf(stderr, "EnumMemoryRegions FAILED %08x\n", hr);
+ goto exit;
+ }
+ result = true;
+exit:
+ if (clrDataEnumRegions != nullptr)
+ {
+ clrDataEnumRegions->Release();
+ }
+ if (hdac != nullptr)
+ {
+ FreeLibrary(hdac);
+ }
+ return result;
+}
+
+bool
+CrashInfo::GetDSOInfo()
+{
+ Phdr* phdrAddr = reinterpret_cast<Phdr*>(m_auxvValues[AT_PHDR]);
+ int phnum = m_auxvValues[AT_PHNUM];
+ assert(m_auxvValues[AT_PHENT] == sizeof(Phdr));
+
+ if (phnum <= 0 || phdrAddr == nullptr) {
+ return false;
+ }
+ TRACE("DSO: phdr %p phnum %d\n", phdrAddr, phnum);
+
+ // Search for the program PT_DYNAMIC header
+ ElfW(Dyn)* dynamicAddr = nullptr;
+ for (int i = 0; i < phnum; i++, phdrAddr++)
+ {
+ Phdr ph;
+ if (!ReadMemory(phdrAddr, &ph, sizeof(ph))) {
+ return false;
+ }
+ TRACE("DSO: phdr %p type %d (%x) vaddr %016lx memsz %016lx offset %016lx\n",
+ phdrAddr, ph.p_type, ph.p_type, ph.p_vaddr, ph.p_memsz, ph.p_offset);
+
+ if (ph.p_type == PT_DYNAMIC)
+ {
+ dynamicAddr = reinterpret_cast<ElfW(Dyn)*>(ph.p_vaddr);
+ }
+ else if (ph.p_type == PT_GNU_EH_FRAME)
+ {
+ if (ph.p_vaddr != 0 && ph.p_memsz != 0)
+ {
+ InsertMemoryRegion(ph.p_vaddr, ph.p_memsz);
+ }
+ }
+ }
+
+ if (dynamicAddr == nullptr) {
+ return false;
+ }
+
+ // Search for dynamic debug (DT_DEBUG) entry
+ struct r_debug* rdebugAddr = nullptr;
+ for (;;) {
+ ElfW(Dyn) dyn;
+ if (!ReadMemory(dynamicAddr, &dyn, sizeof(dyn))) {
+ return false;
+ }
+ TRACE("DSO: dyn %p tag %ld (%lx) d_ptr %016lx\n", dynamicAddr, dyn.d_tag, dyn.d_tag, dyn.d_un.d_ptr);
+ if (dyn.d_tag == DT_DEBUG) {
+ rdebugAddr = reinterpret_cast<struct r_debug*>(dyn.d_un.d_ptr);
+ }
+ else if (dyn.d_tag == DT_NULL) {
+ break;
+ }
+ dynamicAddr++;
+ }
+
+ // Add the DSO r_debug entry
+ TRACE("DSO: rdebugAddr %p\n", rdebugAddr);
+ struct r_debug debugEntry;
+ if (!ReadMemory(rdebugAddr, &debugEntry, sizeof(debugEntry))) {
+ return false;
+ }
+
+ // Add the DSO link_map entries
+ for (struct link_map* linkMapAddr = debugEntry.r_map; linkMapAddr != nullptr;) {
+ struct link_map map;
+ if (!ReadMemory(linkMapAddr, &map, sizeof(map))) {
+ return false;
+ }
+ char moduleName[257] = { 0 };
+ if (map.l_name != nullptr) {
+ if (!ReadMemory(map.l_name, &moduleName, sizeof(moduleName) - 1)) {
+ return false;
+ }
+ }
+ TRACE("DSO: link_map entry %p l_ld %p l_addr %lx %s\n", linkMapAddr, map.l_ld, map.l_addr, moduleName);
+ linkMapAddr = map.l_next;
+ }
+
+ return true;
+}
+
+//
+// ReadMemory from target and add to memory regions list
+//
+bool
+CrashInfo::ReadMemory(void* address, void* buffer, size_t size)
+{
+ uint32_t read = 0;
+ if (FAILED(m_dataTarget.ReadVirtual(reinterpret_cast<CLRDATA_ADDRESS>(address), reinterpret_cast<PBYTE>(buffer), size, &read)))
+ {
+ return false;
+ }
+ InsertMemoryRegion(reinterpret_cast<uint64_t>(address), size);
+ return true;
+}
+
+//
+// Add this memory chunk to the list of regions to be
+// written to the core dump.
+//
+void
+CrashInfo::InsertMemoryRegion(uint64_t address, size_t size)
+{
+ // Round to page boundary
+ uint64_t start = address & PAGE_MASK;
+ assert(start > 0);
+
+ // Round up to page boundary
+ uint64_t end = ((address + size) + (PAGE_SIZE - 1)) & PAGE_MASK;
+ assert(end > 0);
+
+ MemoryRegion memoryRegionFull(start, end);
+
+ // First check if the full memory region can be added without conflicts
+ const auto& found = m_memoryRegions.find(memoryRegionFull);
+ if (found == m_memoryRegions.end())
+ {
+ // Add full memory region
+ m_memoryRegions.insert(memoryRegionFull);
+ }
+ else
+ {
+ // The memory region is not wholely contained in region found
+ if (!found->Contains(memoryRegionFull))
+ {
+ // The region overlaps/conflicts with one already in the set so
+ // add one page at a time to avoid the overlapping pages.
+ uint64_t numberPages = (end - start) >> PAGE_SHIFT;
+
+ for (int p = 0; p < numberPages; p++, start += PAGE_SIZE)
+ {
+ MemoryRegion memoryRegion(start, start + PAGE_SIZE);
+
+ const auto& found = m_memoryRegions.find(memoryRegion);
+ if (found == m_memoryRegions.end())
+ {
+ m_memoryRegions.insert(memoryRegion);
+ }
+ }
+ }
+ }
+}
+
+//
+// Combine any adjacent memory regions into one
+//
+void
+CrashInfo::CombineMemoryRegions()
+{
+ assert(!m_memoryRegions.empty());
+
+ std::set<MemoryRegion> memoryRegionsNew;
+
+ uint64_t start = m_memoryRegions.begin()->StartAddress();
+ uint64_t end = start;
+
+ for (const MemoryRegion& region : m_memoryRegions)
+ {
+ if (end == region.StartAddress())
+ {
+ end = region.EndAddress();
+ }
+ else
+ {
+ MemoryRegion memoryRegion(start, end);
+ assert(memoryRegionsNew.find(memoryRegion) == memoryRegionsNew.end());
+ memoryRegionsNew.insert(memoryRegion);
+
+ start = region.StartAddress();
+ end = region.EndAddress();
+ }
+ }
+
+ assert(start != end);
+ MemoryRegion memoryRegion(start, end);
+ assert(memoryRegionsNew.find(memoryRegion) == memoryRegionsNew.end());
+ memoryRegionsNew.insert(memoryRegion);
+
+ m_memoryRegions = memoryRegionsNew;
+
+ if (g_diagnostics)
+ {
+ TRACE("Memory Regions:\n");
+ uint64_t total = 0;
+ for (const MemoryRegion& region : m_memoryRegions)
+ {
+ region.Print();
+ total += region.Size();
+ }
+ TRACE("Total %ld bytes (%ld pages) to write\n", total, total >> PAGE_SHIFT);
+ }
+}
+
+bool
+CrashInfo::GetStatus(pid_t pid, pid_t* ppid, pid_t* tgid, char** name)
+{
+ char statusPath[128];
+ snprintf(statusPath, sizeof(statusPath), "/proc/%d/status", pid);
+
+ FILE *statusFile = fopen(statusPath, "r");
+ if (statusFile == nullptr)
+ {
+ fprintf(stderr, "GetStatus fopen(%s) FAILED\n", statusPath);
+ return false;
+ }
+
+ *ppid = -1;
+
+ char *line = nullptr;
+ size_t lineLen = 0;
+ ssize_t read;
+ while ((read = getline(&line, &lineLen, statusFile)) != -1)
+ {
+ if (strncmp("PPid:\t", line, 6) == 0)
+ {
+ *ppid = _atoi64(line + 6);
+ }
+ else if (strncmp("Tgid:\t", line, 6) == 0)
+ {
+ *tgid = _atoi64(line + 6);
+ }
+ else if (strncmp("Name:\t", line, 6) == 0)
+ {
+ if (name != nullptr)
+ {
+ char* n = strchr(line + 6, '\n');
+ if (n != nullptr)
+ {
+ *n = '\0';
+ }
+ *name = strdup(line + 6);
+ }
+ }
+ }
+
+ free(line);
+ fclose(statusFile);
+ return true;
+}
diff --git a/src/debug/createdump/crashinfo.h b/src/debug/createdump/crashinfo.h
new file mode 100644
index 0000000000..a03ebe396c
--- /dev/null
+++ b/src/debug/createdump/crashinfo.h
@@ -0,0 +1,71 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+// typedef for our parsing of the auxv variables in /proc/pid/auxv.
+#if defined(__i386) || defined(__ARM_EABI__)
+typedef Elf32_auxv_t elf_aux_entry;
+#elif defined(__x86_64) || defined(__aarch64__)
+typedef Elf64_auxv_t elf_aux_entry;
+#endif
+
+typedef __typeof__(((elf_aux_entry*) 0)->a_un.a_val) elf_aux_val_t;
+
+// All interesting auvx entry types are AT_SYSINFO_EHDR and below
+#define AT_MAX (AT_SYSINFO_EHDR + 1)
+
+class CrashInfo : public ICLRDataEnumMemoryRegionsCallback
+{
+private:
+ LONG m_ref; // reference count
+ pid_t m_pid; // pid
+ pid_t m_ppid; // parent pid
+ pid_t m_tgid; // process group
+ char* m_name; // exe name
+ DataTarget& m_dataTarget; // read process memory
+ std::array<elf_aux_val_t, AT_MAX> m_auxvValues; // auxv values
+ std::vector<elf_aux_entry> m_auxvEntries; // full auxv entries
+ std::vector<ThreadInfo*> m_threads; // threads found and suspended
+ std::set<MemoryRegion> m_moduleMappings; // module memory mappings
+ std::set<MemoryRegion> m_otherMappings; // other memory mappings
+ std::set<MemoryRegion> m_memoryRegions; // memory regions from DAC, etc.
+
+public:
+ CrashInfo(pid_t pid, DataTarget& dataTarget);
+ virtual ~CrashInfo();
+ bool EnumerateAndSuspendThreads();
+ bool GatherCrashInfo(const char* pszExePath, MINIDUMP_TYPE minidumpType);
+ void ResumeThreads();
+ static bool GetStatus(pid_t pid, pid_t* ppid, pid_t* tgid, char** name);
+
+ const pid_t Pid() const { return m_pid; }
+ const pid_t Ppid() const { return m_ppid; }
+ const pid_t Tgid() const { return m_tgid; }
+ const char* Name() const { return m_name; }
+
+ const std::vector<ThreadInfo*> Threads() const { return m_threads; }
+ const std::set<MemoryRegion> ModuleMappings() const { return m_moduleMappings; }
+ const std::set<MemoryRegion> OtherMappings() const { return m_otherMappings; }
+ const std::set<MemoryRegion> MemoryRegions() const { return m_memoryRegions; }
+ const std::vector<elf_aux_entry> AuxvEntries() const { return m_auxvEntries; }
+ const size_t GetAuxvSize() const { return m_auxvEntries.size() * sizeof(elf_aux_entry); }
+
+ // IUnknown
+ STDMETHOD(QueryInterface)(___in REFIID InterfaceId, ___out PVOID* Interface);
+ STDMETHOD_(ULONG, AddRef)();
+ STDMETHOD_(ULONG, Release)();
+
+ // ICLRDataEnumMemoryRegionsCallback
+ virtual HRESULT STDMETHODCALLTYPE EnumMemoryRegion(
+ /* [in] */ CLRDATA_ADDRESS address,
+ /* [in] */ ULONG32 size);
+
+private:
+ bool GetAuxvEntries();
+ bool EnumerateModuleMappings();
+ bool EnumerateMemoryRegionsWithDAC(const char* pszExePath, MINIDUMP_TYPE minidumpType);
+ bool GetDSOInfo();
+ bool ReadMemory(void* address, void* buffer, size_t size);
+ void InsertMemoryRegion(uint64_t address, size_t size);
+ void CombineMemoryRegions();
+};
diff --git a/src/debug/createdump/createdump.cpp b/src/debug/createdump/createdump.cpp
new file mode 100644
index 0000000000..863b3ec447
--- /dev/null
+++ b/src/debug/createdump/createdump.cpp
@@ -0,0 +1,88 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include "createdump.h"
+
+bool g_diagnostics = true;
+
+//
+// Create a minidump using the DAC's enum memory regions interface
+//
+static bool
+CreateDump(const char* pszExePath, const char* dumpPathTemplate, pid_t pid, MINIDUMP_TYPE minidumpType)
+{
+ DataTarget* dataTarget = new DataTarget(pid);
+ CrashInfo* crashInfo = new CrashInfo(pid, *dataTarget);
+ DumpWriter* dumpWriter = new DumpWriter(*dataTarget, *crashInfo);
+ ArrayHolder<char> dumpPath = new char[MAX_LONGPATH];
+ bool result = false;
+
+ // Suspend all the threads in the target process and build the list of threads
+ if (!crashInfo->EnumerateAndSuspendThreads())
+ {
+ goto exit;
+ }
+ // The initialize the data target's ReadVirtual support (opens /proc/$pid/mem)
+ if (!dataTarget->Initialize(crashInfo))
+ {
+ goto exit;
+ }
+ // Gather all the info about the process, threads (registers, etc.) and memory regions
+ if (!crashInfo->GatherCrashInfo(pszExePath, minidumpType))
+ {
+ goto exit;
+ }
+ snprintf(dumpPath, MAX_LONGPATH, dumpPathTemplate, pid);
+ if (!dumpWriter->OpenDump(dumpPath))
+ {
+ goto exit;
+ }
+ if (!dumpWriter->WriteDump())
+ {
+ goto exit;
+ }
+ result = true;
+exit:
+ dumpWriter->Release();
+ crashInfo->ResumeThreads();
+ crashInfo->Release();
+ dataTarget->Release();
+ return result;
+}
+
+//
+// main entry point
+//
+int __cdecl main(const int argc, const char* argv[])
+{
+ const char* dumpPathTemplate = "/tmp/coredump.%lu";
+
+ char* diagnostics = getenv("COMPlus_CreateDumpDiagnostics");
+ g_diagnostics = diagnostics != nullptr && strcmp(diagnostics, "1") == 0;
+
+ int exitCode = PAL_InitializeDLL();
+ if (exitCode != 0)
+ {
+ fprintf(stderr, "PAL_Initialize FAILED %d\n", exitCode);
+ return exitCode;
+ }
+ pid_t pid;
+ if (argc < 2)
+ {
+ fprintf(stderr, "Not enough arguments\n");
+ exitCode = -1;
+ goto exit;
+ }
+ pid = _atoi64(argv[1]);
+
+ //if (!CreateDump(argv[0], dumpPathTemplate, pid, MiniDumpWithPrivateReadWriteMemory))
+ if (!CreateDump(argv[0], dumpPathTemplate, pid, MiniDumpNormal))
+ {
+ exitCode = -1;
+ goto exit;
+ }
+exit:
+ PAL_TerminateEx(exitCode);
+ return exitCode;
+} \ No newline at end of file
diff --git a/src/debug/createdump/createdump.h b/src/debug/createdump/createdump.h
new file mode 100644
index 0000000000..5a8d52d27f
--- /dev/null
+++ b/src/debug/createdump/createdump.h
@@ -0,0 +1,56 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#define ___in _SAL1_Source_(__in, (), _In_)
+#define ___out _SAL1_Source_(__out, (), _Out_)
+
+#ifndef _countof
+#define _countof(x) (sizeof(x)/sizeof(x[0]))
+#endif
+
+extern bool g_diagnostics;
+
// Diagnostic trace macro. Wrapped in do/while(0) so it expands safely as a
// single statement inside if/else chains (the bare if-block form was a
// dangling-else hazard).
#define TRACE(args...) \
    do { \
        if (g_diagnostics) { \
            printf(args); \
        } \
    } while (0)
+
+#include <winternl.h>
+#include <winver.h>
+#include <windows.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <string.h>
+#include <corhdr.h>
+#include <cor.h>
+#include <corsym.h>
+#include <clrdata.h>
+#include <xclrdata.h>
+#include <corerror.h>
+#include <cordebug.h>
+#include <xcordebug.h>
+#include <mscoree.h>
+#include <dumpcommon.h>
+#include <arrayholder.h>
+#include <unistd.h>
+#include <signal.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ptrace.h>
+#include <sys/user.h>
+#include <sys/wait.h>
+#include <sys/procfs.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <elf.h>
+#include <link.h>
+#include <map>
+#include <set>
+#include <vector>
+#include "datatarget.h"
+#include "threadinfo.h"
+#include "memoryregion.h"
+#include "crashinfo.h"
+#include "dumpwriter.h"
diff --git a/src/debug/createdump/datatarget.cpp b/src/debug/createdump/datatarget.cpp
new file mode 100644
index 0000000000..b8c09e05c8
--- /dev/null
+++ b/src/debug/createdump/datatarget.cpp
@@ -0,0 +1,263 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include "createdump.h"
+
+#define IMAGE_FILE_MACHINE_AMD64 0x8664 // AMD64 (K8)
+
// Constructs a data target for the given process id. The /proc/<pid>/mem
// handle is not opened until Initialize() is called.
DataTarget::DataTarget(pid_t pid) :
    m_ref(1),
    m_pid(pid),
    m_fd(-1),
    m_crashInfo(nullptr)
{
}
+
DataTarget::~DataTarget()
{
    // Close the /proc/<pid>/mem handle if Initialize() opened it.
    if (m_fd != -1)
    {
        close(m_fd);
        m_fd = -1;
    }
}
+
+bool
+DataTarget::Initialize(CrashInfo * crashInfo)
+{
+ char memPath[128];
+ _snprintf_s(memPath, sizeof(memPath), sizeof(memPath), "/proc/%lu/mem", m_pid);
+
+ m_fd = open(memPath, O_RDONLY);
+ if (m_fd == -1)
+ {
+ fprintf(stderr, "open(%s) FAILED %d (%s)\n", memPath, errno, strerror(errno));
+ return false;
+ }
+ m_crashInfo = crashInfo;
+ return true;
+}
+
+STDMETHODIMP
+DataTarget::QueryInterface(
+ ___in REFIID InterfaceId,
+ ___out PVOID* Interface
+ )
+{
+ if (InterfaceId == IID_IUnknown ||
+ InterfaceId == IID_ICLRDataTarget)
+ {
+ *Interface = (ICLRDataTarget*)this;
+ AddRef();
+ return S_OK;
+ }
+ else if (InterfaceId == IID_ICorDebugDataTarget4)
+ {
+ *Interface = (ICorDebugDataTarget4*)this;
+ AddRef();
+ return S_OK;
+ }
+ else
+ {
+ *Interface = NULL;
+ return E_NOINTERFACE;
+ }
+}
+
+STDMETHODIMP_(ULONG)
+DataTarget::AddRef()
+{
+ LONG ref = InterlockedIncrement(&m_ref);
+ return ref;
+}
+
+STDMETHODIMP_(ULONG)
+DataTarget::Release()
+{
+ LONG ref = InterlockedDecrement(&m_ref);
+ if (ref == 0)
+ {
+ delete this;
+ }
+ return ref;
+}
+
+HRESULT STDMETHODCALLTYPE
+DataTarget::GetMachineType(
+ /* [out] */ ULONG32 *machine)
+{
+#ifdef _AMD64_
+ *machine = IMAGE_FILE_MACHINE_AMD64;
+#elif _ARM_
+ *machine = IMAGE_FILE_MACHINE_ARMNT;
+#elif _ARM64_
+ *machine = IMAGE_FILE_MACHINE_ARM64;
+#elif _X86_
+ *machine = IMAGE_FILE_MACHINE_I386;
+#else
+#error Unsupported architecture
+#endif
+ return S_OK;
+}
+
+HRESULT STDMETHODCALLTYPE
+DataTarget::GetPointerSize(
+ /* [out] */ ULONG32 *size)
+{
+#if defined(_AMD64_) || defined(_ARM64_)
+ *size = 8;
+#elif defined(_ARM_) || defined(_X86_)
+ *size = 4;
+#else
+#error Unsupported architecture
+#endif
+ return S_OK;
+}
+
+HRESULT STDMETHODCALLTYPE
+DataTarget::GetImageBase(
+ /* [string][in] */ LPCWSTR moduleName,
+ /* [out] */ CLRDATA_ADDRESS *baseAddress)
+{
+ assert(m_crashInfo != nullptr);
+ *baseAddress = 0;
+
+ char tempModuleName[MAX_PATH];
+ int length = WideCharToMultiByte(CP_ACP, 0, moduleName, -1, tempModuleName, sizeof(tempModuleName), NULL, NULL);
+ if (length > 0)
+ {
+ for (const MemoryRegion& image : m_crashInfo->ModuleMappings())
+ {
+ const char *name = strrchr(image.FileName(), '/');
+ if (name != nullptr)
+ {
+ name++;
+ }
+ else
+ {
+ name = image.FileName();
+ }
+ if (strcmp(name, tempModuleName) == 0)
+ {
+ *baseAddress = image.StartAddress();
+ return S_OK;
+ }
+ }
+ }
+ return E_FAIL;
+}
+
+HRESULT STDMETHODCALLTYPE
+DataTarget::ReadVirtual(
+ /* [in] */ CLRDATA_ADDRESS address,
+ /* [length_is][size_is][out] */ PBYTE buffer,
+ /* [in] */ ULONG32 size,
+ /* [optional][out] */ ULONG32 *done)
+{
+ assert(m_fd != -1);
+ size_t read = pread64(m_fd, buffer, size, (off64_t)address);
+ if (read == -1)
+ {
+ fprintf(stderr, "ReadVirtual FAILED %016lx %08x\n", address, size);
+ *done = 0;
+ return E_FAIL;
+ }
+ *done = read;
+ return S_OK;
+}
+
// Not needed for dump generation; asserts if ever called.
HRESULT STDMETHODCALLTYPE
DataTarget::WriteVirtual(
    /* [in] */ CLRDATA_ADDRESS address,
    /* [size_is][in] */ PBYTE buffer,
    /* [in] */ ULONG32 size,
    /* [optional][out] */ ULONG32 *done)
{
    assert(false);
    return E_NOTIMPL;
}
+
// Not needed for dump generation; asserts if ever called.
HRESULT STDMETHODCALLTYPE
DataTarget::GetTLSValue(
    /* [in] */ ULONG32 threadID,
    /* [in] */ ULONG32 index,
    /* [out] */ CLRDATA_ADDRESS* value)
{
    assert(false);
    return E_NOTIMPL;
}
+
// Not needed for dump generation; asserts if ever called.
HRESULT STDMETHODCALLTYPE
DataTarget::SetTLSValue(
    /* [in] */ ULONG32 threadID,
    /* [in] */ ULONG32 index,
    /* [in] */ CLRDATA_ADDRESS value)
{
    assert(false);
    return E_NOTIMPL;
}
+
// Not needed for dump generation; asserts if ever called.
HRESULT STDMETHODCALLTYPE
DataTarget::GetCurrentThreadID(
    /* [out] */ ULONG32* threadID)
{
    assert(false);
    return E_NOTIMPL;
}
+
// Returns the register context captured for the given thread id when the
// threads were suspended. Fails with E_FAIL for an unknown thread id.
HRESULT STDMETHODCALLTYPE
DataTarget::GetThreadContext(
    /* [in] */ ULONG32 threadID,
    /* [in] */ ULONG32 contextFlags,
    /* [in] */ ULONG32 contextSize,
    /* [out, size_is(contextSize)] */ PBYTE context)
{
    assert(m_crashInfo != nullptr);
    if (contextSize < sizeof(CONTEXT))
    {
        assert(false);
        return E_INVALIDARG;
    }
    // Zero the whole buffer first so fields not filled in by the thread's
    // saved state are deterministic.
    memset(context, 0, contextSize);
    for (const ThreadInfo* thread : m_crashInfo->Threads())
    {
        if (thread->Tid() == threadID)
        {
            thread->GetThreadContext(contextFlags, reinterpret_cast<CONTEXT*>(context));
            return S_OK;
        }
    }
    return E_FAIL;
}
+
// Not needed for dump generation; asserts if ever called.
HRESULT STDMETHODCALLTYPE
DataTarget::SetThreadContext(
    /* [in] */ ULONG32 threadID,
    /* [in] */ ULONG32 contextSize,
    /* [out, size_is(contextSize)] */ PBYTE context)
{
    assert(false);
    return E_NOTIMPL;
}
+
// Not needed for dump generation; asserts if ever called.
HRESULT STDMETHODCALLTYPE
DataTarget::Request(
    /* [in] */ ULONG32 reqCode,
    /* [in] */ ULONG32 inBufferSize,
    /* [size_is][in] */ BYTE *inBuffer,
    /* [in] */ ULONG32 outBufferSize,
    /* [size_is][out] */ BYTE *outBuffer)
{
    assert(false);
    return E_NOTIMPL;
}
+
// ICorDebugDataTarget4: not implemented here (no assert — callers are
// expected to tolerate E_NOTIMPL from this one).
HRESULT STDMETHODCALLTYPE
DataTarget::VirtualUnwind(
    /* [in] */ DWORD threadId,
    /* [in] */ ULONG32 contextSize,
    /* [in, out, size_is(contextSize)] */ PBYTE context)
{
    return E_NOTIMPL;
}
diff --git a/src/debug/createdump/datatarget.h b/src/debug/createdump/datatarget.h
new file mode 100644
index 0000000000..802c9d6c2b
--- /dev/null
+++ b/src/debug/createdump/datatarget.h
@@ -0,0 +1,90 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+class CrashInfo;
+
+class DataTarget : public ICLRDataTarget, ICorDebugDataTarget4
+{
+private:
+ LONG m_ref; // reference count
+ pid_t m_pid; // process id
+ int m_fd; // /proc/<pid>/mem handle
+ CrashInfo* m_crashInfo;
+
+public:
+ DataTarget(pid_t pid);
+ virtual ~DataTarget();
+ bool Initialize(CrashInfo* crashInfo);
+
+ //
+ // IUnknown
+ //
+ STDMETHOD(QueryInterface)(___in REFIID InterfaceId, ___out PVOID* Interface);
+ STDMETHOD_(ULONG, AddRef)();
+ STDMETHOD_(ULONG, Release)();
+
+ //
+ // ICLRDataTarget
+ //
+ virtual HRESULT STDMETHODCALLTYPE GetMachineType(
+ /* [out] */ ULONG32 *machine);
+
+ virtual HRESULT STDMETHODCALLTYPE GetPointerSize(
+ /* [out] */ ULONG32 *size);
+
+ virtual HRESULT STDMETHODCALLTYPE GetImageBase(
+ /* [string][in] */ LPCWSTR moduleName,
+ /* [out] */ CLRDATA_ADDRESS *baseAddress);
+
+ virtual HRESULT STDMETHODCALLTYPE ReadVirtual(
+ /* [in] */ CLRDATA_ADDRESS address,
+ /* [length_is][size_is][out] */ PBYTE buffer,
+ /* [in] */ ULONG32 size,
+ /* [optional][out] */ ULONG32 *done);
+
+ virtual HRESULT STDMETHODCALLTYPE WriteVirtual(
+ /* [in] */ CLRDATA_ADDRESS address,
+ /* [size_is][in] */ PBYTE buffer,
+ /* [in] */ ULONG32 size,
+ /* [optional][out] */ ULONG32 *done);
+
+ virtual HRESULT STDMETHODCALLTYPE GetTLSValue(
+ /* [in] */ ULONG32 threadID,
+ /* [in] */ ULONG32 index,
+ /* [out] */ CLRDATA_ADDRESS* value);
+
+ virtual HRESULT STDMETHODCALLTYPE SetTLSValue(
+ /* [in] */ ULONG32 threadID,
+ /* [in] */ ULONG32 index,
+ /* [in] */ CLRDATA_ADDRESS value);
+
+ virtual HRESULT STDMETHODCALLTYPE GetCurrentThreadID(
+ /* [out] */ ULONG32* threadID);
+
+ virtual HRESULT STDMETHODCALLTYPE GetThreadContext(
+ /* [in] */ ULONG32 threadID,
+ /* [in] */ ULONG32 contextFlags,
+ /* [in] */ ULONG32 contextSize,
+ /* [out, size_is(contextSize)] */ PBYTE context);
+
+ virtual HRESULT STDMETHODCALLTYPE SetThreadContext(
+ /* [in] */ ULONG32 threadID,
+ /* [in] */ ULONG32 contextSize,
+ /* [in, size_is(contextSize)] */ PBYTE context);
+
+ virtual HRESULT STDMETHODCALLTYPE Request(
+ /* [in] */ ULONG32 reqCode,
+ /* [in] */ ULONG32 inBufferSize,
+ /* [size_is][in] */ BYTE *inBuffer,
+ /* [in] */ ULONG32 outBufferSize,
+ /* [size_is][out] */ BYTE *outBuffer);
+
+ //
+ // ICorDebugDataTarget4
+ //
+ virtual HRESULT STDMETHODCALLTYPE VirtualUnwind(
+ /* [in] */ DWORD threadId,
+ /* [in] */ ULONG32 contextSize,
+ /* [in, out, size_is(contextSize)] */ PBYTE context);
+}; \ No newline at end of file
diff --git a/src/debug/createdump/dumpwriter.cpp b/src/debug/createdump/dumpwriter.cpp
new file mode 100644
index 0000000000..ef3adac1f8
--- /dev/null
+++ b/src/debug/createdump/dumpwriter.cpp
@@ -0,0 +1,486 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include "createdump.h"
+
// Takes references on the data target and crash info for the writer's
// lifetime; the output file itself is opened later by OpenDump().
DumpWriter::DumpWriter(DataTarget& dataTarget, CrashInfo& crashInfo) :
    m_ref(1),
    m_fd(-1),
    m_dataTarget(dataTarget),
    m_crashInfo(crashInfo)
{
    m_dataTarget.AddRef();
    m_crashInfo.AddRef();
}
+
DumpWriter::~DumpWriter()
{
    // Close the dump file if it was opened, then drop the references taken
    // in the constructor.
    if (m_fd != -1)
    {
        close(m_fd);
        m_fd = -1;
    }
    m_dataTarget.Release();
    m_crashInfo.Release();
}
+
+STDMETHODIMP
+DumpWriter::QueryInterface(
+ ___in REFIID InterfaceId,
+ ___out PVOID* Interface)
+{
+ if (InterfaceId == IID_IUnknown)
+ {
+ *Interface = (IUnknown*)this;
+ AddRef();
+ return S_OK;
+ }
+ else
+ {
+ *Interface = NULL;
+ return E_NOINTERFACE;
+ }
+}
+
+STDMETHODIMP_(ULONG)
+DumpWriter::AddRef()
+{
+ LONG ref = InterlockedIncrement(&m_ref);
+ return ref;
+}
+
+STDMETHODIMP_(ULONG)
+DumpWriter::Release()
+{
+ LONG ref = InterlockedDecrement(&m_ref);
+ if (ref == 0)
+ {
+ delete this;
+ }
+ return ref;
+}
+
+bool
+DumpWriter::OpenDump(char* dumpFileName)
+{
+ m_fd = open(dumpFileName, O_WRONLY|O_CREAT|O_TRUNC, 0664);
+ if (m_fd == -1)
+ {
+ fprintf(stderr, "Could not open output %s: %s\n", dumpFileName, strerror(errno));
+ return false;
+ }
+ printf("Writing core file %s\n", dumpFileName);
+ return true;
+}
+
// Write the core dump file:
// ELF header
// Single section header (Shdr) for 64 bit program header count
// Phdr for the PT_NOTE
// PT_LOAD
// PT_NOTEs
// process info (prpsinfo_t)
// NT_FILE entries
// threads
// alignment
// memory blocks
bool
DumpWriter::WriteDump()
{
    // Write the ELF header
    Ehdr ehdr;
    memset(&ehdr, 0, sizeof(Ehdr));
    ehdr.e_ident[0] = ELFMAG0;
    ehdr.e_ident[1] = ELFMAG1;
    ehdr.e_ident[2] = ELFMAG2;
    ehdr.e_ident[3] = ELFMAG3;
    ehdr.e_ident[4] = ELF_CLASS;

    // Note: The sex is the current system running createdump —
    // big or little endian. This means you have to create
    // the core on a system that matches the target's byte order
    // to debug it properly.
    ehdr.e_ident[5] = sex() ? ELFDATA2MSB : ELFDATA2LSB;
    ehdr.e_ident[6] = EV_CURRENT;
    ehdr.e_ident[EI_OSABI] = ELFOSABI_LINUX;

    ehdr.e_type = ET_CORE;
    ehdr.e_machine = ELF_ARCH;
    ehdr.e_version = EV_CURRENT;
    ehdr.e_shoff = sizeof(Ehdr);
    ehdr.e_phoff = sizeof(Ehdr) + sizeof(Shdr);

    ehdr.e_ehsize = sizeof(Ehdr);
    ehdr.e_phentsize = sizeof(Phdr);
    ehdr.e_shentsize = sizeof(Shdr);

    // The ELF header only allows UINT16 for the number of program
    // headers. In a core dump this equates to PT_NOTE and PT_LOAD.
    //
    // When there are more than 65534 program headers the first section
    // entry is used to store the actual program header count.

    // PT_NOTE + number of memory regions
    uint64_t phnum = 1 + m_crashInfo.MemoryRegions().size();

    if (phnum < PH_HDR_CANARY) {
        ehdr.e_phnum = phnum;
    }
    else {
        ehdr.e_phnum = PH_HDR_CANARY;
    }

    if (!WriteData(&ehdr, sizeof(Ehdr))) {
        return false;
    }

    // File offset where the PT_NOTE payload will start, and its size.
    size_t offset = sizeof(Ehdr) + sizeof(Shdr) + (phnum * sizeof(Phdr));
    size_t filesz = GetProcessInfoSize() + GetAuxvInfoSize() + GetThreadInfoSize() + GetNTFileInfoSize();

    // Add single section containing the actual count
    // of the program headers to be written.
    Shdr shdr;
    memset(&shdr, 0, sizeof(shdr));
    shdr.sh_info = phnum;
    // When a section header offset is present but ehdr's section count = 0
    // then sh_size is expected to indicate the size of the
    // section array, or 1 in our case.
    shdr.sh_size = 1;
    if (!WriteData(&shdr, sizeof(shdr))) {
        return false;
    }

    // PT_NOTE header
    Phdr phdr;
    memset(&phdr, 0, sizeof(Phdr));
    phdr.p_type = PT_NOTE;
    phdr.p_offset = offset;
    phdr.p_filesz = filesz;

    if (!WriteData(&phdr, sizeof(phdr))) {
        return false;
    }

    // PT_NOTE sections must end on 4 byte boundary
    // We output the NT_FILE, AUX and Thread entries
    // AUX is aligned, NT_FILE is aligned and then we
    // check to pad end of the thread list
    phdr.p_type = PT_LOAD;
    phdr.p_align = 4096;

    // Padding needed between the end of the notes and the first page-aligned
    // memory segment.
    size_t finalNoteAlignment = phdr.p_align - ((offset + filesz) % phdr.p_align);
    if (finalNoteAlignment == phdr.p_align) {
        finalNoteAlignment = 0;
    }
    offset += finalNoteAlignment;

    printf("Writing memory region headers to core file\n");

    // Write memory region note headers
    for (const MemoryRegion& memoryRegion : m_crashInfo.MemoryRegions())
    {
        phdr.p_flags = memoryRegion.Permissions();
        phdr.p_vaddr = memoryRegion.StartAddress();
        phdr.p_memsz = memoryRegion.Size();

        // Advance past the previous segment's data; on the first iteration
        // this skips over the PT_NOTE payload (filesz still holds its size).
        offset += filesz;
        phdr.p_filesz = filesz = memoryRegion.Size();
        phdr.p_offset = offset;

        if (!WriteData(&phdr, sizeof(phdr))) {
            return false;
        }
    }

    // Write process info data to core file
    if (!WriteProcessInfo()) {
        return false;
    }

    // Write auxv data to core file
    if (!WriteAuxv()) {
        return false;
    }

    // Write NT_FILE entries to the core file
    if (!WriteNTFileInfo()) {
        return false;
    }

    printf("Writing %ld thread entries to core file\n", m_crashInfo.Threads().size());

    // Write all the thread's state and registers
    for (const ThreadInfo* thread : m_crashInfo.Threads())
    {
        if (!WriteThread(*thread, 0)) {
            return false;
        }
    }

    // Zero out the end of the PT_NOTE section to the boundary
    // and then laydown the memory blocks
    if (finalNoteAlignment > 0) {
        assert(finalNoteAlignment < sizeof(m_tempBuffer));
        memset(m_tempBuffer, 0, finalNoteAlignment);
        if (!WriteData(m_tempBuffer, finalNoteAlignment)) {
            return false;
        }
    }

    printf("Writing %ld memory regions to core file\n", m_crashInfo.MemoryRegions().size());

    // Read from target process and write memory regions to core
    for (const MemoryRegion& memoryRegion : m_crashInfo.MemoryRegions())
    {
        uint32_t size = memoryRegion.Size();
        uint64_t address = memoryRegion.StartAddress();

        // Copy the region through the bounded temp buffer; ReadVirtual may
        // return fewer bytes than requested, so loop on the returned count.
        while (size > 0)
        {
            uint32_t bytesToRead = std::min(size, (uint32_t)sizeof(m_tempBuffer));
            uint32_t read = 0;

            if (FAILED(m_dataTarget.ReadVirtual(address, m_tempBuffer, bytesToRead, &read))) {
                fprintf(stderr, "ReadVirtual(%016lx, %08x) FAILED\n", address, bytesToRead);
                return false;
            }

            if (!WriteData(m_tempBuffer, read)) {
                return false;
            }

            address += read;
            size -= read;
        }
    }

    return true;
}
+
// Writes the NT_PRPSINFO note (process state, pids and executable name).
bool
DumpWriter::WriteProcessInfo()
{
    prpsinfo_t processInfo;
    memset(&processInfo, 0, sizeof(processInfo));
    processInfo.pr_sname = 'R';
    processInfo.pr_pid = m_crashInfo.Pid();
    processInfo.pr_ppid = m_crashInfo.Ppid();
    processInfo.pr_pgrp = m_crashInfo.Tgid();
    strcpy_s(processInfo.pr_fname, sizeof(processInfo.pr_fname), m_crashInfo.Name());

    Nhdr nhdr;
    memset(&nhdr, 0, sizeof(nhdr));
    // Name is "CORE" + NUL (5 bytes); the 8-byte name field's pad bytes are
    // stuffed with "PRP" for easier hex-editor debugging.
    nhdr.n_namesz = 5;
    nhdr.n_descsz = sizeof(prpsinfo_t);
    nhdr.n_type = NT_PRPSINFO;

    printf("Writing process information to core file\n");

    // Write process info data to core file
    if (!WriteData(&nhdr, sizeof(nhdr)) ||
        !WriteData("CORE\0PRP", 8) ||
        !WriteData(&processInfo, sizeof(prpsinfo_t))) {
        return false;
    }
    return true;
}
+
// Writes the NT_AUXV note containing the auxv entries captured from
// /proc/<pid>/auxv.
bool
DumpWriter::WriteAuxv()
{
    Nhdr nhdr;
    memset(&nhdr, 0, sizeof(nhdr));
    // "CORE" + NUL name, padded to 8 bytes with "AUX" for readability.
    nhdr.n_namesz = 5;
    nhdr.n_descsz = m_crashInfo.GetAuxvSize();
    nhdr.n_type = NT_AUXV;

    printf("Writing %ld auxv entries to core file\n", m_crashInfo.AuxvEntries().size());

    if (!WriteData(&nhdr, sizeof(nhdr)) ||
        !WriteData("CORE\0AUX", 8)) {
        return false;
    }
    for (const auto& auxvEntry : m_crashInfo.AuxvEntries())
    {
        if (!WriteData(&auxvEntry, sizeof(auxvEntry))) {
            return false;
        }
    }
    return true;
}
+
// One row of the NT_FILE note's mapping table: the mapping's address range
// and its file offset, as laid out by the Linux core-dump format.
struct NTFileEntry
{
    uint64_t StartAddress;
    uint64_t EndAddress;
    uint64_t Offset;
};
+
+// Calculate the NT_FILE entries total size
+size_t
+DumpWriter::GetNTFileInfoSize(size_t* alignmentBytes)
+{
+ size_t count = m_crashInfo.ModuleMappings().size();
+ size_t size = 0;
+
+ // Header, CORE, entry count, page size
+ size = sizeof(Nhdr) + sizeof(NTFileEntry);
+
+ // start_address, end_address, offset
+ size += count * sizeof(NTFileEntry);
+
+ // \0 terminator for each filename
+ size += count;
+
+ // File name storage needed
+ for (const MemoryRegion& image : m_crashInfo.ModuleMappings()) {
+ size += strlen(image.FileName());
+ }
+ // Notes must end on 4 byte alignment
+ size_t alignmentBytesNeeded = 4 - (size % 4);
+ size += alignmentBytesNeeded;
+
+ if (alignmentBytes != nullptr) {
+ *alignmentBytes = alignmentBytesNeeded;
+ }
+ return size;
+}
+
// Write NT_FILE entries to the PT_NOTE section
//
// Nhdr (NT_FILE)
// Total entries
// Page size
// [0] start_address end_address offset
// [1] start_address end_address offset
// [file name]\0[file name]\0...
bool
DumpWriter::WriteNTFileInfo()
{
    Nhdr nhdr;
    memset(&nhdr, 0, sizeof(nhdr));

    // CORE + \0 and we align on 4 byte boundary
    // so we can use CORE\0FIL for easier hex debugging
    nhdr.n_namesz = 5;
    nhdr.n_type = NT_FILE; // "FILE"

    // Size of payload for NT_FILE after CORE tag written: total size minus
    // the note header and the 8-byte name field.
    size_t alignmentBytesNeeded = 0;
    nhdr.n_descsz = GetNTFileInfoSize(&alignmentBytesNeeded) - sizeof(nhdr) - 8;

    size_t count = m_crashInfo.ModuleMappings().size();
    size_t pageSize = PAGE_SIZE;

    printf("Writing %ld NT_FILE entries to core file\n", m_crashInfo.ModuleMappings().size());

    if (!WriteData(&nhdr, sizeof(nhdr)) ||
        !WriteData("CORE\0FIL", 8) ||
        !WriteData(&count, 8) ||
        !WriteData(&pageSize, 8)) {
        return false;
    }

    // First the table of address ranges/offsets...
    for (const MemoryRegion& image : m_crashInfo.ModuleMappings())
    {
        struct NTFileEntry entry { image.StartAddress(), image.EndAddress(), image.Offset() };
        if (!WriteData(&entry, sizeof(entry))) {
            return false;
        }
    }

    // ...then the NUL-terminated file names, in the same order.
    for (const MemoryRegion& image : m_crashInfo.ModuleMappings())
    {
        if (!WriteData(image.FileName(), strlen(image.FileName())) ||
            !WriteData("\0", 1)) {
            return false;
        }
    }

    // Has to end on a 4 byte boundary. Debugger, readelf and such
    // will automatically align on next 4 bytes and look for a PT_NOTE
    // header.
    if (alignmentBytesNeeded) {
        if (!WriteData("\0\0\0\0", alignmentBytesNeeded)) {
            return false;
        }
    }

    return true;
}
+
// Writes the per-thread notes: NT_PRSTATUS (pids, signal, GP registers)
// plus the floating point register notes for x86/x86_64.
bool
DumpWriter::WriteThread(const ThreadInfo& thread, int fatal_signal)
{
    prstatus_t pr;
    memset(&pr, 0, sizeof(pr));

    pr.pr_info.si_signo = fatal_signal;
    pr.pr_cursig = fatal_signal;
    pr.pr_pid = thread.Tid();
    pr.pr_ppid = thread.Ppid();
    pr.pr_pgrp = thread.Tgid();
    memcpy(&pr.pr_reg, thread.GPRegisters(), sizeof(user_regs_struct));

    Nhdr nhdr;
    memset(&nhdr, 0, sizeof(nhdr));

    // Name size is CORE plus the NULL terminator
    // The format requires 4 byte alignment so the
    // value written in 8 bytes. Stuff the last 3
    // bytes with the type of NT_PRSTATUS so it is
    // easier to debug in a hex editor.
    nhdr.n_namesz = 5;
    nhdr.n_descsz = sizeof(prstatus_t);
    nhdr.n_type = NT_PRSTATUS;
    if (!WriteData(&nhdr, sizeof(nhdr)) ||
        !WriteData("CORE\0THR", 8) ||
        !WriteData(&pr, sizeof(prstatus_t))) {
        return false;
    }

#if defined(__i386__) || defined(__x86_64__)
    nhdr.n_descsz = sizeof(user_fpregs_struct);
    nhdr.n_type = NT_FPREGSET;
    if (!WriteData(&nhdr, sizeof(nhdr)) ||
        !WriteData("CORE\0FLT", 8) ||
        !WriteData(thread.FPRegisters(), sizeof(user_fpregs_struct))) {
        return false;
    }
#endif

#if defined(__i386__)
    nhdr.n_descsz = sizeof(user_fpxregs_struct);
    nhdr.n_type = NT_PRXFPREG;
    // NOTE(review): FPRegisters() above is passed without '&' but
    // FPXRegisters() is passed with one — confirm the accessors' return
    // types (pointer vs reference) in threadinfo.h are consistent.
    if (!WriteData(&nhdr, sizeof(nhdr)) ||
        !WriteData("LINUX\0\0\0", 8) ||
        !WriteData(&thread.FPXRegisters(), sizeof(user_fpxregs_struct))) {
        return false;
    }
#endif

    return true;
}
+
+// Write all of the given buffer, handling short writes and EINTR. Return true iff successful.
+bool
+DumpWriter::WriteData(const void* buffer, size_t length)
+{
+ const uint8_t* data = (const uint8_t*)buffer;
+
+ size_t done = 0;
+ while (done < length) {
+ ssize_t written;
+ do {
+ written = write(m_fd, data + done, length - done);
+ } while (written == -1 && errno == EINTR);
+
+ if (written < 1) {
+ fprintf(stderr, "WriteData FAILED %s\n", strerror(errno));
+ return false;
+ }
+ done += written;
+ }
+ return true;
+} \ No newline at end of file
diff --git a/src/debug/createdump/dumpwriter.h b/src/debug/createdump/dumpwriter.h
new file mode 100644
index 0000000000..61e3936338
--- /dev/null
+++ b/src/debug/createdump/dumpwriter.h
@@ -0,0 +1,74 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#ifdef BIT64
+#define ELF_CLASS ELFCLASS64
+#else
+#define ELF_CLASS ELFCLASS32
+#endif
+
+#define Ehdr ElfW(Ehdr)
+#define Phdr ElfW(Phdr)
+#define Shdr ElfW(Shdr)
+#define Nhdr ElfW(Nhdr)
+#define auxv_t ElfW(auxv_t)
+
+#if defined(__x86_64__)
+#define ELF_ARCH EM_X86_64
+#elif defined(__i386__)
+#define ELF_ARCH EM_386
+#elif defined(__arm__)
+#define ELF_ARCH EM_ARM
+#endif
+
+#define PH_HDR_CANARY 0xFFFF
+
+class DumpWriter : IUnknown
+{
+private:
+ LONG m_ref; // reference count
+ int m_fd;
+ DataTarget& m_dataTarget;
+ CrashInfo& m_crashInfo;
+ BYTE m_tempBuffer[0x4000];
+
+public:
+ DumpWriter(DataTarget& dataTarget, CrashInfo& crashInfo);
+ virtual ~DumpWriter();
+ bool OpenDump(char* dumpFileName);
+ bool WriteDump();
+
+ // IUnknown
+ STDMETHOD(QueryInterface)(___in REFIID InterfaceId, ___out PVOID* Interface);
+ STDMETHOD_(ULONG, AddRef)();
+ STDMETHOD_(ULONG, Release)();
+
+private:
+ bool WriteProcessInfo();
+ bool WriteAuxv();
+ size_t GetNTFileInfoSize(size_t* alignmentBytes = nullptr);
+ bool WriteNTFileInfo();
+ bool WriteThread(const ThreadInfo& thread, int fatal_signal);
+ bool WriteData(const void* buffer, size_t length);
+
+ const size_t GetProcessInfoSize() const { return sizeof(Nhdr) + 8 + sizeof(prpsinfo_t); }
+ const size_t GetAuxvInfoSize() const { return sizeof(Nhdr) + 8 + m_crashInfo.GetAuxvSize(); }
+ const size_t GetThreadInfoSize() const
+ {
+ return m_crashInfo.Threads().size() * ((sizeof(Nhdr) + 8 + sizeof(prstatus_t))
+#if defined(__i386__) || defined(__x86_64__)
+ + sizeof(Nhdr) + 8 + sizeof(user_fpregs_struct)
+#endif
+#if defined(__i386__)
+ + sizeof(Nhdr) + 8 + sizeof(user_fpxregs_struct)
+#endif
+ );
+ }
+};
+
// Returns nonzero when running on a big-endian machine: a multi-byte
// integer's lowest-addressed byte is zero there, nonzero on little-endian.
static inline int sex()
{
    const uint16_t probe = 1;
    return *(const unsigned char*)&probe == 0;
}
diff --git a/src/debug/createdump/memoryregion.h b/src/debug/createdump/memoryregion.h
new file mode 100644
index 0000000000..16c4d1c693
--- /dev/null
+++ b/src/debug/createdump/memoryregion.h
@@ -0,0 +1,97 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
// A page-aligned half-open address range [start, end) in the target
// process, optionally associated with a backing file name for NT_FILE
// output.
struct MemoryRegion
{
private:
    uint32_t m_permissions;     // PF_R/PF_W/PF_X flags used for the Phdr
    uint64_t m_startAddress;
    uint64_t m_endAddress;
    uint64_t m_offset;

    // The name used for NT_FILE output
    char* m_fileName;

public:
    // Anonymous region (e.g. DAC-reported memory); marked fully accessible.
    MemoryRegion(uint64_t start, uint64_t end) :
        m_permissions(PF_R | PF_W | PF_X),
        m_startAddress(start),
        m_endAddress(end),
        m_offset(0),
        m_fileName(nullptr)
    {
        assert((start & ~PAGE_MASK) == 0);
        assert((end & ~PAGE_MASK) == 0);
    }

    // Region with explicit permissions, file offset and (possibly null) name.
    MemoryRegion(uint32_t permissions, uint64_t start, uint64_t end, uint64_t offset, char* filename) :
        m_permissions(permissions),
        m_startAddress(start),
        m_endAddress(end),
        m_offset(offset),
        m_fileName(filename)
    {
        assert((start & ~PAGE_MASK) == 0);
        assert((end & ~PAGE_MASK) == 0);
    }

    const uint32_t Permissions() const
    {
        return m_permissions;
    }

    const uint64_t StartAddress() const
    {
        return m_startAddress;
    }

    const uint64_t EndAddress() const
    {
        return m_endAddress;
    }

    const uint64_t Size() const
    {
        return m_endAddress - m_startAddress;
    }

    const uint64_t Offset() const
    {
        return m_offset;
    }

    const char* FileName() const
    {
        return m_fileName;
    }

    // Deliberately NOT a total order: two regions that overlap compare
    // "equivalent" (neither is less than the other), so std::set::find can
    // locate an existing region overlapping a probe region. Only disjoint
    // regions are strictly ordered by address.
    bool operator<(const MemoryRegion& rhs) const
    {
        return (m_startAddress < rhs.m_startAddress) && (m_endAddress <= rhs.m_startAddress);
    }

    // True when rhs lies entirely within this region.
    bool Contains(const MemoryRegion& rhs) const
    {
        return (m_startAddress <= rhs.m_startAddress) && (m_endAddress >= rhs.m_endAddress);
    }

    // Frees the file name with free() (so it is presumably strdup/malloc
    // allocated — confirm at the call sites). Cleanup is explicit rather
    // than in a destructor; instances are copied by value into std::set.
    void Cleanup()
    {
        if (m_fileName != nullptr)
        {
            free(m_fileName);
            m_fileName = nullptr;
        }
    }

    // Diagnostic dump of the region (size printed in pages).
    void Print() const
    {
        if (m_fileName != nullptr) {
            TRACE("%016lx - %016lx (%04ld) %016lx %x %s\n", m_startAddress, m_endAddress, (Size() >> PAGE_SHIFT), m_offset, m_permissions, m_fileName);
        }
        else {
            TRACE("%016lx - %016lx (%04ld) %02x\n", m_startAddress, m_endAddress, (Size() >> PAGE_SHIFT), m_permissions);
        }
    }
};
diff --git a/src/debug/createdump/threadinfo.cpp b/src/debug/createdump/threadinfo.cpp
new file mode 100644
index 0000000000..52af060b89
--- /dev/null
+++ b/src/debug/createdump/threadinfo.cpp
@@ -0,0 +1,154 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include "createdump.h"
+
+ThreadInfo::ThreadInfo(pid_t tid) :
+ m_tid(tid)
+{
+}
+
+ThreadInfo::~ThreadInfo()
+{
+}
+
+bool
+ThreadInfo::Initialize()
+{
+ if (!CrashInfo::GetStatus(m_tid, &m_ppid, &m_tgid, nullptr))
+ {
+ return false;
+ }
+ if (!GetRegisters())
+ {
+ return false;
+ }
+ TRACE("Thread %04x RIP %016llx RSP %016llx\n", m_tid, m_gpRegisters.rip, m_gpRegisters.rsp);
+ return true;
+}
+
+void
+ThreadInfo::ResumeThread()
+{
+ if (ptrace(PTRACE_DETACH, m_tid, nullptr, nullptr) != -1)
+ {
+ int waitStatus;
+ waitpid(m_tid, &waitStatus, __WALL);
+ }
+}
+
+bool
+ThreadInfo::GetRegisters()
+{
+ if (ptrace((__ptrace_request)PTRACE_GETREGS, m_tid, nullptr, &m_gpRegisters) == -1)
+ {
+ fprintf(stderr, "ptrace(GETREGS, %d) FAILED %d (%s)\n", m_tid, errno, strerror(errno));
+ return false;
+ }
+ if (ptrace((__ptrace_request)PTRACE_GETFPREGS, m_tid, nullptr, &m_fpRegisters) == -1)
+ {
+ fprintf(stderr, "ptrace(GETFPREGS, %d) FAILED %d (%s)\n", m_tid, errno, strerror(errno));
+ return false;
+ }
+#if defined(__i386__)
+ if (ptrace((__ptrace_request)PTRACE_GETFPXREGS, m_tid, nullptr, &m_fpxRegisters) == -1)
+ {
+ fprintf(stderr, "ptrace(GETFPXREGS, %d) FAILED %d (%s)\n", m_tid, errno, strerror(errno));
+ return false;
+ }
+#endif
+ return true;
+}
+
+void
+ThreadInfo::GetThreadStack(const CrashInfo& crashInfo, uint64_t* startAddress, size_t* size) const
+{
+ *startAddress = m_gpRegisters.rsp & PAGE_MASK;
+ *size = 4 * PAGE_SIZE;
+
+ for (const MemoryRegion& mapping : crashInfo.OtherMappings())
+ {
+ if (*startAddress >= mapping.StartAddress() && *startAddress < mapping.EndAddress())
+ {
+ // Use the mapping found for the size of the thread's stack
+ *size = mapping.EndAddress() - *startAddress;
+
+ if (g_diagnostics)
+ {
+ TRACE("Thread %04x stack found in other mapping (size %08lx): ", m_tid, *size);
+ mapping.Print();
+ }
+ break;
+ }
+ }
+}
+
+void
+ThreadInfo::GetThreadCode(uint64_t* startAddress, size_t* size) const
+{
+ *startAddress = m_gpRegisters.rip & PAGE_MASK;
+ *size = PAGE_SIZE;
+}
+
+void
+ThreadInfo::GetThreadContext(uint32_t flags, CONTEXT* context) const
+{
+ context->ContextFlags = flags;
+ if ((flags & CONTEXT_CONTROL) == CONTEXT_CONTROL)
+ {
+ context->Rbp = m_gpRegisters.rbp;
+ context->Rip = m_gpRegisters.rip;
+ context->SegCs = m_gpRegisters.cs;
+ context->EFlags = m_gpRegisters.eflags;
+ context->SegSs = m_gpRegisters.ss;
+ context->Rsp = m_gpRegisters.rsp;
+ }
+ if ((flags & CONTEXT_INTEGER) == CONTEXT_INTEGER)
+ {
+ context->Rdi = m_gpRegisters.rdi;
+ context->Rsi = m_gpRegisters.rsi;
+ context->Rbx = m_gpRegisters.rbx;
+ context->Rdx = m_gpRegisters.rdx;
+ context->Rcx = m_gpRegisters.rcx;
+ context->Rax = m_gpRegisters.rax;
+ context->R8 = m_gpRegisters.r8;
+ context->R9 = m_gpRegisters.r9;
+ context->R10 = m_gpRegisters.r10;
+ context->R11 = m_gpRegisters.r11;
+ context->R12 = m_gpRegisters.r12;
+ context->R13 = m_gpRegisters.r13;
+ context->R14 = m_gpRegisters.r14;
+ context->R15 = m_gpRegisters.r15;
+ }
+ if ((flags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS)
+ {
+ context->SegDs = m_gpRegisters.ds;
+ context->SegEs = m_gpRegisters.es;
+ context->SegFs = m_gpRegisters.fs;
+ context->SegGs = m_gpRegisters.gs;
+ }
+ if ((flags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT)
+ {
+ context->FltSave.ControlWord = m_fpRegisters.cwd;
+ context->FltSave.StatusWord = m_fpRegisters.swd;
+ context->FltSave.TagWord = m_fpRegisters.ftw;
+ context->FltSave.ErrorOpcode = m_fpRegisters.fop;
+
+ context->FltSave.ErrorOffset = (DWORD)m_fpRegisters.rip;
+ context->FltSave.ErrorSelector = *(((WORD *)&m_fpRegisters.rip) + 2);
+ context->FltSave.DataOffset = (DWORD)m_fpRegisters.rdp;
+ context->FltSave.DataSelector = *(((WORD *)&m_fpRegisters.rdp) + 2);
+
+ context->FltSave.MxCsr = m_fpRegisters.mxcsr;
+ context->FltSave.MxCsr_Mask = m_fpRegisters.mxcr_mask;
+
+ assert(sizeof(context->FltSave.FloatRegisters) == sizeof(m_fpRegisters.st_space));
+ memcpy(context->FltSave.FloatRegisters, m_fpRegisters.st_space, sizeof(context->FltSave.FloatRegisters));
+
+ assert(sizeof(context->FltSave.XmmRegisters) == sizeof(m_fpRegisters.xmm_space));
+ memcpy(context->FltSave.XmmRegisters, m_fpRegisters.xmm_space, sizeof(context->FltSave.XmmRegisters));
+ }
+ // TODO: debug registers?
+ // TODO: x86 registers
+}
diff --git a/src/debug/createdump/threadinfo.h b/src/debug/createdump/threadinfo.h
new file mode 100644
index 0000000000..a2aa3ce75a
--- /dev/null
+++ b/src/debug/createdump/threadinfo.h
@@ -0,0 +1,41 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+class CrashInfo;
+
+class ThreadInfo
+{
+private:
+ pid_t m_tid; // thread id
+ pid_t m_ppid; // parent process
+ pid_t m_tgid; // thread group
+ struct user_regs_struct m_gpRegisters; // general purpose registers
+ struct user_fpregs_struct m_fpRegisters; // floating point registers
+#if defined(__i386__)
+ struct user_fpxregs_struct m_fpxRegisters; // x86 floating point registers
+#endif
+
+public:
+ ThreadInfo(pid_t tid);
+ ~ThreadInfo();
+ bool Initialize();
+ void ResumeThread();
+ void GetThreadStack(const CrashInfo& crashInfo, uint64_t* startAddress, size_t* size) const;
+ void GetThreadCode(uint64_t* startAddress, size_t* size) const;
+ void GetThreadContext(uint32_t flags, CONTEXT* context) const;
+
+ const pid_t Tid() const { return m_tid; }
+ const pid_t Ppid() const { return m_ppid; }
+ const pid_t Tgid() const { return m_tgid; }
+
+ const user_regs_struct* GPRegisters() const { return &m_gpRegisters; }
+ const user_fpregs_struct* FPRegisters() const { return &m_fpRegisters; }
+#if defined(__i386__)
+ const user_fpxregs_struct* FPXRegisters() const { return &m_fpxRegisters; }
+#endif
+
+private:
+ bool GetRegisters();
+};
+
diff --git a/src/debug/daccess/dacfn.cpp b/src/debug/daccess/dacfn.cpp
index 0a167418a1..2f7a98de1a 100644
--- a/src/debug/daccess/dacfn.cpp
+++ b/src/debug/daccess/dacfn.cpp
@@ -1386,6 +1386,8 @@ bool DacTargetConsistencyAssertsEnabled()
//
void DacEnumCodeForStackwalk(TADDR taCallEnd)
{
+ if (taCallEnd == 0)
+ return;
//
// x86 stack walkers often end up having to guess
// about what's a return address on the stack.
diff --git a/src/debug/daccess/enummem.cpp b/src/debug/daccess/enummem.cpp
index 027fe59543..9305bba488 100644
--- a/src/debug/daccess/enummem.cpp
+++ b/src/debug/daccess/enummem.cpp
@@ -22,6 +22,10 @@
#include "binder.h"
#include "win32threadpool.h"
+#ifdef FEATURE_PAL
+#include <dactablerva.h>
+#endif
+
#ifdef FEATURE_APPX
#include "appxutil.h"
#endif // FEATURE_APPX
@@ -220,6 +224,11 @@ HRESULT ClrDataAccess::EnumMemCLRStatic(IN CLRDataEnumMemoryFlags flags)
#define DEFINE_DACVAR_SVR(id_type, size_type, id, var) \
ReportMem(m_globalBase + g_dacGlobals.id, sizeof(size_type));
+#ifdef FEATURE_PAL
+ // Add the dac table memory in coreclr
+ CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED ( ReportMem(m_globalBase + DAC_TABLE_RVA, sizeof(g_dacGlobals)); )
+#endif
+
// Cannot use CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED
// around conditional preprocessor directives in a sane fashion.
EX_TRY
@@ -233,39 +242,33 @@ HRESULT ClrDataAccess::EnumMemCLRStatic(IN CLRDataEnumMemoryFlags flags)
}
EX_END_CATCH(RethrowCancelExceptions)
- CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED
- (
- // StressLog is not defined on Rotor build for DAC
- ReportMem(m_globalBase + g_dacGlobals.dac__g_pStressLog, sizeof(StressLog *));
- );
+ // StressLog is not defined on Rotor build for DAC
+ CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED ( ReportMem(m_globalBase + g_dacGlobals.dac__g_pStressLog, sizeof(StressLog *)); )
EX_TRY
{
// These two static pointers are pointed to static data of byte[]
// then run constructor in place
//
- ReportMem(m_globalBase + g_dacGlobals.SystemDomain__m_pSystemDomain,
- sizeof(SystemDomain));
- ReportMem(m_globalBase + g_dacGlobals.SharedDomain__m_pSharedDomain,
- sizeof(SharedDomain));
+ ReportMem(m_globalBase + g_dacGlobals.SystemDomain__m_pSystemDomain, sizeof(SystemDomain));
+ ReportMem(m_globalBase + g_dacGlobals.SharedDomain__m_pSharedDomain, sizeof(SharedDomain));
// We need IGCHeap pointer to make EEVersion work
- ReportMem(m_globalBase + g_dacGlobals.dac__g_pGCHeap,
- sizeof(IGCHeap *));
+ ReportMem(m_globalBase + g_dacGlobals.dac__g_pGCHeap, sizeof(IGCHeap *));
// see synblk.cpp, the pointer is pointed to a static byte[]
SyncBlockCache::s_pSyncBlockCache.EnumMem();
#ifndef FEATURE_IMPLICIT_TLS
- ReportMem(m_globalBase + g_dacGlobals.dac__gThreadTLSIndex,
- sizeof(DWORD));
- ReportMem(m_globalBase + g_dacGlobals.dac__gAppDomainTLSIndex,
- sizeof(DWORD));
+ ReportMem(m_globalBase + g_dacGlobals.dac__gThreadTLSIndex, sizeof(DWORD));
+ ReportMem(m_globalBase + g_dacGlobals.dac__gAppDomainTLSIndex, sizeof(DWORD));
#endif
- ReportMem( m_globalBase + g_dacGlobals.dac__g_FCDynamicallyAssignedImplementations,
+ ReportMem(m_globalBase + g_dacGlobals.dac__g_FCDynamicallyAssignedImplementations,
sizeof(TADDR)*ECall::NUM_DYNAMICALLY_ASSIGNED_FCALL_IMPLEMENTATIONS);
+ ReportMem(g_gcDacGlobals.GetAddr(), sizeof(GcDacVars));
+
// We need all of the dac variables referenced by the GC DAC global struct.
// This struct contains pointers to pointers, so we first dereference the pointers
// to obtain the location of the variable that's reported.
@@ -316,11 +319,8 @@ HRESULT ClrDataAccess::EnumMemCLRStatic(IN CLRDataEnumMemoryFlags flags)
CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED( StubManager::EnumMemoryRegions(flags); )
CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED( g_pFinalizerThread.EnumMem(); )
CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED( g_pSuspensionThread.EnumMem(); )
-
- CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED
- (
- g_heap_type.EnumMem();
- );
+
+ CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED( g_heap_type.EnumMem(); )
m_dumpStats.m_cbClrStatics = m_cbMemoryReported - cbMemoryReported;
@@ -345,8 +345,6 @@ HRESULT ClrDataAccess::EnumMemoryRegionsWorkerHeap(IN CLRDataEnumMemoryFlags fla
HRESULT status = S_OK;
- m_instances.ClearEnumMemMarker();
-
// clear all of the previous cached memory
Flush();
@@ -365,7 +363,6 @@ HRESULT ClrDataAccess::EnumMemoryRegionsWorkerHeap(IN CLRDataEnumMemoryFlags fla
// would be true, but I don't think we have that guarantee here.
CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED( status = EnumMemDumpModuleList(flags); );
-#ifdef FEATURE_LAZY_COW_PAGES
// Iterating to all threads' stacks, as we have to collect data stored inside (core)clr.dll
CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED( status = EnumMemDumpAllThreadsStack(flags); )
@@ -377,11 +374,11 @@ HRESULT ClrDataAccess::EnumMemoryRegionsWorkerHeap(IN CLRDataEnumMemoryFlags fla
// now dump the memory get dragged in by using DAC API implicitly.
m_dumpStats.m_cbImplicity = m_instances.DumpAllInstances(m_enumMemCb);
-#endif // FEATURE_LAZY_COW_PAGES
-
- // end of code
status = m_memStatus;
+ // Do not let any remaining implicitly enumerated memory leak out.
+ Flush();
+
return S_OK;
} // EnumMemoryRegionsWorkerHeap
@@ -976,16 +973,19 @@ HRESULT ClrDataAccess::EnumMemWalkStackHelper(CLRDataEnumMemoryFlags flags,
{
EECodeInfo codeInfo(addr);
- // We want IsFilterFunclet to work for anything on the stack
- codeInfo.GetJitManager()->IsFilterFunclet(&codeInfo);
-
- // The stackwalker needs GC info to find the parent 'stack pointer' or PSP
- GCInfoToken gcInfoToken = codeInfo.GetGCInfoToken();
- PTR_BYTE pGCInfo = dac_cast<PTR_BYTE>(gcInfoToken.Info);
- if (pGCInfo != NULL)
+ if (codeInfo.IsValid())
{
- GcInfoDecoder gcDecoder(gcInfoToken, DECODE_PSP_SYM, 0);
- DacEnumMemoryRegion(dac_cast<TADDR>(pGCInfo), gcDecoder.GetNumBytesRead(), true);
+ // We want IsFilterFunclet to work for anything on the stack
+ codeInfo.GetJitManager()->IsFilterFunclet(&codeInfo);
+
+ // The stackwalker needs GC info to find the parent 'stack pointer' or PSP
+ GCInfoToken gcInfoToken = codeInfo.GetGCInfoToken();
+ PTR_BYTE pGCInfo = dac_cast<PTR_BYTE>(gcInfoToken.Info);
+ if (pGCInfo != NULL)
+ {
+ GcInfoDecoder gcDecoder(gcInfoToken, DECODE_PSP_SYM, 0);
+ DacEnumMemoryRegion(dac_cast<TADDR>(pGCInfo), gcDecoder.GetNumBytesRead(), true);
+ }
}
}
#endif // WIN64EXCEPTIONS && USE_GC_INFO_DECODER
@@ -1603,10 +1603,6 @@ HRESULT ClrDataAccess::EnumMemoryRegionsWorkerSkinny(IN CLRDataEnumMemoryFlags f
// collect CLR static
CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED( status = EnumMemCLRStatic(flags); )
- // now dump the memory get dragged in by using DAC API implicitly.
- m_dumpStats.m_cbImplicity = m_instances.DumpAllInstances(m_enumMemCb);
- status = m_memStatus;
-
// Dump AppDomain-specific info needed for MiniDumpNormal.
CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED( status = EnumMemDumpAppDomainInfo(flags); )
@@ -1618,6 +1614,10 @@ HRESULT ClrDataAccess::EnumMemoryRegionsWorkerSkinny(IN CLRDataEnumMemoryFlags f
CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED( EnumStreams(flags); )
#endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS
+ // now dump the memory get dragged in by using DAC API implicitly.
+ m_dumpStats.m_cbImplicity = m_instances.DumpAllInstances(m_enumMemCb);
+ status = m_memStatus;
+
// Do not let any remaining implicitly enumerated memory leak out.
Flush();
@@ -1654,10 +1654,6 @@ HRESULT ClrDataAccess::EnumMemoryRegionsWorkerMicroTriage(IN CLRDataEnumMemoryFl
// collect CLR static
CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED( status = EnumMemCLRStatic(flags); )
- // now dump the memory get dragged in by using DAC API implicitly.
- m_dumpStats.m_cbImplicity = m_instances.DumpAllInstances(m_enumMemCb);
- status = m_memStatus;
-
// Dump AppDomain-specific info needed for triage dumps methods enumeration (k command).
CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED( status = EnumMemDumpAppDomainInfo(flags); )
@@ -1669,6 +1665,10 @@ HRESULT ClrDataAccess::EnumMemoryRegionsWorkerMicroTriage(IN CLRDataEnumMemoryFl
CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED( EnumStreams(flags); )
#endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS
+ // now dump the memory get dragged in by using DAC API implicitly.
+ m_dumpStats.m_cbImplicity = m_instances.DumpAllInstances(m_enumMemCb);
+ status = m_memStatus;
+
// Do not let any remaining implicitly enumerated memory leak out.
Flush();
@@ -1847,17 +1847,17 @@ HRESULT ClrDataAccess::EnumMemoryRegionsWrapper(IN CLRDataEnumMemoryFlags flags)
// The various EnumMemoryRegions() implementations should understand
// CLRDATA_ENUM_MEM_MINI to mean that the bare minimimum memory
// to make a MiniDumpNormal work should be included.
- if ( flags == CLRDATA_ENUM_MEM_MINI)
+ if (flags == CLRDATA_ENUM_MEM_MINI)
{
// skinny mini-dump
status = EnumMemoryRegionsWorkerSkinny(flags);
}
- else if ( flags == CLRDATA_ENUM_MEM_TRIAGE)
+ else if (flags == CLRDATA_ENUM_MEM_TRIAGE)
{
// triage micro-dump
status = EnumMemoryRegionsWorkerMicroTriage(flags);
}
- else if ( flags == CLRDATA_ENUM_MEM_HEAP)
+ else if (flags == CLRDATA_ENUM_MEM_HEAP)
{
status = EnumMemoryRegionsWorkerHeap(flags);
}
@@ -1874,12 +1874,6 @@ HRESULT ClrDataAccess::EnumMemoryRegionsWrapper(IN CLRDataEnumMemoryFlags flags)
return status;
}
-#define MiniDumpWithPrivateReadWriteMemory 0x00000200
-#define MiniDumpWithFullAuxiliaryState 0x00008000
-#define MiniDumpFilterTriage 0x00100000
-
-
-
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//
// Entry function for generating CLR aware dump. This function is called
@@ -1972,6 +1966,7 @@ ClrDataAccess::EnumMemoryRegions(IN ICLRDataEnumMemoryRegionsCallback* callback,
status = EnumMemoryRegionsWrapper(CLRDATA_ENUM_MEM_MINI);
}
+#ifndef FEATURE_PAL
// For all dump types, we need to capture the chain to the IMAGE_DIRECTORY_ENTRY_DEBUG
// contents, so that DAC can validate against the TimeDateStamp even if the
// debugger can't find the main CLR module on disk.
@@ -1986,7 +1981,7 @@ ClrDataAccess::EnumMemoryRegions(IN ICLRDataEnumMemoryRegionsCallback* callback,
m_instances.DumpAllInstances(m_enumMemCb);
}
}
-
+#endif
Flush();
}
EX_CATCH
diff --git a/src/debug/daccess/request_svr.cpp b/src/debug/daccess/request_svr.cpp
index 6a1de35ff9..40e3600f9f 100644
--- a/src/debug/daccess/request_svr.cpp
+++ b/src/debug/daccess/request_svr.cpp
@@ -22,6 +22,8 @@
int GCHeapCount()
{
+ if (g_gcDacGlobals->n_heaps == nullptr)
+ return 0;
return *g_gcDacGlobals->n_heaps;
}
@@ -206,14 +208,19 @@ void
ClrDataAccess::EnumSvrGlobalMemoryRegions(CLRDataEnumMemoryFlags flags)
{
SUPPORTS_DAC;
+
+ if (g_gcDacGlobals->n_heaps == nullptr || g_gcDacGlobals->g_heaps == nullptr)
+ return;
+
g_gcDacGlobals->n_heaps.EnumMem();
- DacEnumMemoryRegion(g_gcDacGlobals->g_heaps.GetAddr(),
- sizeof(TADDR) * *g_gcDacGlobals->n_heaps);
+
+ int heaps = *g_gcDacGlobals->n_heaps;
+ DacEnumMemoryRegion(g_gcDacGlobals->g_heaps.GetAddr(), sizeof(TADDR) * heaps);
g_gcDacGlobals->gc_structures_invalid_cnt.EnumMem();
g_gcDacGlobals->g_heaps.EnumMem();
- for (int i=0; i < *g_gcDacGlobals->n_heaps; i++)
+ for (int i = 0; i < heaps; i++)
{
DPTR(dac_gc_heap) pHeap = HeapTableIndex(g_gcDacGlobals->g_heaps, i);
@@ -249,6 +256,9 @@ DWORD DacGetNumHeaps()
HRESULT DacHeapWalker::InitHeapDataSvr(HeapData *&pHeaps, size_t &pCount)
{
+ if (g_gcDacGlobals->n_heaps == nullptr || g_gcDacGlobals->g_heaps == nullptr)
+ return S_OK;
+
// Scrape basic heap details
int heaps = *g_gcDacGlobals->n_heaps;
pCount = heaps;
diff --git a/src/debug/inc/dump/dumpcommon.h b/src/debug/inc/dump/dumpcommon.h
index 3e197ce29b..e57b4b3a12 100644
--- a/src/debug/inc/dump/dumpcommon.h
+++ b/src/debug/inc/dump/dumpcommon.h
@@ -5,6 +5,35 @@
#ifndef DEBUGGER_DUMPCOMMON_H
#define DEBUGGER_DUMPCOMMON_H
+#ifdef FEATURE_PAL
+typedef enum _MINIDUMP_TYPE {
+ MiniDumpNormal = 0x00000000,
+ MiniDumpWithDataSegs = 0x00000001,
+ MiniDumpWithFullMemory = 0x00000002,
+ MiniDumpWithHandleData = 0x00000004,
+ MiniDumpFilterMemory = 0x00000008,
+ MiniDumpScanMemory = 0x00000010,
+ MiniDumpWithUnloadedModules = 0x00000020,
+ MiniDumpWithIndirectlyReferencedMemory = 0x00000040,
+ MiniDumpFilterModulePaths = 0x00000080,
+ MiniDumpWithProcessThreadData = 0x00000100,
+ MiniDumpWithPrivateReadWriteMemory = 0x00000200,
+ MiniDumpWithoutOptionalData = 0x00000400,
+ MiniDumpWithFullMemoryInfo = 0x00000800,
+ MiniDumpWithThreadInfo = 0x00001000,
+ MiniDumpWithCodeSegs = 0x00002000,
+ MiniDumpWithoutAuxiliaryState = 0x00004000,
+ MiniDumpWithFullAuxiliaryState = 0x00008000,
+ MiniDumpWithPrivateWriteCopyMemory = 0x00010000,
+ MiniDumpIgnoreInaccessibleMemory = 0x00020000,
+ MiniDumpWithTokenInformation = 0x00040000,
+ MiniDumpWithModuleHeaders = 0x00080000,
+ MiniDumpFilterTriage = 0x00100000,
+ MiniDumpWithAvxXStateContext = 0x00200000,
+ MiniDumpValidTypeFlags = 0x003fffff,
+} MINIDUMP_TYPE;
+#endif // FEATURE_PAL
+
#if defined(DACCESS_COMPILE) || defined(RIGHT_SIDE_COMPILE)
// When debugging against minidumps, we frequently need to ignore errors
diff --git a/src/dlls/mscordac/mscordac_unixexports.src b/src/dlls/mscordac/mscordac_unixexports.src
index ab73c4fcb8..c2c96fab10 100644
--- a/src/dlls/mscordac/mscordac_unixexports.src
+++ b/src/dlls/mscordac/mscordac_unixexports.src
@@ -24,6 +24,7 @@ PAL_get_stderr
PAL_GetSymbolModuleBase
PAL_GetTransportPipeName
PAL_InitializeDLL
+PAL_TerminateEx
PAL_IsDebuggerPresent
PAL_ProbeMemory
PAL_iswspace
@@ -119,6 +120,9 @@ IID_IClassFactory
IID_ISequentialStream
IID_IStream
IID_IUnknown
+IID_ICLRDataTarget
+IID_ICorDebugDataTarget4
+IID_ICLRDataEnumMemoryRegionsCallback
InitializeCriticalSection
IsDBCSLeadByte
LeaveCriticalSection
diff --git a/src/dlls/mscoree/mscorwks_ntdef.src b/src/dlls/mscoree/mscorwks_ntdef.src
index 8115475418..d7e6a2dcf4 100644
--- a/src/dlls/mscoree/mscorwks_ntdef.src
+++ b/src/dlls/mscoree/mscorwks_ntdef.src
@@ -21,6 +21,7 @@ EXPORTS
coreclr_execute_assembly
coreclr_initialize
coreclr_shutdown
+ coreclr_shutdown_2
; il{d}asm
MetaDataGetDispenser
diff --git a/src/dlls/mscoree/mscorwks_unixexports.src b/src/dlls/mscoree/mscorwks_unixexports.src
index f7862d3afe..28e9ac223f 100644
--- a/src/dlls/mscoree/mscorwks_unixexports.src
+++ b/src/dlls/mscoree/mscorwks_unixexports.src
@@ -3,6 +3,7 @@ coreclr_create_delegate
coreclr_execute_assembly
coreclr_initialize
coreclr_shutdown
+coreclr_shutdown_2
; il{d}asm
MetaDataGetDispenser
diff --git a/src/dlls/mscoree/unixinterface.cpp b/src/dlls/mscoree/unixinterface.cpp
index edd361c0c2..cf9bbc5c72 100644
--- a/src/dlls/mscoree/unixinterface.cpp
+++ b/src/dlls/mscoree/unixinterface.cpp
@@ -183,9 +183,9 @@ int coreclr_initialize(
}
#endif
- ReleaseHolder<ICLRRuntimeHost2> host;
+ ReleaseHolder<ICLRRuntimeHost4> host;
- hr = CorHost2::CreateObject(IID_ICLRRuntimeHost2, (void**)&host);
+ hr = CorHost2::CreateObject(IID_ICLRRuntimeHost4, (void**)&host);
IfFailRet(hr);
ConstWStringHolder appDomainFriendlyNameW = StringToUnicode(appDomainFriendlyName);
@@ -284,7 +284,7 @@ int coreclr_shutdown(
void* hostHandle,
unsigned int domainId)
{
- ReleaseHolder<ICLRRuntimeHost2> host(reinterpret_cast<ICLRRuntimeHost2*>(hostHandle));
+ ReleaseHolder<ICLRRuntimeHost4> host(reinterpret_cast<ICLRRuntimeHost4*>(hostHandle));
HRESULT hr = host->UnloadAppDomain(domainId, true); // Wait until done
IfFailRet(hr);
@@ -299,6 +299,37 @@ int coreclr_shutdown(
}
//
+// Shutdown CoreCLR. It unloads the app domain and stops the CoreCLR host.
+//
+// Parameters:
+// hostHandle - Handle of the host
+// domainId - Id of the domain
+// latchedExitCode - Latched exit code after domain unloaded
+//
+// Returns:
+// HRESULT indicating status of the operation. S_OK if the app domain was successfully unloaded and the host stopped
+//
+extern "C"
+int coreclr_shutdown_2(
+ void* hostHandle,
+ unsigned int domainId,
+ int* latchedExitCode)
+{
+ ReleaseHolder<ICLRRuntimeHost4> host(reinterpret_cast<ICLRRuntimeHost4*>(hostHandle));
+
+ HRESULT hr = host->UnloadAppDomain2(domainId, true, latchedExitCode); // Wait until done
+ IfFailRet(hr);
+
+ hr = host->Stop();
+
+#ifdef FEATURE_PAL
+ PAL_Shutdown();
+#endif
+
+ return hr;
+}
+
+//
// Create a native callable delegate for a managed method.
//
// Parameters:
@@ -321,7 +352,7 @@ int coreclr_create_delegate(
const char* entryPointMethodName,
void** delegate)
{
- ICLRRuntimeHost2* host = reinterpret_cast<ICLRRuntimeHost2*>(hostHandle);
+ ICLRRuntimeHost4* host = reinterpret_cast<ICLRRuntimeHost4*>(hostHandle);
ConstWStringHolder entryPointAssemblyNameW = StringToUnicode(entryPointAssemblyName);
ConstWStringHolder entryPointTypeNameW = StringToUnicode(entryPointTypeName);
@@ -366,7 +397,7 @@ int coreclr_execute_assembly(
}
*exitCode = -1;
- ICLRRuntimeHost2* host = reinterpret_cast<ICLRRuntimeHost2*>(hostHandle);
+ ICLRRuntimeHost4* host = reinterpret_cast<ICLRRuntimeHost4*>(hostHandle);
ConstWStringArrayHolder argvW;
argvW.Set(StringArrayToUnicode(argc, argv), argc);
diff --git a/src/gc/gchandletable.cpp b/src/gc/gchandletable.cpp
index f8222b193a..82ab269861 100644
--- a/src/gc/gchandletable.cpp
+++ b/src/gc/gchandletable.cpp
@@ -23,19 +23,51 @@ void GCHandleTable::Shutdown()
Ref_Shutdown();
}
-void* GCHandleTable::GetHandleTableContext(void* handleTable)
+void* GCHandleTable::GetGlobalHandleStore()
{
- return (void*)((uintptr_t)::HndGetHandleTableADIndex((HHANDLETABLE)handleTable).m_dwIndex);
+ return (void*)g_HandleTableMap.pBuckets[0];
}
-void* GCHandleTable::GetHandleTableForHandle(OBJECTHANDLE handle)
+void* GCHandleTable::CreateHandleStore(void* context)
{
- return (void*)::HndGetHandleTable(handle);
+#ifndef FEATURE_REDHAWK
+ return (void*)::Ref_CreateHandleTableBucket(ADIndex((DWORD)(uintptr_t)context));
+#else
+ assert("CreateHandleStore is not implemented when FEATURE_REDHAWK is defined!");
+ return nullptr;
+#endif
}
-OBJECTHANDLE GCHandleTable::CreateHandleOfType(void* table, Object* object, int type)
+void* GCHandleTable::GetHandleContext(OBJECTHANDLE handle)
{
- return ::HndCreateHandle((HHANDLETABLE)table, type, ObjectToOBJECTREF(object));
+ return (void*)((uintptr_t)::HndGetHandleTableADIndex(::HndGetHandleTable(handle)).m_dwIndex);
+}
+
+void GCHandleTable::DestroyHandleStore(void* store)
+{
+ Ref_DestroyHandleTableBucket((HandleTableBucket*) store);
+}
+
+void GCHandleTable::UprootHandleStore(void* store)
+{
+ Ref_RemoveHandleTableBucket((HandleTableBucket*) store);
+}
+
+bool GCHandleTable::ContainsHandle(void* store, OBJECTHANDLE handle)
+{
+ return ((HandleTableBucket*)store)->Contains(handle);
+}
+
+OBJECTHANDLE GCHandleTable::CreateHandleOfType(void* store, Object* object, int type)
+{
+ HHANDLETABLE handletable = ((HandleTableBucket*)store)->pTable[GetCurrentThreadHomeHeapNumber()];
+ return ::HndCreateHandle(handletable, type, ObjectToOBJECTREF(object));
+}
+
+OBJECTHANDLE GCHandleTable::CreateHandleOfType(void* store, Object* object, int type, int heapToAffinitizeTo)
+{
+ HHANDLETABLE handletable = ((HandleTableBucket*)store)->pTable[heapToAffinitizeTo];
+ return ::HndCreateHandle(handletable, type, ObjectToOBJECTREF(object));
}
OBJECTHANDLE GCHandleTable::CreateGlobalHandleOfType(Object* object, int type)
@@ -43,15 +75,37 @@ OBJECTHANDLE GCHandleTable::CreateGlobalHandleOfType(Object* object, int type)
return ::HndCreateHandle(g_HandleTableMap.pBuckets[0]->pTable[GetCurrentThreadHomeHeapNumber()], type, ObjectToOBJECTREF(object));
}
-OBJECTHANDLE GCHandleTable::CreateHandleWithExtraInfo(void* table, Object* object, int type, void* pExtraInfo)
+OBJECTHANDLE GCHandleTable::CreateHandleWithExtraInfo(void* store, Object* object, int type, void* pExtraInfo)
{
- return ::HndCreateHandle((HHANDLETABLE)table, type, ObjectToOBJECTREF(object), reinterpret_cast<uintptr_t>(pExtraInfo));
+ HHANDLETABLE handletable = ((HandleTableBucket*)store)->pTable[GetCurrentThreadHomeHeapNumber()];
+ return ::HndCreateHandle(handletable, type, ObjectToOBJECTREF(object), reinterpret_cast<uintptr_t>(pExtraInfo));
}
-OBJECTHANDLE GCHandleTable::CreateDependentHandle(void* table, Object* primary, Object* secondary)
+OBJECTHANDLE GCHandleTable::CreateDependentHandle(void* store, Object* primary, Object* secondary)
{
- OBJECTHANDLE handle = ::HndCreateHandle((HHANDLETABLE)table, HNDTYPE_DEPENDENT, ObjectToOBJECTREF(primary));
+ HHANDLETABLE handletable = ((HandleTableBucket*)store)->pTable[GetCurrentThreadHomeHeapNumber()];
+ OBJECTHANDLE handle = ::HndCreateHandle(handletable, HNDTYPE_DEPENDENT, ObjectToOBJECTREF(primary));
::SetDependentHandleSecondary(handle, ObjectToOBJECTREF(secondary));
return handle;
-} \ No newline at end of file
+}
+
+OBJECTHANDLE GCHandleTable::CreateDuplicateHandle(OBJECTHANDLE handle)
+{
+ return ::HndCreateHandle(HndGetHandleTable(handle), HNDTYPE_DEFAULT, ::HndFetchHandle(handle));
+}
+
+void GCHandleTable::DestroyHandleOfType(OBJECTHANDLE handle, int type)
+{
+ ::HndDestroyHandle(::HndGetHandleTable(handle), type, handle);
+}
+
+void GCHandleTable::DestroyHandleOfUnknownType(OBJECTHANDLE handle)
+{
+ ::HndDestroyHandleOfUnknownType(::HndGetHandleTable(handle), handle);
+}
+
+void* GCHandleTable::GetExtraInfoFromHandle(OBJECTHANDLE handle)
+{
+ return (void*)::HndGetHandleExtraInfo(handle);
+}
diff --git a/src/gc/gchandletableimpl.h b/src/gc/gchandletableimpl.h
index 787e0c1675..af20f52e54 100644
--- a/src/gc/gchandletableimpl.h
+++ b/src/gc/gchandletableimpl.h
@@ -14,17 +14,35 @@ public:
virtual void Shutdown();
- virtual void* GetHandleTableContext(void* handleTable);
+ virtual void* GetGlobalHandleStore();
- virtual void* GetHandleTableForHandle(OBJECTHANDLE handle);
+ virtual void* CreateHandleStore(void* context);
- virtual OBJECTHANDLE CreateHandleOfType(void* table, Object* object, int type);
+ virtual void* GetHandleContext(OBJECTHANDLE handle);
- virtual OBJECTHANDLE CreateHandleWithExtraInfo(void* table, Object* object, int type, void* pExtraInfo);
+ virtual void DestroyHandleStore(void* store);
- virtual OBJECTHANDLE CreateDependentHandle(void* table, Object* primary, Object* secondary);
+ virtual void UprootHandleStore(void* store);
+
+ virtual bool ContainsHandle(void* store, OBJECTHANDLE handle);
+
+ virtual OBJECTHANDLE CreateHandleOfType(void* store, Object* object, int type);
+
+ virtual OBJECTHANDLE CreateHandleOfType(void* store, Object* object, int type, int heapToAffinitizeTo);
+
+ virtual OBJECTHANDLE CreateHandleWithExtraInfo(void* store, Object* object, int type, void* pExtraInfo);
+
+ virtual OBJECTHANDLE CreateDependentHandle(void* store, Object* primary, Object* secondary);
virtual OBJECTHANDLE CreateGlobalHandleOfType(Object* object, int type);
+
+ virtual OBJECTHANDLE CreateDuplicateHandle(OBJECTHANDLE handle);
+
+ virtual void DestroyHandleOfType(OBJECTHANDLE handle, int type);
+
+ virtual void DestroyHandleOfUnknownType(OBJECTHANDLE handle);
+
+ virtual void* GetExtraInfoFromHandle(OBJECTHANDLE handle);
};
#endif // GCHANDLETABLE_H_
diff --git a/src/gc/gcinterface.h b/src/gc/gcinterface.h
index d0fa87969f..cac2ba7114 100644
--- a/src/gc/gcinterface.h
+++ b/src/gc/gcinterface.h
@@ -409,17 +409,35 @@ public:
virtual void Shutdown() = 0;
- virtual void* GetHandleTableContext(void* handleTable) = 0;
+ virtual void* GetHandleContext(OBJECTHANDLE handle) = 0;
- virtual void* GetHandleTableForHandle(OBJECTHANDLE handle) = 0;
+ virtual void* GetGlobalHandleStore() = 0;
- virtual OBJECTHANDLE CreateHandleOfType(void* table, Object* object, int type) = 0;
+ virtual void* CreateHandleStore(void* context) = 0;
- virtual OBJECTHANDLE CreateHandleWithExtraInfo(void* table, Object* object, int type, void* pExtraInfo) = 0;
+ virtual void DestroyHandleStore(void* store) = 0;
- virtual OBJECTHANDLE CreateDependentHandle(void* table, Object* primary, Object* secondary) = 0;
+ virtual void UprootHandleStore(void* store) = 0;
+
+ virtual bool ContainsHandle(void* store, OBJECTHANDLE handle) = 0;
+
+ virtual OBJECTHANDLE CreateHandleOfType(void* store, Object* object, int type) = 0;
+
+ virtual OBJECTHANDLE CreateHandleOfType(void* store, Object* object, int type, int heapToAffinitizeTo) = 0;
+
+ virtual OBJECTHANDLE CreateHandleWithExtraInfo(void* store, Object* object, int type, void* pExtraInfo) = 0;
+
+ virtual OBJECTHANDLE CreateDependentHandle(void* store, Object* primary, Object* secondary) = 0;
virtual OBJECTHANDLE CreateGlobalHandleOfType(Object* object, int type) = 0;
+
+ virtual OBJECTHANDLE CreateDuplicateHandle(OBJECTHANDLE handle) = 0;
+
+ virtual void DestroyHandleOfType(OBJECTHANDLE handle, int type) = 0;
+
+ virtual void DestroyHandleOfUnknownType(OBJECTHANDLE handle) = 0;
+
+ virtual void* GetExtraInfoFromHandle(OBJECTHANDLE handle) = 0;
};
// IGCHeap is the interface that the VM will use when interacting with the GC.
diff --git a/src/gc/handletable.cpp b/src/gc/handletable.cpp
index e56e1e1701..eee181959f 100644
--- a/src/gc/handletable.cpp
+++ b/src/gc/handletable.cpp
@@ -1338,24 +1338,6 @@ void Ref_RelocateAsyncPinHandles(HandleTableBucket *pSource, HandleTableBucket
}
#endif // !FEATURE_REDHAWK
-BOOL Ref_ContainHandle(HandleTableBucket *pBucket, OBJECTHANDLE handle)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- }
- CONTRACTL_END;
-
- int limit = getNumberOfSlots();
- for (int n = 0; n < limit; n ++ )
- {
- if (TableContainHandle(Table(pBucket->pTable[n]), handle))
- return TRUE;
- }
-
- return FALSE;
-}
/*--------------------------------------------------------------------------*/
diff --git a/src/gc/handletable.h b/src/gc/handletable.h
index 5b0299fe02..ebf8c62c33 100644
--- a/src/gc/handletable.h
+++ b/src/gc/handletable.h
@@ -216,18 +216,6 @@ FORCEINLINE BOOL HndIsNull(OBJECTHANDLE handle)
}
-
-/*
- * inline handle checking
- */
-FORCEINLINE BOOL HndCheckForNullUnchecked(OBJECTHANDLE handle)
-{
- LIMITED_METHOD_CONTRACT;
-
- return (handle == NULL || (*(_UNCHECKED_OBJECTREF *)handle) == NULL);
-}
-
-
/*
*
* Checks handle value for null or special value used for free handles in cache.
diff --git a/src/gc/objecthandle.cpp b/src/gc/objecthandle.cpp
index a1000377ba..5df53baad5 100644
--- a/src/gc/objecthandle.cpp
+++ b/src/gc/objecthandle.cpp
@@ -1856,51 +1856,6 @@ bool HandleTableBucket::Contains(OBJECTHANDLE handle)
return FALSE;
}
-void DestroySizedRefHandle(OBJECTHANDLE handle)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- SO_TOLERANT;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- HHANDLETABLE hTable = HndGetHandleTable(handle);
- HndDestroyHandle(hTable , HNDTYPE_SIZEDREF, handle);
- AppDomain* pDomain = SystemDomain::GetAppDomainAtIndex(HndGetHandleTableADIndex(hTable));
- pDomain->DecNumSizedRefHandles();
-}
-
-#ifdef FEATURE_COMINTEROP
-
-void DestroyWinRTWeakHandle(OBJECTHANDLE handle)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- CAN_TAKE_LOCK;
- SO_TOLERANT;
- }
- CONTRACTL_END;
-
- // Release the WinRT weak reference if we have one. We're assuming that this will not reenter the
- // runtime, since if we are pointing at a managed object, we should not be using a HNDTYPE_WEAK_WINRT
- // but rather a HNDTYPE_WEAK_SHORT or HNDTYPE_WEAK_LONG.
- IWeakReference* pWinRTWeakReference = reinterpret_cast<IWeakReference*>(HndGetHandleExtraInfo(handle));
- if (pWinRTWeakReference != NULL)
- {
- pWinRTWeakReference->Release();
- }
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_WEAK_WINRT, handle);
-}
-
-#endif // FEATURE_COMINTEROP
-
#endif // !DACCESS_COMPILE
OBJECTREF GetDependentHandleSecondary(OBJECTHANDLE handle)
diff --git a/src/gc/objecthandle.h b/src/gc/objecthandle.h
index 73d363f4a7..d3e45f8659 100644
--- a/src/gc/objecthandle.h
+++ b/src/gc/objecthandle.h
@@ -30,8 +30,6 @@
#define StoreObjectInHandle(handle, object) HndAssignHandle(handle, object)
#define InterlockedCompareExchangeObjectInHandle(handle, object, oldObj) HndInterlockedCompareExchangeHandle(handle, object, oldObj)
#define StoreFirstObjectInHandle(handle, object) HndFirstAssignHandle(handle, object)
-#define ObjectHandleIsNull(handle) HndIsNull(handle)
-#define IsHandleNullUnchecked(handle) HndCheckForNullUnchecked(handle)
typedef DPTR(struct HandleTableMap) PTR_HandleTableMap;
typedef DPTR(struct HandleTableBucket) PTR_HandleTableBucket;
@@ -73,119 +71,10 @@ struct HandleTableBucket
(flag == VHT_STRONG) || \
(flag == VHT_PINNED))
-#ifndef DACCESS_COMPILE
-/*
- * Convenience macros and prototypes for the various handle types we define
- */
-
-inline void DestroyTypedHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandleOfUnknownType(HndGetHandleTable(handle), handle);
-}
-
-inline void DestroyHandle(OBJECTHANDLE handle)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- CAN_TAKE_LOCK;
- SO_TOLERANT;
- }
- CONTRACTL_END;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_DEFAULT, handle);
-}
-
-inline void DestroyWeakHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_WEAK_DEFAULT, handle);
-}
-
-inline void DestroyShortWeakHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_WEAK_SHORT, handle);
-}
-
-inline void DestroyLongWeakHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_WEAK_LONG, handle);
-}
-
-#ifndef FEATURE_REDHAWK
-typedef Holder<OBJECTHANDLE,DoNothing<OBJECTHANDLE>,DestroyLongWeakHandle> LongWeakHandleHolder;
-#endif
-
-inline void DestroyStrongHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_STRONG, handle);
-}
-
-inline void DestroyPinningHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_PINNED, handle);
-}
-
-#ifndef FEATURE_REDHAWK
-typedef Wrapper<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, DestroyPinningHandle, NULL> PinningHandleHolder;
-#endif
-
-inline void DestroyAsyncPinningHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_ASYNCPINNED, handle);
-}
-
-#ifndef FEATURE_REDHAWK
-typedef Wrapper<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, DestroyAsyncPinningHandle, NULL> AsyncPinningHandleHolder;
-#endif
-
-void DestroySizedRefHandle(OBJECTHANDLE handle);
-
-#ifndef FEATURE_REDHAWK
-typedef Wrapper<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, DestroySizedRefHandle, NULL> SizeRefHandleHolder;
-#endif
-
-#ifdef FEATURE_COMINTEROP
-
-inline void DestroyRefcountedHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_REFCOUNTED, handle);
-}
-
-void DestroyWinRTWeakHandle(OBJECTHANDLE handle);
-
-#endif // FEATURE_COMINTEROP
-
-#endif // !DACCESS_COMPILE
-
OBJECTREF GetDependentHandleSecondary(OBJECTHANDLE handle);
#ifndef DACCESS_COMPILE
void SetDependentHandleSecondary(OBJECTHANDLE handle, OBJECTREF secondary);
-
-inline void DestroyDependentHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_DEPENDENT, handle);
-}
#endif // !DACCESS_COMPILE
#ifndef DACCESS_COMPILE
@@ -193,130 +82,14 @@ uint32_t GetVariableHandleType(OBJECTHANDLE handle);
void UpdateVariableHandleType(OBJECTHANDLE handle, uint32_t type);
uint32_t CompareExchangeVariableHandleType(OBJECTHANDLE handle, uint32_t oldType, uint32_t newType);
-inline void DestroyVariableHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_VARIABLE, handle);
-}
-
void GCHandleValidatePinnedObject(OBJECTREF obj);
/*
- * Holder for OBJECTHANDLE
- */
-
-#ifndef FEATURE_REDHAWK
-typedef Wrapper<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, DestroyHandle > OHWrapper;
-
-class OBJECTHANDLEHolder : public OHWrapper
-{
-public:
- FORCEINLINE OBJECTHANDLEHolder(OBJECTHANDLE p = NULL) : OHWrapper(p)
- {
- LIMITED_METHOD_CONTRACT;
- }
- FORCEINLINE void operator=(OBJECTHANDLE p)
- {
- WRAPPER_NO_CONTRACT;
-
- OHWrapper::operator=(p);
- }
-};
-#endif
-
-#ifdef FEATURE_COMINTEROP
-
-typedef Wrapper<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, DestroyRefcountedHandle> RefCountedOHWrapper;
-
-class RCOBJECTHANDLEHolder : public RefCountedOHWrapper
-{
-public:
- FORCEINLINE RCOBJECTHANDLEHolder(OBJECTHANDLE p = NULL) : RefCountedOHWrapper(p)
- {
- LIMITED_METHOD_CONTRACT;
- }
- FORCEINLINE void operator=(OBJECTHANDLE p)
- {
- WRAPPER_NO_CONTRACT;
-
- RefCountedOHWrapper::operator=(p);
- }
-};
-
-#endif // FEATURE_COMINTEROP
-/*
* Convenience prototypes for using the global handles
*/
int GetCurrentThreadHomeHeapNumber();
-inline void DestroyGlobalTypedHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandleOfUnknownType(HndGetHandleTable(handle), handle);
-}
-
-inline void DestroyGlobalHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_DEFAULT, handle);
-}
-
-inline void DestroyGlobalWeakHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_WEAK_DEFAULT, handle);
-}
-
-inline void DestroyGlobalShortWeakHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_WEAK_SHORT, handle);
-}
-
-#ifndef FEATURE_REDHAWK
-typedef Holder<OBJECTHANDLE,DoNothing<OBJECTHANDLE>,DestroyGlobalShortWeakHandle> GlobalShortWeakHandleHolder;
-#endif
-
-inline void DestroyGlobalLongWeakHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_WEAK_LONG, handle);
-}
-
-inline void DestroyGlobalStrongHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_STRONG, handle);
-}
-
-#ifndef FEATURE_REDHAWK
-typedef Holder<OBJECTHANDLE,DoNothing<OBJECTHANDLE>,DestroyGlobalStrongHandle> GlobalStrongHandleHolder;
-#endif
-
-inline void DestroyGlobalPinningHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_PINNED, handle);
-}
-
-#ifdef FEATURE_COMINTEROP
-inline void DestroyGlobalRefcountedHandle(OBJECTHANDLE handle)
-{
- WRAPPER_NO_CONTRACT;
-
- HndDestroyHandle(HndGetHandleTable(handle), HNDTYPE_REFCOUNTED, handle);
-}
-#endif // FEATURE_COMINTEROP
-
inline void ResetOBJECTHANDLE(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
@@ -338,7 +111,6 @@ BOOL Ref_HandleAsyncPinHandles();
void Ref_RelocateAsyncPinHandles(HandleTableBucket *pSource, HandleTableBucket *pTarget);
void Ref_RemoveHandleTableBucket(HandleTableBucket *pBucket);
void Ref_DestroyHandleTableBucket(HandleTableBucket *pBucket);
-BOOL Ref_ContainHandle(HandleTableBucket *pBucket, OBJECTHANDLE handle);
/*
* GC-time scanning entrypoints
diff --git a/src/gc/sample/GCSample.cpp b/src/gc/sample/GCSample.cpp
index ed67e892ff..2914ee1665 100644
--- a/src/gc/sample/GCSample.cpp
+++ b/src/gc/sample/GCSample.cpp
@@ -229,7 +229,7 @@ int __cdecl main(int argc, char* argv[])
return -1;
// Destroy the strong handle so that nothing will be keeping out object alive
- DestroyGlobalHandle(oh);
+ HndDestroyHandle(HndGetHandleTable(oh), HNDTYPE_DEFAULT, oh);
// Explicitly trigger full GC
pGCHeap->GarbageCollect();
diff --git a/src/inc/arrayholder.h b/src/inc/arrayholder.h
new file mode 100644
index 0000000000..681014fc95
--- /dev/null
+++ b/src/inc/arrayholder.h
@@ -0,0 +1,80 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+template <class T>
+class ArrayHolder
+{
+public:
+ ArrayHolder(T *ptr)
+ : m_ptr(ptr)
+ {
+ }
+
+ ~ArrayHolder()
+ {
+ Clear();
+ }
+
+ ArrayHolder(const ArrayHolder &rhs)
+ {
+ m_ptr = const_cast<ArrayHolder *>(&rhs)->Detach();
+ }
+
+ ArrayHolder &operator=(T *ptr)
+ {
+ Clear();
+ m_ptr = ptr;
+ return *this;
+ }
+
+ const T &operator[](int i) const
+ {
+ return m_ptr[i];
+ }
+
+ T &operator[](int i)
+ {
+ return m_ptr[i];
+ }
+
+ operator const T *() const
+ {
+ return m_ptr;
+ }
+
+ operator T *()
+ {
+ return m_ptr;
+ }
+
+ T **operator&()
+ {
+ return &m_ptr;
+ }
+
+ T *GetPtr()
+ {
+ return m_ptr;
+ }
+
+ T *Detach()
+ {
+ T *ret = m_ptr;
+ m_ptr = NULL;
+ return ret;
+ }
+
+private:
+ void Clear()
+ {
+ if (m_ptr)
+ {
+ delete [] m_ptr;
+ m_ptr = NULL;
+ }
+ }
+
+private:
+ T *m_ptr;
+};
diff --git a/src/inc/corhost.h b/src/inc/corhost.h
index 3aabe9ed5d..59ab23cd27 100644
--- a/src/inc/corhost.h
+++ b/src/inc/corhost.h
@@ -137,6 +137,7 @@ protected:
STDMETHODIMP UnloadAppDomain(DWORD dwDomainId, BOOL fWaitUntilDone);
+ STDMETHODIMP UnloadAppDomain2(DWORD dwDomainId, BOOL fWaitUntilDone, int *pLatchedExitCode);
public:
static ULONG GetHostVersion()
{
@@ -275,7 +276,7 @@ class CorHost2 :
#ifndef FEATURE_PAL
, public IPrivateManagedExceptionReporting /* This interface is for internal Watson testing only*/
#endif // FEATURE_PAL
- , public ICLRRuntimeHost2
+ , public ICLRRuntimeHost4
, public CorExecutionManager
{
friend struct _DacGlobals;
@@ -337,6 +338,8 @@ public:
STDMETHODIMP UnloadAppDomain(DWORD dwDomainId, BOOL fWaitUntilDone);
+ STDMETHODIMP UnloadAppDomain2(DWORD dwDomainId, BOOL fWaitUntilDone, int *pLatchedExitCode);
+
STDMETHODIMP GetCurrentAppDomainId(DWORD *pdwAppDomainId);
STDMETHODIMP ExecuteApplication(LPCWSTR pwzAppFullName,
diff --git a/src/inc/eetwain.h b/src/inc/eetwain.h
index 54e9a34464..497e0b0e6b 100644
--- a/src/inc/eetwain.h
+++ b/src/inc/eetwain.h
@@ -652,7 +652,10 @@ HRESULT FixContextForEnC(PCONTEXT pCtx,
#ifdef WIN64EXCEPTIONS
static void EnsureCallerContextIsValid( PREGDISPLAY pRD, StackwalkCacheEntry* pCacheEntry, EECodeInfo * pCodeInfo = NULL );
static size_t GetCallerSp( PREGDISPLAY pRD );
-#endif
+#ifdef _TARGET_X86_
+ static size_t GetResumeSp( PCONTEXT pContext );
+#endif // _TARGET_X86_
+#endif // WIN64EXCEPTIONS
#ifdef DACCESS_COMPILE
virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
diff --git a/src/jit/CMakeLists.txt b/src/jit/CMakeLists.txt
index e8920a2860..e2a9ca66ab 100644
--- a/src/jit/CMakeLists.txt
+++ b/src/jit/CMakeLists.txt
@@ -104,10 +104,13 @@ set( JIT_AMD64_SOURCES
set( JIT_ARM_SOURCES
${JIT_ARM_LEGACY_SOURCES}
+ codegenarmarch.cpp
codegenarm.cpp
decomposelongs.cpp
emitarm.cpp
+ lowerarmarch.cpp
lowerarm.cpp
+ lsraarmarch.cpp
lsraarm.cpp
targetarm.cpp
unwindarm.cpp
@@ -127,9 +130,12 @@ set( JIT_I386_SOURCES
)
set( JIT_ARM64_SOURCES
+ codegenarmarch.cpp
codegenarm64.cpp
emitarm64.cpp
+ lowerarmarch.cpp
lowerarm64.cpp
+ lsraarmarch.cpp
lsraarm64.cpp
targetarm64.cpp
unwindarm.cpp
diff --git a/src/jit/block.h b/src/jit/block.h
index 3a7d7066b2..752219bdb7 100644
--- a/src/jit/block.h
+++ b/src/jit/block.h
@@ -1063,7 +1063,6 @@ struct BasicBlock : private LIR::Range
GenTreeStmt* firstStmt() const;
GenTreeStmt* lastStmt() const;
- GenTreeStmt* lastTopLevelStmt();
GenTree* firstNode();
GenTree* lastNode();
diff --git a/src/jit/codegenarm.cpp b/src/jit/codegenarm.cpp
index 34c86e921e..41bd8040ac 100644
--- a/src/jit/codegenarm.cpp
+++ b/src/jit/codegenarm.cpp
@@ -24,22 +24,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "emit.h"
//------------------------------------------------------------------------
-// genSetRegToIcon: Generate code that will set the given register to the integer constant.
-//
-void CodeGen::genSetRegToIcon(regNumber reg, ssize_t val, var_types type, insFlags flags)
-{
- // Reg cannot be a FP reg
- assert(!genIsValidFloatReg(reg));
-
- // The only TYP_REF constant that can come this path is a managed 'null' since it is not
- // relocatable. Other ref type constants (e.g. string objects) go through a different
- // code path.
- noway_assert(type != TYP_REF || val == 0);
-
- instGen_Set_Reg_To_Imm(emitActualTypeSize(type), reg, val, flags);
-}
-
-//------------------------------------------------------------------------
// genCallFinally: Generate a call to the finally block.
//
BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
@@ -83,140 +67,6 @@ void CodeGen::genEHCatchRet(BasicBlock* block)
getEmitter()->emitIns_R_L(INS_movt, EA_4BYTE_DSP_RELOC, block->bbJumpDest, REG_INTRET);
}
-//---------------------------------------------------------------------
-// genIntrinsic - generate code for a given intrinsic
-//
-// Arguments
-// treeNode - the GT_INTRINSIC node
-//
-// Return value:
-// None
-//
-void CodeGen::genIntrinsic(GenTreePtr treeNode)
-{
- // Both operand and its result must be of the same floating point type.
- GenTreePtr srcNode = treeNode->gtOp.gtOp1;
- assert(varTypeIsFloating(srcNode));
- assert(srcNode->TypeGet() == treeNode->TypeGet());
-
- // Right now only Abs/Round/Sqrt are treated as math intrinsics.
- //
- switch (treeNode->gtIntrinsic.gtIntrinsicId)
- {
- case CORINFO_INTRINSIC_Abs:
- genConsumeOperands(treeNode->AsOp());
- getEmitter()->emitInsBinary(INS_vabs, emitTypeSize(treeNode), treeNode, srcNode);
- break;
-
- case CORINFO_INTRINSIC_Round:
- NYI_ARM("genIntrinsic for round - not implemented yet");
- break;
-
- case CORINFO_INTRINSIC_Sqrt:
- genConsumeOperands(treeNode->AsOp());
- getEmitter()->emitInsBinary(INS_vsqrt, emitTypeSize(treeNode), treeNode, srcNode);
- break;
-
- default:
- assert(!"genIntrinsic: Unsupported intrinsic");
- unreached();
- }
-
- genProduceReg(treeNode);
-}
-
-//---------------------------------------------------------------------
-// genPutArgStk - generate code for a GT_PUTARG_STK node
-//
-// Arguments
-// treeNode - the GT_PUTARG_STK node
-//
-// Return value:
-// None
-//
-void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode)
-{
- assert(treeNode->OperGet() == GT_PUTARG_STK);
- var_types targetType = treeNode->TypeGet();
- GenTreePtr source = treeNode->gtOp1;
- emitter* emit = getEmitter();
-
- // This is the varNum for our store operations,
- // typically this is the varNum for the Outgoing arg space
- // When we are generating a tail call it will be the varNum for arg0
- unsigned varNumOut;
- unsigned argOffsetMax; // Records the maximum size of this area for assert checks
-
- // Get argument offset to use with 'varNumOut'
- // Here we cross check that argument offset hasn't changed from lowering to codegen since
- // we are storing arg slot number in GT_PUTARG_STK node in lowering phase.
- unsigned argOffsetOut = treeNode->gtSlotNum * TARGET_POINTER_SIZE;
-
-#ifdef DEBUG
- fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(treeNode->gtCall, treeNode);
- assert(curArgTabEntry);
- assert(argOffsetOut == (curArgTabEntry->slotNum * TARGET_POINTER_SIZE));
-#endif // DEBUG
-
- varNumOut = compiler->lvaOutgoingArgSpaceVar;
- argOffsetMax = compiler->lvaOutgoingArgSpaceSize;
-
- bool isStruct = (targetType == TYP_STRUCT) || (source->OperGet() == GT_FIELD_LIST);
-
- if (!isStruct) // a normal non-Struct argument
- {
- instruction storeIns = ins_Store(targetType);
- emitAttr storeAttr = emitTypeSize(targetType);
-
- // If it is contained then source must be the integer constant zero
- if (source->isContained())
- {
- assert(source->OperGet() == GT_CNS_INT);
- assert(source->AsIntConCommon()->IconValue() == 0);
- NYI("genPutArgStk: contained zero source");
- }
- else
- {
- genConsumeReg(source);
- emit->emitIns_S_R(storeIns, storeAttr, source->gtRegNum, varNumOut, argOffsetOut);
- }
- argOffsetOut += EA_SIZE_IN_BYTES(storeAttr);
- assert(argOffsetOut <= argOffsetMax); // We can't write beyound the outgoing area area
- }
- else // We have some kind of a struct argument
- {
- assert(source->isContained()); // We expect that this node was marked as contained in LowerArm
-
- if (source->OperGet() == GT_FIELD_LIST)
- {
- // Deal with the multi register passed struct args.
- GenTreeFieldList* fieldListPtr = source->AsFieldList();
-
- // Evaluate each of the GT_FIELD_LIST items into their register
- // and store their register into the outgoing argument area
- for (; fieldListPtr != nullptr; fieldListPtr = fieldListPtr->Rest())
- {
- GenTreePtr nextArgNode = fieldListPtr->gtOp.gtOp1;
- genConsumeReg(nextArgNode);
-
- regNumber reg = nextArgNode->gtRegNum;
- var_types type = nextArgNode->TypeGet();
- emitAttr attr = emitTypeSize(type);
-
- // Emit store instructions to store the registers produced by the GT_FIELD_LIST into the outgoing
- // argument area
- emit->emitIns_S_R(ins_Store(type), attr, reg, varNumOut, argOffsetOut);
- argOffsetOut += EA_SIZE_IN_BYTES(attr);
- assert(argOffsetOut <= argOffsetMax); // We can't write beyound the outgoing area area
- }
- }
- else // We must have a GT_OBJ or a GT_LCL_VAR
- {
- NYI("genPutArgStk: GT_OBJ or GT_LCL_VAR source of struct type");
- }
- }
-}
-
//------------------------------------------------------------------------
// instGen_Set_Reg_To_Imm: Move an immediate value into an integer register.
//
@@ -1248,69 +1098,6 @@ void CodeGen::genLockedInstructions(GenTreeOp* treeNode)
NYI("genLockedInstructions");
}
-//----------------------------------------------------------------------------------
-// genMultiRegCallStoreToLocal: store multi-reg return value of a call node to a local
-//
-// Arguments:
-// treeNode - Gentree of GT_STORE_LCL_VAR
-//
-// Return Value:
-// None
-//
-// Assumption:
-// The child of store is a multi-reg call node.
-// genProduceReg() on treeNode is made by caller of this routine.
-//
-void CodeGen::genMultiRegCallStoreToLocal(GenTreePtr treeNode)
-{
- assert(treeNode->OperGet() == GT_STORE_LCL_VAR);
-
- // Longs are returned in two return registers on Arm32.
- assert(varTypeIsLong(treeNode));
-
- // Assumption: current Arm32 implementation requires that a multi-reg long
- // var in 'var = call' is flagged as lvIsMultiRegRet to prevent it from
- // being promoted.
- unsigned lclNum = treeNode->AsLclVarCommon()->gtLclNum;
- LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);
- noway_assert(varDsc->lvIsMultiRegRet);
-
- GenTree* op1 = treeNode->gtGetOp1();
- GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
- GenTreeCall* call = actualOp1->AsCall();
- assert(call->HasMultiRegRetVal());
-
- genConsumeRegs(op1);
-
- ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
- unsigned regCount = retTypeDesc->GetReturnRegCount();
- assert(regCount <= MAX_RET_REG_COUNT);
-
- // Stack store
- int offset = 0;
- for (unsigned i = 0; i < regCount; ++i)
- {
- var_types type = retTypeDesc->GetReturnRegType(i);
- regNumber reg = call->GetRegNumByIdx(i);
- if (op1->IsCopyOrReload())
- {
- // GT_COPY/GT_RELOAD will have valid reg for those positions
- // that need to be copied or reloaded.
- regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(i);
- if (reloadReg != REG_NA)
- {
- reg = reloadReg;
- }
- }
-
- assert(reg != REG_NA);
- getEmitter()->emitIns_S_R(ins_Store(type), emitTypeSize(type), reg, lclNum, offset);
- offset += genTypeSize(type);
- }
-
- varDsc->lvRegNum = REG_STK;
-}
-
//--------------------------------------------------------------------------------------
// genLclHeap: Generate code for localloc
//
@@ -1680,223 +1467,6 @@ void CodeGen::genJumpTable(GenTree* treeNode)
}
//------------------------------------------------------------------------
-// genRangeCheck: generate code for GT_ARR_BOUNDS_CHECK node.
-//
-void CodeGen::genRangeCheck(GenTreePtr oper)
-{
- noway_assert(oper->OperGet() == GT_ARR_BOUNDS_CHECK);
- GenTreeBoundsChk* bndsChk = oper->AsBoundsChk();
-
- GenTreePtr arrIdx = bndsChk->gtIndex->gtEffectiveVal();
- GenTreePtr arrLen = bndsChk->gtArrLen->gtEffectiveVal();
- GenTreePtr arrRef = NULL;
- int lenOffset = 0;
-
- genConsumeIfReg(arrIdx);
- genConsumeIfReg(arrLen);
-
- GenTree * src1, *src2;
- emitJumpKind jmpKind;
-
- if (arrIdx->isContainedIntOrIImmed())
- {
- // To encode using a cmp immediate, we place the
- // constant operand in the second position
- src1 = arrLen;
- src2 = arrIdx;
- jmpKind = genJumpKindForOper(GT_LE, CK_UNSIGNED);
- }
- else
- {
- src1 = arrIdx;
- src2 = arrLen;
- jmpKind = genJumpKindForOper(GT_GE, CK_UNSIGNED);
- }
-
- getEmitter()->emitInsBinary(INS_cmp, emitAttr(TYP_INT), src1, src2);
- genJumpToThrowHlpBlk(jmpKind, SCK_RNGCHK_FAIL, bndsChk->gtIndRngFailBB);
-}
-
-//------------------------------------------------------------------------
-// genOffsetOfMDArrayLowerBound: Returns the offset from the Array object to the
-// lower bound for the given dimension.
-//
-// Arguments:
-// elemType - the element type of the array
-// rank - the rank of the array
-// dimension - the dimension for which the lower bound offset will be returned.
-//
-// Return Value:
-// The offset.
-// TODO-Cleanup: move to CodeGenCommon.cpp
-
-// static
-unsigned CodeGen::genOffsetOfMDArrayLowerBound(var_types elemType, unsigned rank, unsigned dimension)
-{
- // Note that the lower bound and length fields of the Array object are always TYP_INT
- return compiler->eeGetArrayDataOffset(elemType) + genTypeSize(TYP_INT) * (dimension + rank);
-}
-
-//------------------------------------------------------------------------
-// genOffsetOfMDArrayLength: Returns the offset from the Array object to the
-// size for the given dimension.
-//
-// Arguments:
-// elemType - the element type of the array
-// rank - the rank of the array
-// dimension - the dimension for which the lower bound offset will be returned.
-//
-// Return Value:
-// The offset.
-// TODO-Cleanup: move to CodeGenCommon.cpp
-
-// static
-unsigned CodeGen::genOffsetOfMDArrayDimensionSize(var_types elemType, unsigned rank, unsigned dimension)
-{
- // Note that the lower bound and length fields of the Array object are always TYP_INT
- return compiler->eeGetArrayDataOffset(elemType) + genTypeSize(TYP_INT) * dimension;
-}
-
-//------------------------------------------------------------------------
-// genCodeForArrIndex: Generates code to bounds check the index for one dimension of an array reference,
-// producing the effective index by subtracting the lower bound.
-//
-// Arguments:
-// arrIndex - the node for which we're generating code
-//
-// Return Value:
-// None.
-//
-
-void CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex)
-{
- emitter* emit = getEmitter();
- GenTreePtr arrObj = arrIndex->ArrObj();
- GenTreePtr indexNode = arrIndex->IndexExpr();
- regNumber arrReg = genConsumeReg(arrObj);
- regNumber indexReg = genConsumeReg(indexNode);
- regNumber tgtReg = arrIndex->gtRegNum;
- noway_assert(tgtReg != REG_NA);
-
- // We will use a temp register to load the lower bound and dimension size values
- //
- regMaskTP tmpRegsMask = arrIndex->gtRsvdRegs; // there will be two bits set
- tmpRegsMask &= ~genRegMask(tgtReg); // remove the bit for 'tgtReg' from 'tmpRegsMask'
-
- regMaskTP tmpRegMask = genFindLowestBit(tmpRegsMask); // set tmpRegMsk to a one-bit mask
- regNumber tmpReg = genRegNumFromMask(tmpRegMask); // set tmpReg from that mask
- noway_assert(tmpReg != REG_NA);
-
- assert(tgtReg != tmpReg);
-
- unsigned dim = arrIndex->gtCurrDim;
- unsigned rank = arrIndex->gtArrRank;
- var_types elemType = arrIndex->gtArrElemType;
- unsigned offset;
-
- offset = genOffsetOfMDArrayLowerBound(elemType, rank, dim);
- emit->emitIns_R_R_I(ins_Load(TYP_INT), EA_4BYTE, tmpReg, arrReg, offset); // a 4 BYTE sign extending load
- emit->emitIns_R_R_R(INS_sub, EA_4BYTE, tgtReg, indexReg, tmpReg);
-
- offset = genOffsetOfMDArrayDimensionSize(elemType, rank, dim);
- emit->emitIns_R_R_I(ins_Load(TYP_INT), EA_4BYTE, tmpReg, arrReg, offset); // a 4 BYTE sign extending load
- emit->emitIns_R_R(INS_cmp, EA_4BYTE, tgtReg, tmpReg);
-
- emitJumpKind jmpGEU = genJumpKindForOper(GT_GE, CK_UNSIGNED);
- genJumpToThrowHlpBlk(jmpGEU, SCK_RNGCHK_FAIL);
-
- genProduceReg(arrIndex);
-}
-
-//------------------------------------------------------------------------
-// genCodeForArrOffset: Generates code to compute the flattened array offset for
-// one dimension of an array reference:
-// result = (prevDimOffset * dimSize) + effectiveIndex
-// where dimSize is obtained from the arrObj operand
-//
-// Arguments:
-// arrOffset - the node for which we're generating code
-//
-// Return Value:
-// None.
-//
-// Notes:
-// dimSize and effectiveIndex are always non-negative, the former by design,
-// and the latter because it has been normalized to be zero-based.
-
-void CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
-{
- GenTreePtr offsetNode = arrOffset->gtOffset;
- GenTreePtr indexNode = arrOffset->gtIndex;
- regNumber tgtReg = arrOffset->gtRegNum;
-
- noway_assert(tgtReg != REG_NA);
-
- if (!offsetNode->IsIntegralConst(0))
- {
- emitter* emit = getEmitter();
- regNumber offsetReg = genConsumeReg(offsetNode);
- noway_assert(offsetReg != REG_NA);
- regNumber indexReg = genConsumeReg(indexNode);
- noway_assert(indexReg != REG_NA);
- GenTreePtr arrObj = arrOffset->gtArrObj;
- regNumber arrReg = genConsumeReg(arrObj);
- noway_assert(arrReg != REG_NA);
- regMaskTP tmpRegMask = arrOffset->gtRsvdRegs;
- regNumber tmpReg = genRegNumFromMask(tmpRegMask);
- noway_assert(tmpReg != REG_NA);
- unsigned dim = arrOffset->gtCurrDim;
- unsigned rank = arrOffset->gtArrRank;
- var_types elemType = arrOffset->gtArrElemType;
- unsigned offset = genOffsetOfMDArrayDimensionSize(elemType, rank, dim);
-
- // Load tmpReg with the dimension size
- emit->emitIns_R_R_I(ins_Load(TYP_INT), EA_4BYTE, tmpReg, arrReg, offset); // a 4 BYTE sign extending load
-
- // Evaluate tgtReg = offsetReg*dim_size + indexReg.
- emit->emitIns_R_R_R(INS_MUL, EA_4BYTE, tgtReg, tmpReg, offsetReg);
- emit->emitIns_R_R_R(INS_add, EA_4BYTE, tgtReg, tgtReg, indexReg);
- }
- else
- {
- regNumber indexReg = genConsumeReg(indexNode);
- if (indexReg != tgtReg)
- {
- inst_RV_RV(INS_mov, tgtReg, indexReg, TYP_INT);
- }
- }
- genProduceReg(arrOffset);
-}
-
-//------------------------------------------------------------------------
-// indirForm: Make a temporary indir we can feed to pattern matching routines
-// in cases where we don't want to instantiate all the indirs that happen.
-//
-GenTreeIndir CodeGen::indirForm(var_types type, GenTree* base)
-{
- GenTreeIndir i(GT_IND, type, base, nullptr);
- i.gtRegNum = REG_NA;
- // has to be nonnull (because contained nodes can't be the last in block)
- // but don't want it to be a valid pointer
- i.gtNext = (GenTree*)(-1);
- return i;
-}
-
-//------------------------------------------------------------------------
-// intForm: Make a temporary int we can feed to pattern matching routines
-// in cases where we don't want to instantiate.
-//
-GenTreeIntCon CodeGen::intForm(var_types type, ssize_t value)
-{
- GenTreeIntCon i(type, value);
- i.gtRegNum = REG_NA;
- // has to be nonnull (because contained nodes can't be the last in block)
- // but don't want it to be a valid pointer
- i.gtNext = (GenTree*)(-1);
- return i;
-}
-
-//------------------------------------------------------------------------
// genGetInsForOper: Return instruction encoding of the operation tree.
//
instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
@@ -1972,59 +1542,6 @@ instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
return ins;
}
-//------------------------------------------------------------------------
-// genCodeForShift: Generates the code sequence for a GenTree node that
-// represents a bit shift or rotate operation (<<, >>, >>>, rol, ror).
-//
-// Arguments:
-// tree - the bit shift node (that specifies the type of bit shift to perform).
-//
-// Assumptions:
-// a) All GenTrees are register allocated.
-//
-void CodeGen::genCodeForShift(GenTreePtr tree)
-{
- var_types targetType = tree->TypeGet();
- genTreeOps oper = tree->OperGet();
- instruction ins = genGetInsForOper(oper, targetType);
- emitAttr size = emitTypeSize(tree);
-
- assert(tree->gtRegNum != REG_NA);
-
- genConsumeOperands(tree->AsOp());
-
- GenTreePtr operand = tree->gtGetOp1();
- GenTreePtr shiftBy = tree->gtGetOp2();
- if (!shiftBy->IsCnsIntOrI())
- {
- getEmitter()->emitIns_R_R_R(ins, size, tree->gtRegNum, operand->gtRegNum, shiftBy->gtRegNum);
- }
- else
- {
- unsigned immWidth = size * BITS_PER_BYTE;
- ssize_t shiftByImm = shiftBy->gtIntCon.gtIconVal & (immWidth - 1);
-
- getEmitter()->emitIns_R_R_I(ins, size, tree->gtRegNum, operand->gtRegNum, shiftByImm);
- }
-
- genProduceReg(tree);
-}
-
-// Generate code for a CpBlk node by the means of the VM memcpy helper call
-// Preconditions:
-// a) The size argument of the CpBlk is not an integer constant
-// b) The size argument is a constant but is larger than CPBLK_MOVS_LIMIT bytes.
-void CodeGen::genCodeForCpBlk(GenTreeBlk* cpBlkNode)
-{
- // Make sure we got the arguments of the cpblk operation in the right registers
- unsigned blockSize = cpBlkNode->Size();
- GenTreePtr dstAddr = cpBlkNode->Addr();
- assert(!dstAddr->isContained());
-
- genConsumeBlockOp(cpBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
- genEmitHelperCall(CORINFO_HELP_MEMCPY, 0, EA_UNKNOWN);
-}
-
// Generates CpBlk code by performing a loop unroll
// Preconditions:
// The size argument of the CpBlk node is a constant and <= 64 bytes.
@@ -2082,36 +1599,6 @@ void CodeGen::genCodeForStoreBlk(GenTreeBlk* blkOp)
}
}
-// Generates code for InitBlk by calling the VM memset helper function.
-// Preconditions:
-// a) The size argument of the InitBlk is not an integer constant.
-// b) The size argument of the InitBlk is >= INITBLK_STOS_LIMIT bytes.
-void CodeGen::genCodeForInitBlk(GenTreeBlk* initBlkNode)
-{
- // Make sure we got the arguments of the initblk operation in the right registers
- unsigned size = initBlkNode->Size();
- GenTreePtr dstAddr = initBlkNode->Addr();
- GenTreePtr initVal = initBlkNode->Data();
- if (initVal->OperIsInitVal())
- {
- initVal = initVal->gtGetOp1();
- }
-
- assert(!dstAddr->isContained());
- assert(!initVal->isContained());
- if (initBlkNode->gtOper == GT_STORE_DYN_BLK)
- {
- assert(initBlkNode->AsDynBlk()->gtDynamicSize->gtRegNum == REG_ARG_2);
- }
- else
- {
- assert(initBlkNode->gtRsvdRegs == RBM_ARG_2);
- }
-
- genConsumeBlockOp(initBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
- genEmitHelperCall(CORINFO_HELP_MEMSET, 0, EA_UNKNOWN);
-}
-
//------------------------------------------------------------------------
// genCodeForShiftLong: Generates the code sequence for a GenTree node that
// represents a three operand bit shift or rotate operation (<<Hi, >>Lo).
@@ -2176,370 +1663,6 @@ void CodeGen::genCodeForShiftLong(GenTreePtr tree)
}
//------------------------------------------------------------------------
-// genRegCopy: Generate a register copy.
-//
-void CodeGen::genRegCopy(GenTree* treeNode)
-{
- assert(treeNode->OperGet() == GT_COPY);
-
- var_types targetType = treeNode->TypeGet();
- regNumber targetReg = treeNode->gtRegNum;
- assert(targetReg != REG_NA);
-
- GenTree* op1 = treeNode->gtOp.gtOp1;
-
- // Check whether this node and the node from which we're copying the value have the same
- // register type.
- // This can happen if (currently iff) we have a SIMD vector type that fits in an integer
- // register, in which case it is passed as an argument, or returned from a call,
- // in an integer register and must be copied if it's in an xmm register.
-
- if (varTypeIsFloating(treeNode) != varTypeIsFloating(op1))
- {
- NYI("genRegCopy floating point");
- }
- else
- {
- inst_RV_RV(ins_Copy(targetType), targetReg, genConsumeReg(op1), targetType);
- }
-
- if (op1->IsLocal())
- {
- // The lclVar will never be a def.
- // If it is a last use, the lclVar will be killed by genConsumeReg(), as usual, and genProduceReg will
- // appropriately set the gcInfo for the copied value.
- // If not, there are two cases we need to handle:
- // - If this is a TEMPORARY copy (indicated by the GTF_VAR_DEATH flag) the variable
- // will remain live in its original register.
- // genProduceReg() will appropriately set the gcInfo for the copied value,
- // and genConsumeReg will reset it.
- // - Otherwise, we need to update register info for the lclVar.
-
- GenTreeLclVarCommon* lcl = op1->AsLclVarCommon();
- assert((lcl->gtFlags & GTF_VAR_DEF) == 0);
-
- if ((lcl->gtFlags & GTF_VAR_DEATH) == 0 && (treeNode->gtFlags & GTF_VAR_DEATH) == 0)
- {
- LclVarDsc* varDsc = &compiler->lvaTable[lcl->gtLclNum];
-
- // If we didn't just spill it (in genConsumeReg, above), then update the register info
- if (varDsc->lvRegNum != REG_STK)
- {
- // The old location is dying
- genUpdateRegLife(varDsc, /*isBorn*/ false, /*isDying*/ true DEBUGARG(op1));
-
- gcInfo.gcMarkRegSetNpt(genRegMask(op1->gtRegNum));
-
- genUpdateVarReg(varDsc, treeNode);
-
- // The new location is going live
- genUpdateRegLife(varDsc, /*isBorn*/ true, /*isDying*/ false DEBUGARG(treeNode));
- }
- }
- }
-
- genProduceReg(treeNode);
-}
-
-//------------------------------------------------------------------------
-// genCallInstruction: Produce code for a GT_CALL node
-//
-void CodeGen::genCallInstruction(GenTreeCall* call)
-{
- gtCallTypes callType = (gtCallTypes)call->gtCallType;
-
- IL_OFFSETX ilOffset = BAD_IL_OFFSET;
-
- // all virtuals should have been expanded into a control expression
- assert(!call->IsVirtual() || call->gtControlExpr || call->gtCallAddr);
-
- // Consume all the arg regs
- for (GenTreePtr list = call->gtCallLateArgs; list; list = list->MoveNext())
- {
- assert(list->OperIsList());
-
- GenTreePtr argNode = list->Current();
-
- fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, argNode->gtSkipReloadOrCopy());
- assert(curArgTabEntry);
-
- if (curArgTabEntry->regNum == REG_STK)
- continue;
-
- // Deal with multi register passed struct args.
- if (argNode->OperGet() == GT_FIELD_LIST)
- {
- GenTreeArgList* argListPtr = argNode->AsArgList();
- unsigned iterationNum = 0;
- regNumber argReg = curArgTabEntry->regNum;
- for (; argListPtr != nullptr; argListPtr = argListPtr->Rest(), iterationNum++)
- {
- GenTreePtr putArgRegNode = argListPtr->gtOp.gtOp1;
- assert(putArgRegNode->gtOper == GT_PUTARG_REG);
-
- genConsumeReg(putArgRegNode);
-
- if (putArgRegNode->gtRegNum != argReg)
- {
- inst_RV_RV(ins_Move_Extend(putArgRegNode->TypeGet(), putArgRegNode->InReg()), argReg,
- putArgRegNode->gtRegNum);
- }
-
- argReg = genRegArgNext(argReg);
- }
- }
- else
- {
- regNumber argReg = curArgTabEntry->regNum;
- genConsumeReg(argNode);
- if (argNode->gtRegNum != argReg)
- {
- inst_RV_RV(ins_Move_Extend(argNode->TypeGet(), argNode->InReg()), argReg, argNode->gtRegNum);
- }
- }
-
- // In the case of a varargs call,
- // the ABI dictates that if we have floating point args,
- // we must pass the enregistered arguments in both the
- // integer and floating point registers so, let's do that.
- if (call->IsVarargs() && varTypeIsFloating(argNode))
- {
- NYI_ARM("CodeGen - IsVarargs");
- }
- }
-
- // Insert a null check on "this" pointer if asked.
- if (call->NeedsNullCheck())
- {
- const regNumber regThis = genGetThisArgReg(call);
- regMaskTP tempMask = genFindLowestBit(call->gtRsvdRegs);
- const regNumber tmpReg = genRegNumFromMask(tempMask);
- if (genCountBits(call->gtRsvdRegs) > 1)
- {
- call->gtRsvdRegs &= ~tempMask;
- }
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, regThis, 0);
- }
-
- // Either gtControlExpr != null or gtCallAddr != null or it is a direct non-virtual call to a user or helper method.
- CORINFO_METHOD_HANDLE methHnd;
- GenTree* target = call->gtControlExpr;
- if (callType == CT_INDIRECT)
- {
- assert(target == nullptr);
- target = call->gtCallAddr;
- methHnd = nullptr;
- }
- else
- {
- methHnd = call->gtCallMethHnd;
- }
-
- CORINFO_SIG_INFO* sigInfo = nullptr;
-#ifdef DEBUG
- // Pass the call signature information down into the emitter so the emitter can associate
- // native call sites with the signatures they were generated from.
- if (callType != CT_HELPER)
- {
- sigInfo = call->callSig;
- }
-#endif // DEBUG
-
- // If fast tail call, then we are done.
- if (call->IsFastTailCall())
- {
- NYI_ARM("fast tail call");
- }
-
- // For a pinvoke to unmanaged code we emit a label to clear
- // the GC pointer state before the callsite.
- // We can't utilize the typical lazy killing of GC pointers
- // at (or inside) the callsite.
- if (call->IsUnmanaged())
- {
- genDefineTempLabel(genCreateTempLabel());
- }
-
- // Determine return value size(s).
- ReturnTypeDesc* pRetTypeDesc = call->GetReturnTypeDesc();
- emitAttr retSize = EA_PTRSIZE;
- emitAttr secondRetSize = EA_UNKNOWN;
-
- if (call->HasMultiRegRetVal())
- {
- retSize = emitTypeSize(pRetTypeDesc->GetReturnRegType(0));
- secondRetSize = emitTypeSize(pRetTypeDesc->GetReturnRegType(1));
- }
- else
- {
- assert(!varTypeIsStruct(call));
-
- if (call->gtType == TYP_REF || call->gtType == TYP_ARRAY)
- {
- retSize = EA_GCREF;
- }
- else if (call->gtType == TYP_BYREF)
- {
- retSize = EA_BYREF;
- }
- }
-
- // We need to propagate the IL offset information to the call instruction, so we can emit
- // an IL to native mapping record for the call, to support managed return value debugging.
- // We don't want tail call helper calls that were converted from normal calls to get a record,
- // so we skip this hash table lookup logic in that case.
- if (compiler->opts.compDbgInfo && compiler->genCallSite2ILOffsetMap != nullptr && !call->IsTailCall())
- {
- (void)compiler->genCallSite2ILOffsetMap->Lookup(call, &ilOffset);
- }
-
- if (target != nullptr)
- {
- // For ARM a call target can not be a contained indirection
- assert(!target->isContainedIndir());
-
- genConsumeReg(target);
-
- // We have already generated code for gtControlExpr evaluating it into a register.
- // We just need to emit "call reg" in this case.
- //
- assert(genIsValidIntReg(target->gtRegNum));
-
- genEmitCall(emitter::EC_INDIR_R, methHnd,
- INDEBUG_LDISASM_COMMA(sigInfo) nullptr, // addr
- retSize, ilOffset, target->gtRegNum);
- }
- else
- {
- // Generate a direct call to a non-virtual user defined or helper method
- assert(callType == CT_HELPER || callType == CT_USER_FUNC);
-
- void* addr = nullptr;
- if (callType == CT_HELPER)
- {
- // Direct call to a helper method.
- CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd);
- noway_assert(helperNum != CORINFO_HELP_UNDEF);
-
- void* pAddr = nullptr;
- addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr);
-
- if (addr == nullptr)
- {
- addr = pAddr;
- }
- }
- else
- {
- // Direct call to a non-virtual user function.
- CORINFO_ACCESS_FLAGS aflags = CORINFO_ACCESS_ANY;
- if (call->IsSameThis())
- {
- aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_THIS);
- }
-
- if ((call->NeedsNullCheck()) == 0)
- {
- aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_NONNULL);
- }
-
- CORINFO_CONST_LOOKUP addrInfo;
- compiler->info.compCompHnd->getFunctionEntryPoint(methHnd, &addrInfo, aflags);
-
- addr = addrInfo.addr;
- }
-
- assert(addr);
- // Non-virtual direct call to known addresses
- if (!arm_Valid_Imm_For_BL((ssize_t)addr))
- {
- regNumber tmpReg = genRegNumFromMask(call->gtRsvdRegs);
- instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, tmpReg, (ssize_t)addr);
- genEmitCall(emitter::EC_INDIR_R, methHnd, INDEBUG_LDISASM_COMMA(sigInfo) NULL, retSize, ilOffset, tmpReg);
- }
- else
- {
- genEmitCall(emitter::EC_FUNC_TOKEN, methHnd, INDEBUG_LDISASM_COMMA(sigInfo) addr, retSize, ilOffset);
- }
- }
-
- // if it was a pinvoke we may have needed to get the address of a label
- if (genPendingCallLabel)
- {
- assert(call->IsUnmanaged());
- genDefineTempLabel(genPendingCallLabel);
- genPendingCallLabel = nullptr;
- }
-
- // Update GC info:
- // All Callee arg registers are trashed and no longer contain any GC pointers.
- // TODO-ARM-Bug?: As a matter of fact shouldn't we be killing all of callee trashed regs here?
- // For now we will assert that other than arg regs gc ref/byref set doesn't contain any other
- // registers from RBM_CALLEE_TRASH
- assert((gcInfo.gcRegGCrefSetCur & (RBM_CALLEE_TRASH & ~RBM_ARG_REGS)) == 0);
- assert((gcInfo.gcRegByrefSetCur & (RBM_CALLEE_TRASH & ~RBM_ARG_REGS)) == 0);
- gcInfo.gcRegGCrefSetCur &= ~RBM_ARG_REGS;
- gcInfo.gcRegByrefSetCur &= ~RBM_ARG_REGS;
-
- var_types returnType = call->TypeGet();
- if (returnType != TYP_VOID)
- {
- regNumber returnReg;
-
- if (call->HasMultiRegRetVal())
- {
- assert(pRetTypeDesc != nullptr);
- unsigned regCount = pRetTypeDesc->GetReturnRegCount();
-
- // If regs allocated to call node are different from ABI return
- // regs in which the call has returned its result, move the result
- // to regs allocated to call node.
- for (unsigned i = 0; i < regCount; ++i)
- {
- var_types regType = pRetTypeDesc->GetReturnRegType(i);
- returnReg = pRetTypeDesc->GetABIReturnReg(i);
- regNumber allocatedReg = call->GetRegNumByIdx(i);
- if (returnReg != allocatedReg)
- {
- inst_RV_RV(ins_Copy(regType), allocatedReg, returnReg, regType);
- }
- }
- }
- else
- {
- if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME))
- {
- // The CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with
- // TCB in REG_PINVOKE_TCB. fgMorphCall() sets the correct argument registers.
- returnReg = REG_PINVOKE_TCB;
- }
- else if (varTypeIsFloating(returnType))
- {
- returnReg = REG_FLOATRET;
- }
- else
- {
- returnReg = REG_INTRET;
- }
-
- if (call->gtRegNum != returnReg)
- {
- inst_RV_RV(ins_Copy(returnType), call->gtRegNum, returnReg, returnType);
- }
- }
-
- genProduceReg(call);
- }
-
- // If there is nothing next, that means the result is thrown away, so this value is not live.
- // However, for minopts or debuggable code, we keep it live to support managed return value debugging.
- if ((call->gtNext == nullptr) && !compiler->opts.MinOpts() && !compiler->opts.compDbgCode)
- {
- gcInfo.gcMarkRegSetNpt(RBM_INTRET);
- }
-}
-
-//------------------------------------------------------------------------
// genLeaInstruction: Produce code for a GT_LEA subnode.
//
void CodeGen::genLeaInstruction(GenTreeAddrMode* lea)
@@ -2848,229 +1971,6 @@ void CodeGen::genLongToIntCast(GenTree* cast)
}
//------------------------------------------------------------------------
-// genIntToIntCast: Generate code for an integer cast
-//
-// Arguments:
-// treeNode - The GT_CAST node
-//
-// Return Value:
-// None.
-//
-// Assumptions:
-// The treeNode must have an assigned register.
-// For a signed convert from byte, the source must be in a byte-addressable register.
-// Neither the source nor target type can be a floating point type.
-//
-void CodeGen::genIntToIntCast(GenTreePtr treeNode)
-{
- assert(treeNode->OperGet() == GT_CAST);
-
- GenTreePtr castOp = treeNode->gtCast.CastOp();
- emitter* emit = getEmitter();
-
- var_types dstType = treeNode->CastToType();
- var_types srcType = genActualType(castOp->TypeGet());
- emitAttr movSize = emitActualTypeSize(dstType);
- bool movRequired = false;
-
- if (varTypeIsLong(srcType))
- {
- genLongToIntCast(treeNode);
- return;
- }
-
- regNumber targetReg = treeNode->gtRegNum;
- regNumber sourceReg = castOp->gtRegNum;
-
- // For Long to Int conversion we will have a reserved integer register to hold the immediate mask
- regNumber tmpReg = (treeNode->gtRsvdRegs == RBM_NONE) ? REG_NA : genRegNumFromMask(treeNode->gtRsvdRegs);
-
- assert(genIsValidIntReg(targetReg));
- assert(genIsValidIntReg(sourceReg));
-
- instruction ins = INS_invalid;
-
- genConsumeReg(castOp);
- Lowering::CastInfo castInfo;
-
- // Get information about the cast.
- Lowering::getCastDescription(treeNode, &castInfo);
-
- if (castInfo.requiresOverflowCheck)
- {
- emitAttr cmpSize = EA_ATTR(genTypeSize(srcType));
-
- if (castInfo.signCheckOnly)
- {
- // We only need to check for a negative value in sourceReg
- emit->emitIns_R_I(INS_cmp, cmpSize, sourceReg, 0);
- emitJumpKind jmpLT = genJumpKindForOper(GT_LT, CK_SIGNED);
- genJumpToThrowHlpBlk(jmpLT, SCK_OVERFLOW);
- noway_assert(genTypeSize(srcType) == 4 || genTypeSize(srcType) == 8);
- // This is only interesting case to ensure zero-upper bits.
- if ((srcType == TYP_INT) && (dstType == TYP_ULONG))
- {
- // cast to TYP_ULONG:
- // We use a mov with size=EA_4BYTE
- // which will zero out the upper bits
- movSize = EA_4BYTE;
- movRequired = true;
- }
- }
- else if (castInfo.unsignedSource || castInfo.unsignedDest)
- {
- // When we are converting from/to unsigned,
- // we only have to check for any bits set in 'typeMask'
-
- noway_assert(castInfo.typeMask != 0);
- emit->emitIns_R_I(INS_tst, cmpSize, sourceReg, castInfo.typeMask);
- emitJumpKind jmpNotEqual = genJumpKindForOper(GT_NE, CK_SIGNED);
- genJumpToThrowHlpBlk(jmpNotEqual, SCK_OVERFLOW);
- }
- else
- {
- // For a narrowing signed cast
- //
- // We must check the value is in a signed range.
-
- // Compare with the MAX
-
- noway_assert((castInfo.typeMin != 0) && (castInfo.typeMax != 0));
-
- if (emitter::emitIns_valid_imm_for_cmp(castInfo.typeMax, INS_FLAGS_DONT_CARE))
- {
- emit->emitIns_R_I(INS_cmp, cmpSize, sourceReg, castInfo.typeMax);
- }
- else
- {
- noway_assert(tmpReg != REG_NA);
- instGen_Set_Reg_To_Imm(cmpSize, tmpReg, castInfo.typeMax);
- emit->emitIns_R_R(INS_cmp, cmpSize, sourceReg, tmpReg);
- }
-
- emitJumpKind jmpGT = genJumpKindForOper(GT_GT, CK_SIGNED);
- genJumpToThrowHlpBlk(jmpGT, SCK_OVERFLOW);
-
- // Compare with the MIN
-
- if (emitter::emitIns_valid_imm_for_cmp(castInfo.typeMin, INS_FLAGS_DONT_CARE))
- {
- emit->emitIns_R_I(INS_cmp, cmpSize, sourceReg, castInfo.typeMin);
- }
- else
- {
- noway_assert(tmpReg != REG_NA);
- instGen_Set_Reg_To_Imm(cmpSize, tmpReg, castInfo.typeMin);
- emit->emitIns_R_R(INS_cmp, cmpSize, sourceReg, tmpReg);
- }
-
- emitJumpKind jmpLT = genJumpKindForOper(GT_LT, CK_SIGNED);
- genJumpToThrowHlpBlk(jmpLT, SCK_OVERFLOW);
- }
- ins = INS_mov;
- }
- else // Non-overflow checking cast.
- {
- if (genTypeSize(srcType) == genTypeSize(dstType))
- {
- ins = INS_mov;
- }
- else
- {
- var_types extendType = TYP_UNKNOWN;
-
- // If we need to treat a signed type as unsigned
- if ((treeNode->gtFlags & GTF_UNSIGNED) != 0)
- {
- extendType = genUnsignedType(srcType);
- movSize = emitTypeSize(extendType);
- movRequired = true;
- }
- else
- {
- if (genTypeSize(srcType) < genTypeSize(dstType))
- {
- extendType = srcType;
- movSize = emitTypeSize(srcType);
- if (srcType == TYP_UINT)
- {
- movRequired = true;
- }
- }
- else // (genTypeSize(srcType) > genTypeSize(dstType))
- {
- extendType = dstType;
- movSize = emitTypeSize(dstType);
- }
- }
-
- ins = ins_Move_Extend(extendType, castOp->InReg());
- }
- }
-
- // We should never be generating a load from memory instruction here!
- assert(!emit->emitInsIsLoad(ins));
-
- if ((ins != INS_mov) || movRequired || (targetReg != sourceReg))
- {
- emit->emitIns_R_R(ins, movSize, targetReg, sourceReg);
- }
-
- genProduceReg(treeNode);
-}
-
-//------------------------------------------------------------------------
-// genFloatToFloatCast: Generate code for a cast between float and double
-//
-// Arguments:
-// treeNode - The GT_CAST node
-//
-// Return Value:
-// None.
-//
-// Assumptions:
-// Cast is a non-overflow conversion.
-// The treeNode must have an assigned register.
-// The cast is between float and double.
-//
-void CodeGen::genFloatToFloatCast(GenTreePtr treeNode)
-{
- // float <--> double conversions are always non-overflow ones
- assert(treeNode->OperGet() == GT_CAST);
- assert(!treeNode->gtOverflow());
-
- regNumber targetReg = treeNode->gtRegNum;
- assert(genIsValidFloatReg(targetReg));
-
- GenTreePtr op1 = treeNode->gtOp.gtOp1;
- assert(!op1->isContained()); // Cannot be contained
- assert(genIsValidFloatReg(op1->gtRegNum)); // Must be a valid float reg.
-
- var_types dstType = treeNode->CastToType();
- var_types srcType = op1->TypeGet();
- assert(varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
-
- genConsumeOperands(treeNode->AsOp());
-
- // treeNode must be a reg
- assert(!treeNode->isContained());
-
- if (srcType != dstType)
- {
- instruction insVcvt = (srcType == TYP_FLOAT) ? INS_vcvt_f2d // convert Float to Double
- : INS_vcvt_d2f; // convert Double to Float
-
- getEmitter()->emitIns_R_R(insVcvt, emitTypeSize(treeNode), treeNode->gtRegNum, op1->gtRegNum);
- }
- else if (treeNode->gtRegNum != op1->gtRegNum)
- {
- getEmitter()->emitIns_R_R(INS_vmov, emitTypeSize(treeNode), treeNode->gtRegNum, op1->gtRegNum);
- }
-
- genProduceReg(treeNode);
-}
-
-//------------------------------------------------------------------------
// genIntToFloatCast: Generate code to cast an int/long to float/double
//
// Arguments:
@@ -3228,38 +2128,6 @@ void CodeGen::genFloatToIntCast(GenTreePtr treeNode)
}
//------------------------------------------------------------------------
-// genCreateAndStoreGCInfo: Create and record GC Info for the function.
-//
-void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize,
- unsigned prologSize,
- unsigned epilogSize DEBUGARG(void* codePtr))
-{
- IAllocator* allowZeroAlloc = new (compiler, CMK_GC) AllowZeroAllocator(compiler->getAllocatorGC());
- GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC)
- GcInfoEncoder(compiler->info.compCompHnd, compiler->info.compMethodInfo, allowZeroAlloc, NOMEM);
- assert(gcInfoEncoder);
-
- // Follow the code pattern of the x86 gc info encoder (genCreateAndStoreGCInfoJIT32).
- gcInfo.gcInfoBlockHdrSave(gcInfoEncoder, codeSize, prologSize);
-
- // We keep the call count for the second call to gcMakeRegPtrTable() below.
- unsigned callCnt = 0;
- // First we figure out the encoder ID's for the stack slots and registers.
- gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_ASSIGN_SLOTS, &callCnt);
- // Now we've requested all the slots we'll need; "finalize" these (make more compact data structures for them).
- gcInfoEncoder->FinalizeSlotIds();
- // Now we can actually use those slot ID's to declare live ranges.
- gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_DO_WORK, &callCnt);
-
- gcInfoEncoder->Build();
-
- // GC Encoder automatically puts the GC info in the right spot using ICorJitInfo::allocGCInfo(size_t)
- // let's save the values anyway for debugging purposes
- compiler->compInfoBlkAddr = gcInfoEncoder->Emit();
- compiler->compInfoBlkSize = 0; // not exposed by the GCEncoder interface
-}
-
-//------------------------------------------------------------------------
// genEmitHelperCall: Emit a call to a helper function.
//
void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, regNumber callTargetReg /*= REG_NA */)
diff --git a/src/jit/codegenarm64.cpp b/src/jit/codegenarm64.cpp
index c17e033fa6..7f98221df8 100644
--- a/src/jit/codegenarm64.cpp
+++ b/src/jit/codegenarm64.cpp
@@ -1265,24 +1265,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
-/*****************************************************************************
- *
- * Generate code that will set the given register to the integer constant.
- */
-
-void CodeGen::genSetRegToIcon(regNumber reg, ssize_t val, var_types type, insFlags flags)
-{
- // Reg cannot be a FP reg
- assert(!genIsValidFloatReg(reg));
-
- // The only TYP_REF constant that can come this path is a managed 'null' since it is not
- // relocatable. Other ref type constants (e.g. string objects) go through a different
- // code path.
- noway_assert(type != TYP_REF || val == 0);
-
- instGen_Set_Reg_To_Imm(emitActualTypeSize(type), reg, val, flags);
-}
-
BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
{
// Generate a call to the finally, like this:
@@ -2861,77 +2843,6 @@ void CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
}
-//----------------------------------------------------------------------------------
-// genMultiRegCallStoreToLocal: store multi-reg return value of a call node to a local
-//
-// Arguments:
-// treeNode - Gentree of GT_STORE_LCL_VAR
-//
-// Return Value:
-// None
-//
-// Assumption:
-// The child of store is a multi-reg call node.
-// genProduceReg() on treeNode is made by caller of this routine.
-//
-void CodeGen::genMultiRegCallStoreToLocal(GenTreePtr treeNode)
-{
- assert(treeNode->OperGet() == GT_STORE_LCL_VAR);
-
- // Structs of size >=9 and <=16 are returned in two return registers on ARM64 and HFAs.
- assert(varTypeIsStruct(treeNode));
-
- // Assumption: current ARM64 implementation requires that a multi-reg struct
- // var in 'var = call' is flagged as lvIsMultiRegRet to prevent it from
- // being struct promoted.
- unsigned lclNum = treeNode->AsLclVarCommon()->gtLclNum;
- LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);
- noway_assert(varDsc->lvIsMultiRegRet);
-
- GenTree* op1 = treeNode->gtGetOp1();
- GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
- GenTreeCall* call = actualOp1->AsCall();
- assert(call->HasMultiRegRetVal());
-
- genConsumeRegs(op1);
-
- ReturnTypeDesc* pRetTypeDesc = call->GetReturnTypeDesc();
- unsigned regCount = pRetTypeDesc->GetReturnRegCount();
-
- if (treeNode->gtRegNum != REG_NA)
- {
- // Right now the only enregistrable structs supported are SIMD types.
- assert(varTypeIsSIMD(treeNode));
- NYI("GT_STORE_LCL_VAR of a SIMD enregisterable struct");
- }
- else
- {
- // Stack store
- int offset = 0;
- for (unsigned i = 0; i < regCount; ++i)
- {
- var_types type = pRetTypeDesc->GetReturnRegType(i);
- regNumber reg = call->GetRegNumByIdx(i);
- if (op1->IsCopyOrReload())
- {
- // GT_COPY/GT_RELOAD will have valid reg for those positions
- // that need to be copied or reloaded.
- regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(i);
- if (reloadReg != REG_NA)
- {
- reg = reloadReg;
- }
- }
-
- assert(reg != REG_NA);
- getEmitter()->emitIns_S_R(ins_Store(type), emitTypeSize(type), reg, lclNum, offset);
- offset += genTypeSize(type);
- }
-
- varDsc->lvRegNum = REG_STK;
- }
-}
-
/***********************************************************************************************
* Generate code for localloc
*/
@@ -3349,42 +3260,6 @@ void CodeGen::genCodeForInitBlkUnroll(GenTreeBlk* initBlkNode)
}
}
-// Generates code for InitBlk by calling the VM memset helper function.
-// Preconditions:
-// a) The size argument of the InitBlk is not an integer constant.
-// b) The size argument of the InitBlk is >= INITBLK_STOS_LIMIT bytes.
-void CodeGen::genCodeForInitBlk(GenTreeBlk* initBlkNode)
-{
- // Make sure we got the arguments of the initblk operation in the right registers
- unsigned size = initBlkNode->Size();
- GenTreePtr dstAddr = initBlkNode->Addr();
- GenTreePtr initVal = initBlkNode->Data();
- if (initVal->OperIsInitVal())
- {
- initVal = initVal->gtGetOp1();
- }
-
- assert(!dstAddr->isContained());
- assert(!initVal->isContained());
- if (initBlkNode->gtOper == GT_STORE_DYN_BLK)
- {
- assert(initBlkNode->AsDynBlk()->gtDynamicSize->gtRegNum == REG_ARG_2);
- }
- else
- {
- assert(initBlkNode->gtRsvdRegs == RBM_ARG_2);
- }
-
- if (size != 0)
- {
- assert(size > INITBLK_UNROLL_LIMIT);
- }
-
- genConsumeBlockOp(initBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
-
- genEmitHelperCall(CORINFO_HELP_MEMSET, 0, EA_UNKNOWN);
-}
-
// Generate code for a load from some address + offset
// base: tree node which can be either a local address or arbitrary node
// offset: distance from the base from which to load
@@ -3689,27 +3564,6 @@ void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode)
gcInfo.gcMarkRegSetNpt(RBM_WRITE_BARRIER_SRC_BYREF | RBM_WRITE_BARRIER_DST_BYREF);
}
-// Generate code for a CpBlk node by the means of the VM memcpy helper call
-// Preconditions:
-// a) The size argument of the CpBlk is not an integer constant
-// b) The size argument is a constant but is larger than CPBLK_MOVS_LIMIT bytes.
-void CodeGen::genCodeForCpBlk(GenTreeBlk* cpBlkNode)
-{
- // Make sure we got the arguments of the cpblk operation in the right registers
- unsigned blockSize = cpBlkNode->Size();
- GenTreePtr dstAddr = cpBlkNode->Addr();
- assert(!dstAddr->isContained());
-
- genConsumeBlockOp(cpBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
-
- if (blockSize != 0)
- {
- assert(blockSize > CPBLK_UNROLL_LIMIT);
- }
-
- genEmitHelperCall(CORINFO_HELP_MEMCPY, 0, EA_UNKNOWN);
-}
-
// generate code do a switch statement based on a table of ip-relative offsets
void CodeGen::genTableBasedSwitch(GenTree* treeNode)
{
@@ -3830,239 +3684,6 @@ void CodeGen::genLockedInstructions(GenTreeOp* treeNode)
#endif // !0
}
-// generate code for BoundsCheck nodes
-void CodeGen::genRangeCheck(GenTreePtr oper)
-{
-#ifdef FEATURE_SIMD
- noway_assert(oper->OperGet() == GT_ARR_BOUNDS_CHECK || oper->OperGet() == GT_SIMD_CHK);
-#else // !FEATURE_SIMD
- noway_assert(oper->OperGet() == GT_ARR_BOUNDS_CHECK);
-#endif // !FEATURE_SIMD
-
- GenTreeBoundsChk* bndsChk = oper->AsBoundsChk();
-
- GenTreePtr arrLen = bndsChk->gtArrLen;
- GenTreePtr arrIndex = bndsChk->gtIndex;
- GenTreePtr arrRef = NULL;
- int lenOffset = 0;
-
- GenTree * src1, *src2;
- emitJumpKind jmpKind;
-
- genConsumeRegs(arrIndex);
- genConsumeRegs(arrLen);
-
- if (arrIndex->isContainedIntOrIImmed())
- {
- // To encode using a cmp immediate, we place the
- // constant operand in the second position
- src1 = arrLen;
- src2 = arrIndex;
- jmpKind = genJumpKindForOper(GT_LE, CK_UNSIGNED);
- }
- else
- {
- src1 = arrIndex;
- src2 = arrLen;
- jmpKind = genJumpKindForOper(GT_GE, CK_UNSIGNED);
- }
-
- GenTreeIntConCommon* intConst = nullptr;
- if (src2->isContainedIntOrIImmed())
- {
- intConst = src2->AsIntConCommon();
- }
-
- if (intConst != nullptr)
- {
- getEmitter()->emitIns_R_I(INS_cmp, EA_4BYTE, src1->gtRegNum, intConst->IconValue());
- }
- else
- {
- getEmitter()->emitIns_R_R(INS_cmp, EA_4BYTE, src1->gtRegNum, src2->gtRegNum);
- }
-
- genJumpToThrowHlpBlk(jmpKind, SCK_RNGCHK_FAIL, bndsChk->gtIndRngFailBB);
-}
-
-//------------------------------------------------------------------------
-// genOffsetOfMDArrayLowerBound: Returns the offset from the Array object to the
-// lower bound for the given dimension.
-//
-// Arguments:
-// elemType - the element type of the array
-// rank - the rank of the array
-// dimension - the dimension for which the lower bound offset will be returned.
-//
-// Return Value:
-// The offset.
-// TODO-Cleanup: move to CodeGenCommon.cpp
-
-// static
-unsigned CodeGen::genOffsetOfMDArrayLowerBound(var_types elemType, unsigned rank, unsigned dimension)
-{
- // Note that the lower bound and length fields of the Array object are always TYP_INT, even on 64-bit targets.
- return compiler->eeGetArrayDataOffset(elemType) + genTypeSize(TYP_INT) * (dimension + rank);
-}
-
-//------------------------------------------------------------------------
-// genOffsetOfMDArrayLength: Returns the offset from the Array object to the
-// size for the given dimension.
-//
-// Arguments:
-// elemType - the element type of the array
-// rank - the rank of the array
-// dimension - the dimension for which the lower bound offset will be returned.
-//
-// Return Value:
-// The offset.
-// TODO-Cleanup: move to CodeGenCommon.cpp
-
-// static
-unsigned CodeGen::genOffsetOfMDArrayDimensionSize(var_types elemType, unsigned rank, unsigned dimension)
-{
- // Note that the lower bound and length fields of the Array object are always TYP_INT, even on 64-bit targets.
- return compiler->eeGetArrayDataOffset(elemType) + genTypeSize(TYP_INT) * dimension;
-}
-
-//------------------------------------------------------------------------
-// genCodeForArrIndex: Generates code to bounds check the index for one dimension of an array reference,
-// producing the effective index by subtracting the lower bound.
-//
-// Arguments:
-// arrIndex - the node for which we're generating code
-//
-// Return Value:
-// None.
-//
-
-void CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex)
-{
- emitter* emit = getEmitter();
- GenTreePtr arrObj = arrIndex->ArrObj();
- GenTreePtr indexNode = arrIndex->IndexExpr();
- regNumber arrReg = genConsumeReg(arrObj);
- regNumber indexReg = genConsumeReg(indexNode);
- regNumber tgtReg = arrIndex->gtRegNum;
- noway_assert(tgtReg != REG_NA);
-
- // We will use a temp register to load the lower bound and dimension size values
- //
- regMaskTP tmpRegsMask = arrIndex->gtRsvdRegs; // there will be two bits set
- tmpRegsMask &= ~genRegMask(tgtReg); // remove the bit for 'tgtReg' from 'tmpRegsMask'
-
- regMaskTP tmpRegMask = genFindLowestBit(tmpRegsMask); // set tmpRegMsk to a one-bit mask
- regNumber tmpReg = genRegNumFromMask(tmpRegMask); // set tmpReg from that mask
- noway_assert(tmpReg != REG_NA);
-
- assert(tgtReg != tmpReg);
-
- unsigned dim = arrIndex->gtCurrDim;
- unsigned rank = arrIndex->gtArrRank;
- var_types elemType = arrIndex->gtArrElemType;
- unsigned offset;
-
- offset = genOffsetOfMDArrayLowerBound(elemType, rank, dim);
- emit->emitIns_R_R_I(ins_Load(TYP_INT), EA_8BYTE, tmpReg, arrReg, offset); // a 4 BYTE sign extending load
- emit->emitIns_R_R_R(INS_sub, EA_4BYTE, tgtReg, indexReg, tmpReg);
-
- offset = genOffsetOfMDArrayDimensionSize(elemType, rank, dim);
- emit->emitIns_R_R_I(ins_Load(TYP_INT), EA_8BYTE, tmpReg, arrReg, offset); // a 4 BYTE sign extending load
- emit->emitIns_R_R(INS_cmp, EA_4BYTE, tgtReg, tmpReg);
-
- emitJumpKind jmpGEU = genJumpKindForOper(GT_GE, CK_UNSIGNED);
- genJumpToThrowHlpBlk(jmpGEU, SCK_RNGCHK_FAIL);
-
- genProduceReg(arrIndex);
-}
-
-//------------------------------------------------------------------------
-// genCodeForArrOffset: Generates code to compute the flattened array offset for
-// one dimension of an array reference:
-// result = (prevDimOffset * dimSize) + effectiveIndex
-// where dimSize is obtained from the arrObj operand
-//
-// Arguments:
-// arrOffset - the node for which we're generating code
-//
-// Return Value:
-// None.
-//
-// Notes:
-// dimSize and effectiveIndex are always non-negative, the former by design,
-// and the latter because it has been normalized to be zero-based.
-
-void CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
-{
- GenTreePtr offsetNode = arrOffset->gtOffset;
- GenTreePtr indexNode = arrOffset->gtIndex;
- regNumber tgtReg = arrOffset->gtRegNum;
-
- noway_assert(tgtReg != REG_NA);
-
- if (!offsetNode->IsIntegralConst(0))
- {
- emitter* emit = getEmitter();
- regNumber offsetReg = genConsumeReg(offsetNode);
- noway_assert(offsetReg != REG_NA);
- regNumber indexReg = genConsumeReg(indexNode);
- noway_assert(indexReg != REG_NA);
- GenTreePtr arrObj = arrOffset->gtArrObj;
- regNumber arrReg = genConsumeReg(arrObj);
- noway_assert(arrReg != REG_NA);
- regMaskTP tmpRegMask = arrOffset->gtRsvdRegs;
- regNumber tmpReg = genRegNumFromMask(tmpRegMask);
- noway_assert(tmpReg != REG_NA);
- unsigned dim = arrOffset->gtCurrDim;
- unsigned rank = arrOffset->gtArrRank;
- var_types elemType = arrOffset->gtArrElemType;
- unsigned offset = genOffsetOfMDArrayDimensionSize(elemType, rank, dim);
-
- // Load tmpReg with the dimension size
- emit->emitIns_R_R_I(ins_Load(TYP_INT), EA_8BYTE, tmpReg, arrReg, offset); // a 4 BYTE sign extending load
-
- // Evaluate tgtReg = offsetReg*dim_size + indexReg.
- emit->emitIns_R_R_R_R(INS_madd, EA_4BYTE, tgtReg, tmpReg, offsetReg, indexReg);
- }
- else
- {
- regNumber indexReg = genConsumeReg(indexNode);
- if (indexReg != tgtReg)
- {
- inst_RV_RV(INS_mov, tgtReg, indexReg, TYP_INT);
- }
- }
- genProduceReg(arrOffset);
-}
-
-// make a temporary indir we can feed to pattern matching routines
-// in cases where we don't want to instantiate all the indirs that happen
-//
-// TODO-Cleanup: move to CodeGenCommon.cpp
-GenTreeIndir CodeGen::indirForm(var_types type, GenTree* base)
-{
- GenTreeIndir i(GT_IND, type, base, nullptr);
- i.gtRegNum = REG_NA;
- // has to be nonnull (because contained nodes can't be the last in block)
- // but don't want it to be a valid pointer
- i.gtNext = (GenTree*)(-1);
- return i;
-}
-
-// make a temporary int we can feed to pattern matching routines
-// in cases where we don't want to instantiate
-//
-// TODO-Cleanup: move to CodeGenCommon.cpp
-GenTreeIntCon CodeGen::intForm(var_types type, ssize_t value)
-{
- GenTreeIntCon i(type, value);
- i.gtRegNum = REG_NA;
- // has to be nonnull (because contained nodes can't be the last in block)
- // but don't want it to be a valid pointer
- i.gtNext = (GenTree*)(-1);
- return i;
-}
-
instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
{
instruction ins = INS_brk;
@@ -4149,410 +3770,6 @@ instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
return ins;
}
-//------------------------------------------------------------------------
-// genCodeForShift: Generates the code sequence for a GenTree node that
-// represents a bit shift or rotate operation (<<, >>, >>>, rol, ror).
-//
-// Arguments:
-// tree - the bit shift node (that specifies the type of bit shift to perform).
-//
-// Assumptions:
-// a) All GenTrees are register allocated.
-//
-void CodeGen::genCodeForShift(GenTreePtr tree)
-{
- var_types targetType = tree->TypeGet();
- genTreeOps oper = tree->OperGet();
- instruction ins = genGetInsForOper(oper, targetType);
- emitAttr size = emitTypeSize(tree);
-
- assert(tree->gtRegNum != REG_NA);
-
- GenTreePtr operand = tree->gtGetOp1();
- genConsumeOperands(tree->AsOp());
-
- GenTreePtr shiftBy = tree->gtGetOp2();
- if (!shiftBy->IsCnsIntOrI())
- {
- getEmitter()->emitIns_R_R_R(ins, size, tree->gtRegNum, operand->gtRegNum, shiftBy->gtRegNum);
- }
- else
- {
- unsigned immWidth = emitter::getBitWidth(size); // immWidth will be set to 32 or 64
- ssize_t shiftByImm = shiftBy->gtIntCon.gtIconVal & (immWidth - 1);
-
- getEmitter()->emitIns_R_R_I(ins, size, tree->gtRegNum, operand->gtRegNum, shiftByImm);
- }
-
- genProduceReg(tree);
-}
-
-void CodeGen::genRegCopy(GenTree* treeNode)
-{
- assert(treeNode->OperGet() == GT_COPY);
-
- var_types targetType = treeNode->TypeGet();
- regNumber targetReg = treeNode->gtRegNum;
- assert(targetReg != REG_NA);
-
- GenTree* op1 = treeNode->gtOp.gtOp1;
-
- // Check whether this node and the node from which we're copying the value have the same
- // register type.
- // This can happen if (currently iff) we have a SIMD vector type that fits in an integer
- // register, in which case it is passed as an argument, or returned from a call,
- // in an integer register and must be copied if it's in an xmm register.
-
- if (varTypeIsFloating(treeNode) != varTypeIsFloating(op1))
- {
- inst_RV_RV(INS_fmov, targetReg, genConsumeReg(op1), targetType);
- }
- else
- {
- inst_RV_RV(ins_Copy(targetType), targetReg, genConsumeReg(op1), targetType);
- }
-
- if (op1->IsLocal())
- {
- // The lclVar will never be a def.
- // If it is a last use, the lclVar will be killed by genConsumeReg(), as usual, and genProduceReg will
- // appropriately set the gcInfo for the copied value.
- // If not, there are two cases we need to handle:
- // - If this is a TEMPORARY copy (indicated by the GTF_VAR_DEATH flag) the variable
- // will remain live in its original register.
- // genProduceReg() will appropriately set the gcInfo for the copied value,
- // and genConsumeReg will reset it.
- // - Otherwise, we need to update register info for the lclVar.
-
- GenTreeLclVarCommon* lcl = op1->AsLclVarCommon();
- assert((lcl->gtFlags & GTF_VAR_DEF) == 0);
-
- if ((lcl->gtFlags & GTF_VAR_DEATH) == 0 && (treeNode->gtFlags & GTF_VAR_DEATH) == 0)
- {
- LclVarDsc* varDsc = &compiler->lvaTable[lcl->gtLclNum];
-
- // If we didn't just spill it (in genConsumeReg, above), then update the register info
- if (varDsc->lvRegNum != REG_STK)
- {
- // The old location is dying
- genUpdateRegLife(varDsc, /*isBorn*/ false, /*isDying*/ true DEBUGARG(op1));
-
- gcInfo.gcMarkRegSetNpt(genRegMask(op1->gtRegNum));
-
- genUpdateVarReg(varDsc, treeNode);
-
- // The new location is going live
- genUpdateRegLife(varDsc, /*isBorn*/ true, /*isDying*/ false DEBUGARG(treeNode));
- }
- }
- }
- genProduceReg(treeNode);
-}
-
-// Produce code for a GT_CALL node
-void CodeGen::genCallInstruction(GenTreeCall* call)
-{
- gtCallTypes callType = (gtCallTypes)call->gtCallType;
-
- IL_OFFSETX ilOffset = BAD_IL_OFFSET;
-
- // all virtuals should have been expanded into a control expression
- assert(!call->IsVirtual() || call->gtControlExpr || call->gtCallAddr);
-
- // Consume all the arg regs
- for (GenTreePtr list = call->gtCallLateArgs; list; list = list->MoveNext())
- {
- assert(list->OperIsList());
-
- GenTreePtr argNode = list->Current();
-
- fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, argNode->gtSkipReloadOrCopy());
- assert(curArgTabEntry);
-
- if (curArgTabEntry->regNum == REG_STK)
- continue;
-
- // Deal with multi register passed struct args.
- if (argNode->OperGet() == GT_FIELD_LIST)
- {
- GenTreeArgList* argListPtr = argNode->AsArgList();
- unsigned iterationNum = 0;
- regNumber argReg = curArgTabEntry->regNum;
- for (; argListPtr != nullptr; argListPtr = argListPtr->Rest(), iterationNum++)
- {
- GenTreePtr putArgRegNode = argListPtr->gtOp.gtOp1;
- assert(putArgRegNode->gtOper == GT_PUTARG_REG);
-
- genConsumeReg(putArgRegNode);
-
- if (putArgRegNode->gtRegNum != argReg)
- {
- inst_RV_RV(ins_Move_Extend(putArgRegNode->TypeGet(), putArgRegNode->InReg()), argReg,
- putArgRegNode->gtRegNum);
- }
-
- argReg = genRegArgNext(argReg);
- }
- }
- else
- {
- regNumber argReg = curArgTabEntry->regNum;
- genConsumeReg(argNode);
- if (argNode->gtRegNum != argReg)
- {
- inst_RV_RV(ins_Move_Extend(argNode->TypeGet(), argNode->InReg()), argReg, argNode->gtRegNum);
- }
- }
-
- // In the case of a varargs call,
- // the ABI dictates that if we have floating point args,
- // we must pass the enregistered arguments in both the
- // integer and floating point registers so, let's do that.
- if (call->IsVarargs() && varTypeIsFloating(argNode))
- {
- NYI_ARM64("CodeGen - IsVarargs");
- }
- }
-
- // Insert a null check on "this" pointer if asked.
- if (call->NeedsNullCheck())
- {
- const regNumber regThis = genGetThisArgReg(call);
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_ZR, regThis, 0);
- }
-
- // Either gtControlExpr != null or gtCallAddr != null or it is a direct non-virtual call to a user or helper method.
- CORINFO_METHOD_HANDLE methHnd;
- GenTree* target = call->gtControlExpr;
- if (callType == CT_INDIRECT)
- {
- assert(target == nullptr);
- target = call->gtCallAddr;
- methHnd = nullptr;
- }
- else
- {
- methHnd = call->gtCallMethHnd;
- }
-
- CORINFO_SIG_INFO* sigInfo = nullptr;
-#ifdef DEBUG
- // Pass the call signature information down into the emitter so the emitter can associate
- // native call sites with the signatures they were generated from.
- if (callType != CT_HELPER)
- {
- sigInfo = call->callSig;
- }
-#endif // DEBUG
-
- // If fast tail call, then we are done. In this case we setup the args (both reg args
- // and stack args in incoming arg area) and call target in IP0. Epilog sequence would
- // generate "br IP0".
- if (call->IsFastTailCall())
- {
- // Don't support fast tail calling JIT helpers
- assert(callType != CT_HELPER);
-
- // Fast tail calls materialize call target either in gtControlExpr or in gtCallAddr.
- assert(target != nullptr);
-
- genConsumeReg(target);
-
- if (target->gtRegNum != REG_IP0)
- {
- inst_RV_RV(INS_mov, REG_IP0, target->gtRegNum);
- }
- return;
- }
-
- // For a pinvoke to unmanged code we emit a label to clear
- // the GC pointer state before the callsite.
- // We can't utilize the typical lazy killing of GC pointers
- // at (or inside) the callsite.
- if (call->IsUnmanaged())
- {
- genDefineTempLabel(genCreateTempLabel());
- }
-
- // Determine return value size(s).
- ReturnTypeDesc* pRetTypeDesc = call->GetReturnTypeDesc();
- emitAttr retSize = EA_PTRSIZE;
- emitAttr secondRetSize = EA_UNKNOWN;
-
- if (call->HasMultiRegRetVal())
- {
- retSize = emitTypeSize(pRetTypeDesc->GetReturnRegType(0));
- secondRetSize = emitTypeSize(pRetTypeDesc->GetReturnRegType(1));
- }
- else
- {
- assert(!varTypeIsStruct(call));
-
- if (call->gtType == TYP_REF || call->gtType == TYP_ARRAY)
- {
- retSize = EA_GCREF;
- }
- else if (call->gtType == TYP_BYREF)
- {
- retSize = EA_BYREF;
- }
- }
-
- // We need to propagate the IL offset information to the call instruction, so we can emit
- // an IL to native mapping record for the call, to support managed return value debugging.
- // We don't want tail call helper calls that were converted from normal calls to get a record,
- // so we skip this hash table lookup logic in that case.
- if (compiler->opts.compDbgInfo && compiler->genCallSite2ILOffsetMap != nullptr && !call->IsTailCall())
- {
- (void)compiler->genCallSite2ILOffsetMap->Lookup(call, &ilOffset);
- }
-
- if (target != nullptr)
- {
- // For Arm64 a call target can not be a contained indirection
- assert(!target->isContainedIndir());
-
- // We have already generated code for gtControlExpr evaluating it into a register.
- // We just need to emit "call reg" in this case.
- //
- assert(genIsValidIntReg(target->gtRegNum));
-
- genEmitCall(emitter::EC_INDIR_R, methHnd,
- INDEBUG_LDISASM_COMMA(sigInfo) nullptr, // addr
- retSize, secondRetSize, ilOffset, genConsumeReg(target));
- }
- else
- {
- // Generate a direct call to a non-virtual user defined or helper method
- assert(callType == CT_HELPER || callType == CT_USER_FUNC);
-
- void* addr = nullptr;
- if (callType == CT_HELPER)
- {
- // Direct call to a helper method.
- CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd);
- noway_assert(helperNum != CORINFO_HELP_UNDEF);
-
- void* pAddr = nullptr;
- addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr);
-
- if (addr == nullptr)
- {
- addr = pAddr;
- }
- }
- else
- {
- // Direct call to a non-virtual user function.
- CORINFO_ACCESS_FLAGS aflags = CORINFO_ACCESS_ANY;
- if (call->IsSameThis())
- {
- aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_THIS);
- }
-
- if ((call->NeedsNullCheck()) == 0)
- {
- aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_NONNULL);
- }
-
- CORINFO_CONST_LOOKUP addrInfo;
- compiler->info.compCompHnd->getFunctionEntryPoint(methHnd, &addrInfo, aflags);
-
- addr = addrInfo.addr;
- }
-#if 0
- // Use this path if you want to load an absolute call target using
- // a sequence of movs followed by an indirect call (blr instruction)
-
- // Load the call target address in x16
- instGen_Set_Reg_To_Imm(EA_8BYTE, REG_IP0, (ssize_t) addr);
-
- // indirect call to constant address in IP0
- genEmitCall(emitter::EC_INDIR_R,
- methHnd,
- INDEBUG_LDISASM_COMMA(sigInfo)
- nullptr, //addr
- retSize,
- secondRetSize,
- ilOffset,
- REG_IP0);
-#else
- // Non-virtual direct call to known addresses
- genEmitCall(emitter::EC_FUNC_TOKEN, methHnd, INDEBUG_LDISASM_COMMA(sigInfo) addr, retSize, secondRetSize,
- ilOffset);
-#endif
- }
-
- // if it was a pinvoke we may have needed to get the address of a label
- if (genPendingCallLabel)
- {
- assert(call->IsUnmanaged());
- genDefineTempLabel(genPendingCallLabel);
- genPendingCallLabel = nullptr;
- }
-
- // Update GC info:
- // All Callee arg registers are trashed and no longer contain any GC pointers.
- // TODO-ARM64-Bug?: As a matter of fact shouldn't we be killing all of callee trashed regs here?
- // For now we will assert that other than arg regs gc ref/byref set doesn't contain any other
- // registers from RBM_CALLEE_TRASH
- assert((gcInfo.gcRegGCrefSetCur & (RBM_CALLEE_TRASH & ~RBM_ARG_REGS)) == 0);
- assert((gcInfo.gcRegByrefSetCur & (RBM_CALLEE_TRASH & ~RBM_ARG_REGS)) == 0);
- gcInfo.gcRegGCrefSetCur &= ~RBM_ARG_REGS;
- gcInfo.gcRegByrefSetCur &= ~RBM_ARG_REGS;
-
- var_types returnType = call->TypeGet();
- if (returnType != TYP_VOID)
- {
- regNumber returnReg;
-
- if (call->HasMultiRegRetVal())
- {
- assert(pRetTypeDesc != nullptr);
- unsigned regCount = pRetTypeDesc->GetReturnRegCount();
-
- // If regs allocated to call node are different from ABI return
- // regs in which the call has returned its result, move the result
- // to regs allocated to call node.
- for (unsigned i = 0; i < regCount; ++i)
- {
- var_types regType = pRetTypeDesc->GetReturnRegType(i);
- returnReg = pRetTypeDesc->GetABIReturnReg(i);
- regNumber allocatedReg = call->GetRegNumByIdx(i);
- if (returnReg != allocatedReg)
- {
- inst_RV_RV(ins_Copy(regType), allocatedReg, returnReg, regType);
- }
- }
- }
- else
- {
- if (varTypeIsFloating(returnType))
- {
- returnReg = REG_FLOATRET;
- }
- else
- {
- returnReg = REG_INTRET;
- }
-
- if (call->gtRegNum != returnReg)
- {
- inst_RV_RV(ins_Copy(returnType), call->gtRegNum, returnReg, returnType);
- }
- }
-
- genProduceReg(call);
- }
-
- // If there is nothing next, that means the result is thrown away, so this value is not live.
- // However, for minopts or debuggable code, we keep it live to support managed return value debugging.
- if ((call->gtNext == nullptr) && !compiler->opts.MinOpts() && !compiler->opts.compDbgCode)
- {
- gcInfo.gcMarkRegSetNpt(RBM_INTRET);
- }
-}
-
// Produce code for a GT_JMP node.
// The arguments of the caller needs to be transferred to the callee before exiting caller.
// The actual jump to callee is generated as part of caller epilog sequence.
@@ -5081,237 +4298,6 @@ void CodeGen::genSetRegToCond(regNumber dstReg, GenTreePtr tree)
}
//------------------------------------------------------------------------
-// genIntToIntCast: Generate code for an integer cast
-// This method handles integer overflow checking casts
-// as well as ordinary integer casts.
-//
-// Arguments:
-// treeNode - The GT_CAST node
-//
-// Return Value:
-// None.
-//
-// Assumptions:
-// The treeNode is not a contained node and must have an assigned register.
-// For a signed convert from byte, the source must be in a byte-addressable register.
-// Neither the source nor target type can be a floating point type.
-//
-// TODO-ARM64-CQ: Allow castOp to be a contained node without an assigned register.
-//
-void CodeGen::genIntToIntCast(GenTreePtr treeNode)
-{
- assert(treeNode->OperGet() == GT_CAST);
-
- GenTreePtr castOp = treeNode->gtCast.CastOp();
- emitter* emit = getEmitter();
-
- var_types dstType = treeNode->CastToType();
- var_types srcType = genActualType(castOp->TypeGet());
- emitAttr movSize = emitActualTypeSize(dstType);
- bool movRequired = false;
-
- regNumber targetReg = treeNode->gtRegNum;
- regNumber sourceReg = castOp->gtRegNum;
-
- // For Long to Int conversion we will have a reserved integer register to hold the immediate mask
- regNumber tmpReg = (treeNode->gtRsvdRegs == RBM_NONE) ? REG_NA : genRegNumFromMask(treeNode->gtRsvdRegs);
-
- assert(genIsValidIntReg(targetReg));
- assert(genIsValidIntReg(sourceReg));
-
- instruction ins = INS_invalid;
-
- genConsumeReg(castOp);
- Lowering::CastInfo castInfo;
-
- // Get information about the cast.
- Lowering::getCastDescription(treeNode, &castInfo);
-
- if (castInfo.requiresOverflowCheck)
- {
-
- emitAttr cmpSize = EA_ATTR(genTypeSize(srcType));
-
- if (castInfo.signCheckOnly)
- {
- // We only need to check for a negative value in sourceReg
- emit->emitIns_R_I(INS_cmp, cmpSize, sourceReg, 0);
- emitJumpKind jmpLT = genJumpKindForOper(GT_LT, CK_SIGNED);
- genJumpToThrowHlpBlk(jmpLT, SCK_OVERFLOW);
- noway_assert(genTypeSize(srcType) == 4 || genTypeSize(srcType) == 8);
- // This is only interesting case to ensure zero-upper bits.
- if ((srcType == TYP_INT) && (dstType == TYP_ULONG))
- {
- // cast to TYP_ULONG:
- // We use a mov with size=EA_4BYTE
- // which will zero out the upper bits
- movSize = EA_4BYTE;
- movRequired = true;
- }
- }
- else if (castInfo.unsignedSource || castInfo.unsignedDest)
- {
- // When we are converting from/to unsigned,
- // we only have to check for any bits set in 'typeMask'
-
- noway_assert(castInfo.typeMask != 0);
- emit->emitIns_R_I(INS_tst, cmpSize, sourceReg, castInfo.typeMask);
- emitJumpKind jmpNotEqual = genJumpKindForOper(GT_NE, CK_SIGNED);
- genJumpToThrowHlpBlk(jmpNotEqual, SCK_OVERFLOW);
- }
- else
- {
- // For a narrowing signed cast
- //
- // We must check the value is in a signed range.
-
- // Compare with the MAX
-
- noway_assert((castInfo.typeMin != 0) && (castInfo.typeMax != 0));
-
- if (emitter::emitIns_valid_imm_for_cmp(castInfo.typeMax, cmpSize))
- {
- emit->emitIns_R_I(INS_cmp, cmpSize, sourceReg, castInfo.typeMax);
- }
- else
- {
- noway_assert(tmpReg != REG_NA);
- instGen_Set_Reg_To_Imm(cmpSize, tmpReg, castInfo.typeMax);
- emit->emitIns_R_R(INS_cmp, cmpSize, sourceReg, tmpReg);
- }
-
- emitJumpKind jmpGT = genJumpKindForOper(GT_GT, CK_SIGNED);
- genJumpToThrowHlpBlk(jmpGT, SCK_OVERFLOW);
-
- // Compare with the MIN
-
- if (emitter::emitIns_valid_imm_for_cmp(castInfo.typeMin, cmpSize))
- {
- emit->emitIns_R_I(INS_cmp, cmpSize, sourceReg, castInfo.typeMin);
- }
- else
- {
- noway_assert(tmpReg != REG_NA);
- instGen_Set_Reg_To_Imm(cmpSize, tmpReg, castInfo.typeMin);
- emit->emitIns_R_R(INS_cmp, cmpSize, sourceReg, tmpReg);
- }
-
- emitJumpKind jmpLT = genJumpKindForOper(GT_LT, CK_SIGNED);
- genJumpToThrowHlpBlk(jmpLT, SCK_OVERFLOW);
- }
- ins = INS_mov;
- }
- else // Non-overflow checking cast.
- {
- if (genTypeSize(srcType) == genTypeSize(dstType))
- {
- ins = INS_mov;
- }
- else
- {
- var_types extendType = TYP_UNKNOWN;
-
- // If we need to treat a signed type as unsigned
- if ((treeNode->gtFlags & GTF_UNSIGNED) != 0)
- {
- extendType = genUnsignedType(srcType);
- movSize = emitTypeSize(extendType);
- movRequired = true;
- }
- else
- {
- if (genTypeSize(srcType) < genTypeSize(dstType))
- {
- extendType = srcType;
- if (srcType == TYP_UINT)
- {
- // If we are casting from a smaller type to
- // a larger type, then we need to make sure the
- // higher 4 bytes are zero to gaurentee the correct value.
- // Therefore using a mov with EA_4BYTE in place of EA_8BYTE
- // will zero the upper bits
- movSize = EA_4BYTE;
- movRequired = true;
- }
- }
- else // (genTypeSize(srcType) > genTypeSize(dstType))
- {
- extendType = dstType;
- if (dstType == TYP_INT)
- {
- movSize = EA_8BYTE; // a sxtw instruction requires EA_8BYTE
- }
- }
- }
-
- ins = ins_Move_Extend(extendType, castOp->InReg());
- }
- }
-
- // We should never be generating a load from memory instruction here!
- assert(!emit->emitInsIsLoad(ins));
-
- if ((ins != INS_mov) || movRequired || (targetReg != sourceReg))
- {
- emit->emitIns_R_R(ins, movSize, targetReg, sourceReg);
- }
-
- genProduceReg(treeNode);
-}
-
-//------------------------------------------------------------------------
-// genFloatToFloatCast: Generate code for a cast between float and double
-//
-// Arguments:
-// treeNode - The GT_CAST node
-//
-// Return Value:
-// None.
-//
-// Assumptions:
-// Cast is a non-overflow conversion.
-// The treeNode must have an assigned register.
-// The cast is between float and double or vice versa.
-//
-void CodeGen::genFloatToFloatCast(GenTreePtr treeNode)
-{
- // float <--> double conversions are always non-overflow ones
- assert(treeNode->OperGet() == GT_CAST);
- assert(!treeNode->gtOverflow());
-
- regNumber targetReg = treeNode->gtRegNum;
- assert(genIsValidFloatReg(targetReg));
-
- GenTreePtr op1 = treeNode->gtOp.gtOp1;
- assert(!op1->isContained()); // Cannot be contained
- assert(genIsValidFloatReg(op1->gtRegNum)); // Must be a valid float reg.
-
- var_types dstType = treeNode->CastToType();
- var_types srcType = op1->TypeGet();
- assert(varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
-
- genConsumeOperands(treeNode->AsOp());
-
- // treeNode must be a reg
- assert(!treeNode->isContained());
-
- if (srcType != dstType)
- {
- insOpts cvtOption = (srcType == TYP_FLOAT) ? INS_OPTS_S_TO_D // convert Single to Double
- : INS_OPTS_D_TO_S; // convert Double to Single
-
- getEmitter()->emitIns_R_R(INS_fcvt, emitTypeSize(treeNode), treeNode->gtRegNum, op1->gtRegNum, cvtOption);
- }
- else if (treeNode->gtRegNum != op1->gtRegNum)
- {
- // If double to double cast or float to float cast. Emit a move instruction.
- getEmitter()->emitIns_R_R(INS_mov, emitTypeSize(treeNode), treeNode->gtRegNum, op1->gtRegNum);
- }
-
- genProduceReg(treeNode);
-}
-
-//------------------------------------------------------------------------
// genIntToFloatCast: Generate code to cast an int/long to float/double
//
// Arguments:
@@ -5589,544 +4575,6 @@ int CodeGenInterface::genCallerSPtoInitialSPdelta()
return callerSPtoSPdelta;
}
-//---------------------------------------------------------------------
-// genIntrinsic - generate code for a given intrinsic
-//
-// Arguments
-// treeNode - the GT_INTRINSIC node
-//
-// Return value:
-// None
-//
-void CodeGen::genIntrinsic(GenTreePtr treeNode)
-{
- // Both operand and its result must be of the same floating point type.
- GenTreePtr srcNode = treeNode->gtOp.gtOp1;
- assert(varTypeIsFloating(srcNode));
- assert(srcNode->TypeGet() == treeNode->TypeGet());
-
- // Right now only Abs/Round/Sqrt are treated as math intrinsics.
- //
- switch (treeNode->gtIntrinsic.gtIntrinsicId)
- {
- case CORINFO_INTRINSIC_Abs:
- genConsumeOperands(treeNode->AsOp());
- getEmitter()->emitInsBinary(INS_fabs, emitTypeSize(treeNode), treeNode, srcNode);
- break;
-
- case CORINFO_INTRINSIC_Round:
- genConsumeOperands(treeNode->AsOp());
- getEmitter()->emitInsBinary(INS_frintn, emitTypeSize(treeNode), treeNode, srcNode);
- break;
-
- case CORINFO_INTRINSIC_Sqrt:
- genConsumeOperands(treeNode->AsOp());
- getEmitter()->emitInsBinary(INS_fsqrt, emitTypeSize(treeNode), treeNode, srcNode);
- break;
-
- default:
- assert(!"genIntrinsic: Unsupported intrinsic");
- unreached();
- }
-
- genProduceReg(treeNode);
-}
-
-//---------------------------------------------------------------------
-// genPutArgStk - generate code for a GT_PUTARG_STK node
-//
-// Arguments
-// treeNode - the GT_PUTARG_STK node
-//
-// Return value:
-// None
-//
-void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode)
-{
- assert(treeNode->OperGet() == GT_PUTARG_STK);
- var_types targetType = treeNode->TypeGet();
- GenTreePtr source = treeNode->gtOp1;
- emitter* emit = getEmitter();
-
- // This is the varNum for our store operations,
- // typically this is the varNum for the Outgoing arg space
- // When we are generating a tail call it will be the varNum for arg0
- unsigned varNumOut;
- unsigned argOffsetMax; // Records the maximum size of this area for assert checks
-
- // This is the varNum for our load operations,
- // only used when we have a multireg struct with a LclVar source
- unsigned varNumInp = BAD_VAR_NUM;
-
- // Get argument offset to use with 'varNumOut'
- // Here we cross check that argument offset hasn't changed from lowering to codegen since
- // we are storing arg slot number in GT_PUTARG_STK node in lowering phase.
- unsigned argOffsetOut = treeNode->gtSlotNum * TARGET_POINTER_SIZE;
-
-#ifdef DEBUG
- fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(treeNode->gtCall, treeNode);
- assert(curArgTabEntry);
- assert(argOffsetOut == (curArgTabEntry->slotNum * TARGET_POINTER_SIZE));
-#endif // DEBUG
-
- // Whether to setup stk arg in incoming or out-going arg area?
- // Fast tail calls implemented as epilog+jmp = stk arg is setup in incoming arg area.
- // All other calls - stk arg is setup in out-going arg area.
- if (treeNode->putInIncomingArgArea())
- {
- varNumOut = getFirstArgWithStackSlot();
- argOffsetMax = compiler->compArgSize;
-#if FEATURE_FASTTAILCALL
- // This must be a fast tail call.
- assert(treeNode->gtCall->IsFastTailCall());
-
- // Since it is a fast tail call, the existence of first incoming arg is guaranteed
- // because fast tail call requires that in-coming arg area of caller is >= out-going
- // arg area required for tail call.
- LclVarDsc* varDsc = &(compiler->lvaTable[varNumOut]);
- assert(varDsc != nullptr);
-#endif // FEATURE_FASTTAILCALL
- }
- else
- {
- varNumOut = compiler->lvaOutgoingArgSpaceVar;
- argOffsetMax = compiler->lvaOutgoingArgSpaceSize;
- }
- bool isStruct = (targetType == TYP_STRUCT) || (source->OperGet() == GT_FIELD_LIST);
-
- if (!isStruct) // a normal non-Struct argument
- {
- instruction storeIns = ins_Store(targetType);
- emitAttr storeAttr = emitTypeSize(targetType);
-
- // If it is contained then source must be the integer constant zero
- if (source->isContained())
- {
- assert(source->OperGet() == GT_CNS_INT);
- assert(source->AsIntConCommon()->IconValue() == 0);
- emit->emitIns_S_R(storeIns, storeAttr, REG_ZR, varNumOut, argOffsetOut);
- }
- else
- {
- genConsumeReg(source);
- emit->emitIns_S_R(storeIns, storeAttr, source->gtRegNum, varNumOut, argOffsetOut);
- }
- argOffsetOut += EA_SIZE_IN_BYTES(storeAttr);
- assert(argOffsetOut <= argOffsetMax); // We can't write beyound the outgoing area area
- }
- else // We have some kind of a struct argument
- {
- assert(source->isContained()); // We expect that this node was marked as contained in LowerArm64
-
- if (source->OperGet() == GT_FIELD_LIST)
- {
- // Deal with the multi register passed struct args.
- GenTreeFieldList* fieldListPtr = source->AsFieldList();
-
- // Evaluate each of the GT_FIELD_LIST items into their register
- // and store their register into the outgoing argument area
- for (; fieldListPtr != nullptr; fieldListPtr = fieldListPtr->Rest())
- {
- GenTreePtr nextArgNode = fieldListPtr->gtOp.gtOp1;
- genConsumeReg(nextArgNode);
-
- regNumber reg = nextArgNode->gtRegNum;
- var_types type = nextArgNode->TypeGet();
- emitAttr attr = emitTypeSize(type);
-
- // Emit store instructions to store the registers produced by the GT_FIELD_LIST into the outgoing
- // argument area
- emit->emitIns_S_R(ins_Store(type), attr, reg, varNumOut, argOffsetOut);
- argOffsetOut += EA_SIZE_IN_BYTES(attr);
- assert(argOffsetOut <= argOffsetMax); // We can't write beyound the outgoing area area
- }
- }
- else // We must have a GT_OBJ or a GT_LCL_VAR
- {
- noway_assert((source->OperGet() == GT_LCL_VAR) || (source->OperGet() == GT_OBJ));
-
- var_types targetType = source->TypeGet();
- noway_assert(varTypeIsStruct(targetType));
-
- // We will copy this struct to the stack, possibly using a ldp instruction
- // Setup loReg and hiReg from the internal registers that we reserved in lower.
- //
- regNumber loReg = REG_NA;
- regNumber hiReg = REG_NA;
- regNumber addrReg = REG_NA;
-
- // In lowerArm64/TreeNodeInfoInitPutArgStk we have reserved two internal integer registers
- genGetRegPairFromMask(treeNode->gtRsvdRegs, &loReg, &hiReg);
-
- GenTreeLclVarCommon* varNode = nullptr;
- GenTreePtr addrNode = nullptr;
-
- if (source->OperGet() == GT_LCL_VAR)
- {
- varNode = source->AsLclVarCommon();
- }
- else // we must have a GT_OBJ
- {
- assert(source->OperGet() == GT_OBJ);
-
- addrNode = source->gtOp.gtOp1;
-
- // addrNode can either be a GT_LCL_VAR_ADDR or an address expression
- //
- if (addrNode->OperGet() == GT_LCL_VAR_ADDR)
- {
- // We have a GT_OBJ(GT_LCL_VAR_ADDR)
- //
- // We will treat this case the same as above
- // (i.e if we just had this GT_LCL_VAR directly as the source)
- // so update 'source' to point this GT_LCL_VAR_ADDR node
- // and continue to the codegen for the LCL_VAR node below
- //
- varNode = addrNode->AsLclVarCommon();
- addrNode = nullptr;
- }
- }
-
- // Either varNode or addrNOde must have been setup above,
- // the xor ensures that only one of the two is setup, not both
- assert((varNode != nullptr) ^ (addrNode != nullptr));
-
- BYTE gcPtrs[MAX_ARG_REG_COUNT] = {}; // TYPE_GC_NONE = 0
- unsigned gcPtrCount; // The count of GC pointers in the struct
- int structSize;
- bool isHfa;
-
- // Setup the structSize, isHFa, and gcPtrCount
- if (varNode != nullptr)
- {
- varNumInp = varNode->gtLclNum;
- assert(varNumInp < compiler->lvaCount);
- LclVarDsc* varDsc = &compiler->lvaTable[varNumInp];
-
- assert(varDsc->lvType == TYP_STRUCT);
- assert(varDsc->lvOnFrame); // This struct also must live in the stack frame
- assert(!varDsc->lvRegister); // And it can't live in a register (SIMD)
-
- structSize = varDsc->lvSize(); // This yields the roundUp size, but that is fine
- // as that is how much stack is allocated for this LclVar
- isHfa = varDsc->lvIsHfa();
- gcPtrCount = varDsc->lvStructGcCount;
- for (unsigned i = 0; i < gcPtrCount; ++i)
- gcPtrs[i] = varDsc->lvGcLayout[i];
- }
- else // addrNode is used
- {
- assert(addrNode != nullptr);
-
- // Generate code to load the address that we need into a register
- genConsumeAddress(addrNode);
- addrReg = addrNode->gtRegNum;
-
- CORINFO_CLASS_HANDLE objClass = source->gtObj.gtClass;
-
- structSize = compiler->info.compCompHnd->getClassSize(objClass);
- isHfa = compiler->IsHfa(objClass);
- gcPtrCount = compiler->info.compCompHnd->getClassGClayout(objClass, &gcPtrs[0]);
- }
-
- bool hasGCpointers = (gcPtrCount > 0); // true if there are any GC pointers in the struct
-
- // If we have an HFA we can't have any GC pointers,
- // if not then the max size for the the struct is 16 bytes
- if (isHfa)
- {
- noway_assert(gcPtrCount == 0);
- }
- else
- {
- noway_assert(structSize <= 2 * TARGET_POINTER_SIZE);
- }
-
- noway_assert(structSize <= MAX_PASS_MULTIREG_BYTES);
-
- // For a 16-byte structSize with GC pointers we will use two ldr and two str instructions
- // ldr x2, [x0]
- // ldr x3, [x0, #8]
- // str x2, [sp, #16]
- // str x3, [sp, #24]
- //
- // For a 16-byte structSize with no GC pointers we will use a ldp and two str instructions
- // ldp x2, x3, [x0]
- // str x2, [sp, #16]
- // str x3, [sp, #24]
- //
- // For a 32-byte structSize with no GC pointers we will use two ldp and four str instructions
- // ldp x2, x3, [x0]
- // str x2, [sp, #16]
- // str x3, [sp, #24]
- // ldp x2, x3, [x0]
- // str x2, [sp, #32]
- // str x3, [sp, #40]
- //
- // Note that when loading from a varNode we currently can't use the ldp instruction
- // TODO-ARM64-CQ: Implement support for using a ldp instruction with a varNum (see emitIns_R_S)
- //
-
- int remainingSize = structSize;
- unsigned structOffset = 0;
- unsigned nextIndex = 0;
-
- while (remainingSize >= 2 * TARGET_POINTER_SIZE)
- {
- var_types type0 = compiler->getJitGCType(gcPtrs[nextIndex + 0]);
- var_types type1 = compiler->getJitGCType(gcPtrs[nextIndex + 1]);
-
- if (hasGCpointers)
- {
- // We have GC pointers, so use two ldr instructions
- //
- // We must do it this way because we can't currently pass or track
- // two different emitAttr values for a ldp instruction.
-
- // Make sure that the first load instruction does not overwrite the addrReg.
- //
- if (loReg != addrReg)
- {
- if (varNode != nullptr)
- {
- // Load from our varNumImp source
- emit->emitIns_R_S(ins_Load(type0), emitTypeSize(type0), loReg, varNumInp, 0);
- emit->emitIns_R_S(ins_Load(type1), emitTypeSize(type1), hiReg, varNumInp,
- TARGET_POINTER_SIZE);
- }
- else
- {
- // Load from our address expression source
- emit->emitIns_R_R_I(ins_Load(type0), emitTypeSize(type0), loReg, addrReg, structOffset);
- emit->emitIns_R_R_I(ins_Load(type1), emitTypeSize(type1), hiReg, addrReg,
- structOffset + TARGET_POINTER_SIZE);
- }
- }
- else // loReg == addrReg
- {
- assert(varNode == nullptr); // because addrReg is REG_NA when varNode is non-null
- assert(hiReg != addrReg);
- // Load from our address expression source
- emit->emitIns_R_R_I(ins_Load(type1), emitTypeSize(type1), hiReg, addrReg,
- structOffset + TARGET_POINTER_SIZE);
- emit->emitIns_R_R_I(ins_Load(type0), emitTypeSize(type0), loReg, addrReg, structOffset);
- }
- }
- else // our struct has no GC pointers
- {
- if (varNode != nullptr)
- {
- // Load from our varNumImp source, currently we can't use a ldp instruction to do this
- emit->emitIns_R_S(ins_Load(type0), emitTypeSize(type0), loReg, varNumInp, 0);
- emit->emitIns_R_S(ins_Load(type1), emitTypeSize(type1), hiReg, varNumInp, TARGET_POINTER_SIZE);
- }
- else
- {
- // Use a ldp instruction
-
- // Load from our address expression source
- emit->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, loReg, hiReg, addrReg, structOffset);
- }
- }
-
- // Emit two store instructions to store the two registers into the outgoing argument area
- emit->emitIns_S_R(ins_Store(type0), emitTypeSize(type0), loReg, varNumOut, argOffsetOut);
- emit->emitIns_S_R(ins_Store(type1), emitTypeSize(type1), hiReg, varNumOut,
- argOffsetOut + TARGET_POINTER_SIZE);
- argOffsetOut += (2 * TARGET_POINTER_SIZE); // We stored 16-bytes of the struct
- assert(argOffsetOut <= argOffsetMax); // We can't write beyound the outgoing area area
-
- remainingSize -= (2 * TARGET_POINTER_SIZE); // We loaded 16-bytes of the struct
- structOffset += (2 * TARGET_POINTER_SIZE);
- nextIndex += 2;
- }
-
- // For a 12-byte structSize we will we will generate two load instructions
- // ldr x2, [x0]
- // ldr w3, [x0, #8]
- // str x2, [sp, #16]
- // str w3, [sp, #24]
- //
- // When the first instruction has a loReg that is the same register as the addrReg,
- // we set deferLoad to true and issue the intructions in the reverse order
- // ldr x3, [x2, #8]
- // ldr x2, [x2]
- // str x2, [sp, #16]
- // str x3, [sp, #24]
- //
-
- var_types nextType = compiler->getJitGCType(gcPtrs[nextIndex]);
- emitAttr nextAttr = emitTypeSize(nextType);
- regNumber curReg = loReg;
-
- bool deferLoad = false;
- var_types deferType = TYP_UNKNOWN;
- emitAttr deferAttr = EA_PTRSIZE;
- int deferOffset = 0;
-
- while (remainingSize > 0)
- {
- if (remainingSize >= TARGET_POINTER_SIZE)
- {
- remainingSize -= TARGET_POINTER_SIZE;
-
- if ((curReg == addrReg) && (remainingSize != 0))
- {
- deferLoad = true;
- deferType = nextType;
- deferAttr = emitTypeSize(nextType);
- deferOffset = structOffset;
- }
- else // the typical case
- {
- if (varNode != nullptr)
- {
- // Load from our varNumImp source
- emit->emitIns_R_S(ins_Load(nextType), nextAttr, curReg, varNumInp, structOffset);
- }
- else
- {
- // Load from our address expression source
- emit->emitIns_R_R_I(ins_Load(nextType), nextAttr, curReg, addrReg, structOffset);
- }
- // Emit a store instruction to store the register into the outgoing argument area
- emit->emitIns_S_R(ins_Store(nextType), nextAttr, curReg, varNumOut, argOffsetOut);
- argOffsetOut += EA_SIZE_IN_BYTES(nextAttr);
- assert(argOffsetOut <= argOffsetMax); // We can't write beyound the outgoing area area
- }
- curReg = hiReg;
- structOffset += TARGET_POINTER_SIZE;
- nextIndex++;
- nextType = compiler->getJitGCType(gcPtrs[nextIndex]);
- nextAttr = emitTypeSize(nextType);
- }
- else // (remainingSize < TARGET_POINTER_SIZE)
- {
- int loadSize = remainingSize;
- remainingSize = 0;
-
- // We should never have to do a non-pointer sized load when we have a LclVar source
- assert(varNode == nullptr);
-
- // the left over size is smaller than a pointer and thus can never be a GC type
- assert(varTypeIsGC(nextType) == false);
-
- var_types loadType = TYP_UINT;
- if (loadSize == 1)
- {
- loadType = TYP_UBYTE;
- }
- else if (loadSize == 2)
- {
- loadType = TYP_USHORT;
- }
- else
- {
- // Need to handle additional loadSize cases here
- noway_assert(loadSize == 4);
- }
-
- instruction loadIns = ins_Load(loadType);
- emitAttr loadAttr = emitAttr(loadSize);
-
- // When deferLoad is false, curReg can be the same as addrReg
- // because the last instruction is allowed to overwrite addrReg.
- //
- noway_assert(!deferLoad || (curReg != addrReg));
-
- emit->emitIns_R_R_I(loadIns, loadAttr, curReg, addrReg, structOffset);
-
- // Emit a store instruction to store the register into the outgoing argument area
- emit->emitIns_S_R(ins_Store(loadType), loadAttr, curReg, varNumOut, argOffsetOut);
- argOffsetOut += EA_SIZE_IN_BYTES(loadAttr);
- assert(argOffsetOut <= argOffsetMax); // We can't write beyound the outgoing area area
- }
- }
-
- if (deferLoad)
- {
- // We should never have to do a deferred load when we have a LclVar source
- assert(varNode == nullptr);
-
- curReg = addrReg;
-
- // Load from our address expression source
- emit->emitIns_R_R_I(ins_Load(deferType), deferAttr, curReg, addrReg, deferOffset);
-
- // Emit a store instruction to store the register into the outgoing argument area
- emit->emitIns_S_R(ins_Store(nextType), nextAttr, curReg, varNumOut, argOffsetOut);
- argOffsetOut += EA_SIZE_IN_BYTES(nextAttr);
- assert(argOffsetOut <= argOffsetMax); // We can't write beyound the outgoing area area
- }
- }
- }
-}
-
-/*****************************************************************************
- *
- * Create and record GC Info for the function.
- */
-void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize,
- unsigned prologSize,
- unsigned epilogSize DEBUGARG(void* codePtr))
-{
- genCreateAndStoreGCInfoX64(codeSize, prologSize DEBUGARG(codePtr));
-}
-
-void CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr))
-{
- IAllocator* allowZeroAlloc = new (compiler, CMK_GC) AllowZeroAllocator(compiler->getAllocatorGC());
- GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC)
- GcInfoEncoder(compiler->info.compCompHnd, compiler->info.compMethodInfo, allowZeroAlloc, NOMEM);
- assert(gcInfoEncoder != nullptr);
-
- // Follow the code pattern of the x86 gc info encoder (genCreateAndStoreGCInfoJIT32).
- gcInfo.gcInfoBlockHdrSave(gcInfoEncoder, codeSize, prologSize);
-
- // We keep the call count for the second call to gcMakeRegPtrTable() below.
- unsigned callCnt = 0;
-
- // First we figure out the encoder ID's for the stack slots and registers.
- gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_ASSIGN_SLOTS, &callCnt);
-
- // Now we've requested all the slots we'll need; "finalize" these (make more compact data structures for them).
- gcInfoEncoder->FinalizeSlotIds();
-
- // Now we can actually use those slot ID's to declare live ranges.
- gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_DO_WORK, &callCnt);
-
- if (compiler->opts.compDbgEnC)
- {
- // what we have to preserve is called the "frame header" (see comments in VM\eetwain.cpp)
- // which is:
- // -return address
- // -saved off RBP
- // -saved 'this' pointer and bool for synchronized methods
-
- // 4 slots for RBP + return address + RSI + RDI
- int preservedAreaSize = 4 * REGSIZE_BYTES;
-
- if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
- {
- if (!(compiler->info.compFlags & CORINFO_FLG_STATIC))
- preservedAreaSize += REGSIZE_BYTES;
-
- preservedAreaSize += 1; // bool for synchronized methods
- }
-
- // Used to signal both that the method is compiled for EnC, and also the size of the block at the top of the
- // frame
- gcInfoEncoder->SetSizeOfEditAndContinuePreservedArea(preservedAreaSize);
- }
-
- gcInfoEncoder->Build();
-
- // GC Encoder automatically puts the GC info in the right spot using ICorJitInfo::allocGCInfo(size_t)
- // let's save the values anyway for debugging purposes
- compiler->compInfoBlkAddr = gcInfoEncoder->Emit();
- compiler->compInfoBlkSize = 0; // not exposed by the GCEncoder interface
-}
-
/*****************************************************************************
* Emit a call to a helper function.
*
diff --git a/src/jit/codegenarmarch.cpp b/src/jit/codegenarmarch.cpp
new file mode 100644
index 0000000000..af9fdfed9c
--- /dev/null
+++ b/src/jit/codegenarmarch.cpp
@@ -0,0 +1,1687 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+XX XX
+XX ARM/ARM64 Code Generator Common Code XX
+XX XX
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+*/
+#include "jitpch.h"
+#ifdef _MSC_VER
+#pragma hdrstop
+#endif
+
+#ifndef LEGACY_BACKEND // This file is ONLY used for the RyuJIT backend that uses the linear scan register allocator
+
+#ifdef _TARGET_ARMARCH_ // This file is ONLY used for ARM and ARM64 architectures
+
+#include "codegen.h"
+#include "lower.h"
+#include "gcinfo.h"
+#include "emit.h"
+
+//------------------------------------------------------------------------
+// genSetRegToIcon: Generate code that will set the given register to the integer constant.
+//
+void CodeGen::genSetRegToIcon(regNumber reg, ssize_t val, var_types type, insFlags flags)
+{
+ // Reg cannot be a FP reg
+ assert(!genIsValidFloatReg(reg));
+
+ // The only TYP_REF constant that can come this path is a managed 'null' since it is not
+ // relocatable. Other ref type constants (e.g. string objects) go through a different
+ // code path.
+ noway_assert(type != TYP_REF || val == 0);
+
+ instGen_Set_Reg_To_Imm(emitActualTypeSize(type), reg, val, flags);
+}
+
+//---------------------------------------------------------------------
+// genIntrinsic - generate code for a given intrinsic
+//
+// Arguments
+// treeNode - the GT_INTRINSIC node
+//
+// Return value:
+// None
+//
+void CodeGen::genIntrinsic(GenTreePtr treeNode)
+{
+ // Both operand and its result must be of the same floating point type.
+ GenTreePtr srcNode = treeNode->gtOp.gtOp1;
+ assert(varTypeIsFloating(srcNode));
+ assert(srcNode->TypeGet() == treeNode->TypeGet());
+
+ // Right now only Abs/Round/Sqrt are treated as math intrinsics.
+ //
+ switch (treeNode->gtIntrinsic.gtIntrinsicId)
+ {
+ case CORINFO_INTRINSIC_Abs:
+ genConsumeOperands(treeNode->AsOp());
+ getEmitter()->emitInsBinary(INS_ABS, emitTypeSize(treeNode), treeNode, srcNode);
+ break;
+
+ case CORINFO_INTRINSIC_Round:
+ NYI_ARM("genIntrinsic for round - not implemented yet");
+ genConsumeOperands(treeNode->AsOp());
+ getEmitter()->emitInsBinary(INS_ROUND, emitTypeSize(treeNode), treeNode, srcNode);
+ break;
+
+ case CORINFO_INTRINSIC_Sqrt:
+ genConsumeOperands(treeNode->AsOp());
+ getEmitter()->emitInsBinary(INS_SQRT, emitTypeSize(treeNode), treeNode, srcNode);
+ break;
+
+ default:
+ assert(!"genIntrinsic: Unsupported intrinsic");
+ unreached();
+ }
+
+ genProduceReg(treeNode);
+}
+
+//---------------------------------------------------------------------
+// genPutArgStk - generate code for a GT_PUTARG_STK node
+//
+// Arguments
+// treeNode - the GT_PUTARG_STK node
+//
+// Return value:
+// None
+//
+void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode)
+{
+ assert(treeNode->OperGet() == GT_PUTARG_STK);
+ var_types targetType = treeNode->TypeGet();
+ GenTreePtr source = treeNode->gtOp1;
+ emitter* emit = getEmitter();
+
+ // This is the varNum for our store operations,
+ // typically this is the varNum for the Outgoing arg space
+ // When we are generating a tail call it will be the varNum for arg0
+ unsigned varNumOut = (unsigned)-1;
+ unsigned argOffsetMax = (unsigned)-1; // Records the maximum size of this area for assert checks
+
+ // Get argument offset to use with 'varNumOut'
+ // Here we cross check that argument offset hasn't changed from lowering to codegen since
+ // we are storing arg slot number in GT_PUTARG_STK node in lowering phase.
+ unsigned argOffsetOut = treeNode->gtSlotNum * TARGET_POINTER_SIZE;
+
+#ifdef DEBUG
+ fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(treeNode->gtCall, treeNode);
+ assert(curArgTabEntry);
+ assert(argOffsetOut == (curArgTabEntry->slotNum * TARGET_POINTER_SIZE));
+#endif // DEBUG
+
+ // Whether to setup stk arg in incoming or out-going arg area?
+ // Fast tail calls implemented as epilog+jmp = stk arg is setup in incoming arg area.
+ // All other calls - stk arg is setup in out-going arg area.
+ if (treeNode->putInIncomingArgArea())
+ {
+ NYI_ARM("genPutArgStk: fast tail call");
+
+#ifdef _TARGET_ARM64_
+ varNumOut = getFirstArgWithStackSlot();
+ argOffsetMax = compiler->compArgSize;
+#if FEATURE_FASTTAILCALL
+ // This must be a fast tail call.
+ assert(treeNode->gtCall->IsFastTailCall());
+
+ // Since it is a fast tail call, the existence of first incoming arg is guaranteed
+ // because fast tail call requires that in-coming arg area of caller is >= out-going
+ // arg area required for tail call.
+ LclVarDsc* varDsc = &(compiler->lvaTable[varNumOut]);
+ assert(varDsc != nullptr);
+#endif // FEATURE_FASTTAILCALL
+#endif // _TARGET_ARM64_
+ }
+ else
+ {
+ varNumOut = compiler->lvaOutgoingArgSpaceVar;
+ argOffsetMax = compiler->lvaOutgoingArgSpaceSize;
+ }
+
+ bool isStruct = (targetType == TYP_STRUCT) || (source->OperGet() == GT_FIELD_LIST);
+
+ if (!isStruct) // a normal non-Struct argument
+ {
+ instruction storeIns = ins_Store(targetType);
+ emitAttr storeAttr = emitTypeSize(targetType);
+
+ // If it is contained then source must be the integer constant zero
+ if (source->isContained())
+ {
+ assert(source->OperGet() == GT_CNS_INT);
+ assert(source->AsIntConCommon()->IconValue() == 0);
+ NYI_ARM("genPutArgStk: contained zero source");
+
+#ifdef _TARGET_ARM64_
+ emit->emitIns_S_R(storeIns, storeAttr, REG_ZR, varNumOut, argOffsetOut);
+#endif // _TARGET_ARM64_
+ }
+ else
+ {
+ genConsumeReg(source);
+ emit->emitIns_S_R(storeIns, storeAttr, source->gtRegNum, varNumOut, argOffsetOut);
+ }
+ argOffsetOut += EA_SIZE_IN_BYTES(storeAttr);
+ assert(argOffsetOut <= argOffsetMax); // We can't write beyound the outgoing area area
+ }
+ else // We have some kind of a struct argument
+ {
+ assert(source->isContained()); // We expect that this node was marked as contained in Lower
+
+ if (source->OperGet() == GT_FIELD_LIST)
+ {
+ // Deal with the multi register passed struct args.
+ GenTreeFieldList* fieldListPtr = source->AsFieldList();
+
+ // Evaluate each of the GT_FIELD_LIST items into their register
+ // and store their register into the outgoing argument area
+ for (; fieldListPtr != nullptr; fieldListPtr = fieldListPtr->Rest())
+ {
+ GenTreePtr nextArgNode = fieldListPtr->gtOp.gtOp1;
+ genConsumeReg(nextArgNode);
+
+ regNumber reg = nextArgNode->gtRegNum;
+ var_types type = nextArgNode->TypeGet();
+ emitAttr attr = emitTypeSize(type);
+
+ // Emit store instructions to store the registers produced by the GT_FIELD_LIST into the outgoing
+ // argument area
+ emit->emitIns_S_R(ins_Store(type), attr, reg, varNumOut, argOffsetOut);
+ argOffsetOut += EA_SIZE_IN_BYTES(attr);
+ assert(argOffsetOut <= argOffsetMax); // We can't write beyound the outgoing area area
+ }
+ }
+ else // We must have a GT_OBJ or a GT_LCL_VAR
+ {
+ noway_assert((source->OperGet() == GT_LCL_VAR) || (source->OperGet() == GT_OBJ));
+
+ NYI_ARM("genPutArgStk: GT_OBJ or GT_LCL_VAR source of struct type");
+
+#ifdef _TARGET_ARM64_
+
+ var_types targetType = source->TypeGet();
+ noway_assert(varTypeIsStruct(targetType));
+
+ // We will copy this struct to the stack, possibly using a ldp instruction
+ // Setup loReg and hiReg from the internal registers that we reserved in lower.
+ //
+ regNumber loReg = REG_NA;
+ regNumber hiReg = REG_NA;
+ regNumber addrReg = REG_NA;
+
+ // In lowerArm64/TreeNodeInfoInitPutArgStk we have reserved two internal integer registers
+ genGetRegPairFromMask(treeNode->gtRsvdRegs, &loReg, &hiReg);
+
+ GenTreeLclVarCommon* varNode = nullptr;
+ GenTreePtr addrNode = nullptr;
+
+ if (source->OperGet() == GT_LCL_VAR)
+ {
+ varNode = source->AsLclVarCommon();
+ }
+ else // we must have a GT_OBJ
+ {
+ assert(source->OperGet() == GT_OBJ);
+
+ addrNode = source->gtOp.gtOp1;
+
+ // addrNode can either be a GT_LCL_VAR_ADDR or an address expression
+ //
+ if (addrNode->OperGet() == GT_LCL_VAR_ADDR)
+ {
+ // We have a GT_OBJ(GT_LCL_VAR_ADDR)
+ //
+ // We will treat this case the same as above
+ // (i.e if we just had this GT_LCL_VAR directly as the source)
+ // so update 'source' to point this GT_LCL_VAR_ADDR node
+ // and continue to the codegen for the LCL_VAR node below
+ //
+ varNode = addrNode->AsLclVarCommon();
+ addrNode = nullptr;
+ }
+ }
+
+ // Either varNode or addrNOde must have been setup above,
+ // the xor ensures that only one of the two is setup, not both
+ assert((varNode != nullptr) ^ (addrNode != nullptr));
+
+ BYTE gcPtrs[MAX_ARG_REG_COUNT] = {}; // TYPE_GC_NONE = 0
+ unsigned gcPtrCount; // The count of GC pointers in the struct
+ int structSize;
+ bool isHfa;
+
+ // This is the varNum for our load operations,
+ // only used when we have a multireg struct with a LclVar source
+ unsigned varNumInp = BAD_VAR_NUM;
+
+ // Setup the structSize, isHFa, and gcPtrCount
+ if (varNode != nullptr)
+ {
+ varNumInp = varNode->gtLclNum;
+ assert(varNumInp < compiler->lvaCount);
+ LclVarDsc* varDsc = &compiler->lvaTable[varNumInp];
+
+ assert(varDsc->lvType == TYP_STRUCT);
+ assert(varDsc->lvOnFrame); // This struct also must live in the stack frame
+ assert(!varDsc->lvRegister); // And it can't live in a register (SIMD)
+
+ structSize = varDsc->lvSize(); // This yields the roundUp size, but that is fine
+ // as that is how much stack is allocated for this LclVar
+ isHfa = varDsc->lvIsHfa();
+ gcPtrCount = varDsc->lvStructGcCount;
+ for (unsigned i = 0; i < gcPtrCount; ++i)
+ gcPtrs[i] = varDsc->lvGcLayout[i];
+ }
+ else // addrNode is used
+ {
+ assert(addrNode != nullptr);
+
+ // Generate code to load the address that we need into a register
+ genConsumeAddress(addrNode);
+ addrReg = addrNode->gtRegNum;
+
+ CORINFO_CLASS_HANDLE objClass = source->gtObj.gtClass;
+
+ structSize = compiler->info.compCompHnd->getClassSize(objClass);
+ isHfa = compiler->IsHfa(objClass);
+ gcPtrCount = compiler->info.compCompHnd->getClassGClayout(objClass, &gcPtrs[0]);
+ }
+
+ bool hasGCpointers = (gcPtrCount > 0); // true if there are any GC pointers in the struct
+
+ // If we have an HFA we can't have any GC pointers,
+ // if not then the max size for the the struct is 16 bytes
+ if (isHfa)
+ {
+ noway_assert(gcPtrCount == 0);
+ }
+ else
+ {
+ noway_assert(structSize <= 2 * TARGET_POINTER_SIZE);
+ }
+
+ noway_assert(structSize <= MAX_PASS_MULTIREG_BYTES);
+
+ // For a 16-byte structSize with GC pointers we will use two ldr and two str instructions
+ // ldr x2, [x0]
+ // ldr x3, [x0, #8]
+ // str x2, [sp, #16]
+ // str x3, [sp, #24]
+ //
+ // For a 16-byte structSize with no GC pointers we will use a ldp and two str instructions
+ // ldp x2, x3, [x0]
+ // str x2, [sp, #16]
+ // str x3, [sp, #24]
+ //
+ // For a 32-byte structSize with no GC pointers we will use two ldp and four str instructions
+ // ldp x2, x3, [x0]
+ // str x2, [sp, #16]
+ // str x3, [sp, #24]
+ // ldp x2, x3, [x0]
+ // str x2, [sp, #32]
+ // str x3, [sp, #40]
+ //
+ // Note that when loading from a varNode we currently can't use the ldp instruction
+ // TODO-ARM64-CQ: Implement support for using a ldp instruction with a varNum (see emitIns_R_S)
+ //
+
+ int remainingSize = structSize;
+ unsigned structOffset = 0;
+ unsigned nextIndex = 0;
+
+ while (remainingSize >= 2 * TARGET_POINTER_SIZE)
+ {
+ var_types type0 = compiler->getJitGCType(gcPtrs[nextIndex + 0]);
+ var_types type1 = compiler->getJitGCType(gcPtrs[nextIndex + 1]);
+
+ if (hasGCpointers)
+ {
+ // We have GC pointers, so use two ldr instructions
+ //
+ // We must do it this way because we can't currently pass or track
+ // two different emitAttr values for a ldp instruction.
+
+ // Make sure that the first load instruction does not overwrite the addrReg.
+ //
+ if (loReg != addrReg)
+ {
+ if (varNode != nullptr)
+ {
+ // Load from our varNumImp source
+ emit->emitIns_R_S(ins_Load(type0), emitTypeSize(type0), loReg, varNumInp, 0);
+ emit->emitIns_R_S(ins_Load(type1), emitTypeSize(type1), hiReg, varNumInp,
+ TARGET_POINTER_SIZE);
+ }
+ else
+ {
+ // Load from our address expression source
+ emit->emitIns_R_R_I(ins_Load(type0), emitTypeSize(type0), loReg, addrReg, structOffset);
+ emit->emitIns_R_R_I(ins_Load(type1), emitTypeSize(type1), hiReg, addrReg,
+ structOffset + TARGET_POINTER_SIZE);
+ }
+ }
+ else // loReg == addrReg
+ {
+ assert(varNode == nullptr); // because addrReg is REG_NA when varNode is non-null
+ assert(hiReg != addrReg);
+ // Load from our address expression source
+ emit->emitIns_R_R_I(ins_Load(type1), emitTypeSize(type1), hiReg, addrReg,
+ structOffset + TARGET_POINTER_SIZE);
+ emit->emitIns_R_R_I(ins_Load(type0), emitTypeSize(type0), loReg, addrReg, structOffset);
+ }
+ }
+ else // our struct has no GC pointers
+ {
+ if (varNode != nullptr)
+ {
+ // Load from our varNumImp source, currently we can't use a ldp instruction to do this
+ emit->emitIns_R_S(ins_Load(type0), emitTypeSize(type0), loReg, varNumInp, 0);
+ emit->emitIns_R_S(ins_Load(type1), emitTypeSize(type1), hiReg, varNumInp, TARGET_POINTER_SIZE);
+ }
+ else
+ {
+ // Use a ldp instruction
+
+ // Load from our address expression source
+ emit->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, loReg, hiReg, addrReg, structOffset);
+ }
+ }
+
+ // Emit two store instructions to store the two registers into the outgoing argument area
+ emit->emitIns_S_R(ins_Store(type0), emitTypeSize(type0), loReg, varNumOut, argOffsetOut);
+ emit->emitIns_S_R(ins_Store(type1), emitTypeSize(type1), hiReg, varNumOut,
+ argOffsetOut + TARGET_POINTER_SIZE);
+ argOffsetOut += (2 * TARGET_POINTER_SIZE); // We stored 16-bytes of the struct
+ assert(argOffsetOut <= argOffsetMax); // We can't write beyound the outgoing area area
+
+ remainingSize -= (2 * TARGET_POINTER_SIZE); // We loaded 16-bytes of the struct
+ structOffset += (2 * TARGET_POINTER_SIZE);
+ nextIndex += 2;
+ }
+
+ // For a 12-byte structSize we will we will generate two load instructions
+ // ldr x2, [x0]
+ // ldr w3, [x0, #8]
+ // str x2, [sp, #16]
+ // str w3, [sp, #24]
+ //
+ // When the first instruction has a loReg that is the same register as the addrReg,
+ // we set deferLoad to true and issue the intructions in the reverse order
+ // ldr x3, [x2, #8]
+ // ldr x2, [x2]
+ // str x2, [sp, #16]
+ // str x3, [sp, #24]
+ //
+
+ var_types nextType = compiler->getJitGCType(gcPtrs[nextIndex]);
+ emitAttr nextAttr = emitTypeSize(nextType);
+ regNumber curReg = loReg;
+
+ bool deferLoad = false;
+ var_types deferType = TYP_UNKNOWN;
+ emitAttr deferAttr = EA_PTRSIZE;
+ int deferOffset = 0;
+
+ while (remainingSize > 0)
+ {
+ if (remainingSize >= TARGET_POINTER_SIZE)
+ {
+ remainingSize -= TARGET_POINTER_SIZE;
+
+ if ((curReg == addrReg) && (remainingSize != 0))
+ {
+ deferLoad = true;
+ deferType = nextType;
+ deferAttr = emitTypeSize(nextType);
+ deferOffset = structOffset;
+ }
+ else // the typical case
+ {
+ if (varNode != nullptr)
+ {
+ // Load from our varNumImp source
+ emit->emitIns_R_S(ins_Load(nextType), nextAttr, curReg, varNumInp, structOffset);
+ }
+ else
+ {
+ // Load from our address expression source
+ emit->emitIns_R_R_I(ins_Load(nextType), nextAttr, curReg, addrReg, structOffset);
+ }
+ // Emit a store instruction to store the register into the outgoing argument area
+ emit->emitIns_S_R(ins_Store(nextType), nextAttr, curReg, varNumOut, argOffsetOut);
+ argOffsetOut += EA_SIZE_IN_BYTES(nextAttr);
+ assert(argOffsetOut <= argOffsetMax); // We can't write beyound the outgoing area area
+ }
+ curReg = hiReg;
+ structOffset += TARGET_POINTER_SIZE;
+ nextIndex++;
+ nextType = compiler->getJitGCType(gcPtrs[nextIndex]);
+ nextAttr = emitTypeSize(nextType);
+ }
+ else // (remainingSize < TARGET_POINTER_SIZE)
+ {
+ int loadSize = remainingSize;
+ remainingSize = 0;
+
+ // We should never have to do a non-pointer sized load when we have a LclVar source
+ assert(varNode == nullptr);
+
+ // the left over size is smaller than a pointer and thus can never be a GC type
+ assert(varTypeIsGC(nextType) == false);
+
+ var_types loadType = TYP_UINT;
+ if (loadSize == 1)
+ {
+ loadType = TYP_UBYTE;
+ }
+ else if (loadSize == 2)
+ {
+ loadType = TYP_USHORT;
+ }
+ else
+ {
+ // Need to handle additional loadSize cases here
+ noway_assert(loadSize == 4);
+ }
+
+ instruction loadIns = ins_Load(loadType);
+ emitAttr loadAttr = emitAttr(loadSize);
+
+ // When deferLoad is false, curReg can be the same as addrReg
+ // because the last instruction is allowed to overwrite addrReg.
+ //
+ noway_assert(!deferLoad || (curReg != addrReg));
+
+ emit->emitIns_R_R_I(loadIns, loadAttr, curReg, addrReg, structOffset);
+
+ // Emit a store instruction to store the register into the outgoing argument area
+ emit->emitIns_S_R(ins_Store(loadType), loadAttr, curReg, varNumOut, argOffsetOut);
+ argOffsetOut += EA_SIZE_IN_BYTES(loadAttr);
+ assert(argOffsetOut <= argOffsetMax); // We can't write beyound the outgoing area area
+ }
+ }
+
+ if (deferLoad)
+ {
+ // We should never have to do a deferred load when we have a LclVar source
+ assert(varNode == nullptr);
+
+ curReg = addrReg;
+
+ // Load from our address expression source
+ emit->emitIns_R_R_I(ins_Load(deferType), deferAttr, curReg, addrReg, deferOffset);
+
+ // Emit a store instruction to store the register into the outgoing argument area
+ emit->emitIns_S_R(ins_Store(nextType), nextAttr, curReg, varNumOut, argOffsetOut);
+ argOffsetOut += EA_SIZE_IN_BYTES(nextAttr);
+ assert(argOffsetOut <= argOffsetMax); // We can't write beyound the outgoing area area
+ }
+
+#endif // _TARGET_ARM64_
+ }
+ }
+}
+
+//----------------------------------------------------------------------------------
+// genMultiRegCallStoreToLocal: store multi-reg return value of a call node to a local
+//
+// Arguments:
+// treeNode - Gentree of GT_STORE_LCL_VAR
+//
+// Return Value:
+// None
+//
+// Assumption:
+// The child of store is a multi-reg call node.
+// genProduceReg() on treeNode is made by caller of this routine.
+//
+void CodeGen::genMultiRegCallStoreToLocal(GenTreePtr treeNode)
+{
+ assert(treeNode->OperGet() == GT_STORE_LCL_VAR);
+
+#if defined(_TARGET_ARM_)
+ // Longs are returned in two return registers on Arm32.
+ assert(varTypeIsLong(treeNode));
+#elif defined(_TARGET_ARM64_)
+ // Structs of size >=9 and <=16 are returned in two return registers on ARM64 and HFAs.
+ assert(varTypeIsStruct(treeNode));
+#endif // _TARGET_*
+
+ // Assumption: current implementation requires that a multi-reg
+ // var in 'var = call' is flagged as lvIsMultiRegRet to prevent it from
+ // being promoted.
+ unsigned lclNum = treeNode->AsLclVarCommon()->gtLclNum;
+ LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);
+ noway_assert(varDsc->lvIsMultiRegRet);
+
+ GenTree* op1 = treeNode->gtGetOp1();
+ GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
+ GenTreeCall* call = actualOp1->AsCall();
+ assert(call->HasMultiRegRetVal());
+
+ genConsumeRegs(op1);
+
+ ReturnTypeDesc* pRetTypeDesc = call->GetReturnTypeDesc();
+ unsigned regCount = pRetTypeDesc->GetReturnRegCount();
+
+ if (treeNode->gtRegNum != REG_NA)
+ {
+ // Right now the only enregistrable multi-reg return types supported are SIMD types.
+ assert(varTypeIsSIMD(treeNode));
+ NYI("GT_STORE_LCL_VAR of a SIMD enregisterable struct");
+ }
+ else
+ {
+ // Stack store
+ int offset = 0;
+ for (unsigned i = 0; i < regCount; ++i)
+ {
+ var_types type = pRetTypeDesc->GetReturnRegType(i);
+ regNumber reg = call->GetRegNumByIdx(i);
+ if (op1->IsCopyOrReload())
+ {
+ // GT_COPY/GT_RELOAD will have valid reg for those positions
+ // that need to be copied or reloaded.
+ regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(i);
+ if (reloadReg != REG_NA)
+ {
+ reg = reloadReg;
+ }
+ }
+
+ assert(reg != REG_NA);
+ getEmitter()->emitIns_S_R(ins_Store(type), emitTypeSize(type), reg, lclNum, offset);
+ offset += genTypeSize(type);
+ }
+
+ varDsc->lvRegNum = REG_STK;
+ }
+}
+
+//------------------------------------------------------------------------
+// genRangeCheck: generate code for GT_ARR_BOUNDS_CHECK node.
+//
+void CodeGen::genRangeCheck(GenTreePtr oper)
+{
+#ifdef FEATURE_SIMD
+ noway_assert(oper->OperGet() == GT_ARR_BOUNDS_CHECK || oper->OperGet() == GT_SIMD_CHK);
+#else // !FEATURE_SIMD
+ noway_assert(oper->OperGet() == GT_ARR_BOUNDS_CHECK);
+#endif // !FEATURE_SIMD
+
+ GenTreeBoundsChk* bndsChk = oper->AsBoundsChk();
+
+ GenTreePtr arrLen = bndsChk->gtArrLen;
+ GenTreePtr arrIndex = bndsChk->gtIndex;
+ GenTreePtr arrRef = NULL;
+ int lenOffset = 0;
+
+ GenTree* src1;
+ GenTree* src2;
+ emitJumpKind jmpKind;
+
+ genConsumeRegs(arrIndex);
+ genConsumeRegs(arrLen);
+
+ if (arrIndex->isContainedIntOrIImmed())
+ {
+ // To encode using a cmp immediate, we place the
+ // constant operand in the second position
+ src1 = arrLen;
+ src2 = arrIndex;
+ jmpKind = genJumpKindForOper(GT_LE, CK_UNSIGNED);
+ }
+ else
+ {
+ src1 = arrIndex;
+ src2 = arrLen;
+ jmpKind = genJumpKindForOper(GT_GE, CK_UNSIGNED);
+ }
+
+ getEmitter()->emitInsBinary(INS_cmp, EA_4BYTE, src1, src2);
+ genJumpToThrowHlpBlk(jmpKind, SCK_RNGCHK_FAIL, bndsChk->gtIndRngFailBB);
+}
+
+//------------------------------------------------------------------------
+// genOffsetOfMDArrayLowerBound: Returns the offset from the Array object to the
+// lower bound for the given dimension.
+//
+// Arguments:
+// elemType - the element type of the array
+// rank - the rank of the array
+// dimension - the dimension for which the lower bound offset will be returned.
+//
+// Return Value:
+// The offset.
+// TODO-Cleanup: move to CodeGenCommon.cpp
+
+// static
+unsigned CodeGen::genOffsetOfMDArrayLowerBound(var_types elemType, unsigned rank, unsigned dimension)
+{
+ // Note that the lower bound and length fields of the Array object are always TYP_INT
+ return compiler->eeGetArrayDataOffset(elemType) + genTypeSize(TYP_INT) * (dimension + rank);
+}
+
+//------------------------------------------------------------------------
+// genOffsetOfMDArrayLength: Returns the offset from the Array object to the
+// size for the given dimension.
+//
+// Arguments:
+// elemType - the element type of the array
+// rank - the rank of the array
+// dimension - the dimension for which the lower bound offset will be returned.
+//
+// Return Value:
+// The offset.
+// TODO-Cleanup: move to CodeGenCommon.cpp
+
+// static
+unsigned CodeGen::genOffsetOfMDArrayDimensionSize(var_types elemType, unsigned rank, unsigned dimension)
+{
+ // Note that the lower bound and length fields of the Array object are always TYP_INT
+ return compiler->eeGetArrayDataOffset(elemType) + genTypeSize(TYP_INT) * dimension;
+}
+
+//------------------------------------------------------------------------
+// genCodeForArrIndex: Generates code to bounds check the index for one dimension of an array reference,
+// producing the effective index by subtracting the lower bound.
+//
+// Arguments:
+// arrIndex - the node for which we're generating code
+//
+// Return Value:
+// None.
+//
+void CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex)
+{
+ emitter* emit = getEmitter();
+ GenTreePtr arrObj = arrIndex->ArrObj();
+ GenTreePtr indexNode = arrIndex->IndexExpr();
+ regNumber arrReg = genConsumeReg(arrObj);
+ regNumber indexReg = genConsumeReg(indexNode);
+ regNumber tgtReg = arrIndex->gtRegNum;
+ noway_assert(tgtReg != REG_NA);
+
+ // We will use a temp register to load the lower bound and dimension size values
+ //
+ regMaskTP tmpRegsMask = arrIndex->gtRsvdRegs; // there will be two bits set
+ tmpRegsMask &= ~genRegMask(tgtReg); // remove the bit for 'tgtReg' from 'tmpRegsMask'
+
+ regMaskTP tmpRegMask = genFindLowestBit(tmpRegsMask); // set tmpRegMsk to a one-bit mask
+ regNumber tmpReg = genRegNumFromMask(tmpRegMask); // set tmpReg from that mask
+ noway_assert(tmpReg != REG_NA);
+
+ assert(tgtReg != tmpReg);
+
+ unsigned dim = arrIndex->gtCurrDim;
+ unsigned rank = arrIndex->gtArrRank;
+ var_types elemType = arrIndex->gtArrElemType;
+ unsigned offset;
+
+ offset = genOffsetOfMDArrayLowerBound(elemType, rank, dim);
+ emit->emitIns_R_R_I(ins_Load(TYP_INT), EA_PTRSIZE, tmpReg, arrReg, offset); // a 4 BYTE sign extending load
+ emit->emitIns_R_R_R(INS_sub, EA_4BYTE, tgtReg, indexReg, tmpReg);
+
+ offset = genOffsetOfMDArrayDimensionSize(elemType, rank, dim);
+ emit->emitIns_R_R_I(ins_Load(TYP_INT), EA_PTRSIZE, tmpReg, arrReg, offset); // a 4 BYTE sign extending load
+ emit->emitIns_R_R(INS_cmp, EA_4BYTE, tgtReg, tmpReg);
+
+ emitJumpKind jmpGEU = genJumpKindForOper(GT_GE, CK_UNSIGNED);
+ genJumpToThrowHlpBlk(jmpGEU, SCK_RNGCHK_FAIL);
+
+ genProduceReg(arrIndex);
+}
+
+//------------------------------------------------------------------------
+// genCodeForArrOffset: Generates code to compute the flattened array offset for
+// one dimension of an array reference:
+// result = (prevDimOffset * dimSize) + effectiveIndex
+// where dimSize is obtained from the arrObj operand
+//
+// Arguments:
+// arrOffset - the node for which we're generating code
+//
+// Return Value:
+// None.
+//
+// Notes:
+// dimSize and effectiveIndex are always non-negative, the former by design,
+// and the latter because it has been normalized to be zero-based.
+
+void CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
+{
+ GenTreePtr offsetNode = arrOffset->gtOffset;
+ GenTreePtr indexNode = arrOffset->gtIndex;
+ regNumber tgtReg = arrOffset->gtRegNum;
+
+ noway_assert(tgtReg != REG_NA);
+
+ // A constant-zero prior-dimension offset means this is the outermost dimension:
+ // no multiply is needed and the result is just the effective index (see 'else' below).
+ if (!offsetNode->IsIntegralConst(0))
+ {
+ emitter* emit = getEmitter();
+ regNumber offsetReg = genConsumeReg(offsetNode);
+ regNumber indexReg = genConsumeReg(indexNode);
+ regNumber arrReg = genConsumeReg(arrOffset->gtArrObj);
+ noway_assert(offsetReg != REG_NA);
+ noway_assert(indexReg != REG_NA);
+ noway_assert(arrReg != REG_NA);
+
+ // gtRsvdRegs holds the single temp register reserved for this node;
+ // it is used to load the dimension size from the array object.
+ regMaskTP tmpRegMask = arrOffset->gtRsvdRegs;
+ regNumber tmpReg = genRegNumFromMask(tmpRegMask);
+ noway_assert(tmpReg != REG_NA);
+
+ unsigned dim = arrOffset->gtCurrDim;
+ unsigned rank = arrOffset->gtArrRank;
+ var_types elemType = arrOffset->gtArrElemType;
+ unsigned offset = genOffsetOfMDArrayDimensionSize(elemType, rank, dim);
+
+// Load tmpReg with the dimension size and evaluate
+// tgtReg = offsetReg*dim_size + indexReg.
+#if defined(_TARGET_ARM_)
+ emit->emitIns_R_R_I(ins_Load(TYP_INT), EA_4BYTE, tmpReg, arrReg, offset); // a 4 BYTE sign extending load
+ emit->emitIns_R_R_R(INS_MUL, EA_4BYTE, tgtReg, tmpReg, offsetReg);
+ emit->emitIns_R_R_R(INS_add, EA_4BYTE, tgtReg, tgtReg, indexReg);
+#elif defined(_TARGET_ARM64_)
+ // NOTE(review): EA_8BYTE with ins_Load(TYP_INT) presumably selects ldrsw
+ // (sign-extend 4 bytes into the 64-bit tmpReg) -- confirm against the emitter.
+ emit->emitIns_R_R_I(ins_Load(TYP_INT), EA_8BYTE, tmpReg, arrReg, offset); // a 4 BYTE sign extending load
+ // madd computes the multiply-accumulate in a single instruction.
+ emit->emitIns_R_R_R_R(INS_madd, EA_4BYTE, tgtReg, tmpReg, offsetReg, indexReg);
+#endif // _TARGET_*
+ }
+ else
+ {
+ // First dimension: result is simply the index; copy only if it is not
+ // already in the target register.
+ regNumber indexReg = genConsumeReg(indexNode);
+ if (indexReg != tgtReg)
+ {
+ inst_RV_RV(INS_mov, tgtReg, indexReg, TYP_INT);
+ }
+ }
+ genProduceReg(arrOffset);
+}
+
+//------------------------------------------------------------------------
+// indirForm: Make a temporary indir we can feed to pattern matching routines
+// in cases where we don't want to instantiate all the indirs that happen.
+//
+// The returned node lives on the stack and must not escape this use; it is
+// deliberately not linked into any LIR range.
+//
+GenTreeIndir CodeGen::indirForm(var_types type, GenTree* base)
+{
+ GenTreeIndir i(GT_IND, type, base, nullptr);
+ i.gtRegNum = REG_NA;
+ // has to be nonnull (because contained nodes can't be the last in block)
+ // but don't want it to be a valid pointer
+ i.gtNext = (GenTree*)(-1);
+ return i;
+}
+
+//------------------------------------------------------------------------
+// intForm: Make a temporary int we can feed to pattern matching routines
+// in cases where we don't want to instantiate.
+//
+// Stack-allocated scratch node, mirroring indirForm; never linked into LIR.
+//
+GenTreeIntCon CodeGen::intForm(var_types type, ssize_t value)
+{
+ GenTreeIntCon i(type, value);
+ i.gtRegNum = REG_NA;
+ // has to be nonnull (because contained nodes can't be the last in block)
+ // but don't want it to be a valid pointer
+ i.gtNext = (GenTree*)(-1);
+ return i;
+}
+
+//------------------------------------------------------------------------
+// genCodeForShift: Generates the code sequence for a GenTree node that
+// represents a bit shift or rotate operation (<<, >>, >>>, rol, ror).
+//
+// Arguments:
+// tree - the bit shift node (that specifies the type of bit shift to perform).
+//
+// Assumptions:
+// a) All GenTrees are register allocated.
+//
+void CodeGen::genCodeForShift(GenTreePtr tree)
+{
+ var_types targetType = tree->TypeGet();
+ genTreeOps oper = tree->OperGet();
+ instruction ins = genGetInsForOper(oper, targetType);
+ emitAttr size = emitTypeSize(tree);
+
+ assert(tree->gtRegNum != REG_NA);
+
+ genConsumeOperands(tree->AsOp());
+
+ GenTreePtr operand = tree->gtGetOp1();
+ GenTreePtr shiftBy = tree->gtGetOp2();
+ if (!shiftBy->IsCnsIntOrI())
+ {
+ // Variable shift amount: emit the register-register-register form.
+ getEmitter()->emitIns_R_R_R(ins, size, tree->gtRegNum, operand->gtRegNum, shiftBy->gtRegNum);
+ }
+ else
+ {
+ // Constant shift amount: mask it to the operand's bit width so the
+ // immediate is always in the instruction's legal range.
+ unsigned immWidth = emitter::getBitWidth(size); // For ARM64, immWidth will be set to 32 or 64
+ ssize_t shiftByImm = shiftBy->gtIntCon.gtIconVal & (immWidth - 1);
+
+ getEmitter()->emitIns_R_R_I(ins, size, tree->gtRegNum, operand->gtRegNum, shiftByImm);
+ }
+
+ genProduceReg(tree);
+}
+
+// Generate code for a CpBlk node by the means of the VM memcpy helper call
+// Preconditions:
+// a) The size argument of the CpBlk is not an integer constant
+// b) The size argument is a constant but is larger than CPBLK_MOVS_LIMIT bytes.
+void CodeGen::genCodeForCpBlk(GenTreeBlk* cpBlkNode)
+{
+ // Make sure we got the arguments of the cpblk operation in the right registers
+ unsigned blockSize = cpBlkNode->Size();
+ GenTreePtr dstAddr = cpBlkNode->Addr();
+ assert(!dstAddr->isContained());
+
+ // The memcpy helper takes (dst, src, size) in the first three argument registers.
+ genConsumeBlockOp(cpBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
+
+#ifdef _TARGET_ARM64_
+ // A known small size should have been handled by the unrolled path instead.
+ if (blockSize != 0)
+ {
+ assert(blockSize > CPBLK_UNROLL_LIMIT);
+ }
+#endif // _TARGET_ARM64_
+
+ genEmitHelperCall(CORINFO_HELP_MEMCPY, 0, EA_UNKNOWN);
+}
+
+// Generates code for InitBlk by calling the VM memset helper function.
+// Preconditions:
+// a) The size argument of the InitBlk is not an integer constant.
+// b) The size argument of the InitBlk is >= INITBLK_STOS_LIMIT bytes.
+void CodeGen::genCodeForInitBlk(GenTreeBlk* initBlkNode)
+{
+ // Make sure we got the arguments of the initblk operation in the right registers
+ unsigned size = initBlkNode->Size();
+ GenTreePtr dstAddr = initBlkNode->Addr();
+ GenTreePtr initVal = initBlkNode->Data();
+ // Unwrap a GT_INIT_VAL wrapper to get at the actual fill value.
+ if (initVal->OperIsInitVal())
+ {
+ initVal = initVal->gtGetOp1();
+ }
+
+ assert(!dstAddr->isContained());
+ assert(!initVal->isContained());
+ // The size must end up in REG_ARG_2: either it is a dynamic size node already
+ // allocated there, or LSRA reserved that register for the constant size.
+ if (initBlkNode->gtOper == GT_STORE_DYN_BLK)
+ {
+ assert(initBlkNode->AsDynBlk()->gtDynamicSize->gtRegNum == REG_ARG_2);
+ }
+ else
+ {
+ assert(initBlkNode->gtRsvdRegs == RBM_ARG_2);
+ }
+
+#ifdef _TARGET_ARM64_
+ // A known small size should have been handled by the unrolled path instead.
+ if (size != 0)
+ {
+ assert(size > INITBLK_UNROLL_LIMIT);
+ }
+#endif // _TARGET_ARM64_
+
+ genConsumeBlockOp(initBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
+ genEmitHelperCall(CORINFO_HELP_MEMSET, 0, EA_UNKNOWN);
+}
+
+//------------------------------------------------------------------------
+// genRegCopy: Generate a register copy.
+//
+// Arguments:
+// treeNode - the GT_COPY node whose source (gtOp1) is copied into the
+// node's assigned register.
+//
+void CodeGen::genRegCopy(GenTree* treeNode)
+{
+ assert(treeNode->OperGet() == GT_COPY);
+
+ var_types targetType = treeNode->TypeGet();
+ regNumber targetReg = treeNode->gtRegNum;
+ assert(targetReg != REG_NA);
+
+ GenTree* op1 = treeNode->gtOp.gtOp1;
+
+ // Check whether this node and the node from which we're copying the value have the same
+ // register type.
+ // This can happen if (currently iff) we have a SIMD vector type that fits in an integer
+ // register, in which case it is passed as an argument, or returned from a call,
+ // in an integer register and must be copied if it's in an xmm register.
+
+ if (varTypeIsFloating(treeNode) != varTypeIsFloating(op1))
+ {
+ // Cross register-file copy (int <-> float): only implemented for ARM64.
+ NYI_ARM("genRegCopy floating point");
+#ifdef _TARGET_ARM64_
+ inst_RV_RV(INS_fmov, targetReg, genConsumeReg(op1), targetType);
+#endif // _TARGET_ARM64_
+ }
+ else
+ {
+ inst_RV_RV(ins_Copy(targetType), targetReg, genConsumeReg(op1), targetType);
+ }
+
+ if (op1->IsLocal())
+ {
+ // The lclVar will never be a def.
+ // If it is a last use, the lclVar will be killed by genConsumeReg(), as usual, and genProduceReg will
+ // appropriately set the gcInfo for the copied value.
+ // If not, there are two cases we need to handle:
+ // - If this is a TEMPORARY copy (indicated by the GTF_VAR_DEATH flag) the variable
+ // will remain live in its original register.
+ // genProduceReg() will appropriately set the gcInfo for the copied value,
+ // and genConsumeReg will reset it.
+ // - Otherwise, we need to update register info for the lclVar.
+
+ GenTreeLclVarCommon* lcl = op1->AsLclVarCommon();
+ assert((lcl->gtFlags & GTF_VAR_DEF) == 0);
+
+ if ((lcl->gtFlags & GTF_VAR_DEATH) == 0 && (treeNode->gtFlags & GTF_VAR_DEATH) == 0)
+ {
+ LclVarDsc* varDsc = &compiler->lvaTable[lcl->gtLclNum];
+
+ // If we didn't just spill it (in genConsumeReg, above), then update the register info
+ if (varDsc->lvRegNum != REG_STK)
+ {
+ // The old location is dying
+ genUpdateRegLife(varDsc, /*isBorn*/ false, /*isDying*/ true DEBUGARG(op1));
+
+ gcInfo.gcMarkRegSetNpt(genRegMask(op1->gtRegNum));
+
+ genUpdateVarReg(varDsc, treeNode);
+
+ // The new location is going live
+ genUpdateRegLife(varDsc, /*isBorn*/ true, /*isDying*/ false DEBUGARG(treeNode));
+ }
+ }
+ }
+
+ genProduceReg(treeNode);
+}
+
+//------------------------------------------------------------------------
+// genCallInstruction: Produce code for a GT_CALL node
+//
+// Arguments:
+// call - the GT_CALL node to generate code for
+//
+// Notes:
+// Handles consuming the late args into their ABI registers, the optional
+// "this" null check, direct/indirect/fast-tail-call dispatch, and moving
+// the return value(s) from the ABI return register(s) into the register(s)
+// allocated to the call node.
+//
+void CodeGen::genCallInstruction(GenTreeCall* call)
+{
+ gtCallTypes callType = (gtCallTypes)call->gtCallType;
+
+ IL_OFFSETX ilOffset = BAD_IL_OFFSET;
+
+ // all virtuals should have been expanded into a control expression
+ assert(!call->IsVirtual() || call->gtControlExpr || call->gtCallAddr);
+
+ // Consume all the arg regs
+ for (GenTreePtr list = call->gtCallLateArgs; list; list = list->MoveNext())
+ {
+ assert(list->OperIsList());
+
+ GenTreePtr argNode = list->Current();
+
+ fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, argNode->gtSkipReloadOrCopy());
+ assert(curArgTabEntry);
+
+ // Stack args were already placed by the PUTARG_STK nodes.
+ if (curArgTabEntry->regNum == REG_STK)
+ continue;
+
+ // Deal with multi register passed struct args.
+ if (argNode->OperGet() == GT_FIELD_LIST)
+ {
+ GenTreeArgList* argListPtr = argNode->AsArgList();
+ unsigned iterationNum = 0;
+ regNumber argReg = curArgTabEntry->regNum;
+ // Each field occupies the next sequential argument register.
+ for (; argListPtr != nullptr; argListPtr = argListPtr->Rest(), iterationNum++)
+ {
+ GenTreePtr putArgRegNode = argListPtr->gtOp.gtOp1;
+ assert(putArgRegNode->gtOper == GT_PUTARG_REG);
+
+ genConsumeReg(putArgRegNode);
+
+ if (putArgRegNode->gtRegNum != argReg)
+ {
+ inst_RV_RV(ins_Move_Extend(putArgRegNode->TypeGet(), putArgRegNode->InReg()), argReg,
+ putArgRegNode->gtRegNum);
+ }
+
+ argReg = genRegArgNext(argReg);
+ }
+ }
+ else
+ {
+ regNumber argReg = curArgTabEntry->regNum;
+ genConsumeReg(argNode);
+ if (argNode->gtRegNum != argReg)
+ {
+ inst_RV_RV(ins_Move_Extend(argNode->TypeGet(), argNode->InReg()), argReg, argNode->gtRegNum);
+ }
+ }
+
+ // In the case of a varargs call,
+ // the ABI dictates that if we have floating point args,
+ // we must pass the enregistered arguments in both the
+ // integer and floating point registers so, let's do that.
+ if (call->IsVarargs() && varTypeIsFloating(argNode))
+ {
+ NYI_ARM("CodeGen - IsVarargs");
+ NYI_ARM64("CodeGen - IsVarargs");
+ }
+ }
+
+ // Insert a null check on "this" pointer if asked.
+ if (call->NeedsNullCheck())
+ {
+ const regNumber regThis = genGetThisArgReg(call);
+
+#if defined(_TARGET_ARM_)
+ // ARM has no zero register, so a reserved temp receives the dummy load.
+ regMaskTP tempMask = genFindLowestBit(call->gtRsvdRegs);
+ const regNumber tmpReg = genRegNumFromMask(tempMask);
+ if (genCountBits(call->gtRsvdRegs) > 1)
+ {
+ call->gtRsvdRegs &= ~tempMask;
+ }
+ getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, regThis, 0);
+#elif defined(_TARGET_ARM64_)
+ // Load into the zero register: faults on null without clobbering anything.
+ getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_ZR, regThis, 0);
+#endif // _TARGET_*
+ }
+
+ // Either gtControlExpr != null or gtCallAddr != null or it is a direct non-virtual call to a user or helper method.
+ CORINFO_METHOD_HANDLE methHnd;
+ GenTree* target = call->gtControlExpr;
+ if (callType == CT_INDIRECT)
+ {
+ assert(target == nullptr);
+ target = call->gtCallAddr;
+ methHnd = nullptr;
+ }
+ else
+ {
+ methHnd = call->gtCallMethHnd;
+ }
+
+ CORINFO_SIG_INFO* sigInfo = nullptr;
+#ifdef DEBUG
+ // Pass the call signature information down into the emitter so the emitter can associate
+ // native call sites with the signatures they were generated from.
+ if (callType != CT_HELPER)
+ {
+ sigInfo = call->callSig;
+ }
+#endif // DEBUG
+
+ // If fast tail call, then we are done. In this case we setup the args (both reg args
+ // and stack args in incoming arg area) and call target. Epilog sequence would
+ // generate "br <reg>".
+ if (call->IsFastTailCall())
+ {
+ // Don't support fast tail calling JIT helpers
+ assert(callType != CT_HELPER);
+
+ // Fast tail calls materialize call target either in gtControlExpr or in gtCallAddr.
+ assert(target != nullptr);
+
+ genConsumeReg(target);
+
+ NYI_ARM("fast tail call");
+
+#ifdef _TARGET_ARM64_
+ // Use IP0 as the call target register.
+ if (target->gtRegNum != REG_IP0)
+ {
+ inst_RV_RV(INS_mov, REG_IP0, target->gtRegNum);
+ }
+#endif // _TARGET_ARM64_
+
+ return;
+ }
+
+ // For a pinvoke to unmanaged code we emit a label to clear
+ // the GC pointer state before the callsite.
+ // We can't utilize the typical lazy killing of GC pointers
+ // at (or inside) the callsite.
+ if (call->IsUnmanaged())
+ {
+ genDefineTempLabel(genCreateTempLabel());
+ }
+
+ // Determine return value size(s).
+ ReturnTypeDesc* pRetTypeDesc = call->GetReturnTypeDesc();
+ emitAttr retSize = EA_PTRSIZE;
+ emitAttr secondRetSize = EA_UNKNOWN;
+
+ if (call->HasMultiRegRetVal())
+ {
+ retSize = emitTypeSize(pRetTypeDesc->GetReturnRegType(0));
+ secondRetSize = emitTypeSize(pRetTypeDesc->GetReturnRegType(1));
+ }
+ else
+ {
+ assert(!varTypeIsStruct(call));
+
+ // GC-ref / byref returns must be tagged so the emitter records liveness.
+ if (call->gtType == TYP_REF || call->gtType == TYP_ARRAY)
+ {
+ retSize = EA_GCREF;
+ }
+ else if (call->gtType == TYP_BYREF)
+ {
+ retSize = EA_BYREF;
+ }
+ }
+
+ // We need to propagate the IL offset information to the call instruction, so we can emit
+ // an IL to native mapping record for the call, to support managed return value debugging.
+ // We don't want tail call helper calls that were converted from normal calls to get a record,
+ // so we skip this hash table lookup logic in that case.
+ if (compiler->opts.compDbgInfo && compiler->genCallSite2ILOffsetMap != nullptr && !call->IsTailCall())
+ {
+ (void)compiler->genCallSite2ILOffsetMap->Lookup(call, &ilOffset);
+ }
+
+ if (target != nullptr)
+ {
+ // A call target can not be a contained indirection
+ assert(!target->isContainedIndir());
+
+ genConsumeReg(target);
+
+ // We have already generated code for gtControlExpr evaluating it into a register.
+ // We just need to emit "call reg" in this case.
+ //
+ assert(genIsValidIntReg(target->gtRegNum));
+
+ genEmitCall(emitter::EC_INDIR_R, methHnd,
+ INDEBUG_LDISASM_COMMA(sigInfo) nullptr, // addr
+ retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize), ilOffset, target->gtRegNum);
+ }
+ else
+ {
+ // Generate a direct call to a non-virtual user defined or helper method
+ assert(callType == CT_HELPER || callType == CT_USER_FUNC);
+
+ void* addr = nullptr;
+ if (callType == CT_HELPER)
+ {
+ // Direct call to a helper method.
+ CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd);
+ noway_assert(helperNum != CORINFO_HELP_UNDEF);
+
+ // Fall back to the indirection cell address if the direct address is unavailable.
+ void* pAddr = nullptr;
+ addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr);
+
+ if (addr == nullptr)
+ {
+ addr = pAddr;
+ }
+ }
+ else
+ {
+ // Direct call to a non-virtual user function.
+ CORINFO_ACCESS_FLAGS aflags = CORINFO_ACCESS_ANY;
+ if (call->IsSameThis())
+ {
+ aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_THIS);
+ }
+
+ if ((call->NeedsNullCheck()) == 0)
+ {
+ aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_NONNULL);
+ }
+
+ CORINFO_CONST_LOOKUP addrInfo;
+ compiler->info.compCompHnd->getFunctionEntryPoint(methHnd, &addrInfo, aflags);
+
+ addr = addrInfo.addr;
+ }
+
+ assert(addr != nullptr);
+
+// Non-virtual direct call to known addresses
+#ifdef _TARGET_ARM_
+ // ARM BL has limited range; go indirect through a reserved temp when the
+ // target address does not fit the branch immediate.
+ if (!arm_Valid_Imm_For_BL((ssize_t)addr))
+ {
+ regNumber tmpReg = genRegNumFromMask(call->gtRsvdRegs);
+ instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, tmpReg, (ssize_t)addr);
+ genEmitCall(emitter::EC_INDIR_R, methHnd, INDEBUG_LDISASM_COMMA(sigInfo) NULL, retSize, ilOffset, tmpReg);
+ }
+ else
+#endif // _TARGET_ARM_
+ {
+ genEmitCall(emitter::EC_FUNC_TOKEN, methHnd, INDEBUG_LDISASM_COMMA(sigInfo) addr,
+ retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize), ilOffset);
+ }
+
+#if 0 && defined(_TARGET_ARM64_)
+ // Use this path if you want to load an absolute call target using
+ // a sequence of movs followed by an indirect call (blr instruction)
+
+ // Load the call target address in x16
+ instGen_Set_Reg_To_Imm(EA_8BYTE, REG_IP0, (ssize_t) addr);
+
+ // indirect call to constant address in IP0
+ genEmitCall(emitter::EC_INDIR_R,
+ methHnd,
+ INDEBUG_LDISASM_COMMA(sigInfo)
+ nullptr, //addr
+ retSize,
+ secondRetSize,
+ ilOffset,
+ REG_IP0);
+#endif
+ }
+
+ // if it was a pinvoke we may have needed to get the address of a label
+ if (genPendingCallLabel)
+ {
+ assert(call->IsUnmanaged());
+ genDefineTempLabel(genPendingCallLabel);
+ genPendingCallLabel = nullptr;
+ }
+
+ // Update GC info:
+ // All Callee arg registers are trashed and no longer contain any GC pointers.
+ // TODO-Bug?: As a matter of fact shouldn't we be killing all of callee trashed regs here?
+ // For now we will assert that other than arg regs gc ref/byref set doesn't contain any other
+ // registers from RBM_CALLEE_TRASH
+ assert((gcInfo.gcRegGCrefSetCur & (RBM_CALLEE_TRASH & ~RBM_ARG_REGS)) == 0);
+ assert((gcInfo.gcRegByrefSetCur & (RBM_CALLEE_TRASH & ~RBM_ARG_REGS)) == 0);
+ gcInfo.gcRegGCrefSetCur &= ~RBM_ARG_REGS;
+ gcInfo.gcRegByrefSetCur &= ~RBM_ARG_REGS;
+
+ var_types returnType = call->TypeGet();
+ if (returnType != TYP_VOID)
+ {
+ regNumber returnReg;
+
+ if (call->HasMultiRegRetVal())
+ {
+ assert(pRetTypeDesc != nullptr);
+ unsigned regCount = pRetTypeDesc->GetReturnRegCount();
+
+ // If regs allocated to call node are different from ABI return
+ // regs in which the call has returned its result, move the result
+ // to regs allocated to call node.
+ for (unsigned i = 0; i < regCount; ++i)
+ {
+ var_types regType = pRetTypeDesc->GetReturnRegType(i);
+ returnReg = pRetTypeDesc->GetABIReturnReg(i);
+ regNumber allocatedReg = call->GetRegNumByIdx(i);
+ if (returnReg != allocatedReg)
+ {
+ inst_RV_RV(ins_Copy(regType), allocatedReg, returnReg, regType);
+ }
+ }
+ }
+ else
+ {
+#ifdef _TARGET_ARM_
+ if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME))
+ {
+ // The CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with
+ // TCB in REG_PINVOKE_TCB. fgMorphCall() sets the correct argument registers.
+ returnReg = REG_PINVOKE_TCB;
+ }
+ else
+#endif // _TARGET_ARM_
+ if (varTypeIsFloating(returnType))
+ {
+ returnReg = REG_FLOATRET;
+ }
+ else
+ {
+ returnReg = REG_INTRET;
+ }
+
+ if (call->gtRegNum != returnReg)
+ {
+ inst_RV_RV(ins_Copy(returnType), call->gtRegNum, returnReg, returnType);
+ }
+ }
+
+ genProduceReg(call);
+ }
+
+ // If there is nothing next, that means the result is thrown away, so this value is not live.
+ // However, for minopts or debuggable code, we keep it live to support managed return value debugging.
+ if ((call->gtNext == nullptr) && !compiler->opts.MinOpts() && !compiler->opts.compDbgCode)
+ {
+ gcInfo.gcMarkRegSetNpt(RBM_INTRET);
+ }
+}
+
+//------------------------------------------------------------------------
+// genIntToIntCast: Generate code for an integer cast
+//
+// Arguments:
+// treeNode - The GT_CAST node
+//
+// Return Value:
+// None.
+//
+// Assumptions:
+// The treeNode must have an assigned register.
+// For a signed convert from byte, the source must be in a byte-addressable register.
+// Neither the source nor target type can be a floating point type.
+//
+// TODO-ARM64-CQ: Allow castOp to be a contained node without an assigned register.
+//
+void CodeGen::genIntToIntCast(GenTreePtr treeNode)
+{
+ assert(treeNode->OperGet() == GT_CAST);
+
+ GenTreePtr castOp = treeNode->gtCast.CastOp();
+ emitter* emit = getEmitter();
+
+ var_types dstType = treeNode->CastToType();
+ var_types srcType = genActualType(castOp->TypeGet());
+ emitAttr movSize = emitActualTypeSize(dstType);
+ bool movRequired = false;
+
+#ifdef _TARGET_ARM_
+ // 32-bit ARM handles long sources in a dedicated decomposed-long path.
+ if (varTypeIsLong(srcType))
+ {
+ genLongToIntCast(treeNode);
+ return;
+ }
+#endif // _TARGET_ARM_
+
+ regNumber targetReg = treeNode->gtRegNum;
+ regNumber sourceReg = castOp->gtRegNum;
+
+ // For Long to Int conversion we will have a reserved integer register to hold the immediate mask
+ regNumber tmpReg = (treeNode->gtRsvdRegs == RBM_NONE) ? REG_NA : genRegNumFromMask(treeNode->gtRsvdRegs);
+
+ assert(genIsValidIntReg(targetReg));
+ assert(genIsValidIntReg(sourceReg));
+
+ instruction ins = INS_invalid;
+
+ genConsumeReg(castOp);
+ Lowering::CastInfo castInfo;
+
+ // Get information about the cast.
+ Lowering::getCastDescription(treeNode, &castInfo);
+
+ if (castInfo.requiresOverflowCheck)
+ {
+ emitAttr cmpSize = EA_ATTR(genTypeSize(srcType));
+
+ if (castInfo.signCheckOnly)
+ {
+ // We only need to check for a negative value in sourceReg
+ emit->emitIns_R_I(INS_cmp, cmpSize, sourceReg, 0);
+ emitJumpKind jmpLT = genJumpKindForOper(GT_LT, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpLT, SCK_OVERFLOW);
+ noway_assert(genTypeSize(srcType) == 4 || genTypeSize(srcType) == 8);
+ // This is only interesting case to ensure zero-upper bits.
+ if ((srcType == TYP_INT) && (dstType == TYP_ULONG))
+ {
+ // cast to TYP_ULONG:
+ // We use a mov with size=EA_4BYTE
+ // which will zero out the upper bits
+ movSize = EA_4BYTE;
+ movRequired = true;
+ }
+ }
+ else if (castInfo.unsignedSource || castInfo.unsignedDest)
+ {
+ // When we are converting from/to unsigned,
+ // we only have to check for any bits set in 'typeMask'
+
+ noway_assert(castInfo.typeMask != 0);
+ emit->emitIns_R_I(INS_tst, cmpSize, sourceReg, castInfo.typeMask);
+ emitJumpKind jmpNotEqual = genJumpKindForOper(GT_NE, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpNotEqual, SCK_OVERFLOW);
+ }
+ else
+ {
+ // For a narrowing signed cast
+ //
+ // We must check the value is in a signed range.
+
+ // Compare with the MAX
+
+ noway_assert((castInfo.typeMin != 0) && (castInfo.typeMax != 0));
+
+// If the bound fits an instruction immediate, compare directly;
+// otherwise materialize it in the reserved tmpReg first.
+#if defined(_TARGET_ARM_)
+ if (emitter::emitIns_valid_imm_for_cmp(castInfo.typeMax, INS_FLAGS_DONT_CARE))
+#elif defined(_TARGET_ARM64_)
+ if (emitter::emitIns_valid_imm_for_cmp(castInfo.typeMax, cmpSize))
+#endif // _TARGET_*
+ {
+ emit->emitIns_R_I(INS_cmp, cmpSize, sourceReg, castInfo.typeMax);
+ }
+ else
+ {
+ noway_assert(tmpReg != REG_NA);
+ instGen_Set_Reg_To_Imm(cmpSize, tmpReg, castInfo.typeMax);
+ emit->emitIns_R_R(INS_cmp, cmpSize, sourceReg, tmpReg);
+ }
+
+ emitJumpKind jmpGT = genJumpKindForOper(GT_GT, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpGT, SCK_OVERFLOW);
+
+// Compare with the MIN
+
+#if defined(_TARGET_ARM_)
+ if (emitter::emitIns_valid_imm_for_cmp(castInfo.typeMin, INS_FLAGS_DONT_CARE))
+#elif defined(_TARGET_ARM64_)
+ if (emitter::emitIns_valid_imm_for_cmp(castInfo.typeMin, cmpSize))
+#endif // _TARGET_*
+ {
+ emit->emitIns_R_I(INS_cmp, cmpSize, sourceReg, castInfo.typeMin);
+ }
+ else
+ {
+ noway_assert(tmpReg != REG_NA);
+ instGen_Set_Reg_To_Imm(cmpSize, tmpReg, castInfo.typeMin);
+ emit->emitIns_R_R(INS_cmp, cmpSize, sourceReg, tmpReg);
+ }
+
+ emitJumpKind jmpLT = genJumpKindForOper(GT_LT, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpLT, SCK_OVERFLOW);
+ }
+ ins = INS_mov;
+ }
+ else // Non-overflow checking cast.
+ {
+ if (genTypeSize(srcType) == genTypeSize(dstType))
+ {
+ ins = INS_mov;
+ }
+ else
+ {
+ var_types extendType = TYP_UNKNOWN;
+
+ // If we need to treat a signed type as unsigned
+ if ((treeNode->gtFlags & GTF_UNSIGNED) != 0)
+ {
+ extendType = genUnsignedType(srcType);
+ movSize = emitTypeSize(extendType);
+ movRequired = true;
+ }
+ else
+ {
+ if (genTypeSize(srcType) < genTypeSize(dstType))
+ {
+ extendType = srcType;
+#ifdef _TARGET_ARM_
+ movSize = emitTypeSize(srcType);
+#endif // _TARGET_ARM_
+ if (srcType == TYP_UINT)
+ {
+#ifdef _TARGET_ARM64_
+ // If we are casting from a smaller type to
+ // a larger type, then we need to make sure the
+ // higher 4 bytes are zero to guarantee the correct value.
+ // Therefore using a mov with EA_4BYTE in place of EA_8BYTE
+ // will zero the upper bits
+ movSize = EA_4BYTE;
+#endif // _TARGET_ARM64_
+ movRequired = true;
+ }
+ }
+ else // (genTypeSize(srcType) > genTypeSize(dstType))
+ {
+ extendType = dstType;
+#if defined(_TARGET_ARM_)
+ movSize = emitTypeSize(dstType);
+#elif defined(_TARGET_ARM64_)
+ if (dstType == TYP_INT)
+ {
+ movSize = EA_8BYTE; // a sxtw instruction requires EA_8BYTE
+ }
+#endif // _TARGET_*
+ }
+ }
+
+ ins = ins_Move_Extend(extendType, castOp->InReg());
+ }
+ }
+
+ // We should never be generating a load from memory instruction here!
+ assert(!emit->emitInsIsLoad(ins));
+
+ // Elide the move entirely when it would be a same-register plain mov.
+ if ((ins != INS_mov) || movRequired || (targetReg != sourceReg))
+ {
+ emit->emitIns_R_R(ins, movSize, targetReg, sourceReg);
+ }
+
+ genProduceReg(treeNode);
+}
+
+//------------------------------------------------------------------------
+// genFloatToFloatCast: Generate code for a cast between float and double
+//
+// Arguments:
+// treeNode - The GT_CAST node
+//
+// Return Value:
+// None.
+//
+// Assumptions:
+// Cast is a non-overflow conversion.
+// The treeNode must have an assigned register.
+// The cast is between float and double.
+//
+void CodeGen::genFloatToFloatCast(GenTreePtr treeNode)
+{
+ // float <--> double conversions are always non-overflow ones
+ assert(treeNode->OperGet() == GT_CAST);
+ assert(!treeNode->gtOverflow());
+
+ regNumber targetReg = treeNode->gtRegNum;
+ assert(genIsValidFloatReg(targetReg));
+
+ GenTreePtr op1 = treeNode->gtOp.gtOp1;
+ assert(!op1->isContained()); // Cannot be contained
+ assert(genIsValidFloatReg(op1->gtRegNum)); // Must be a valid float reg.
+
+ var_types dstType = treeNode->CastToType();
+ var_types srcType = op1->TypeGet();
+ assert(varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
+
+ genConsumeOperands(treeNode->AsOp());
+
+ // treeNode must be a reg
+ assert(!treeNode->isContained());
+
+// Same logic on both targets: convert when the precision changes,
+// otherwise just move between registers when they differ.
+#if defined(_TARGET_ARM_)
+
+ if (srcType != dstType)
+ {
+ instruction insVcvt = (srcType == TYP_FLOAT) ? INS_vcvt_f2d // convert Float to Double
+ : INS_vcvt_d2f; // convert Double to Float
+
+ getEmitter()->emitIns_R_R(insVcvt, emitTypeSize(treeNode), treeNode->gtRegNum, op1->gtRegNum);
+ }
+ else if (treeNode->gtRegNum != op1->gtRegNum)
+ {
+ getEmitter()->emitIns_R_R(INS_vmov, emitTypeSize(treeNode), treeNode->gtRegNum, op1->gtRegNum);
+ }
+
+#elif defined(_TARGET_ARM64_)
+
+ if (srcType != dstType)
+ {
+ insOpts cvtOption = (srcType == TYP_FLOAT) ? INS_OPTS_S_TO_D // convert Single to Double
+ : INS_OPTS_D_TO_S; // convert Double to Single
+
+ getEmitter()->emitIns_R_R(INS_fcvt, emitTypeSize(treeNode), treeNode->gtRegNum, op1->gtRegNum, cvtOption);
+ }
+ else if (treeNode->gtRegNum != op1->gtRegNum)
+ {
+ // If double to double cast or float to float cast. Emit a move instruction.
+ getEmitter()->emitIns_R_R(INS_mov, emitTypeSize(treeNode), treeNode->gtRegNum, op1->gtRegNum);
+ }
+
+#endif // _TARGET_*
+
+ genProduceReg(treeNode);
+}
+
+//------------------------------------------------------------------------
+// genCreateAndStoreGCInfo: Create and record GC Info for the function.
+//
+// Arguments:
+// codeSize - size of the generated native code
+// prologSize - size of the prolog
+// epilogSize - size of the epilog (unused here)
+// codePtr - (DEBUG only) pointer to the generated code
+//
+void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize,
+ unsigned prologSize,
+ unsigned epilogSize DEBUGARG(void* codePtr))
+{
+ IAllocator* allowZeroAlloc = new (compiler, CMK_GC) AllowZeroAllocator(compiler->getAllocatorGC());
+ GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC)
+ GcInfoEncoder(compiler->info.compCompHnd, compiler->info.compMethodInfo, allowZeroAlloc, NOMEM);
+ assert(gcInfoEncoder != nullptr);
+
+ // Follow the code pattern of the x86 gc info encoder (genCreateAndStoreGCInfoJIT32).
+ gcInfo.gcInfoBlockHdrSave(gcInfoEncoder, codeSize, prologSize);
+
+ // We keep the call count for the second call to gcMakeRegPtrTable() below.
+ unsigned callCnt = 0;
+
+ // First we figure out the encoder ID's for the stack slots and registers.
+ gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_ASSIGN_SLOTS, &callCnt);
+
+ // Now we've requested all the slots we'll need; "finalize" these (make more compact data structures for them).
+ gcInfoEncoder->FinalizeSlotIds();
+
+ // Now we can actually use those slot ID's to declare live ranges.
+ gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_DO_WORK, &callCnt);
+
+#ifdef _TARGET_ARM64_
+
+ if (compiler->opts.compDbgEnC)
+ {
+ // NOTE(review): this comment block names x64 registers (RBP/RSI/RDI) under an
+ // ARM64-only #ifdef; it appears copied from the x64 implementation -- confirm
+ // the slot accounting matches the ARM64 EnC frame layout.
+ // what we have to preserve is called the "frame header" (see comments in VM\eetwain.cpp)
+ // which is:
+ // -return address
+ // -saved off RBP
+ // -saved 'this' pointer and bool for synchronized methods
+
+ // 4 slots for RBP + return address + RSI + RDI
+ int preservedAreaSize = 4 * REGSIZE_BYTES;
+
+ if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
+ {
+ if (!(compiler->info.compFlags & CORINFO_FLG_STATIC))
+ preservedAreaSize += REGSIZE_BYTES;
+
+ preservedAreaSize += 1; // bool for synchronized methods
+ }
+
+ // Used to signal both that the method is compiled for EnC, and also the size of the block at the top of the
+ // frame
+ gcInfoEncoder->SetSizeOfEditAndContinuePreservedArea(preservedAreaSize);
+ }
+
+#endif // _TARGET_ARM64_
+
+ gcInfoEncoder->Build();
+
+ // GC Encoder automatically puts the GC info in the right spot using ICorJitInfo::allocGCInfo(size_t)
+ // let's save the values anyway for debugging purposes
+ compiler->compInfoBlkAddr = gcInfoEncoder->Emit();
+ compiler->compInfoBlkSize = 0; // not exposed by the GCEncoder interface
+}
+
+#endif // _TARGET_ARMARCH_
+
+#endif // !LEGACY_BACKEND
diff --git a/src/jit/codegenxarch.cpp b/src/jit/codegenxarch.cpp
index 5c23cb525a..23c2a186a4 100644
--- a/src/jit/codegenxarch.cpp
+++ b/src/jit/codegenxarch.cpp
@@ -5310,9 +5310,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call)
gcInfo.gcMarkRegSetNpt(RBM_INTRET);
}
- unsigned stackAdjustBias = 0;
-
-#if defined(_TARGET_X86_)
+#if !FEATURE_EH_FUNCLETS
//-------------------------------------------------------------------------
// Create a label for tracking of region protected by the monitor in synchronized methods.
// This needs to be here, rather than above where fPossibleSyncHelperCall is set,
@@ -5340,7 +5338,11 @@ void CodeGen::genCallInstruction(GenTreeCall* call)
break;
}
}
+#endif // !FEATURE_EH_FUNCLETS
+
+ unsigned stackAdjustBias = 0;
+#if defined(_TARGET_X86_)
// Is the caller supposed to pop the arguments?
if (fCallerPop && (stackArgBytes != 0))
{
diff --git a/src/jit/compiler.h b/src/jit/compiler.h
index 832ed031d8..9ca0e1a3e1 100644
--- a/src/jit/compiler.h
+++ b/src/jit/compiler.h
@@ -3646,7 +3646,7 @@ public:
GenTreePtr fgGetCritSectOfStaticMethod();
-#if !defined(_TARGET_X86_)
+#if FEATURE_EH_FUNCLETS
void fgAddSyncMethodEnterExit();
@@ -3654,7 +3654,7 @@ public:
void fgConvertSyncReturnToLeave(BasicBlock* block);
-#endif // !_TARGET_X86_
+#endif // FEATURE_EH_FUNCLETS
void fgAddReversePInvokeEnterExit();
@@ -9482,6 +9482,10 @@ const instruction INS_ADDC = INS_adc;
const instruction INS_SUBC = INS_sbc;
const instruction INS_NOT = INS_mvn;
+const instruction INS_ABS = INS_vabs;
+const instruction INS_ROUND = INS_invalid;
+const instruction INS_SQRT = INS_vsqrt;
+
#endif
#ifdef _TARGET_ARM64_
@@ -9503,6 +9507,10 @@ const instruction INS_ADDC = INS_adc;
const instruction INS_SUBC = INS_sbc;
const instruction INS_NOT = INS_mvn;
+const instruction INS_ABS = INS_fabs;
+const instruction INS_ROUND = INS_frintn;
+const instruction INS_SQRT = INS_fsqrt;
+
#endif
/*****************************************************************************/
diff --git a/src/jit/decomposelongs.cpp b/src/jit/decomposelongs.cpp
index 5130012545..d284c1cb47 100644
--- a/src/jit/decomposelongs.cpp
+++ b/src/jit/decomposelongs.cpp
@@ -265,7 +265,7 @@ GenTree* DecomposeLongs::DecomposeNode(GenTree* tree)
default:
{
JITDUMP("Illegal TYP_LONG node %s in Decomposition.", GenTree::NodeName(tree->OperGet()));
- noway_assert(!"Illegal TYP_LONG node in Decomposition.");
+ assert(!"Illegal TYP_LONG node in Decomposition.");
break;
}
}
@@ -607,9 +607,7 @@ GenTree* DecomposeLongs::DecomposeCast(LIR::Use& use)
hiResult->gtFlags &= ~GTF_UNSIGNED;
hiResult->gtOp.gtOp1 = hiSrcOp;
- Range().Remove(cast);
Range().Remove(srcOp);
- Range().InsertAfter(hiSrcOp, hiResult);
}
else
{
@@ -654,8 +652,8 @@ GenTree* DecomposeLongs::DecomposeCast(LIR::Use& use)
loResult = cast->gtGetOp1();
hiResult = m_compiler->gtNewZeroConNode(TYP_INT);
+ Range().InsertAfter(cast, hiResult);
Range().Remove(cast);
- Range().InsertAfter(loResult, hiResult);
}
else
{
@@ -668,9 +666,10 @@ GenTree* DecomposeLongs::DecomposeCast(LIR::Use& use)
GenTree* shiftBy = m_compiler->gtNewIconNode(31, TYP_INT);
hiResult = m_compiler->gtNewOperNode(GT_RSH, TYP_INT, loCopy, shiftBy);
- Range().Remove(cast);
- Range().InsertAfter(loResult, loCopy, shiftBy, hiResult);
+ Range().InsertAfter(cast, loCopy, shiftBy, hiResult);
m_compiler->lvaIncRefCnts(loCopy);
+
+ Range().Remove(cast);
}
}
}
@@ -1014,15 +1013,25 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
{
assert(use.IsInitialized());
- GenTree* tree = use.Def();
- GenTree* gtLong = tree->gtGetOp1();
+ GenTree* shift = use.Def();
+ GenTree* gtLong = shift->gtGetOp1();
GenTree* loOp1 = gtLong->gtGetOp1();
GenTree* hiOp1 = gtLong->gtGetOp2();
- GenTree* shiftByOp = tree->gtGetOp2();
+ GenTree* shiftByOp = shift->gtGetOp2();
- genTreeOps oper = tree->OperGet();
+ genTreeOps oper = shift->OperGet();
genTreeOps shiftByOper = shiftByOp->OperGet();
+ // tLo = ...
+ // ...
+ // tHi = ...
+ // ...
+ // tLong = long tLo, tHi
+ // ...
+ // tShiftAmount = ...
+ // ...
+ // tShift = shift tLong, tShiftAmount
+
assert((oper == GT_LSH) || (oper == GT_RSH) || (oper == GT_RSZ));
// If we are shifting by a constant int, we do not want to use a helper, instead, we decompose.
@@ -1033,9 +1042,9 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
if (count == 0)
{
- GenTree* next = tree->gtNext;
- // Remove tree and don't do anything else.
- Range().Remove(tree);
+ GenTree* next = shift->gtNext;
+ // Remove shift and don't do anything else.
+ Range().Remove(shift);
use.ReplaceWith(m_compiler, gtLong);
return next;
}
@@ -1049,15 +1058,27 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
{
case GT_LSH:
{
- Range().Remove(hiOp1);
if (count < 32)
{
- // Hi is a GT_LSH_HI, lo is a GT_LSH. Will produce:
- // reg1 = lo
- // shl lo, shift
- // shld hi, reg1, shift
+ // For shifts of < 32 bits, we transform the code to:
+ //
+ // tLo = ...
+ // st.lclVar vLo, tLo
+ // ...
+ // tHi = ...
+ // ...
+ // tShiftLo = lsh vLo, tShiftAmountLo
+ // tShitHiLong = long vLo, tHi
+ // tShiftHi = lsh_hi tShiftHiLong, tShiftAmountHi
+ //
+ // This will produce:
+ //
+ // reg1 = lo
+ // shl lo, shift
+ // shld hi, reg1, shift
Range().Remove(gtLong);
+
loOp1 = RepresentOpAsLocalVar(loOp1, gtLong, &gtLong->gtOp.gtOp1);
unsigned loOp1LclNum = loOp1->AsLclVarCommon()->gtLclNum;
Range().Remove(loOp1);
@@ -1075,16 +1096,25 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
m_compiler->lvaIncRefCnts(loCopy);
- Range().InsertBefore(tree, loCopy, hiOp1, hiOp);
- Range().InsertBefore(tree, shiftByHi, hiResult);
- Range().InsertBefore(tree, loOp1, shiftByLo, loResult);
+ Range().InsertBefore(shift, loOp1, shiftByLo, loResult);
+ Range().InsertBefore(shift, loCopy, hiOp, shiftByHi, hiResult);
- insertAfter = loResult;
+ insertAfter = hiResult;
}
else
{
assert(count >= 32);
+ // Since we're left shifting at least 32 bits, we can remove the hi part of the shifted value iff
+ // it has no side effects.
+ //
+ // TODO-CQ: we could go perform this removal transitively (i.e. iteratively remove everything that
+ // feeds the hi operand while there are no side effects)
+ if ((hiOp1->gtFlags & GTF_ALL_EFFECT) == 0)
+ {
+ Range().Remove(hiOp1);
+ }
+
if (count < 64)
{
if (count == 32)
@@ -1103,7 +1133,6 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
else
{
Range().Remove(gtLong);
- Range().Remove(loOp1);
assert(count > 32 && count < 64);
// Move loOp1 into hiResult, do a GT_LSH with count - 32.
@@ -1111,23 +1140,33 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
// temp
GenTree* shiftBy = m_compiler->gtNewIconNode(count - 32, TYP_INT);
hiResult = m_compiler->gtNewOperNode(oper, TYP_INT, loOp1, shiftBy);
- Range().InsertBefore(tree, loOp1, shiftBy, hiResult);
+ Range().InsertBefore(shift, shiftBy, hiResult);
}
}
else
{
- Range().Remove(gtLong);
- Range().Remove(loOp1);
assert(count >= 64);
+ Range().Remove(gtLong);
+
+ // Since we're left shifting at least 64 bits, we can remove the lo part of the shifted value
+ // iff it has no side effects.
+ //
+ // TODO-CQ: we could go perform this removal transitively (i.e. iteratively remove everything
+ // that feeds the lo operand while there are no side effects)
+ if ((loOp1->gtFlags & GTF_ALL_EFFECT) == 0)
+ {
+ Range().Remove(loOp1);
+ }
+
// Zero out hi (shift of >= 64 bits moves all the bits out of the two registers)
hiResult = m_compiler->gtNewZeroConNode(TYP_INT);
- Range().InsertBefore(tree, hiResult);
+ Range().InsertBefore(shift, hiResult);
}
// Zero out loResult (shift of >= 32 bits shifts all lo bits to hiResult)
loResult = m_compiler->gtNewZeroConNode(TYP_INT);
- Range().InsertBefore(tree, loResult);
+ Range().InsertBefore(shift, loResult);
insertAfter = loResult;
}
@@ -1160,14 +1199,22 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
GenTree* loOp = new (m_compiler, GT_LONG) GenTreeOp(GT_LONG, TYP_LONG, loOp1, hiCopy);
loResult = m_compiler->gtNewOperNode(GT_RSH_LO, TYP_INT, loOp, shiftByLo);
- Range().InsertBefore(tree, hiCopy, loOp);
- Range().InsertBefore(tree, shiftByLo, loResult);
- Range().InsertBefore(tree, shiftByHi, hiResult);
+ Range().InsertBefore(shift, hiCopy, loOp);
+ Range().InsertBefore(shift, shiftByLo, loResult);
+ Range().InsertBefore(shift, shiftByHi, hiResult);
}
else
{
- Range().Remove(loOp1);
- Range().Remove(hiOp1);
+ // Since we're right shifting at least 32 bits, we can remove the lo part of the shifted value iff
+ // it has no side effects.
+ //
+ // TODO-CQ: we could go perform this removal transitively (i.e. iteratively remove everything that
+ // feeds the lo operand while there are no side effects)
+ if ((loOp1->gtFlags & GTF_ALL_EFFECT) == 0)
+ {
+ Range().Remove(loOp1);
+ }
+
assert(count >= 32);
if (count < 64)
{
@@ -1175,7 +1222,6 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
{
// Move hiOp1 into loResult.
loResult = hiOp1;
- Range().InsertBefore(tree, loResult);
}
else
{
@@ -1184,21 +1230,31 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
// Move hiOp1 into loResult, do a GT_RSZ with count - 32.
GenTree* shiftBy = m_compiler->gtNewIconNode(count - 32, TYP_INT);
loResult = m_compiler->gtNewOperNode(oper, TYP_INT, hiOp1, shiftBy);
- Range().InsertBefore(tree, hiOp1, shiftBy, loResult);
+ Range().InsertBefore(shift, shiftBy, loResult);
}
}
else
{
assert(count >= 64);
+ // Since we're right shifting at least 64 bits, we can remove the hi part of the shifted value
+ // iff it has no side effects.
+ //
+ // TODO-CQ: we could go perform this removal transitively (i.e. iteratively remove everything
+ // that feeds the hi operand while there are no side effects)
+ if ((hiOp1->gtFlags & GTF_ALL_EFFECT) == 0)
+ {
+ Range().Remove(hiOp1);
+ }
+
// Zero out lo
loResult = m_compiler->gtNewZeroConNode(TYP_INT);
- Range().InsertBefore(tree, loResult);
+ Range().InsertBefore(shift, loResult);
}
// Zero out hi
hiResult = m_compiler->gtNewZeroConNode(TYP_INT);
- Range().InsertBefore(tree, hiResult);
+ Range().InsertBefore(shift, hiResult);
}
insertAfter = hiResult;
@@ -1207,7 +1263,6 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
case GT_RSH:
{
Range().Remove(gtLong);
- Range().Remove(loOp1);
hiOp1 = RepresentOpAsLocalVar(hiOp1, gtLong, &gtLong->gtOp.gtOp2);
unsigned hiOp1LclNum = hiOp1->AsLclVarCommon()->gtLclNum;
@@ -1232,20 +1287,31 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
GenTree* loOp = new (m_compiler, GT_LONG) GenTreeOp(GT_LONG, TYP_LONG, loOp1, hiCopy);
loResult = m_compiler->gtNewOperNode(GT_RSH_LO, TYP_INT, loOp, shiftByLo);
- Range().InsertBefore(tree, loOp1, hiCopy, loOp);
- Range().InsertBefore(tree, shiftByLo, loResult);
- Range().InsertBefore(tree, shiftByHi, hiOp1, hiResult);
+ Range().InsertBefore(shift, hiCopy, loOp);
+ Range().InsertBefore(shift, shiftByLo, loResult);
+ Range().InsertBefore(shift, shiftByHi, hiOp1, hiResult);
}
else
{
assert(count >= 32);
+
+ // Since we're right shifting at least 32 bits, we can remove the lo part of the shifted value iff
+ // it has no side effects.
+ //
+ // TODO-CQ: we could go perform this removal transitively (i.e. iteratively remove everything that
+ // feeds the lo operand while there are no side effects)
+ if ((loOp1->gtFlags & GTF_ALL_EFFECT) == 0)
+ {
+ Range().Remove(loOp1);
+ }
+
if (count < 64)
{
if (count == 32)
{
// Move hiOp1 into loResult.
loResult = hiOp1;
- Range().InsertBefore(tree, loResult);
+ Range().InsertBefore(shift, loResult);
}
else
{
@@ -1254,13 +1320,13 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
// Move hiOp1 into loResult, do a GT_RSH with count - 32.
GenTree* shiftBy = m_compiler->gtNewIconNode(count - 32, TYP_INT);
loResult = m_compiler->gtNewOperNode(oper, TYP_INT, hiOp1, shiftBy);
- Range().InsertBefore(tree, hiOp1, shiftBy, loResult);
+ Range().InsertBefore(shift, hiOp1, shiftBy, loResult);
}
// Propagate sign bit in hiResult
GenTree* shiftBy = m_compiler->gtNewIconNode(31, TYP_INT);
hiResult = m_compiler->gtNewOperNode(GT_RSH, TYP_INT, hiCopy, shiftBy);
- Range().InsertBefore(tree, shiftBy, hiCopy, hiResult);
+ Range().InsertBefore(shift, shiftBy, hiCopy, hiResult);
m_compiler->lvaIncRefCnts(hiCopy);
}
@@ -1271,12 +1337,12 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
// Propagate sign bit in loResult
GenTree* loShiftBy = m_compiler->gtNewIconNode(31, TYP_INT);
loResult = m_compiler->gtNewOperNode(GT_RSH, TYP_INT, hiCopy, loShiftBy);
- Range().InsertBefore(tree, hiCopy, loShiftBy, loResult);
+ Range().InsertBefore(shift, hiCopy, loShiftBy, loResult);
// Propagate sign bit in hiResult
GenTree* shiftBy = m_compiler->gtNewIconNode(31, TYP_INT);
hiResult = m_compiler->gtNewOperNode(GT_RSH, TYP_INT, hiOp1, shiftBy);
- Range().InsertBefore(tree, shiftBy, hiOp1, hiResult);
+ Range().InsertBefore(shift, shiftBy, hiOp1, hiResult);
m_compiler->lvaIncRefCnts(hiCopy);
}
@@ -1289,15 +1355,16 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
unreached();
}
- // Remove tree from Range
- Range().Remove(tree);
+ // Remove shift from Range
+ Range().Remove(shift);
return FinalizeDecomposition(use, loResult, hiResult, insertAfter);
}
else
{
- // arguments are single used, but LIR call can work only with local vars.
- shiftByOp = RepresentOpAsLocalVar(shiftByOp, tree, &tree->gtOp.gtOp2);
+ // Because calls must be created as HIR and lowered to LIR, we need to dump
+ // any LIR temps into lclVars before using them as arguments.
+ shiftByOp = RepresentOpAsLocalVar(shiftByOp, shift, &shift->gtOp.gtOp2);
loOp1 = RepresentOpAsLocalVar(loOp1, gtLong, &gtLong->gtOp.gtOp1);
hiOp1 = RepresentOpAsLocalVar(hiOp1, gtLong, &gtLong->gtOp.gtOp2);
@@ -1326,16 +1393,16 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
GenTreeArgList* argList = m_compiler->gtNewArgList(loOp1, hiOp1, shiftByOp);
GenTree* call = m_compiler->gtNewHelperCallNode(helper, TYP_LONG, 0, argList);
- call->gtFlags |= tree->gtFlags & GTF_ALL_EFFECT;
+ call->gtFlags |= shift->gtFlags & GTF_ALL_EFFECT;
GenTreeCall* callNode = call->AsCall();
ReturnTypeDesc* retTypeDesc = callNode->GetReturnTypeDesc();
retTypeDesc->InitializeLongReturnType(m_compiler);
call = m_compiler->fgMorphArgs(callNode);
- Range().InsertAfter(tree, LIR::SeqTree(m_compiler, call));
+ Range().InsertAfter(shift, LIR::SeqTree(m_compiler, call));
- Range().Remove(tree);
+ Range().Remove(shift);
use.ReplaceWith(m_compiler, call);
return call;
}
diff --git a/src/jit/emitarm.cpp b/src/jit/emitarm.cpp
index ebefa27fb8..53ee88b3a2 100644
--- a/src/jit/emitarm.cpp
+++ b/src/jit/emitarm.cpp
@@ -1422,6 +1422,20 @@ DONE:
/*****************************************************************************
*
+ * emitIns_valid_imm_for_ldst_offset() returns true when the immediate 'imm'
+ * can be encoded as the offset in a ldr/str instruction.
+ */
+/*static*/ bool emitter::emitIns_valid_imm_for_ldst_offset(int imm, emitAttr size)
+{
+ if ((imm & 0x0fff) == imm)
+ return true; // encodable using IF_T2_K1
+ if (unsigned_abs(imm) <= 0x0ff)
+ return true; // encodable using IF_T2_H0
+ return false;
+}
+
+/*****************************************************************************
+ *
* Add an instruction with no operands.
*/
@@ -7608,10 +7622,26 @@ void emitter::emitInsLoadStoreOp(instruction ins, emitAttr attr, regNumber dataR
}
}
}
- else
+ else // no Index
{
- // TODO check offset is valid for encoding
- emitIns_R_R_I(ins, attr, dataReg, memBase->gtRegNum, offset);
+ if (emitIns_valid_imm_for_ldst_offset(offset, attr))
+ {
+ // Then load/store dataReg from/to [memBase + offset]
+ emitIns_R_R_I(ins, attr, dataReg, memBase->gtRegNum, offset);
+ }
+ else
+ {
+ // We require a tmpReg to hold the offset
+ regMaskTP tmpRegMask = indir->gtRsvdRegs;
+ regNumber tmpReg = genRegNumFromMask(tmpRegMask);
+ noway_assert(tmpReg != REG_NA);
+
+ // First load/store tmpReg with the large offset constant
+ codeGen->instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, offset);
+
+ // Then load/store dataReg from/to [memBase + tmpReg]
+ emitIns_R_R_R(ins, attr, dataReg, memBase->gtRegNum, tmpReg);
+ }
}
}
else
diff --git a/src/jit/emitarm.h b/src/jit/emitarm.h
index 4ec189385c..1e286e8425 100644
--- a/src/jit/emitarm.h
+++ b/src/jit/emitarm.h
@@ -232,6 +232,13 @@ inline static bool insOptsROR(insOpts opt)
return (opt == INS_OPTS_ROR);
}
+// Returns the number of bits used by the given 'size'.
+inline static unsigned getBitWidth(emitAttr size)
+{
+ assert(size <= EA_8BYTE);
+ return (unsigned)size * BITS_PER_BYTE;
+}
+
/************************************************************************/
/* The public entry points to output instructions */
/************************************************************************/
@@ -243,6 +250,7 @@ static bool emitIns_valid_imm_for_small_mov(regNumber reg, int imm, insFlags fla
static bool emitIns_valid_imm_for_add(int imm, insFlags flags);
static bool emitIns_valid_imm_for_cmp(int imm, insFlags flags);
static bool emitIns_valid_imm_for_add_sp(int imm);
+static bool emitIns_valid_imm_for_ldst_offset(int imm, emitAttr size);
void emitIns(instruction ins);
diff --git a/src/jit/emitarm64.cpp b/src/jit/emitarm64.cpp
index 70409b8cd2..93994e7918 100644
--- a/src/jit/emitarm64.cpp
+++ b/src/jit/emitarm64.cpp
@@ -10813,18 +10813,20 @@ void emitter::emitInsLoadStoreOp(instruction ins, emitAttr attr, regNumber dataR
regNumber tmpReg = genRegNumFromMask(tmpRegMask);
noway_assert(tmpReg != REG_NA);
+ emitAttr addType = varTypeIsGC(memBase) ? EA_BYREF : EA_PTRSIZE;
+
if (emitIns_valid_imm_for_add(offset, EA_8BYTE))
{
if (lsl > 0)
{
// Generate code to set tmpReg = base + index*scale
- emitIns_R_R_R_I(INS_add, EA_PTRSIZE, tmpReg, memBase->gtRegNum, index->gtRegNum, lsl,
+ emitIns_R_R_R_I(INS_add, addType, tmpReg, memBase->gtRegNum, index->gtRegNum, lsl,
INS_OPTS_LSL);
}
else // no scale
{
// Generate code to set tmpReg = base + index
- emitIns_R_R_R(INS_add, EA_PTRSIZE, tmpReg, memBase->gtRegNum, index->gtRegNum);
+ emitIns_R_R_R(INS_add, addType, tmpReg, memBase->gtRegNum, index->gtRegNum);
}
noway_assert(emitInsIsLoad(ins) || (tmpReg != dataReg));
@@ -10839,7 +10841,7 @@ void emitter::emitInsLoadStoreOp(instruction ins, emitAttr attr, regNumber dataR
codeGen->instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, offset);
// Then add the base register
// rd = rd + base
- emitIns_R_R_R(INS_add, EA_PTRSIZE, tmpReg, tmpReg, memBase->gtRegNum);
+ emitIns_R_R_R(INS_add, addType, tmpReg, tmpReg, memBase->gtRegNum);
noway_assert(emitInsIsLoad(ins) || (tmpReg != dataReg));
noway_assert(tmpReg != index->gtRegNum);
diff --git a/src/jit/flowgraph.cpp b/src/jit/flowgraph.cpp
index 90e37bd37a..3374b8c820 100644
--- a/src/jit/flowgraph.cpp
+++ b/src/jit/flowgraph.cpp
@@ -7630,7 +7630,7 @@ GenTreePtr Compiler::fgGetCritSectOfStaticMethod()
return tree;
}
-#if !defined(_TARGET_X86_)
+#if FEATURE_EH_FUNCLETS
/*****************************************************************************
*
@@ -8005,7 +8005,7 @@ void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block)
#endif
}
-#endif // !_TARGET_X86_
+#endif // FEATURE_EH_FUNCLETS
//------------------------------------------------------------------------
// fgAddReversePInvokeEnterExit: Add enter/exit calls for reverse PInvoke methods
@@ -8266,7 +8266,7 @@ void Compiler::fgAddInternal()
}
}
-#if !defined(_TARGET_X86_)
+#if FEATURE_EH_FUNCLETS
// Add the synchronized method enter/exit calls and try/finally protection. Note
// that this must happen before the one BBJ_RETURN block is created below, so the
// BBJ_RETURN block gets placed at the top-level, not within an EH region. (Otherwise,
@@ -8276,7 +8276,7 @@ void Compiler::fgAddInternal()
{
fgAddSyncMethodEnterExit();
}
-#endif // !_TARGET_X86_
+#endif // FEATURE_EH_FUNCLETS
if (oneReturn)
{
@@ -8495,7 +8495,7 @@ void Compiler::fgAddInternal()
#endif
}
-#if defined(_TARGET_X86_)
+#if !FEATURE_EH_FUNCLETS
/* Is this a 'synchronized' method? */
@@ -8571,7 +8571,7 @@ void Compiler::fgAddInternal()
syncEndEmitCookie = NULL;
}
-#endif // _TARGET_X86_
+#endif // !FEATURE_EH_FUNCLETS
/* Do we need to do runtime call out to check the security? */
diff --git a/src/jit/gcencode.cpp b/src/jit/gcencode.cpp
index 1e9f288045..4c300ac15f 100644
--- a/src/jit/gcencode.cpp
+++ b/src/jit/gcencode.cpp
@@ -1318,6 +1318,8 @@ size_t GCInfo::gcInfoBlockHdrSave(
header->syncStartOffset = INVALID_SYNC_OFFSET;
header->syncEndOffset = INVALID_SYNC_OFFSET;
+#ifndef UNIX_X86_ABI
+ // JIT is responsible for synchronization on funclet-based EH model that x86/Linux uses.
if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
{
assert(compiler->syncStartEmitCookie != NULL);
@@ -1332,6 +1334,7 @@ size_t GCInfo::gcInfoBlockHdrSave(
// synchronized methods can't have more than 1 epilog
assert(header->epilogCount <= 1);
}
+#endif
header->revPInvokeOffset = INVALID_REV_PINVOKE_OFFSET;
diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp
index c6f3e1058f..54427ba4dd 100644
--- a/src/jit/importer.cpp
+++ b/src/jit/importer.cpp
@@ -5671,7 +5671,11 @@ GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX i
/* Get the function pointer */
GenTreePtr fptr = impPopStack().val;
- assert(genActualType(fptr->gtType) == TYP_I_IMPL);
+
+ // The function pointer is typically a sized to match the target pointer size
+ // However, stubgen IL optimization can change LDC.I8 to LDC.I4
+ // See ILCodeStream::LowerOpcode
+ assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT);
#ifdef DEBUG
// This temporary must never be converted to a double in stress mode,
@@ -10109,7 +10113,11 @@ void Compiler::impImportBlockCode(BasicBlock* block)
const bool isSingleILStoreLocal =
!lvaTable[lclNum].lvHasMultipleILStoreOp && !lvaTable[lclNum].lvHasLdAddrOp;
- if (isSingleILStoreLocal)
+ // Conservative check that there is just one
+ // definition that reaches this store.
+ const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0);
+
+ if (isSingleILStoreLocal && hasSingleReachingDef)
{
lvaUpdateClass(lclNum, op1, clsHnd);
}
diff --git a/src/jit/jit.settings.targets b/src/jit/jit.settings.targets
index 8749b80242..bde639556b 100644
--- a/src/jit/jit.settings.targets
+++ b/src/jit/jit.settings.targets
@@ -120,6 +120,9 @@
<CppCompile Include="..\TargetArm.cpp" />
<CppCompile Condition="'$(ClDefines.Contains(`LEGACY_BACKEND`))'=='True'" Include="..\registerfp.cpp" />
<CppCompile Condition="'$(ClDefines.Contains(`LEGACY_BACKEND`))'=='False'" Include="..\DecomposeLongs.cpp" />
+ <CppCompile Condition="'$(ClDefines.Contains(`LEGACY_BACKEND`))'=='False'" Include="..\LowerArmArch.cpp" />
+ <CppCompile Condition="'$(ClDefines.Contains(`LEGACY_BACKEND`))'=='False'" Include="..\lsraarmarch.cpp" />
+ <CppCompile Condition="'$(ClDefines.Contains(`LEGACY_BACKEND`))'=='False'" Include="..\CodeGenArmArch.cpp" />
<CppCompile Condition="'$(ClDefines.Contains(`LEGACY_BACKEND`))'=='False'" Include="..\LowerArm.cpp" />
<CppCompile Condition="'$(ClDefines.Contains(`LEGACY_BACKEND`))'=='False'" Include="..\lsraarm.cpp" />
<CppCompile Condition="'$(ClDefines.Contains(`LEGACY_BACKEND`))'=='False'" Include="..\CodeGenArm.cpp" />
@@ -129,6 +132,9 @@
<!-- ARM64 target is always RyuJIT backend -->
<CppCompile Include="..\emitarm64.cpp" />
<CppCompile Include="..\TargetArm64.cpp" />
+ <CppCompile Include="..\LowerArmArch.cpp" />
+ <CppCompile Include="..\lsraarmarch.cpp" />
+ <CppCompile Include="..\CodeGenArmArch.cpp" />
<CppCompile Include="..\LowerArm64.cpp" />
<CppCompile Include="..\lsraarm64.cpp" />
<CppCompile Include="..\CodeGenArm64.cpp" />
diff --git a/src/jit/lowerarm.cpp b/src/jit/lowerarm.cpp
index 851ec45041..0701520b0a 100644
--- a/src/jit/lowerarm.cpp
+++ b/src/jit/lowerarm.cpp
@@ -31,273 +31,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "lsra.h"
//------------------------------------------------------------------------
-// LowerStoreLoc: Lower a store of a lclVar
-//
-// Arguments:
-// storeLoc - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR)
-//
-// Notes:
-// This involves:
-// - Widening operations of unsigneds.
-//
-void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
-{
- // Try to widen the ops if they are going into a local var.
- GenTree* op1 = storeLoc->gtGetOp1();
- if ((storeLoc->gtOper == GT_STORE_LCL_VAR) && (op1->gtOper == GT_CNS_INT))
- {
- GenTreeIntCon* con = op1->AsIntCon();
- ssize_t ival = con->gtIconVal;
- unsigned varNum = storeLoc->gtLclNum;
- LclVarDsc* varDsc = comp->lvaTable + varNum;
-
- if (varDsc->lvIsSIMDType())
- {
- noway_assert(storeLoc->gtType != TYP_STRUCT);
- }
- unsigned size = genTypeSize(storeLoc);
- // If we are storing a constant into a local variable
- // we extend the size of the store here
- if ((size < 4) && !varTypeIsStruct(varDsc))
- {
- if (!varTypeIsUnsigned(varDsc))
- {
- if (genTypeSize(storeLoc) == 1)
- {
- if ((ival & 0x7f) != ival)
- {
- ival = ival | 0xffffff00;
- }
- }
- else
- {
- assert(genTypeSize(storeLoc) == 2);
- if ((ival & 0x7fff) != ival)
- {
- ival = ival | 0xffff0000;
- }
- }
- }
-
- // A local stack slot is at least 4 bytes in size, regardless of
- // what the local var is typed as, so auto-promote it here
- // unless it is a field of a promoted struct
- // TODO-ARM-CQ: if the field is promoted shouldn't we also be able to do this?
- if (!varDsc->lvIsStructField)
- {
- storeLoc->gtType = TYP_INT;
- con->SetIconValue(ival);
- }
- }
- }
-}
-
-void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
-{
- GenTree* dstAddr = blkNode->Addr();
- unsigned size = blkNode->gtBlkSize;
- GenTree* source = blkNode->Data();
- Compiler* compiler = comp;
-
- // Sources are dest address and initVal or source.
- GenTreePtr srcAddrOrFill = nullptr;
- bool isInitBlk = blkNode->OperIsInitBlkOp();
-
- if (!isInitBlk)
- {
- // CopyObj or CopyBlk
- if ((blkNode->OperGet() == GT_STORE_OBJ) && ((blkNode->AsObj()->gtGcPtrCount == 0) || blkNode->gtBlkOpGcUnsafe))
- {
- blkNode->SetOper(GT_STORE_BLK);
- }
- if (source->gtOper == GT_IND)
- {
- srcAddrOrFill = blkNode->Data()->gtGetOp1();
- }
- }
-
- if (isInitBlk)
- {
- GenTreePtr initVal = source;
- if (initVal->OperIsInitVal())
- {
- initVal = initVal->gtGetOp1();
- }
- srcAddrOrFill = initVal;
-
-#if 0
- if ((size != 0) && (size <= INITBLK_UNROLL_LIMIT) && initVal->IsCnsIntOrI())
- {
- // TODO-ARM-CQ: Currently we generate a helper call for every
- // initblk we encounter. Later on we should implement loop unrolling
- // code sequences to improve CQ.
- // For reference see the code in LowerXArch.cpp.
- NYI_ARM("initblk loop unrolling is currently not implemented.");
- }
- else
-#endif // 0
- {
- blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindHelper;
- }
- }
- else
- {
- if (blkNode->OperGet() == GT_STORE_OBJ)
- {
- // CopyObj
-
- NYI_ARM("Lowering for GT_STORE_OBJ isn't implemented");
- }
- else
- {
- // CopyBlk
- short internalIntCount = 0;
- regMaskTP internalIntCandidates = RBM_NONE;
-
-#if 0
- // In case of a CpBlk with a constant size and less than CPBLK_UNROLL_LIMIT size
- // we should unroll the loop to improve CQ.
- // For reference see the code in lowerxarch.cpp.
-
- // TODO-ARM-CQ: cpblk loop unrolling is currently not implemented.
- if ((size != 0) && (size <= INITBLK_UNROLL_LIMIT))
- {
- blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
- }
- else
-#endif // 0
- {
- // In case we have a constant integer this means we went beyond
- // CPBLK_UNROLL_LIMIT bytes of size, still we should never have the case of
- // any GC-Pointers in the src struct.
- blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindHelper;
- }
- }
- }
-}
-
-//------------------------------------------------------------------------
-// LowerCast: Lower GT_CAST(srcType, DstType) nodes.
-//
-// Arguments:
-// tree - GT_CAST node to be lowered
-//
-// Return Value:
-// None.
-//
-// Notes:
-// Casts from small int type to float/double are transformed as follows:
-// GT_CAST(byte, float/double) = GT_CAST(GT_CAST(byte, int32), float/double)
-// GT_CAST(sbyte, float/double) = GT_CAST(GT_CAST(sbyte, int32), float/double)
-// GT_CAST(int16, float/double) = GT_CAST(GT_CAST(int16, int32), float/double)
-// GT_CAST(uint16, float/double) = GT_CAST(GT_CAST(uint16, int32), float/double)
-//
-// Similarly casts from float/double to a smaller int type are transformed as follows:
-// GT_CAST(float/double, byte) = GT_CAST(GT_CAST(float/double, int32), byte)
-// GT_CAST(float/double, sbyte) = GT_CAST(GT_CAST(float/double, int32), sbyte)
-// GT_CAST(float/double, int16) = GT_CAST(GT_CAST(double/double, int32), int16)
-// GT_CAST(float/double, uint16) = GT_CAST(GT_CAST(double/double, int32), uint16)
-//
-// Note that for the overflow conversions we still depend on helper calls and
-// don't expect to see them here.
-// i) GT_CAST(float/double, int type with overflow detection)
-//
-void Lowering::LowerCast(GenTree* tree)
-{
- assert(tree->OperGet() == GT_CAST);
-
- JITDUMP("LowerCast for: ");
- DISPNODE(tree);
- JITDUMP("\n");
-
- GenTreePtr op1 = tree->gtOp.gtOp1;
- var_types dstType = tree->CastToType();
- var_types srcType = op1->TypeGet();
- var_types tmpType = TYP_UNDEF;
-
- if (varTypeIsFloating(srcType))
- {
- noway_assert(!tree->gtOverflow());
- }
-
- // Case of src is a small type and dst is a floating point type.
- if (varTypeIsSmall(srcType) && varTypeIsFloating(dstType))
- {
- NYI_ARM("Lowering for cast from small type to float"); // Not tested yet.
- // These conversions can never be overflow detecting ones.
- noway_assert(!tree->gtOverflow());
- tmpType = TYP_INT;
- }
- // case of src is a floating point type and dst is a small type.
- else if (varTypeIsFloating(srcType) && varTypeIsSmall(dstType))
- {
- NYI_ARM("Lowering for cast from float to small type"); // Not tested yet.
- tmpType = TYP_INT;
- }
-
- if (tmpType != TYP_UNDEF)
- {
- GenTreePtr tmp = comp->gtNewCastNode(tmpType, op1, tmpType);
- tmp->gtFlags |= (tree->gtFlags & (GTF_UNSIGNED | GTF_OVERFLOW | GTF_EXCEPT));
-
- tree->gtFlags &= ~GTF_UNSIGNED;
- tree->gtOp.gtOp1 = tmp;
- BlockRange().InsertAfter(op1, tmp);
- }
-}
-
-//------------------------------------------------------------------------
-// LowerRotate: Lower GT_ROL and GT_ROL nodes.
-//
-// Arguments:
-// tree - the node to lower
-//
-// Return Value:
-// None.
-//
-void Lowering::LowerRotate(GenTreePtr tree)
-{
- if (tree->OperGet() == GT_ROL)
- {
- // There is no ROL instruction on ARM. Convert ROL into ROR.
- GenTreePtr rotatedValue = tree->gtOp.gtOp1;
- unsigned rotatedValueBitSize = genTypeSize(rotatedValue->gtType) * 8;
- GenTreePtr rotateLeftIndexNode = tree->gtOp.gtOp2;
-
- if (rotateLeftIndexNode->IsCnsIntOrI())
- {
- ssize_t rotateLeftIndex = rotateLeftIndexNode->gtIntCon.gtIconVal;
- ssize_t rotateRightIndex = rotatedValueBitSize - rotateLeftIndex;
- rotateLeftIndexNode->gtIntCon.gtIconVal = rotateRightIndex;
- }
- else
- {
- GenTreePtr tmp =
- comp->gtNewOperNode(GT_NEG, genActualType(rotateLeftIndexNode->gtType), rotateLeftIndexNode);
- BlockRange().InsertAfter(rotateLeftIndexNode, tmp);
- tree->gtOp.gtOp2 = tmp;
- }
- tree->ChangeOper(GT_ROR);
- }
-}
-
-//------------------------------------------------------------------------
-// LowerPutArgStk: Lower a GT_PUTARG_STK node
-//
-// Arguments:
-// argNode - a GT_PUTARG_STK node
-//
-// Return Value:
-// None.
-//
-// Notes:
-// There is currently no Lowering required for this on ARM.
-//
-void Lowering::LowerPutArgStk(GenTreePutArgStk* argNode, fgArgTabEntryPtr info)
-{
-}
-
-//------------------------------------------------------------------------
// IsCallTargetInRange: Can a call target address be encoded in-place?
//
// Return Value:
diff --git a/src/jit/lowerarm64.cpp b/src/jit/lowerarm64.cpp
index 7a564eff0b..b24ed8221c 100644
--- a/src/jit/lowerarm64.cpp
+++ b/src/jit/lowerarm64.cpp
@@ -29,290 +29,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "sideeffects.h"
#include "lower.h"
-//------------------------------------------------------------------------
-// LowerStoreLoc: Lower a store of a lclVar
-//
-// Arguments:
-// storeLoc - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR)
-//
-// Notes:
-// This involves:
-// - Widening operations of unsigneds.
-
-void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
-{
- // Try to widen the ops if they are going into a local var.
- GenTree* op1 = storeLoc->gtGetOp1();
- if ((storeLoc->gtOper == GT_STORE_LCL_VAR) && (op1->gtOper == GT_CNS_INT))
- {
- GenTreeIntCon* con = op1->AsIntCon();
- ssize_t ival = con->gtIconVal;
- unsigned varNum = storeLoc->gtLclNum;
- LclVarDsc* varDsc = comp->lvaTable + varNum;
-
- if (varDsc->lvIsSIMDType())
- {
- noway_assert(storeLoc->gtType != TYP_STRUCT);
- }
- unsigned size = genTypeSize(storeLoc);
- // If we are storing a constant into a local variable
- // we extend the size of the store here
- if ((size < 4) && !varTypeIsStruct(varDsc))
- {
- if (!varTypeIsUnsigned(varDsc))
- {
- if (genTypeSize(storeLoc) == 1)
- {
- if ((ival & 0x7f) != ival)
- {
- ival = ival | 0xffffff00;
- }
- }
- else
- {
- assert(genTypeSize(storeLoc) == 2);
- if ((ival & 0x7fff) != ival)
- {
- ival = ival | 0xffff0000;
- }
- }
- }
-
- // A local stack slot is at least 4 bytes in size, regardless of
- // what the local var is typed as, so auto-promote it here
- // unless it is a field of a promoted struct
- // TODO-ARM64-CQ: if the field is promoted shouldn't we also be able to do this?
- if (!varDsc->lvIsStructField)
- {
- storeLoc->gtType = TYP_INT;
- con->SetIconValue(ival);
- }
- }
- }
-}
-
-//------------------------------------------------------------------------
-// LowerBlockStore: Set block store type
-//
-// Arguments:
-// blkNode - The block store node of interest
-//
-// Return Value:
-// None.
-//
-
-void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
-{
- GenTree* dstAddr = blkNode->Addr();
- unsigned size = blkNode->gtBlkSize;
- GenTree* source = blkNode->Data();
- Compiler* compiler = comp;
-
- // Sources are dest address and initVal or source.
- GenTreePtr srcAddrOrFill = nullptr;
- bool isInitBlk = blkNode->OperIsInitBlkOp();
-
- if (!isInitBlk)
- {
- // CopyObj or CopyBlk
- if ((blkNode->OperGet() == GT_STORE_OBJ) && ((blkNode->AsObj()->gtGcPtrCount == 0) || blkNode->gtBlkOpGcUnsafe))
- {
- blkNode->SetOper(GT_STORE_BLK);
- }
- if (source->gtOper == GT_IND)
- {
- srcAddrOrFill = blkNode->Data()->gtGetOp1();
- }
- }
-
- if (isInitBlk)
- {
- GenTreePtr initVal = source;
- if (initVal->OperIsInitVal())
- {
- initVal = initVal->gtGetOp1();
- }
- srcAddrOrFill = initVal;
-
- if ((size != 0) && (size <= INITBLK_UNROLL_LIMIT) && initVal->IsCnsIntOrI())
- {
- // The fill value of an initblk is interpreted to hold a
- // value of (unsigned int8) however a constant of any size
- // may practically reside on the evaluation stack. So extract
- // the lower byte out of the initVal constant and replicate
- // it to a larger constant whose size is sufficient to support
- // the largest width store of the desired inline expansion.
-
- ssize_t fill = initVal->gtIntCon.gtIconVal & 0xFF;
- if (size < REGSIZE_BYTES)
- {
- initVal->gtIntCon.gtIconVal = 0x01010101 * fill;
- }
- else
- {
- initVal->gtIntCon.gtIconVal = 0x0101010101010101LL * fill;
- initVal->gtType = TYP_LONG;
- }
- blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
- }
- else
- {
- blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindHelper;
- }
- }
- else
- {
- // CopyObj or CopyBlk
- // Sources are src and dest and size if not constant.
-
- if (blkNode->OperGet() == GT_STORE_OBJ)
- {
- // CopyObj
-
- GenTreeObj* objNode = blkNode->AsObj();
-
- unsigned slots = objNode->gtSlots;
-
-#ifdef DEBUG
- // CpObj must always have at least one GC-Pointer as a member.
- assert(objNode->gtGcPtrCount > 0);
-
- assert(dstAddr->gtType == TYP_BYREF || dstAddr->gtType == TYP_I_IMPL);
-
- CORINFO_CLASS_HANDLE clsHnd = objNode->gtClass;
- size_t classSize = compiler->info.compCompHnd->getClassSize(clsHnd);
- size_t blkSize = roundUp(classSize, TARGET_POINTER_SIZE);
-
- // Currently, the EE always round up a class data structure so
- // we are not handling the case where we have a non multiple of pointer sized
- // struct. This behavior may change in the future so in order to keeps things correct
- // let's assert it just to be safe. Going forward we should simply
- // handle this case.
- assert(classSize == blkSize);
- assert((blkSize / TARGET_POINTER_SIZE) == slots);
- assert(objNode->HasGCPtr());
-#endif
-
- blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
- }
- else
- {
- // CopyBlk
- short internalIntCount = 0;
- regMaskTP internalIntCandidates = RBM_NONE;
-
- if ((size != 0) && (size <= INITBLK_UNROLL_LIMIT))
- {
- blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
- }
- else
- {
- // In case we have a constant integer this means we went beyond
- // CPBLK_UNROLL_LIMIT bytes of size, still we should never have the case of
- // any GC-Pointers in the src struct.
- blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindHelper;
- }
- }
- }
-}
-
-/* Lower GT_CAST(srcType, DstType) nodes.
- *
- * Casts from small int type to float/double are transformed as follows:
- * GT_CAST(byte, float/double) = GT_CAST(GT_CAST(byte, int32), float/double)
- * GT_CAST(sbyte, float/double) = GT_CAST(GT_CAST(sbyte, int32), float/double)
- * GT_CAST(int16, float/double) = GT_CAST(GT_CAST(int16, int32), float/double)
- * GT_CAST(uint16, float/double) = GT_CAST(GT_CAST(uint16, int32), float/double)
- *
- * SSE2 conversion instructions operate on signed integers. casts from Uint32/Uint64
- * are morphed as follows by front-end and hence should not be seen here.
- * GT_CAST(uint32, float/double) = GT_CAST(GT_CAST(uint32, long), float/double)
- * GT_CAST(uint64, float) = GT_CAST(GT_CAST(uint64, double), float)
- *
- *
- * Similarly casts from float/double to a smaller int type are transformed as follows:
- * GT_CAST(float/double, byte) = GT_CAST(GT_CAST(float/double, int32), byte)
- * GT_CAST(float/double, sbyte) = GT_CAST(GT_CAST(float/double, int32), sbyte)
- * GT_CAST(float/double, int16) = GT_CAST(GT_CAST(double/double, int32), int16)
- * GT_CAST(float/double, uint16) = GT_CAST(GT_CAST(double/double, int32), uint16)
- *
- * SSE2 has instructions to convert a float/double vlaue into a signed 32/64-bit
- * integer. The above transformations help us to leverage those instructions.
- *
- * Note that for the overflow conversions we still depend on helper calls and
- * don't expect to see them here.
- * i) GT_CAST(float/double, int type with overflow detection)
- *
- */
-void Lowering::LowerCast(GenTree* tree)
-{
- assert(tree->OperGet() == GT_CAST);
-
- GenTreePtr op1 = tree->gtOp.gtOp1;
- var_types dstType = tree->CastToType();
- var_types srcType = op1->TypeGet();
- var_types tmpType = TYP_UNDEF;
-
- // We should never see the following casts as they are expected to be lowered
- // apropriately or converted into helper calls by front-end.
- // srcType = float/double dstType = * and overflow detecting cast
- // Reason: must be converted to a helper call
- //
- if (varTypeIsFloating(srcType))
- {
- noway_assert(!tree->gtOverflow());
- }
-
- // Case of src is a small type and dst is a floating point type.
- if (varTypeIsSmall(srcType) && varTypeIsFloating(dstType))
- {
- // These conversions can never be overflow detecting ones.
- noway_assert(!tree->gtOverflow());
- tmpType = TYP_INT;
- }
- // case of src is a floating point type and dst is a small type.
- else if (varTypeIsFloating(srcType) && varTypeIsSmall(dstType))
- {
- tmpType = TYP_INT;
- }
-
- if (tmpType != TYP_UNDEF)
- {
- GenTreePtr tmp = comp->gtNewCastNode(tmpType, op1, tmpType);
- tmp->gtFlags |= (tree->gtFlags & (GTF_UNSIGNED | GTF_OVERFLOW | GTF_EXCEPT));
-
- tree->gtFlags &= ~GTF_UNSIGNED;
- tree->gtOp.gtOp1 = tmp;
- BlockRange().InsertAfter(op1, tmp);
- }
-}
-
-void Lowering::LowerRotate(GenTreePtr tree)
-{
- if (tree->OperGet() == GT_ROL)
- {
- // There is no ROL instruction on ARM. Convert ROL into ROR.
- GenTreePtr rotatedValue = tree->gtOp.gtOp1;
- unsigned rotatedValueBitSize = genTypeSize(rotatedValue->gtType) * 8;
- GenTreePtr rotateLeftIndexNode = tree->gtOp.gtOp2;
-
- if (rotateLeftIndexNode->IsCnsIntOrI())
- {
- ssize_t rotateLeftIndex = rotateLeftIndexNode->gtIntCon.gtIconVal;
- ssize_t rotateRightIndex = rotatedValueBitSize - rotateLeftIndex;
- rotateLeftIndexNode->gtIntCon.gtIconVal = rotateRightIndex;
- }
- else
- {
- GenTreePtr tmp =
- comp->gtNewOperNode(GT_NEG, genActualType(rotateLeftIndexNode->gtType), rotateLeftIndexNode);
- BlockRange().InsertAfter(rotateLeftIndexNode, tmp);
- tree->gtOp.gtOp2 = tmp;
- }
- tree->ChangeOper(GT_ROR);
- }
-}
-
// returns true if the tree can use the read-modify-write memory instruction form
bool Lowering::isRMWRegOper(GenTreePtr tree)
{
diff --git a/src/jit/lowerarmarch.cpp b/src/jit/lowerarmarch.cpp
new file mode 100644
index 0000000000..4ff3552eb0
--- /dev/null
+++ b/src/jit/lowerarmarch.cpp
@@ -0,0 +1,346 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+XX XX
+XX Lowering for ARM and ARM64 common code XX
+XX XX
+XX This encapsulates common logic for lowering trees for the ARM and ARM64 XX
+XX architectures. For a more detailed view of what is lowering, please XX
+XX take a look at Lower.cpp XX
+XX XX
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+*/
+
+#include "jitpch.h"
+#ifdef _MSC_VER
+#pragma hdrstop
+#endif
+
+#ifndef LEGACY_BACKEND // This file is ONLY used for the RyuJIT backend that uses the linear scan register allocator
+
+#ifdef _TARGET_ARMARCH_ // This file is ONLY used for ARM and ARM64 architectures
+
+#include "jit.h"
+#include "sideeffects.h"
+#include "lower.h"
+#include "lsra.h"
+
+//------------------------------------------------------------------------
+// LowerStoreLoc: Lower a store of a lclVar
+//
+// Arguments:
+// storeLoc - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR)
+//
+// Notes:
+// This involves:
+// - Widening operations of unsigneds.
+//
+void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
+{
+ // Try to widen the ops if they are going into a local var.
+ GenTree* op1 = storeLoc->gtGetOp1();
+ if ((storeLoc->gtOper == GT_STORE_LCL_VAR) && (op1->gtOper == GT_CNS_INT))
+ {
+ GenTreeIntCon* con = op1->AsIntCon();
+ ssize_t ival = con->gtIconVal;
+ unsigned varNum = storeLoc->gtLclNum;
+ LclVarDsc* varDsc = comp->lvaTable + varNum;
+
+ if (varDsc->lvIsSIMDType())
+ {
+ noway_assert(storeLoc->gtType != TYP_STRUCT);
+ }
+ unsigned size = genTypeSize(storeLoc);
+ // If we are storing a constant into a local variable
+ // we extend the size of the store here
+ if ((size < 4) && !varTypeIsStruct(varDsc))
+ {
+ if (!varTypeIsUnsigned(varDsc))
+ {
+ if (genTypeSize(storeLoc) == 1)
+ {
+ if ((ival & 0x7f) != ival)
+ {
+ ival = ival | 0xffffff00;
+ }
+ }
+ else
+ {
+ assert(genTypeSize(storeLoc) == 2);
+ if ((ival & 0x7fff) != ival)
+ {
+ ival = ival | 0xffff0000;
+ }
+ }
+ }
+
+ // A local stack slot is at least 4 bytes in size, regardless of
+ // what the local var is typed as, so auto-promote it here
+ // unless it is a field of a promoted struct
+ // TODO-CQ: if the field is promoted shouldn't we also be able to do this?
+ if (!varDsc->lvIsStructField)
+ {
+ storeLoc->gtType = TYP_INT;
+ con->SetIconValue(ival);
+ }
+ }
+ }
+}
+
+//------------------------------------------------------------------------
+// LowerBlockStore: Set block store type
+//
+// Arguments:
+// blkNode - The block store node of interest
+//
+// Return Value:
+// None.
+//
+void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
+{
+ GenTree* dstAddr = blkNode->Addr();
+ unsigned size = blkNode->gtBlkSize;
+ GenTree* source = blkNode->Data();
+ Compiler* compiler = comp;
+
+ // Sources are dest address and initVal or source.
+ GenTreePtr srcAddrOrFill = nullptr;
+ bool isInitBlk = blkNode->OperIsInitBlkOp();
+
+ if (!isInitBlk)
+ {
+ // CopyObj or CopyBlk
+ if ((blkNode->OperGet() == GT_STORE_OBJ) && ((blkNode->AsObj()->gtGcPtrCount == 0) || blkNode->gtBlkOpGcUnsafe))
+ {
+ blkNode->SetOper(GT_STORE_BLK);
+ }
+ if (source->gtOper == GT_IND)
+ {
+ srcAddrOrFill = blkNode->Data()->gtGetOp1();
+ }
+ }
+
+ if (isInitBlk)
+ {
+ GenTreePtr initVal = source;
+ if (initVal->OperIsInitVal())
+ {
+ initVal = initVal->gtGetOp1();
+ }
+ srcAddrOrFill = initVal;
+
+#ifdef _TARGET_ARM64_
+ if ((size != 0) && (size <= INITBLK_UNROLL_LIMIT) && initVal->IsCnsIntOrI())
+ {
+ // TODO-ARM-CQ: Currently we generate a helper call for every
+ // initblk we encounter. Later on we should implement loop unrolling
+ // code sequences to improve CQ.
+ // For reference see the code in LowerXArch.cpp.
+ NYI_ARM("initblk loop unrolling is currently not implemented.");
+
+ // The fill value of an initblk is interpreted to hold a
+ // value of (unsigned int8) however a constant of any size
+ // may practically reside on the evaluation stack. So extract
+ // the lower byte out of the initVal constant and replicate
+ // it to a larger constant whose size is sufficient to support
+ // the largest width store of the desired inline expansion.
+
+ ssize_t fill = initVal->gtIntCon.gtIconVal & 0xFF;
+ if (size < REGSIZE_BYTES)
+ {
+ initVal->gtIntCon.gtIconVal = 0x01010101 * fill;
+ }
+ else
+ {
+ initVal->gtIntCon.gtIconVal = 0x0101010101010101LL * fill;
+ initVal->gtType = TYP_LONG;
+ }
+ blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
+ }
+ else
+#endif // _TARGET_ARM64_
+ {
+ blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindHelper;
+ }
+ }
+ else
+ {
+ // CopyObj or CopyBlk
+ // Sources are src and dest and size if not constant.
+
+ if (blkNode->OperGet() == GT_STORE_OBJ)
+ {
+ // CopyObj
+
+ NYI_ARM("Lowering for GT_STORE_OBJ isn't implemented");
+
+#ifdef _TARGET_ARM64_
+
+ GenTreeObj* objNode = blkNode->AsObj();
+
+ unsigned slots = objNode->gtSlots;
+
+#ifdef DEBUG
+ // CpObj must always have at least one GC-Pointer as a member.
+ assert(objNode->gtGcPtrCount > 0);
+
+ assert(dstAddr->gtType == TYP_BYREF || dstAddr->gtType == TYP_I_IMPL);
+
+ CORINFO_CLASS_HANDLE clsHnd = objNode->gtClass;
+ size_t classSize = compiler->info.compCompHnd->getClassSize(clsHnd);
+ size_t blkSize = roundUp(classSize, TARGET_POINTER_SIZE);
+
+ // Currently, the EE always round up a class data structure so
+ // we are not handling the case where we have a non multiple of pointer sized
+ // struct. This behavior may change in the future so in order to keeps things correct
+ // let's assert it just to be safe. Going forward we should simply
+ // handle this case.
+ assert(classSize == blkSize);
+ assert((blkSize / TARGET_POINTER_SIZE) == slots);
+ assert(objNode->HasGCPtr());
+#endif
+
+ blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
+
+#endif // _TARGET_ARM64_
+ }
+ else
+ {
+ // CopyBlk
+ short internalIntCount = 0;
+ regMaskTP internalIntCandidates = RBM_NONE;
+
+#ifdef _TARGET_ARM64_
+ // In case of a CpBlk with a constant size and less than CPBLK_UNROLL_LIMIT size
+ // we should unroll the loop to improve CQ.
+ // For reference see the code in lowerxarch.cpp.
+ // TODO-ARM-CQ: cpblk loop unrolling is currently not implemented.
+
+ if ((size != 0) && (size <= INITBLK_UNROLL_LIMIT))
+ {
+ blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
+ }
+ else
+#endif // _TARGET_ARM64_
+ {
+ // In case we have a constant integer this means we went beyond
+ // CPBLK_UNROLL_LIMIT bytes of size, still we should never have the case of
+ // any GC-Pointers in the src struct.
+ blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindHelper;
+ }
+ }
+ }
+}
+
+//------------------------------------------------------------------------
+// LowerCast: Lower GT_CAST(srcType, DstType) nodes.
+//
+// Arguments:
+// tree - GT_CAST node to be lowered
+//
+// Return Value:
+// None.
+//
+// Notes:
+// Casts from small int type to float/double are transformed as follows:
+// GT_CAST(byte, float/double) = GT_CAST(GT_CAST(byte, int32), float/double)
+// GT_CAST(sbyte, float/double) = GT_CAST(GT_CAST(sbyte, int32), float/double)
+// GT_CAST(int16, float/double) = GT_CAST(GT_CAST(int16, int32), float/double)
+// GT_CAST(uint16, float/double) = GT_CAST(GT_CAST(uint16, int32), float/double)
+//
+// Similarly casts from float/double to a smaller int type are transformed as follows:
+// GT_CAST(float/double, byte) = GT_CAST(GT_CAST(float/double, int32), byte)
+// GT_CAST(float/double, sbyte) = GT_CAST(GT_CAST(float/double, int32), sbyte)
+// GT_CAST(float/double, int16) = GT_CAST(GT_CAST(double/double, int32), int16)
+// GT_CAST(float/double, uint16) = GT_CAST(GT_CAST(double/double, int32), uint16)
+//
+// Note that for the overflow conversions we still depend on helper calls and
+// don't expect to see them here.
+// i) GT_CAST(float/double, int type with overflow detection)
+//
+void Lowering::LowerCast(GenTree* tree)
+{
+ assert(tree->OperGet() == GT_CAST);
+
+ JITDUMP("LowerCast for: ");
+ DISPNODE(tree);
+ JITDUMP("\n");
+
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ var_types dstType = tree->CastToType();
+ var_types srcType = op1->TypeGet();
+ var_types tmpType = TYP_UNDEF;
+
+ if (varTypeIsFloating(srcType))
+ {
+ noway_assert(!tree->gtOverflow());
+ }
+
+ // Case of src is a small type and dst is a floating point type.
+ if (varTypeIsSmall(srcType) && varTypeIsFloating(dstType))
+ {
+ NYI_ARM("Lowering for cast from small type to float"); // Not tested yet.
+ // These conversions can never be overflow detecting ones.
+ noway_assert(!tree->gtOverflow());
+ tmpType = TYP_INT;
+ }
+ // case of src is a floating point type and dst is a small type.
+ else if (varTypeIsFloating(srcType) && varTypeIsSmall(dstType))
+ {
+ NYI_ARM("Lowering for cast from float to small type"); // Not tested yet.
+ tmpType = TYP_INT;
+ }
+
+ if (tmpType != TYP_UNDEF)
+ {
+ GenTreePtr tmp = comp->gtNewCastNode(tmpType, op1, tmpType);
+ tmp->gtFlags |= (tree->gtFlags & (GTF_UNSIGNED | GTF_OVERFLOW | GTF_EXCEPT));
+
+ tree->gtFlags &= ~GTF_UNSIGNED;
+ tree->gtOp.gtOp1 = tmp;
+ BlockRange().InsertAfter(op1, tmp);
+ }
+}
+
+//------------------------------------------------------------------------
+// LowerRotate: Lower GT_ROL and GT_ROL nodes.
+//
+// Arguments:
+// tree - the node to lower
+//
+// Return Value:
+// None.
+//
+void Lowering::LowerRotate(GenTreePtr tree)
+{
+ if (tree->OperGet() == GT_ROL)
+ {
+ // There is no ROL instruction on ARM. Convert ROL into ROR.
+ GenTreePtr rotatedValue = tree->gtOp.gtOp1;
+ unsigned rotatedValueBitSize = genTypeSize(rotatedValue->gtType) * 8;
+ GenTreePtr rotateLeftIndexNode = tree->gtOp.gtOp2;
+
+ if (rotateLeftIndexNode->IsCnsIntOrI())
+ {
+ ssize_t rotateLeftIndex = rotateLeftIndexNode->gtIntCon.gtIconVal;
+ ssize_t rotateRightIndex = rotatedValueBitSize - rotateLeftIndex;
+ rotateLeftIndexNode->gtIntCon.gtIconVal = rotateRightIndex;
+ }
+ else
+ {
+ GenTreePtr tmp =
+ comp->gtNewOperNode(GT_NEG, genActualType(rotateLeftIndexNode->gtType), rotateLeftIndexNode);
+ BlockRange().InsertAfter(rotateLeftIndexNode, tmp);
+ tree->gtOp.gtOp2 = tmp;
+ }
+ tree->ChangeOper(GT_ROR);
+ }
+}
+
+#endif // _TARGET_ARMARCH_
+
+#endif // !LEGACY_BACKEND
diff --git a/src/jit/lsraarm.cpp b/src/jit/lsraarm.cpp
index 912fd36c19..e35e57908a 100644
--- a/src/jit/lsraarm.cpp
+++ b/src/jit/lsraarm.cpp
@@ -30,270 +30,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "lsra.h"
//------------------------------------------------------------------------
-// TreeNodeInfoInitStoreLoc: Lower a store of a lclVar
-//
-// Arguments:
-// storeLoc - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR)
-//
-// Notes:
-// This involves:
-// - Setting the appropriate candidates for a store of a multi-reg call return value.
-// - Handling of contained immediates and widening operations of unsigneds.
-//
-void Lowering::TreeNodeInfoInitStoreLoc(GenTreeLclVarCommon* storeLoc)
-{
- TreeNodeInfo* info = &(storeLoc->gtLsraInfo);
-
- // Is this the case of var = call where call is returning
- // a value in multiple return registers?
- GenTree* op1 = storeLoc->gtGetOp1();
- if (op1->IsMultiRegCall())
- {
- // backend expects to see this case only for store lclvar.
- assert(storeLoc->OperGet() == GT_STORE_LCL_VAR);
-
- // srcCount = number of registers in which the value is returned by call
- GenTreeCall* call = op1->AsCall();
- ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
- info->srcCount = retTypeDesc->GetReturnRegCount();
-
- // Call node srcCandidates = Bitwise-OR(allregs(GetReturnRegType(i))) for all i=0..RetRegCount-1
- regMaskTP srcCandidates = m_lsra->allMultiRegCallNodeRegs(call);
- op1->gtLsraInfo.setSrcCandidates(m_lsra, srcCandidates);
- return;
- }
-
- CheckImmedAndMakeContained(storeLoc, op1);
-}
-
-//------------------------------------------------------------------------
-// TreeNodeInfoInitCmp: Lower a GT comparison node.
-//
-// Arguments:
-// tree - the node to lower
-//
-// Return Value:
-// None.
-//
-void Lowering::TreeNodeInfoInitCmp(GenTreePtr tree)
-{
- TreeNodeInfo* info = &(tree->gtLsraInfo);
-
- info->srcCount = 2;
- info->dstCount = 1;
-
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtOp.gtOp2;
- var_types op1Type = op1->TypeGet();
- var_types op2Type = op2->TypeGet();
-
- // Long compares will consume GT_LONG nodes, each of which produces two results.
- // Thus for each long operand there will be an additional source.
- // TODO-ARM-CQ: Mark hiOp2 and loOp2 as contained if it is a constant.
- if (varTypeIsLong(op1Type))
- {
- info->srcCount++;
- }
- if (varTypeIsLong(op2Type))
- {
- info->srcCount++;
- }
-
- CheckImmedAndMakeContained(tree, tree->gtOp.gtOp2);
-}
-
-//------------------------------------------------------------------------
-// TreeNodeInfoInitGCWriteBarrier: GC lowering helper.
-//
-// Arguments:
-// tree - the node to lower
-//
-// Return Value:
-// None.
-//
-void Lowering::TreeNodeInfoInitGCWriteBarrier(GenTree* tree)
-{
- GenTreePtr dst = tree;
- GenTreePtr addr = tree->gtOp.gtOp1;
- GenTreePtr src = tree->gtOp.gtOp2;
-
- if (addr->OperGet() == GT_LEA)
- {
- // In the case where we are doing a helper assignment, if the dst
- // is an indir through an lea, we need to actually instantiate the
- // lea in a register
- GenTreeAddrMode* lea = addr->AsAddrMode();
-
- short leaSrcCount = 0;
- if (lea->Base() != nullptr)
- {
- leaSrcCount++;
- }
- if (lea->Index() != nullptr)
- {
- leaSrcCount++;
- }
- lea->gtLsraInfo.srcCount = leaSrcCount;
- lea->gtLsraInfo.dstCount = 1;
- }
-
-#if NOGC_WRITE_BARRIERS
- NYI_ARM("NOGC_WRITE_BARRIERS");
-#else
- // For the standard JIT Helper calls
- // op1 goes into REG_ARG_0 and
- // op2 goes into REG_ARG_1
- //
- addr->gtLsraInfo.setSrcCandidates(m_lsra, RBM_ARG_0);
- src->gtLsraInfo.setSrcCandidates(m_lsra, RBM_ARG_1);
-#endif // NOGC_WRITE_BARRIERS
-
- // Both src and dst must reside in a register, which they should since we haven't set
- // either of them as contained.
- assert(addr->gtLsraInfo.dstCount == 1);
- assert(src->gtLsraInfo.dstCount == 1);
-}
-
-//------------------------------------------------------------------------
-// TreeNodeInfoInitIndir: Specify register requirements for address expression
-// of an indirection operation.
-//
-// Arguments:
-// indirTree - GT_IND, GT_STOREIND, block node or GT_NULLCHECK gentree node
-//
-void Lowering::TreeNodeInfoInitIndir(GenTreePtr indirTree)
-{
- assert(indirTree->OperIsIndir());
- // If this is the rhs of a block copy (i.e. non-enregisterable struct),
- // it has no register requirements.
- if (indirTree->TypeGet() == TYP_STRUCT)
- {
- return;
- }
-
- GenTreePtr addr = indirTree->gtGetOp1();
- TreeNodeInfo* info = &(indirTree->gtLsraInfo);
-
- GenTreePtr base = nullptr;
- GenTreePtr index = nullptr;
- unsigned cns = 0;
- unsigned mul;
- bool rev;
- bool modifiedSources = false;
-
- if ((addr->OperGet() == GT_LEA) && IsSafeToContainMem(indirTree, addr))
- {
- GenTreeAddrMode* lea = addr->AsAddrMode();
- base = lea->Base();
- index = lea->Index();
- cns = lea->gtOffset;
-
- m_lsra->clearOperandCounts(addr);
- // The srcCount is decremented because addr is now "contained",
- // then we account for the base and index below, if they are non-null.
- info->srcCount--;
- }
- else if (comp->codeGen->genCreateAddrMode(addr, -1, true, 0, &rev, &base, &index, &mul, &cns, true /*nogen*/) &&
- !(modifiedSources = AreSourcesPossiblyModifiedLocals(indirTree, base, index)))
- {
- // An addressing mode will be constructed that may cause some
- // nodes to not need a register, and cause others' lifetimes to be extended
- // to the GT_IND or even its parent if it's an assignment
-
- assert(base != addr);
- m_lsra->clearOperandCounts(addr);
-
- GenTreePtr arrLength = nullptr;
-
- // Traverse the computation below GT_IND to find the operands
- // for the addressing mode, marking the various constants and
- // intermediate results as not consuming/producing.
- // If the traversal were more complex, we might consider using
- // a traversal function, but the addressing mode is only made
- // up of simple arithmetic operators, and the code generator
- // only traverses one leg of each node.
-
- bool foundBase = (base == nullptr);
- bool foundIndex = (index == nullptr);
- GenTreePtr nextChild = nullptr;
- for (GenTreePtr child = addr; child != nullptr && !child->OperIsLeaf(); child = nextChild)
- {
- nextChild = nullptr;
- GenTreePtr op1 = child->gtOp.gtOp1;
- GenTreePtr op2 = (child->OperIsBinary()) ? child->gtOp.gtOp2 : nullptr;
-
- if (op1 == base)
- {
- foundBase = true;
- }
- else if (op1 == index)
- {
- foundIndex = true;
- }
- else
- {
- m_lsra->clearOperandCounts(op1);
- if (!op1->OperIsLeaf())
- {
- nextChild = op1;
- }
- }
-
- if (op2 != nullptr)
- {
- if (op2 == base)
- {
- foundBase = true;
- }
- else if (op2 == index)
- {
- foundIndex = true;
- }
- else
- {
- m_lsra->clearOperandCounts(op2);
- if (!op2->OperIsLeaf())
- {
- assert(nextChild == nullptr);
- nextChild = op2;
- }
- }
- }
- }
- assert(foundBase && foundIndex);
- info->srcCount--; // it gets incremented below.
- }
- else if (addr->gtOper == GT_ARR_ELEM)
- {
- // The GT_ARR_ELEM consumes all the indices and produces the offset.
- // The array object lives until the mem access.
- // We also consume the target register to which the address is
- // computed
-
- info->srcCount++;
- assert(addr->gtLsraInfo.srcCount >= 2);
- addr->gtLsraInfo.srcCount -= 1;
- }
- else
- {
- // it is nothing but a plain indir
- info->srcCount--; // base gets added in below
- base = addr;
- }
-
- if (base != nullptr)
- {
- info->srcCount++;
- }
-
- if (index != nullptr && !modifiedSources)
- {
- info->srcCount++;
- info->internalIntCount++;
- }
-}
-
-//------------------------------------------------------------------------
// TreeNodeInfoInitReturn: Set the NodeInfo for a GT_RETURN.
//
// Arguments:
@@ -381,353 +117,6 @@ void Lowering::TreeNodeInfoInitReturn(GenTree* tree)
}
}
-//------------------------------------------------------------------------
-// TreeNodeInfoInitShiftRotate: Set the NodeInfo for a shift or rotate.
-//
-// Arguments:
-// tree - The node of interest
-//
-// Return Value:
-// None.
-//
-void Lowering::TreeNodeInfoInitShiftRotate(GenTree* tree)
-{
- TreeNodeInfo* info = &(tree->gtLsraInfo);
- LinearScan* l = m_lsra;
-
- info->srcCount = 2;
- info->dstCount = 1;
-
- GenTreePtr shiftBy = tree->gtOp.gtOp2;
- GenTreePtr source = tree->gtOp.gtOp1;
- if (shiftBy->IsCnsIntOrI())
- {
- l->clearDstCount(shiftBy);
- info->srcCount--;
- }
-
- // The first operand of a GT_LSH_HI and GT_RSH_LO oper is a GT_LONG so that
- // we can have a three operand form. Increment the srcCount.
- if (tree->OperGet() == GT_LSH_HI || tree->OperGet() == GT_RSH_LO)
- {
- assert(source->OperGet() == GT_LONG);
-
- info->srcCount++;
-
- if (tree->OperGet() == GT_LSH_HI)
- {
- GenTreePtr sourceLo = source->gtOp.gtOp1;
- sourceLo->gtLsraInfo.isDelayFree = true;
- }
- else
- {
- GenTreePtr sourceHi = source->gtOp.gtOp2;
- sourceHi->gtLsraInfo.isDelayFree = true;
- }
-
- source->gtLsraInfo.hasDelayFreeSrc = true;
- info->hasDelayFreeSrc = true;
- }
-}
-
-//------------------------------------------------------------------------
-// TreeNodeInfoInitPutArgReg: Set the NodeInfo for a PUTARG_REG.
-//
-// Arguments:
-// node - The PUTARG_REG node.
-// argReg - The register in which to pass the argument.
-// info - The info for the node's using call.
-// isVarArgs - True if the call uses a varargs calling convention.
-// callHasFloatRegArgs - Set to true if this PUTARG_REG uses an FP register.
-//
-// Return Value:
-// None.
-//
-void Lowering::TreeNodeInfoInitPutArgReg(
- GenTreeUnOp* node, regNumber argReg, TreeNodeInfo& info, bool isVarArgs, bool* callHasFloatRegArgs)
-{
- assert(node != nullptr);
- assert(node->OperIsPutArgReg());
- assert(argReg != REG_NA);
-
- // Each register argument corresponds to one source.
- info.srcCount++;
-
- // Set the register requirements for the node.
- const regMaskTP argMask = genRegMask(argReg);
- node->gtLsraInfo.setDstCandidates(m_lsra, argMask);
- node->gtLsraInfo.setSrcCandidates(m_lsra, argMask);
-
- // To avoid redundant moves, have the argument operand computed in the
- // register in which the argument is passed to the call.
- node->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(m_lsra, m_lsra->getUseCandidates(node));
-
- *callHasFloatRegArgs |= varTypeIsFloating(node->TypeGet());
-}
-
-//------------------------------------------------------------------------
-// TreeNodeInfoInitCall: Set the NodeInfo for a call.
-//
-// Arguments:
-// call - The call node of interest
-//
-// Return Value:
-// None.
-//
-void Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
-{
- TreeNodeInfo* info = &(call->gtLsraInfo);
- LinearScan* l = m_lsra;
- Compiler* compiler = comp;
- bool hasMultiRegRetVal = false;
- ReturnTypeDesc* retTypeDesc = nullptr;
-
- info->srcCount = 0;
- if (call->TypeGet() != TYP_VOID)
- {
- hasMultiRegRetVal = call->HasMultiRegRetVal();
- if (hasMultiRegRetVal)
- {
- // dst count = number of registers in which the value is returned by call
- retTypeDesc = call->GetReturnTypeDesc();
- info->dstCount = retTypeDesc->GetReturnRegCount();
- }
- else
- {
- info->dstCount = 1;
- }
- }
- else
- {
- info->dstCount = 0;
- }
-
- GenTree* ctrlExpr = call->gtControlExpr;
- if (call->gtCallType == CT_INDIRECT)
- {
- // either gtControlExpr != null or gtCallAddr != null.
- // Both cannot be non-null at the same time.
- assert(ctrlExpr == nullptr);
- assert(call->gtCallAddr != nullptr);
- ctrlExpr = call->gtCallAddr;
- }
-
- // set reg requirements on call target represented as control sequence.
- if (ctrlExpr != nullptr)
- {
- // we should never see a gtControlExpr whose type is void.
- assert(ctrlExpr->TypeGet() != TYP_VOID);
-
- info->srcCount++;
- // In case of fast tail implemented as jmp, make sure that gtControlExpr is
- // computed into a register.
- if (call->IsFastTailCall())
- {
- NYI_ARM("tail call");
- }
- }
- else
- {
- info->internalIntCount = 1;
- }
-
- RegisterType registerType = call->TypeGet();
-
- // Set destination candidates for return value of the call.
- if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME))
- {
- // The ARM CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with
- // TCB in REG_PINVOKE_TCB. fgMorphCall() sets the correct argument registers.
- info->setDstCandidates(l, RBM_PINVOKE_TCB);
- }
- else if (hasMultiRegRetVal)
- {
- assert(retTypeDesc != nullptr);
- info->setDstCandidates(l, retTypeDesc->GetABIReturnRegs());
- }
- else if (varTypeIsFloating(registerType))
- {
- info->setDstCandidates(l, RBM_FLOATRET);
- }
- else if (registerType == TYP_LONG)
- {
- info->setDstCandidates(l, RBM_LNGRET);
- }
- else
- {
- info->setDstCandidates(l, RBM_INTRET);
- }
-
- // If there is an explicit this pointer, we don't want that node to produce anything
- // as it is redundant
- if (call->gtCallObjp != nullptr)
- {
- GenTreePtr thisPtrNode = call->gtCallObjp;
-
- if (thisPtrNode->gtOper == GT_PUTARG_REG)
- {
- l->clearOperandCounts(thisPtrNode);
- l->clearDstCount(thisPtrNode->gtOp.gtOp1);
- }
- else
- {
- l->clearDstCount(thisPtrNode);
- }
- }
-
- // First, count reg args
- bool callHasFloatRegArgs = false;
-
- for (GenTreePtr list = call->gtCallLateArgs; list; list = list->MoveNext())
- {
- assert(list->OperIsList());
-
- GenTreePtr argNode = list->Current();
-
- fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, argNode);
- assert(curArgTabEntry);
-
- if (curArgTabEntry->regNum == REG_STK)
- {
- // late arg that is not passed in a register
- assert(argNode->gtOper == GT_PUTARG_STK);
-
- TreeNodeInfoInitPutArgStk(argNode->AsPutArgStk(), curArgTabEntry);
- continue;
- }
-
- // A GT_FIELD_LIST has a TYP_VOID, but is used to represent a multireg struct
- if (argNode->OperGet() == GT_FIELD_LIST)
- {
- // There could be up to 2-4 PUTARG_REGs in the list (3 or 4 can only occur for HFAs)
- regNumber argReg = curArgTabEntry->regNum;
- for (GenTreeFieldList* entry = argNode->AsFieldList(); entry != nullptr; entry = entry->Rest())
- {
- TreeNodeInfoInitPutArgReg(entry->Current()->AsUnOp(), argReg, *info, false, &callHasFloatRegArgs);
-
- // Update argReg for the next putarg_reg (if any)
- argReg = genRegArgNext(argReg);
- }
- }
- else
- {
- TreeNodeInfoInitPutArgReg(argNode->AsUnOp(), curArgTabEntry->regNum, *info, false, &callHasFloatRegArgs);
- }
- }
-
- // Now, count stack args
- // Note that these need to be computed into a register, but then
- // they're just stored to the stack - so the reg doesn't
- // need to remain live until the call. In fact, it must not
- // because the code generator doesn't actually consider it live,
- // so it can't be spilled.
-
- GenTreePtr args = call->gtCallArgs;
- while (args)
- {
- GenTreePtr arg = args->gtOp.gtOp1;
-
- // Skip arguments that have been moved to the Late Arg list
- if (!(args->gtFlags & GTF_LATE_ARG))
- {
- if (arg->gtOper == GT_PUTARG_STK)
- {
- fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, arg);
- assert(curArgTabEntry);
-
- assert(curArgTabEntry->regNum == REG_STK);
-
- TreeNodeInfoInitPutArgStk(arg->AsPutArgStk(), curArgTabEntry);
- }
- else
- {
- TreeNodeInfo* argInfo = &(arg->gtLsraInfo);
- if (argInfo->dstCount != 0)
- {
- argInfo->isLocalDefUse = true;
- }
-
- argInfo->dstCount = 0;
- }
- }
- args = args->gtOp.gtOp2;
- }
-
- if (call->IsVarargs() && callHasFloatRegArgs && !call->IsFastTailCall() && (ctrlExpr != nullptr))
- {
- NYI_ARM("float reg varargs");
- }
-
- if (call->NeedsNullCheck())
- {
- info->internalIntCount++;
- }
-}
-
-//------------------------------------------------------------------------
-// TreeNodeInfoInitPutArgStk: Set the NodeInfo for a GT_PUTARG_STK node
-//
-// Arguments:
-// argNode - a GT_PUTARG_STK node
-//
-// Return Value:
-// None.
-//
-// Notes:
-// Set the child node(s) to be contained when we have a multireg arg
-//
-void Lowering::TreeNodeInfoInitPutArgStk(GenTreePutArgStk* argNode, fgArgTabEntryPtr info)
-{
- assert(argNode->gtOper == GT_PUTARG_STK);
-
- GenTreePtr putArgChild = argNode->gtOp.gtOp1;
-
- // Initialize 'argNode' as not contained, as this is both the default case
- // and how MakeSrcContained expects to find things setup.
- //
- argNode->gtLsraInfo.srcCount = 1;
- argNode->gtLsraInfo.dstCount = 0;
-
- // Do we have a TYP_STRUCT argument (or a GT_FIELD_LIST), if so it must be a multireg pass-by-value struct
- if ((putArgChild->TypeGet() == TYP_STRUCT) || (putArgChild->OperGet() == GT_FIELD_LIST))
- {
- // We will use store instructions that each write a register sized value
-
- if (putArgChild->OperGet() == GT_FIELD_LIST)
- {
- // We consume all of the items in the GT_FIELD_LIST
- argNode->gtLsraInfo.srcCount = info->numSlots;
- }
- else
- {
- // We could use a ldp/stp sequence so we need two internal registers
- argNode->gtLsraInfo.internalIntCount = 2;
-
- if (putArgChild->OperGet() == GT_OBJ)
- {
- GenTreePtr objChild = putArgChild->gtOp.gtOp1;
- if (objChild->OperGet() == GT_LCL_VAR_ADDR)
- {
- // We will generate all of the code for the GT_PUTARG_STK, the GT_OBJ and the GT_LCL_VAR_ADDR
- // as one contained operation
- //
- MakeSrcContained(putArgChild, objChild);
- }
- }
-
- // We will generate all of the code for the GT_PUTARG_STK and it's child node
- // as one contained operation
- //
- MakeSrcContained(argNode, putArgChild);
- }
- }
- else
- {
- // We must not have a multi-reg struct
- assert(info->numSlots == 1);
- }
-}
-
void Lowering::TreeNodeInfoInitLclHeap(GenTree* tree)
{
TreeNodeInfo* info = &(tree->gtLsraInfo);
@@ -811,147 +200,6 @@ void Lowering::TreeNodeInfoInitLclHeap(GenTree* tree)
}
//------------------------------------------------------------------------
-// TreeNodeInfoInitBlockStore: Set the NodeInfo for a block store.
-//
-// Arguments:
-// blkNode - The block store node of interest
-//
-// Return Value:
-// None.
-void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
-{
- GenTree* dstAddr = blkNode->Addr();
- unsigned size = blkNode->gtBlkSize;
- GenTree* source = blkNode->Data();
- LinearScan* l = m_lsra;
- Compiler* compiler = comp;
-
- // Sources are dest address and initVal or source.
- // We may require an additional source or temp register for the size.
- blkNode->gtLsraInfo.srcCount = 2;
- blkNode->gtLsraInfo.dstCount = 0;
- GenTreePtr srcAddrOrFill = nullptr;
- bool isInitBlk = blkNode->OperIsInitBlkOp();
-
- if (!isInitBlk)
- {
- // CopyObj or CopyBlk
- if (source->gtOper == GT_IND)
- {
- srcAddrOrFill = blkNode->Data()->gtGetOp1();
- // We're effectively setting source as contained, but can't call MakeSrcContained, because the
- // "inheritance" of the srcCount is to a child not a parent - it would "just work" but could be misleading.
- // If srcAddr is already non-contained, we don't need to change it.
- if (srcAddrOrFill->gtLsraInfo.getDstCount() == 0)
- {
- srcAddrOrFill->gtLsraInfo.setDstCount(1);
- srcAddrOrFill->gtLsraInfo.setSrcCount(source->gtLsraInfo.srcCount);
- }
- m_lsra->clearOperandCounts(source);
- }
- else if (!source->IsMultiRegCall() && !source->OperIsSIMD())
- {
- assert(source->IsLocal());
- MakeSrcContained(blkNode, source);
- }
- }
-
- if (isInitBlk)
- {
- GenTreePtr initVal = source;
- if (initVal->OperIsInitVal())
- {
- initVal = initVal->gtGetOp1();
- }
- srcAddrOrFill = initVal;
-
- if (blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindUnroll)
- {
- // TODO-ARM-CQ: Currently we generate a helper call for every
- // initblk we encounter. Later on we should implement loop unrolling
- // code sequences to improve CQ.
- // For reference see the code in lsraxarch.cpp.
- NYI_ARM("initblk loop unrolling is currently not implemented.");
- }
- else
- {
- assert(blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindHelper);
- // The helper follows the regular ABI.
- dstAddr->gtLsraInfo.setSrcCandidates(l, RBM_ARG_0);
- initVal->gtLsraInfo.setSrcCandidates(l, RBM_ARG_1);
- if (size != 0)
- {
- // Reserve a temp register for the block size argument.
- blkNode->gtLsraInfo.setInternalCandidates(l, RBM_ARG_2);
- blkNode->gtLsraInfo.internalIntCount = 1;
- }
- else
- {
- // The block size argument is a third argument to GT_STORE_DYN_BLK
- noway_assert(blkNode->gtOper == GT_STORE_DYN_BLK);
- blkNode->gtLsraInfo.setSrcCount(3);
- GenTree* sizeNode = blkNode->AsDynBlk()->gtDynamicSize;
- sizeNode->gtLsraInfo.setSrcCandidates(l, RBM_ARG_2);
- }
- }
- }
- else
- {
- // CopyObj or CopyBlk
- // Sources are src and dest and size if not constant.
- if (blkNode->OperGet() == GT_STORE_OBJ)
- {
- // CopyObj
- NYI_ARM("GT_STORE_OBJ is needed of write barriers implementation");
- }
- else
- {
- // CopyBlk
- short internalIntCount = 0;
- regMaskTP internalIntCandidates = RBM_NONE;
-
- if (blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindUnroll)
- {
- // TODO-ARM-CQ: cpblk loop unrolling is currently not implemented.
- // In case of a CpBlk with a constant size and less than CPBLK_UNROLL_LIMIT size
- // we should unroll the loop to improve CQ.
- // For reference see the code in lsraxarch.cpp.
- NYI_ARM("cpblk loop unrolling is currently not implemented.");
- }
- else
- {
- assert(blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindHelper);
- dstAddr->gtLsraInfo.setSrcCandidates(l, RBM_ARG_0);
- // The srcAddr goes in arg1.
- if (srcAddrOrFill != nullptr)
- {
- srcAddrOrFill->gtLsraInfo.setSrcCandidates(l, RBM_ARG_1);
- }
- if (size != 0)
- {
- // Reserve a temp register for the block size argument.
- internalIntCandidates |= RBM_ARG_2;
- internalIntCount++;
- }
- else
- {
- // The block size argument is a third argument to GT_STORE_DYN_BLK
- noway_assert(blkNode->gtOper == GT_STORE_DYN_BLK);
- blkNode->gtLsraInfo.setSrcCount(3);
- GenTree* blockSize = blkNode->AsDynBlk()->gtDynamicSize;
- blockSize->gtLsraInfo.setSrcCandidates(l, RBM_ARG_2);
- }
- }
- if (internalIntCount != 0)
- {
- blkNode->gtLsraInfo.internalIntCount = internalIntCount;
- blkNode->gtLsraInfo.setInternalCandidates(l, internalIntCandidates);
- }
- }
- }
-}
-
-//------------------------------------------------------------------------
// TreeNodeInfoInit: Set the register requirements for RA.
//
// Notes:
diff --git a/src/jit/lsraarm64.cpp b/src/jit/lsraarm64.cpp
index e2159cba87..3b2d465495 100644
--- a/src/jit/lsraarm64.cpp
+++ b/src/jit/lsraarm64.cpp
@@ -29,43 +29,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "lower.h"
//------------------------------------------------------------------------
-// TreeNodeInfoInitStoreLoc: Set register requirements for a store of a lclVar
-//
-// Arguments:
-// storeLoc - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR)
-//
-// Notes:
-// This involves:
-// - Setting the appropriate candidates for a store of a multi-reg call return value.
-// - Handling of contained immediates.
-
-void Lowering::TreeNodeInfoInitStoreLoc(GenTreeLclVarCommon* storeLoc)
-{
- TreeNodeInfo* info = &(storeLoc->gtLsraInfo);
-
- // Is this the case of var = call where call is returning
- // a value in multiple return registers?
- GenTree* op1 = storeLoc->gtGetOp1();
- if (op1->IsMultiRegCall())
- {
- // backend expects to see this case only for store lclvar.
- assert(storeLoc->OperGet() == GT_STORE_LCL_VAR);
-
- // srcCount = number of registers in which the value is returned by call
- GenTreeCall* call = op1->AsCall();
- ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
- info->srcCount = retTypeDesc->GetReturnRegCount();
-
- // Call node srcCandidates = Bitwise-OR(allregs(GetReturnRegType(i))) for all i=0..RetRegCount-1
- regMaskTP srcCandidates = m_lsra->allMultiRegCallNodeRegs(call);
- op1->gtLsraInfo.setSrcCandidates(m_lsra, srcCandidates);
- return;
- }
-
- CheckImmedAndMakeContained(storeLoc, op1);
-}
-
-//------------------------------------------------------------------------
// TreeNodeInfoInit: Set the register requirements for RA.
//
// Notes:
@@ -435,19 +398,8 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
case GT_RSH:
case GT_RSZ:
case GT_ROR:
- {
- info->srcCount = 2;
- info->dstCount = 1;
-
- GenTreePtr shiftBy = tree->gtOp.gtOp2;
- GenTreePtr source = tree->gtOp.gtOp1;
- if (shiftBy->IsCnsIntOrI())
- {
- l->clearDstCount(shiftBy);
- info->srcCount--;
- }
- }
- break;
+ TreeNodeInfoInitShiftRotate(tree);
+ break;
case GT_EQ:
case GT_NE:
@@ -847,458 +799,6 @@ void Lowering::TreeNodeInfoInitReturn(GenTree* tree)
}
}
-//------------------------------------------------------------------------
-// TreeNodeInfoInitPutArgReg: Set the NodeInfo for a PUTARG_REG.
-//
-// Arguments:
-// node - The PUTARG_REG node.
-// argReg - The register in which to pass the argument.
-// info - The info for the node's using call.
-// isVarArgs - True if the call uses a varargs calling convention.
-// callHasFloatRegArgs - Set to true if this PUTARG_REG uses an FP register.
-//
-// Return Value:
-// None.
-//
-void Lowering::TreeNodeInfoInitPutArgReg(
- GenTreeUnOp* node, regNumber argReg, TreeNodeInfo& info, bool isVarArgs, bool* callHasFloatRegArgs)
-{
- assert(node != nullptr);
- assert(node->OperIsPutArgReg());
- assert(argReg != REG_NA);
-
- // Each register argument corresponds to one source.
- info.srcCount++;
-
- // Set the register requirements for the node.
- const regMaskTP argMask = genRegMask(argReg);
- node->gtLsraInfo.setDstCandidates(m_lsra, argMask);
- node->gtLsraInfo.setSrcCandidates(m_lsra, argMask);
-
- // To avoid redundant moves, have the argument operand computed in the
- // register in which the argument is passed to the call.
- node->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(m_lsra, m_lsra->getUseCandidates(node));
-
- *callHasFloatRegArgs |= varTypeIsFloating(node->TypeGet());
-}
-
-//------------------------------------------------------------------------
-// TreeNodeInfoInitCall: Set the NodeInfo for a call.
-//
-// Arguments:
-// call - The call node of interest
-//
-// Return Value:
-// None.
-//
-void Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
-{
- TreeNodeInfo* info = &(call->gtLsraInfo);
- LinearScan* l = m_lsra;
- Compiler* compiler = comp;
- bool hasMultiRegRetVal = false;
- ReturnTypeDesc* retTypeDesc = nullptr;
-
- info->srcCount = 0;
- if (call->TypeGet() != TYP_VOID)
- {
- hasMultiRegRetVal = call->HasMultiRegRetVal();
- if (hasMultiRegRetVal)
- {
- // dst count = number of registers in which the value is returned by call
- retTypeDesc = call->GetReturnTypeDesc();
- info->dstCount = retTypeDesc->GetReturnRegCount();
- }
- else
- {
- info->dstCount = 1;
- }
- }
- else
- {
- info->dstCount = 0;
- }
-
- GenTree* ctrlExpr = call->gtControlExpr;
- if (call->gtCallType == CT_INDIRECT)
- {
- // either gtControlExpr != null or gtCallAddr != null.
- // Both cannot be non-null at the same time.
- assert(ctrlExpr == nullptr);
- assert(call->gtCallAddr != nullptr);
- ctrlExpr = call->gtCallAddr;
- }
-
- // set reg requirements on call target represented as control sequence.
- if (ctrlExpr != nullptr)
- {
- // we should never see a gtControlExpr whose type is void.
- assert(ctrlExpr->TypeGet() != TYP_VOID);
-
- info->srcCount++;
-
- // In case of fast tail implemented as jmp, make sure that gtControlExpr is
- // computed into a register.
- if (call->IsFastTailCall())
- {
- // Fast tail call - make sure that call target is always computed in IP0
- // so that epilog sequence can generate "br xip0" to achieve fast tail call.
- ctrlExpr->gtLsraInfo.setSrcCandidates(l, genRegMask(REG_IP0));
- }
- }
-
- RegisterType registerType = call->TypeGet();
-
- // Set destination candidates for return value of the call.
- if (hasMultiRegRetVal)
- {
- assert(retTypeDesc != nullptr);
- info->setDstCandidates(l, retTypeDesc->GetABIReturnRegs());
- }
- else if (varTypeIsFloating(registerType))
- {
- info->setDstCandidates(l, RBM_FLOATRET);
- }
- else if (registerType == TYP_LONG)
- {
- info->setDstCandidates(l, RBM_LNGRET);
- }
- else
- {
- info->setDstCandidates(l, RBM_INTRET);
- }
-
- // If there is an explicit this pointer, we don't want that node to produce anything
- // as it is redundant
- if (call->gtCallObjp != nullptr)
- {
- GenTreePtr thisPtrNode = call->gtCallObjp;
-
- if (thisPtrNode->gtOper == GT_PUTARG_REG)
- {
- l->clearOperandCounts(thisPtrNode);
- l->clearDstCount(thisPtrNode->gtOp.gtOp1);
- }
- else
- {
- l->clearDstCount(thisPtrNode);
- }
- }
-
- // First, count reg args
- bool callHasFloatRegArgs = false;
-
- for (GenTreePtr list = call->gtCallLateArgs; list; list = list->MoveNext())
- {
- assert(list->OperIsList());
-
- GenTreePtr argNode = list->Current();
-
- fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, argNode);
- assert(curArgTabEntry);
-
- if (curArgTabEntry->regNum == REG_STK)
- {
- // late arg that is not passed in a register
- assert(argNode->gtOper == GT_PUTARG_STK);
-
- TreeNodeInfoInitPutArgStk(argNode->AsPutArgStk(), curArgTabEntry);
- continue;
- }
-
- // A GT_FIELD_LIST has a TYP_VOID, but is used to represent a multireg struct
- if (argNode->OperGet() == GT_FIELD_LIST)
- {
- // There could be up to 2-4 PUTARG_REGs in the list (3 or 4 can only occur for HFAs)
- regNumber argReg = curArgTabEntry->regNum;
- for (GenTreeFieldList* entry = argNode->AsFieldList(); entry != nullptr; entry = entry->Rest())
- {
- TreeNodeInfoInitPutArgReg(entry->Current()->AsUnOp(), argReg, *info, false, &callHasFloatRegArgs);
-
- // Update argReg for the next putarg_reg (if any)
- argReg = genRegArgNext(argReg);
- }
- }
- else
- {
- TreeNodeInfoInitPutArgReg(argNode->AsUnOp(), curArgTabEntry->regNum, *info, false, &callHasFloatRegArgs);
- }
- }
-
- // Now, count stack args
- // Note that these need to be computed into a register, but then
- // they're just stored to the stack - so the reg doesn't
- // need to remain live until the call. In fact, it must not
- // because the code generator doesn't actually consider it live,
- // so it can't be spilled.
-
- GenTreePtr args = call->gtCallArgs;
- while (args)
- {
- GenTreePtr arg = args->gtOp.gtOp1;
-
- // Skip arguments that have been moved to the Late Arg list
- if (!(args->gtFlags & GTF_LATE_ARG))
- {
- if (arg->gtOper == GT_PUTARG_STK)
- {
- fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, arg);
- assert(curArgTabEntry);
-
- assert(curArgTabEntry->regNum == REG_STK);
-
- TreeNodeInfoInitPutArgStk(arg->AsPutArgStk(), curArgTabEntry);
- }
- else
- {
- TreeNodeInfo* argInfo = &(arg->gtLsraInfo);
- if (argInfo->dstCount != 0)
- {
- argInfo->isLocalDefUse = true;
- }
-
- argInfo->dstCount = 0;
- }
- }
- args = args->gtOp.gtOp2;
- }
-
- // If it is a fast tail call, it is already preferenced to use IP0.
- // Therefore, no need set src candidates on call tgt again.
- if (call->IsVarargs() && callHasFloatRegArgs && !call->IsFastTailCall() && (ctrlExpr != nullptr))
- {
- // Don't assign the call target to any of the argument registers because
- // we will use them to also pass floating point arguments as required
- // by Arm64 ABI.
- ctrlExpr->gtLsraInfo.setSrcCandidates(l, l->allRegs(TYP_INT) & ~(RBM_ARG_REGS));
- }
-}
-
-//------------------------------------------------------------------------
-// TreeNodeInfoInitPutArgStk: Set the NodeInfo for a GT_PUTARG_STK node
-//
-// Arguments:
-// argNode - a GT_PUTARG_STK node
-//
-// Return Value:
-// None.
-//
-// Notes:
-// Set the child node(s) to be contained when we have a multireg arg
-//
-void Lowering::TreeNodeInfoInitPutArgStk(GenTreePutArgStk* argNode, fgArgTabEntryPtr info)
-{
- assert(argNode->gtOper == GT_PUTARG_STK);
-
- GenTreePtr putArgChild = argNode->gtOp.gtOp1;
-
- // Initialize 'argNode' as not contained, as this is both the default case
- // and how MakeSrcContained expects to find things setup.
- //
- argNode->gtLsraInfo.srcCount = 1;
- argNode->gtLsraInfo.dstCount = 0;
-
- // Do we have a TYP_STRUCT argument (or a GT_FIELD_LIST), if so it must be a multireg pass-by-value struct
- if ((putArgChild->TypeGet() == TYP_STRUCT) || (putArgChild->OperGet() == GT_FIELD_LIST))
- {
- // We will use store instructions that each write a register sized value
-
- if (putArgChild->OperGet() == GT_FIELD_LIST)
- {
- // We consume all of the items in the GT_FIELD_LIST
- argNode->gtLsraInfo.srcCount = info->numSlots;
- }
- else
- {
- // We could use a ldp/stp sequence so we need two internal registers
- argNode->gtLsraInfo.internalIntCount = 2;
-
- if (putArgChild->OperGet() == GT_OBJ)
- {
- GenTreePtr objChild = putArgChild->gtOp.gtOp1;
- if (objChild->OperGet() == GT_LCL_VAR_ADDR)
- {
- // We will generate all of the code for the GT_PUTARG_STK, the GT_OBJ and the GT_LCL_VAR_ADDR
- // as one contained operation
- //
- MakeSrcContained(putArgChild, objChild);
- }
- }
-
- // We will generate all of the code for the GT_PUTARG_STK and it's child node
- // as one contained operation
- //
- MakeSrcContained(argNode, putArgChild);
- }
- }
- else
- {
- // We must not have a multi-reg struct
- assert(info->numSlots == 1);
- }
-}
-
-//------------------------------------------------------------------------
-// TreeNodeInfoInitBlockStore: Set the NodeInfo for a block store.
-//
-// Arguments:
-// blkNode - The block store node of interest
-//
-// Return Value:
-// None.
-//
-// Notes:
-
-void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
-{
- GenTree* dstAddr = blkNode->Addr();
- unsigned size = blkNode->gtBlkSize;
- GenTree* source = blkNode->Data();
- LinearScan* l = m_lsra;
- Compiler* compiler = comp;
-
- // Sources are dest address and initVal or source.
- // We may require an additional source or temp register for the size.
- blkNode->gtLsraInfo.srcCount = 2;
- blkNode->gtLsraInfo.dstCount = 0;
- GenTreePtr srcAddrOrFill = nullptr;
- bool isInitBlk = blkNode->OperIsInitBlkOp();
-
- if (!isInitBlk)
- {
- // CopyObj or CopyBlk
- if (source->gtOper == GT_IND)
- {
- srcAddrOrFill = blkNode->Data()->gtGetOp1();
- // We're effectively setting source as contained, but can't call MakeSrcContained, because the
- // "inheritance" of the srcCount is to a child not a parent - it would "just work" but could be misleading.
- // If srcAddr is already non-contained, we don't need to change it.
- if (srcAddrOrFill->gtLsraInfo.getDstCount() == 0)
- {
- srcAddrOrFill->gtLsraInfo.setDstCount(1);
- srcAddrOrFill->gtLsraInfo.setSrcCount(source->gtLsraInfo.srcCount);
- }
- m_lsra->clearOperandCounts(source);
- }
- else if (!source->IsMultiRegCall() && !source->OperIsSIMD())
- {
- assert(source->IsLocal());
- MakeSrcContained(blkNode, source);
- }
- }
-
- if (isInitBlk)
- {
- GenTreePtr initVal = source;
- if (initVal->OperIsInitVal())
- {
- initVal = initVal->gtGetOp1();
- }
- srcAddrOrFill = initVal;
-
- if (blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindUnroll)
- {
- // No additional temporaries required
- ssize_t fill = initVal->gtIntCon.gtIconVal & 0xFF;
- if (fill == 0)
- {
- MakeSrcContained(blkNode, source);
- }
- }
- else
- {
- assert(blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindHelper);
- // The helper follows the regular ABI.
- dstAddr->gtLsraInfo.setSrcCandidates(l, RBM_ARG_0);
- initVal->gtLsraInfo.setSrcCandidates(l, RBM_ARG_1);
- if (size != 0)
- {
- // Reserve a temp register for the block size argument.
- blkNode->gtLsraInfo.setInternalCandidates(l, RBM_ARG_2);
- blkNode->gtLsraInfo.internalIntCount = 1;
- }
- else
- {
- // The block size argument is a third argument to GT_STORE_DYN_BLK
- noway_assert(blkNode->gtOper == GT_STORE_DYN_BLK);
- blkNode->gtLsraInfo.setSrcCount(3);
- GenTree* sizeNode = blkNode->AsDynBlk()->gtDynamicSize;
- sizeNode->gtLsraInfo.setSrcCandidates(l, RBM_ARG_2);
- }
- }
- }
- else
- {
- // CopyObj or CopyBlk
- // Sources are src and dest and size if not constant.
-
- if (blkNode->OperGet() == GT_STORE_OBJ)
- {
- // CopyObj
-
- // We don't need to materialize the struct size but we still need
- // a temporary register to perform the sequence of loads and stores.
- blkNode->gtLsraInfo.internalIntCount = 1;
-
- dstAddr->gtLsraInfo.setSrcCandidates(l, RBM_WRITE_BARRIER_DST_BYREF);
- // If we have a source address we want it in REG_WRITE_BARRIER_SRC_BYREF.
- // Otherwise, if it is a local, codegen will put its address in REG_WRITE_BARRIER_SRC_BYREF,
- // which is killed by a StoreObj (and thus needn't be reserved).
- if (srcAddrOrFill != nullptr)
- {
- srcAddrOrFill->gtLsraInfo.setSrcCandidates(l, RBM_WRITE_BARRIER_SRC_BYREF);
- }
- }
- else
- {
- // CopyBlk
- short internalIntCount = 0;
- regMaskTP internalIntCandidates = RBM_NONE;
-
- if (blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindUnroll)
- {
- internalIntCount = 1;
- internalIntCandidates = RBM_ALLINT;
-
- if (size >= 2 * REGSIZE_BYTES)
- {
- // Use ldp/stp to reduce code size and improve performance
- internalIntCount++;
- }
- }
- else
- {
- assert(blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindHelper);
- dstAddr->gtLsraInfo.setSrcCandidates(l, RBM_ARG_0);
- // The srcAddr goes in arg1.
- if (srcAddrOrFill != nullptr)
- {
- srcAddrOrFill->gtLsraInfo.setSrcCandidates(l, RBM_ARG_1);
- }
- if (size != 0)
- {
- // Reserve a temp register for the block size argument.
- internalIntCandidates |= RBM_ARG_2;
- internalIntCount++;
- }
- else
- {
- // The block size argument is a third argument to GT_STORE_DYN_BLK
- noway_assert(blkNode->gtOper == GT_STORE_DYN_BLK);
- blkNode->gtLsraInfo.setSrcCount(3);
- GenTree* blockSize = blkNode->AsDynBlk()->gtDynamicSize;
- blockSize->gtLsraInfo.setSrcCandidates(l, RBM_ARG_2);
- }
- }
- if (internalIntCount != 0)
- {
- blkNode->gtLsraInfo.internalIntCount = internalIntCount;
- blkNode->gtLsraInfo.setInternalCandidates(l, internalIntCandidates);
- }
- }
- }
-}
-
#ifdef FEATURE_SIMD
//------------------------------------------------------------------------
// TreeNodeInfoInitSIMD: Set the NodeInfo for a GT_SIMD tree.
@@ -1500,223 +1000,6 @@ void Lowering::TreeNodeInfoInitSIMD(GenTree* tree)
}
#endif // FEATURE_SIMD
-void Lowering::TreeNodeInfoInitGCWriteBarrier(GenTree* tree)
-{
- GenTreePtr dst = tree;
- GenTreePtr addr = tree->gtOp.gtOp1;
- GenTreePtr src = tree->gtOp.gtOp2;
-
- if (addr->OperGet() == GT_LEA)
- {
- // In the case where we are doing a helper assignment, if the dst
- // is an indir through an lea, we need to actually instantiate the
- // lea in a register
- GenTreeAddrMode* lea = addr->AsAddrMode();
-
- short leaSrcCount = 0;
- if (lea->Base() != nullptr)
- {
- leaSrcCount++;
- }
- if (lea->Index() != nullptr)
- {
- leaSrcCount++;
- }
- lea->gtLsraInfo.srcCount = leaSrcCount;
- lea->gtLsraInfo.dstCount = 1;
- }
-
-#if NOGC_WRITE_BARRIERS
- // For the NOGC JIT Helper calls
- //
- // the 'addr' goes into x14 (REG_WRITE_BARRIER_DST_BYREF)
- // the 'src' goes into x15 (REG_WRITE_BARRIER)
- //
- addr->gtLsraInfo.setSrcCandidates(m_lsra, RBM_WRITE_BARRIER_DST_BYREF);
- src->gtLsraInfo.setSrcCandidates(m_lsra, RBM_WRITE_BARRIER);
-#else
- // For the standard JIT Helper calls
- // op1 goes into REG_ARG_0 and
- // op2 goes into REG_ARG_1
- //
- addr->gtLsraInfo.setSrcCandidates(m_lsra, RBM_ARG_0);
- src->gtLsraInfo.setSrcCandidates(m_lsra, RBM_ARG_1);
-#endif // NOGC_WRITE_BARRIERS
-
- // Both src and dst must reside in a register, which they should since we haven't set
- // either of them as contained.
- assert(addr->gtLsraInfo.dstCount == 1);
- assert(src->gtLsraInfo.dstCount == 1);
-}
-
-//-----------------------------------------------------------------------------------------
-// TreeNodeInfoInitIndir: Specify register requirements for address expression of an indirection operation.
-//
-// Arguments:
-// indirTree - GT_IND or GT_STOREIND gentree node
-//
-void Lowering::TreeNodeInfoInitIndir(GenTreePtr indirTree)
-{
- assert(indirTree->OperIsIndir());
- // If this is the rhs of a block copy (i.e. non-enregisterable struct),
- // it has no register requirements.
- if (indirTree->TypeGet() == TYP_STRUCT)
- {
- return;
- }
-
- GenTreePtr addr = indirTree->gtGetOp1();
- TreeNodeInfo* info = &(indirTree->gtLsraInfo);
-
- GenTreePtr base = nullptr;
- GenTreePtr index = nullptr;
- unsigned cns = 0;
- unsigned mul;
- bool rev;
- bool modifiedSources = false;
-
- if ((addr->OperGet() == GT_LEA) && IsSafeToContainMem(indirTree, addr))
- {
- GenTreeAddrMode* lea = addr->AsAddrMode();
- base = lea->Base();
- index = lea->Index();
- cns = lea->gtOffset;
-
- m_lsra->clearOperandCounts(addr);
- // The srcCount is decremented because addr is now "contained",
- // then we account for the base and index below, if they are non-null.
- info->srcCount--;
- }
- else if (comp->codeGen->genCreateAddrMode(addr, -1, true, 0, &rev, &base, &index, &mul, &cns, true /*nogen*/) &&
- !(modifiedSources = AreSourcesPossiblyModifiedLocals(indirTree, base, index)))
- {
- // An addressing mode will be constructed that may cause some
- // nodes to not need a register, and cause others' lifetimes to be extended
- // to the GT_IND or even its parent if it's an assignment
-
- assert(base != addr);
- m_lsra->clearOperandCounts(addr);
-
- GenTreePtr arrLength = nullptr;
-
- // Traverse the computation below GT_IND to find the operands
- // for the addressing mode, marking the various constants and
- // intermediate results as not consuming/producing.
- // If the traversal were more complex, we might consider using
- // a traversal function, but the addressing mode is only made
- // up of simple arithmetic operators, and the code generator
- // only traverses one leg of each node.
-
- bool foundBase = (base == nullptr);
- bool foundIndex = (index == nullptr);
- GenTreePtr nextChild = nullptr;
- for (GenTreePtr child = addr; child != nullptr && !child->OperIsLeaf(); child = nextChild)
- {
- nextChild = nullptr;
- GenTreePtr op1 = child->gtOp.gtOp1;
- GenTreePtr op2 = (child->OperIsBinary()) ? child->gtOp.gtOp2 : nullptr;
-
- if (op1 == base)
- {
- foundBase = true;
- }
- else if (op1 == index)
- {
- foundIndex = true;
- }
- else
- {
- m_lsra->clearOperandCounts(op1);
- if (!op1->OperIsLeaf())
- {
- nextChild = op1;
- }
- }
-
- if (op2 != nullptr)
- {
- if (op2 == base)
- {
- foundBase = true;
- }
- else if (op2 == index)
- {
- foundIndex = true;
- }
- else
- {
- m_lsra->clearOperandCounts(op2);
- if (!op2->OperIsLeaf())
- {
- assert(nextChild == nullptr);
- nextChild = op2;
- }
- }
- }
- }
- assert(foundBase && foundIndex);
- info->srcCount--; // it gets incremented below.
- }
- else if (addr->gtOper == GT_ARR_ELEM)
- {
- // The GT_ARR_ELEM consumes all the indices and produces the offset.
- // The array object lives until the mem access.
- // We also consume the target register to which the address is
- // computed
-
- info->srcCount++;
- assert(addr->gtLsraInfo.srcCount >= 2);
- addr->gtLsraInfo.srcCount -= 1;
- }
- else
- {
- // it is nothing but a plain indir
- info->srcCount--; // base gets added in below
- base = addr;
- }
-
- if (base != nullptr)
- {
- info->srcCount++;
- }
-
- if (index != nullptr && !modifiedSources)
- {
- info->srcCount++;
- }
-
- // On ARM64 we may need a single internal register
- // (when both conditions are true then we still only need a single internal register)
- if ((index != nullptr) && (cns != 0))
- {
- // ARM64 does not support both Index and offset so we need an internal register
- info->internalIntCount = 1;
- }
- else if (!emitter::emitIns_valid_imm_for_ldst_offset(cns, emitTypeSize(indirTree)))
- {
- // This offset can't be contained in the ldr/str instruction, so we need an internal register
- info->internalIntCount = 1;
- }
-}
-
-//------------------------------------------------------------------------
-// TreeNodeInfoInitCmp: Set the register requirements for a compare.
-//
-// Arguments:
-// tree - The node of interest
-//
-// Return Value:
-// None.
-//
-void Lowering::TreeNodeInfoInitCmp(GenTreePtr tree)
-{
- TreeNodeInfo* info = &(tree->gtLsraInfo);
-
- info->srcCount = 2;
- info->dstCount = 1;
- CheckImmedAndMakeContained(tree, tree->gtOp.gtOp2);
-}
-
#endif // _TARGET_ARM64_
#endif // !LEGACY_BACKEND
diff --git a/src/jit/lsraarmarch.cpp b/src/jit/lsraarmarch.cpp
new file mode 100644
index 0000000000..7d999d880f
--- /dev/null
+++ b/src/jit/lsraarmarch.cpp
@@ -0,0 +1,868 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+XX XX
+XX Register Requirements for ARM and ARM64 common code XX
+XX XX
+XX This encapsulates common logic for setting register requirements for XX
+XX the ARM and ARM64 architectures. XX
+XX XX
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+*/
+
+#include "jitpch.h"
+#ifdef _MSC_VER
+#pragma hdrstop
+#endif
+
+#ifndef LEGACY_BACKEND // This file is ONLY used for the RyuJIT backend that uses the linear scan register allocator
+
+#ifdef _TARGET_ARMARCH_ // This file is ONLY used for ARM and ARM64 architectures
+
+#include "jit.h"
+#include "sideeffects.h"
+#include "lower.h"
+#include "lsra.h"
+
+//------------------------------------------------------------------------
+// TreeNodeInfoInitStoreLoc: Set register requirements for a store of a lclVar
+//
+// Arguments:
+// storeLoc - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR)
+//
+// Notes:
+// This involves:
+// - Setting the appropriate candidates for a store of a multi-reg call return value.
+// - Handling of contained immediates.
+//
+void Lowering::TreeNodeInfoInitStoreLoc(GenTreeLclVarCommon* storeLoc)
+{
+ TreeNodeInfo* info = &(storeLoc->gtLsraInfo);
+
+ // Is this the case of var = call where call is returning
+ // a value in multiple return registers?
+ GenTree* op1 = storeLoc->gtGetOp1();
+ if (op1->IsMultiRegCall())
+ {
+ // backend expects to see this case only for store lclvar.
+ assert(storeLoc->OperGet() == GT_STORE_LCL_VAR);
+
+ // srcCount = number of registers in which the value is returned by call
+ GenTreeCall* call = op1->AsCall();
+ ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
+ info->srcCount = retTypeDesc->GetReturnRegCount();
+
+ // Call node srcCandidates = Bitwise-OR(allregs(GetReturnRegType(i))) for all i=0..RetRegCount-1
+ regMaskTP srcCandidates = m_lsra->allMultiRegCallNodeRegs(call);
+ op1->gtLsraInfo.setSrcCandidates(m_lsra, srcCandidates);
+ return;
+ }
+
+ CheckImmedAndMakeContained(storeLoc, op1);
+}
+
+//------------------------------------------------------------------------
+// TreeNodeInfoInitCmp: Lower a GT comparison node.
+//
+// Arguments:
+// tree - the node to lower
+//
+// Return Value:
+// None.
+//
+void Lowering::TreeNodeInfoInitCmp(GenTreePtr tree)
+{
+ TreeNodeInfo* info = &(tree->gtLsraInfo);
+
+ info->srcCount = 2;
+ info->dstCount = 1;
+
+#ifdef _TARGET_ARM_
+
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtOp.gtOp2;
+ var_types op1Type = op1->TypeGet();
+ var_types op2Type = op2->TypeGet();
+
+ // Long compares will consume GT_LONG nodes, each of which produces two results.
+ // Thus for each long operand there will be an additional source.
+ // TODO-ARM-CQ: Mark hiOp2 and loOp2 as contained if it is a constant.
+ if (varTypeIsLong(op1Type))
+ {
+ info->srcCount++;
+ }
+ if (varTypeIsLong(op2Type))
+ {
+ info->srcCount++;
+ }
+
+#endif // _TARGET_ARM_
+
+ CheckImmedAndMakeContained(tree, tree->gtOp.gtOp2);
+}
+
+void Lowering::TreeNodeInfoInitGCWriteBarrier(GenTree* tree)
+{
+ GenTreePtr dst = tree;
+ GenTreePtr addr = tree->gtOp.gtOp1;
+ GenTreePtr src = tree->gtOp.gtOp2;
+
+ if (addr->OperGet() == GT_LEA)
+ {
+ // In the case where we are doing a helper assignment, if the dst
+ // is an indir through an lea, we need to actually instantiate the
+ // lea in a register
+ GenTreeAddrMode* lea = addr->AsAddrMode();
+
+ short leaSrcCount = 0;
+ if (lea->Base() != nullptr)
+ {
+ leaSrcCount++;
+ }
+ if (lea->Index() != nullptr)
+ {
+ leaSrcCount++;
+ }
+ lea->gtLsraInfo.srcCount = leaSrcCount;
+ lea->gtLsraInfo.dstCount = 1;
+ }
+
+#if NOGC_WRITE_BARRIERS
+ NYI_ARM("NOGC_WRITE_BARRIERS");
+
+ // For the NOGC JIT Helper calls
+ //
+ // the 'addr' goes into x14 (REG_WRITE_BARRIER_DST_BYREF)
+ // the 'src' goes into x15 (REG_WRITE_BARRIER)
+ //
+ addr->gtLsraInfo.setSrcCandidates(m_lsra, RBM_WRITE_BARRIER_DST_BYREF);
+ src->gtLsraInfo.setSrcCandidates(m_lsra, RBM_WRITE_BARRIER);
+#else
+ // For the standard JIT Helper calls
+ // op1 goes into REG_ARG_0 and
+ // op2 goes into REG_ARG_1
+ //
+ addr->gtLsraInfo.setSrcCandidates(m_lsra, RBM_ARG_0);
+ src->gtLsraInfo.setSrcCandidates(m_lsra, RBM_ARG_1);
+#endif // NOGC_WRITE_BARRIERS
+
+ // Both src and dst must reside in a register, which they should since we haven't set
+ // either of them as contained.
+ assert(addr->gtLsraInfo.dstCount == 1);
+ assert(src->gtLsraInfo.dstCount == 1);
+}
+
+//------------------------------------------------------------------------
+// TreeNodeInfoInitIndir: Specify register requirements for address expression
+// of an indirection operation.
+//
+// Arguments:
+// indirTree - GT_IND, GT_STOREIND, block node or GT_NULLCHECK gentree node
+//
+void Lowering::TreeNodeInfoInitIndir(GenTreePtr indirTree)
+{
+ assert(indirTree->OperIsIndir());
+ // If this is the rhs of a block copy (i.e. non-enregisterable struct),
+ // it has no register requirements.
+ if (indirTree->TypeGet() == TYP_STRUCT)
+ {
+ return;
+ }
+
+ GenTreePtr addr = indirTree->gtGetOp1();
+ TreeNodeInfo* info = &(indirTree->gtLsraInfo);
+
+ GenTreePtr base = nullptr;
+ GenTreePtr index = nullptr;
+ unsigned cns = 0;
+ unsigned mul;
+ bool rev;
+ bool modifiedSources = false;
+
+ if ((addr->OperGet() == GT_LEA) && IsSafeToContainMem(indirTree, addr))
+ {
+ GenTreeAddrMode* lea = addr->AsAddrMode();
+ base = lea->Base();
+ index = lea->Index();
+ cns = lea->gtOffset;
+
+ m_lsra->clearOperandCounts(addr);
+ // The srcCount is decremented because addr is now "contained",
+ // then we account for the base and index below, if they are non-null.
+ info->srcCount--;
+ }
+ else if (comp->codeGen->genCreateAddrMode(addr, -1, true, 0, &rev, &base, &index, &mul, &cns, true /*nogen*/) &&
+ !(modifiedSources = AreSourcesPossiblyModifiedLocals(indirTree, base, index)))
+ {
+ // An addressing mode will be constructed that may cause some
+ // nodes to not need a register, and cause others' lifetimes to be extended
+ // to the GT_IND or even its parent if it's an assignment
+
+ assert(base != addr);
+ m_lsra->clearOperandCounts(addr);
+
+ GenTreePtr arrLength = nullptr;
+
+ // Traverse the computation below GT_IND to find the operands
+ // for the addressing mode, marking the various constants and
+ // intermediate results as not consuming/producing.
+ // If the traversal were more complex, we might consider using
+ // a traversal function, but the addressing mode is only made
+ // up of simple arithmetic operators, and the code generator
+ // only traverses one leg of each node.
+
+ bool foundBase = (base == nullptr);
+ bool foundIndex = (index == nullptr);
+ GenTreePtr nextChild = nullptr;
+ for (GenTreePtr child = addr; child != nullptr && !child->OperIsLeaf(); child = nextChild)
+ {
+ nextChild = nullptr;
+ GenTreePtr op1 = child->gtOp.gtOp1;
+ GenTreePtr op2 = (child->OperIsBinary()) ? child->gtOp.gtOp2 : nullptr;
+
+ if (op1 == base)
+ {
+ foundBase = true;
+ }
+ else if (op1 == index)
+ {
+ foundIndex = true;
+ }
+ else
+ {
+ m_lsra->clearOperandCounts(op1);
+ if (!op1->OperIsLeaf())
+ {
+ nextChild = op1;
+ }
+ }
+
+ if (op2 != nullptr)
+ {
+ if (op2 == base)
+ {
+ foundBase = true;
+ }
+ else if (op2 == index)
+ {
+ foundIndex = true;
+ }
+ else
+ {
+ m_lsra->clearOperandCounts(op2);
+ if (!op2->OperIsLeaf())
+ {
+ assert(nextChild == nullptr);
+ nextChild = op2;
+ }
+ }
+ }
+ }
+ assert(foundBase && foundIndex);
+ info->srcCount--; // it gets incremented below.
+ }
+ else if (addr->gtOper == GT_ARR_ELEM)
+ {
+ // The GT_ARR_ELEM consumes all the indices and produces the offset.
+ // The array object lives until the mem access.
+ // We also consume the target register to which the address is
+ // computed
+
+ info->srcCount++;
+ assert(addr->gtLsraInfo.srcCount >= 2);
+ addr->gtLsraInfo.srcCount -= 1;
+ }
+ else
+ {
+ // it is nothing but a plain indir
+ info->srcCount--; // base gets added in below
+ base = addr;
+ }
+
+ if (base != nullptr)
+ {
+ info->srcCount++;
+ }
+
+ if (index != nullptr && !modifiedSources)
+ {
+ info->srcCount++;
+ }
+
+ // On ARM we may need a single internal register
+ // (when both conditions are true then we still only need a single internal register)
+ if ((index != nullptr) && (cns != 0))
+ {
+ // ARM does not support both Index and offset so we need an internal register
+ info->internalIntCount = 1;
+ }
+ else if (!emitter::emitIns_valid_imm_for_ldst_offset(cns, emitTypeSize(indirTree)))
+ {
+ // This offset can't be contained in the ldr/str instruction, so we need an internal register
+ info->internalIntCount = 1;
+ }
+}
+
+//------------------------------------------------------------------------
+// TreeNodeInfoInitShiftRotate: Set the NodeInfo for a shift or rotate.
+//
+// Arguments:
+// tree - The node of interest
+//
+// Return Value:
+// None.
+//
+void Lowering::TreeNodeInfoInitShiftRotate(GenTree* tree)
+{
+ TreeNodeInfo* info = &(tree->gtLsraInfo);
+ LinearScan* l = m_lsra;
+
+ info->srcCount = 2;
+ info->dstCount = 1;
+
+ GenTreePtr shiftBy = tree->gtOp.gtOp2;
+ GenTreePtr source = tree->gtOp.gtOp1;
+ if (shiftBy->IsCnsIntOrI())
+ {
+ l->clearDstCount(shiftBy);
+ info->srcCount--;
+ }
+
+#ifdef _TARGET_ARM_
+
+ // The first operand of a GT_LSH_HI and GT_RSH_LO oper is a GT_LONG so that
+ // we can have a three operand form. Increment the srcCount.
+ if (tree->OperGet() == GT_LSH_HI || tree->OperGet() == GT_RSH_LO)
+ {
+ assert(source->OperGet() == GT_LONG);
+
+ info->srcCount++;
+
+ if (tree->OperGet() == GT_LSH_HI)
+ {
+ GenTreePtr sourceLo = source->gtOp.gtOp1;
+ sourceLo->gtLsraInfo.isDelayFree = true;
+ }
+ else
+ {
+ GenTreePtr sourceHi = source->gtOp.gtOp2;
+ sourceHi->gtLsraInfo.isDelayFree = true;
+ }
+
+ source->gtLsraInfo.hasDelayFreeSrc = true;
+ info->hasDelayFreeSrc = true;
+ }
+
+#endif // _TARGET_ARM_
+}
+
+//------------------------------------------------------------------------
+// TreeNodeInfoInitPutArgReg: Set the NodeInfo for a PUTARG_REG.
+//
+// Arguments:
+// node - The PUTARG_REG node.
+// argReg - The register in which to pass the argument.
+// info - The info for the node's using call.
+// isVarArgs - True if the call uses a varargs calling convention.
+// callHasFloatRegArgs - Set to true if this PUTARG_REG uses an FP register.
+//
+// Return Value:
+// None.
+//
+void Lowering::TreeNodeInfoInitPutArgReg(
+ GenTreeUnOp* node, regNumber argReg, TreeNodeInfo& info, bool isVarArgs, bool* callHasFloatRegArgs)
+{
+ assert(node != nullptr);
+ assert(node->OperIsPutArgReg());
+ assert(argReg != REG_NA);
+
+ // Each register argument corresponds to one source.
+ info.srcCount++;
+
+ // Set the register requirements for the node.
+ const regMaskTP argMask = genRegMask(argReg);
+ node->gtLsraInfo.setDstCandidates(m_lsra, argMask);
+ node->gtLsraInfo.setSrcCandidates(m_lsra, argMask);
+
+ // To avoid redundant moves, have the argument operand computed in the
+ // register in which the argument is passed to the call.
+ node->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(m_lsra, m_lsra->getUseCandidates(node));
+
+ *callHasFloatRegArgs |= varTypeIsFloating(node->TypeGet());
+}
+
+//------------------------------------------------------------------------
+// TreeNodeInfoInitCall: Set the NodeInfo for a call.
+//
+// Arguments:
+// call - The call node of interest
+//
+// Return Value:
+// None.
+//
+void Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
+{
+ TreeNodeInfo* info = &(call->gtLsraInfo);
+ LinearScan* l = m_lsra;
+ Compiler* compiler = comp;
+ bool hasMultiRegRetVal = false;
+ ReturnTypeDesc* retTypeDesc = nullptr;
+
+ info->srcCount = 0;
+ if (call->TypeGet() != TYP_VOID)
+ {
+ hasMultiRegRetVal = call->HasMultiRegRetVal();
+ if (hasMultiRegRetVal)
+ {
+ // dst count = number of registers in which the value is returned by call
+ retTypeDesc = call->GetReturnTypeDesc();
+ info->dstCount = retTypeDesc->GetReturnRegCount();
+ }
+ else
+ {
+ info->dstCount = 1;
+ }
+ }
+ else
+ {
+ info->dstCount = 0;
+ }
+
+ GenTree* ctrlExpr = call->gtControlExpr;
+ if (call->gtCallType == CT_INDIRECT)
+ {
+ // either gtControlExpr != null or gtCallAddr != null.
+ // Both cannot be non-null at the same time.
+ assert(ctrlExpr == nullptr);
+ assert(call->gtCallAddr != nullptr);
+ ctrlExpr = call->gtCallAddr;
+ }
+
+ // set reg requirements on call target represented as control sequence.
+ if (ctrlExpr != nullptr)
+ {
+ // we should never see a gtControlExpr whose type is void.
+ assert(ctrlExpr->TypeGet() != TYP_VOID);
+
+ info->srcCount++;
+
+ // In case of fast tail implemented as jmp, make sure that gtControlExpr is
+ // computed into a register.
+ if (call->IsFastTailCall())
+ {
+ NYI_ARM("tail call");
+
+#ifdef _TARGET_ARM64_
+ // Fast tail call - make sure that call target is always computed in IP0
+ // so that epilog sequence can generate "br xip0" to achieve fast tail call.
+ ctrlExpr->gtLsraInfo.setSrcCandidates(l, genRegMask(REG_IP0));
+#endif // _TARGET_ARM64_
+ }
+ }
+#ifdef _TARGET_ARM_
+ else
+ {
+ info->internalIntCount = 1;
+ }
+#endif // _TARGET_ARM_
+
+ RegisterType registerType = call->TypeGet();
+
+// Set destination candidates for return value of the call.
+
+#ifdef _TARGET_ARM_
+ if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME))
+ {
+ // The ARM CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with
+ // TCB in REG_PINVOKE_TCB. fgMorphCall() sets the correct argument registers.
+ info->setDstCandidates(l, RBM_PINVOKE_TCB);
+ }
+ else
+#endif // _TARGET_ARM_
+ if (hasMultiRegRetVal)
+ {
+ assert(retTypeDesc != nullptr);
+ info->setDstCandidates(l, retTypeDesc->GetABIReturnRegs());
+ }
+ else if (varTypeIsFloating(registerType))
+ {
+ info->setDstCandidates(l, RBM_FLOATRET);
+ }
+ else if (registerType == TYP_LONG)
+ {
+ info->setDstCandidates(l, RBM_LNGRET);
+ }
+ else
+ {
+ info->setDstCandidates(l, RBM_INTRET);
+ }
+
+ // If there is an explicit this pointer, we don't want that node to produce anything
+ // as it is redundant
+ if (call->gtCallObjp != nullptr)
+ {
+ GenTreePtr thisPtrNode = call->gtCallObjp;
+
+ if (thisPtrNode->gtOper == GT_PUTARG_REG)
+ {
+ l->clearOperandCounts(thisPtrNode);
+ l->clearDstCount(thisPtrNode->gtOp.gtOp1);
+ }
+ else
+ {
+ l->clearDstCount(thisPtrNode);
+ }
+ }
+
+ // First, count reg args
+ bool callHasFloatRegArgs = false;
+
+ for (GenTreePtr list = call->gtCallLateArgs; list; list = list->MoveNext())
+ {
+ assert(list->OperIsList());
+
+ GenTreePtr argNode = list->Current();
+
+ fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, argNode);
+ assert(curArgTabEntry);
+
+ if (curArgTabEntry->regNum == REG_STK)
+ {
+ // late arg that is not passed in a register
+ assert(argNode->gtOper == GT_PUTARG_STK);
+
+ TreeNodeInfoInitPutArgStk(argNode->AsPutArgStk(), curArgTabEntry);
+ continue;
+ }
+
+ // A GT_FIELD_LIST has a TYP_VOID, but is used to represent a multireg struct
+ if (argNode->OperGet() == GT_FIELD_LIST)
+ {
+ // There could be up to 2-4 PUTARG_REGs in the list (3 or 4 can only occur for HFAs)
+ regNumber argReg = curArgTabEntry->regNum;
+ for (GenTreeFieldList* entry = argNode->AsFieldList(); entry != nullptr; entry = entry->Rest())
+ {
+ TreeNodeInfoInitPutArgReg(entry->Current()->AsUnOp(), argReg, *info, false, &callHasFloatRegArgs);
+
+ // Update argReg for the next putarg_reg (if any)
+ argReg = genRegArgNext(argReg);
+ }
+ }
+ else
+ {
+ TreeNodeInfoInitPutArgReg(argNode->AsUnOp(), curArgTabEntry->regNum, *info, false, &callHasFloatRegArgs);
+ }
+ }
+
+ // Now, count stack args
+ // Note that these need to be computed into a register, but then
+ // they're just stored to the stack - so the reg doesn't
+ // need to remain live until the call. In fact, it must not
+ // because the code generator doesn't actually consider it live,
+ // so it can't be spilled.
+
+ GenTreePtr args = call->gtCallArgs;
+ while (args)
+ {
+ GenTreePtr arg = args->gtOp.gtOp1;
+
+ // Skip arguments that have been moved to the Late Arg list
+ if (!(args->gtFlags & GTF_LATE_ARG))
+ {
+ if (arg->gtOper == GT_PUTARG_STK)
+ {
+ fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, arg);
+ assert(curArgTabEntry);
+
+ assert(curArgTabEntry->regNum == REG_STK);
+
+ TreeNodeInfoInitPutArgStk(arg->AsPutArgStk(), curArgTabEntry);
+ }
+ else
+ {
+ TreeNodeInfo* argInfo = &(arg->gtLsraInfo);
+ if (argInfo->dstCount != 0)
+ {
+ argInfo->isLocalDefUse = true;
+ }
+
+ argInfo->dstCount = 0;
+ }
+ }
+ args = args->gtOp.gtOp2;
+ }
+
+ // If it is a fast tail call, it is already preferenced to use IP0.
+    // Therefore, there is no need to set src candidates on the call target again.
+ if (call->IsVarargs() && callHasFloatRegArgs && !call->IsFastTailCall() && (ctrlExpr != nullptr))
+ {
+ NYI_ARM("float reg varargs");
+
+ // Don't assign the call target to any of the argument registers because
+ // we will use them to also pass floating point arguments as required
+ // by Arm64 ABI.
+ ctrlExpr->gtLsraInfo.setSrcCandidates(l, l->allRegs(TYP_INT) & ~(RBM_ARG_REGS));
+ }
+
+#ifdef _TARGET_ARM_
+
+ if (call->NeedsNullCheck())
+ {
+ info->internalIntCount++;
+ }
+
+#endif // _TARGET_ARM_
+}
+
+//------------------------------------------------------------------------
+// TreeNodeInfoInitPutArgStk: Set the NodeInfo for a GT_PUTARG_STK node
+//
+// Arguments:
+// argNode - a GT_PUTARG_STK node
+//
+// Return Value:
+// None.
+//
+// Notes:
+// Set the child node(s) to be contained when we have a multireg arg
+//
+void Lowering::TreeNodeInfoInitPutArgStk(GenTreePutArgStk* argNode, fgArgTabEntryPtr info)
+{
+ assert(argNode->gtOper == GT_PUTARG_STK);
+
+ GenTreePtr putArgChild = argNode->gtOp.gtOp1;
+
+ // Initialize 'argNode' as not contained, as this is both the default case
+ // and how MakeSrcContained expects to find things setup.
+ //
+ argNode->gtLsraInfo.srcCount = 1;
+ argNode->gtLsraInfo.dstCount = 0;
+
+    // Do we have a TYP_STRUCT argument (or a GT_FIELD_LIST)? If so, it must be a multireg pass-by-value struct.
+ if ((putArgChild->TypeGet() == TYP_STRUCT) || (putArgChild->OperGet() == GT_FIELD_LIST))
+ {
+ // We will use store instructions that each write a register sized value
+
+ if (putArgChild->OperGet() == GT_FIELD_LIST)
+ {
+ // We consume all of the items in the GT_FIELD_LIST
+ argNode->gtLsraInfo.srcCount = info->numSlots;
+ }
+ else
+ {
+ // We could use a ldp/stp sequence so we need two internal registers
+ argNode->gtLsraInfo.internalIntCount = 2;
+
+ if (putArgChild->OperGet() == GT_OBJ)
+ {
+ GenTreePtr objChild = putArgChild->gtOp.gtOp1;
+ if (objChild->OperGet() == GT_LCL_VAR_ADDR)
+ {
+ // We will generate all of the code for the GT_PUTARG_STK, the GT_OBJ and the GT_LCL_VAR_ADDR
+ // as one contained operation
+ //
+ MakeSrcContained(putArgChild, objChild);
+ }
+ }
+
+            // We will generate all of the code for the GT_PUTARG_STK and its child node
+ // as one contained operation
+ //
+ MakeSrcContained(argNode, putArgChild);
+ }
+ }
+ else
+ {
+ // We must not have a multi-reg struct
+ assert(info->numSlots == 1);
+ }
+}
+
+//------------------------------------------------------------------------
+// TreeNodeInfoInitBlockStore: Set the NodeInfo for a block store.
+//
+// Arguments:
+// blkNode - The block store node of interest
+//
+// Return Value:
+// None.
+//
+void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
+{
+ GenTree* dstAddr = blkNode->Addr();
+ unsigned size = blkNode->gtBlkSize;
+ GenTree* source = blkNode->Data();
+ LinearScan* l = m_lsra;
+ Compiler* compiler = comp;
+
+ // Sources are dest address and initVal or source.
+ // We may require an additional source or temp register for the size.
+ blkNode->gtLsraInfo.srcCount = 2;
+ blkNode->gtLsraInfo.dstCount = 0;
+ GenTreePtr srcAddrOrFill = nullptr;
+ bool isInitBlk = blkNode->OperIsInitBlkOp();
+
+ if (!isInitBlk)
+ {
+ // CopyObj or CopyBlk
+ if (source->gtOper == GT_IND)
+ {
+ srcAddrOrFill = blkNode->Data()->gtGetOp1();
+ // We're effectively setting source as contained, but can't call MakeSrcContained, because the
+ // "inheritance" of the srcCount is to a child not a parent - it would "just work" but could be misleading.
+ // If srcAddr is already non-contained, we don't need to change it.
+ if (srcAddrOrFill->gtLsraInfo.getDstCount() == 0)
+ {
+ srcAddrOrFill->gtLsraInfo.setDstCount(1);
+ srcAddrOrFill->gtLsraInfo.setSrcCount(source->gtLsraInfo.srcCount);
+ }
+ m_lsra->clearOperandCounts(source);
+ }
+ else if (!source->IsMultiRegCall() && !source->OperIsSIMD())
+ {
+ assert(source->IsLocal());
+ MakeSrcContained(blkNode, source);
+ }
+ }
+
+ if (isInitBlk)
+ {
+ GenTreePtr initVal = source;
+ if (initVal->OperIsInitVal())
+ {
+ initVal = initVal->gtGetOp1();
+ }
+ srcAddrOrFill = initVal;
+
+ if (blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindUnroll)
+ {
+ // TODO-ARM-CQ: Currently we generate a helper call for every
+ // initblk we encounter. Later on we should implement loop unrolling
+ // code sequences to improve CQ.
+ // For reference see the code in lsraxarch.cpp.
+ NYI_ARM("initblk loop unrolling is currently not implemented.");
+
+#ifdef _TARGET_ARM64_
+ // No additional temporaries required
+ ssize_t fill = initVal->gtIntCon.gtIconVal & 0xFF;
+ if (fill == 0)
+ {
+ MakeSrcContained(blkNode, source);
+ }
+#endif // _TARGET_ARM64_
+ }
+ else
+ {
+ assert(blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindHelper);
+ // The helper follows the regular ABI.
+ dstAddr->gtLsraInfo.setSrcCandidates(l, RBM_ARG_0);
+ initVal->gtLsraInfo.setSrcCandidates(l, RBM_ARG_1);
+ if (size != 0)
+ {
+ // Reserve a temp register for the block size argument.
+ blkNode->gtLsraInfo.setInternalCandidates(l, RBM_ARG_2);
+ blkNode->gtLsraInfo.internalIntCount = 1;
+ }
+ else
+ {
+ // The block size argument is a third argument to GT_STORE_DYN_BLK
+ noway_assert(blkNode->gtOper == GT_STORE_DYN_BLK);
+ blkNode->gtLsraInfo.setSrcCount(3);
+ GenTree* sizeNode = blkNode->AsDynBlk()->gtDynamicSize;
+ sizeNode->gtLsraInfo.setSrcCandidates(l, RBM_ARG_2);
+ }
+ }
+ }
+ else
+ {
+ // CopyObj or CopyBlk
+ // Sources are src and dest and size if not constant.
+ if (blkNode->OperGet() == GT_STORE_OBJ)
+ {
+ // CopyObj
+ NYI_ARM("GT_STORE_OBJ is needed of write barriers implementation");
+
+#ifdef _TARGET_ARM64_
+
+ // We don't need to materialize the struct size but we still need
+ // a temporary register to perform the sequence of loads and stores.
+ blkNode->gtLsraInfo.internalIntCount = 1;
+
+ dstAddr->gtLsraInfo.setSrcCandidates(l, RBM_WRITE_BARRIER_DST_BYREF);
+ // If we have a source address we want it in REG_WRITE_BARRIER_SRC_BYREF.
+ // Otherwise, if it is a local, codegen will put its address in REG_WRITE_BARRIER_SRC_BYREF,
+ // which is killed by a StoreObj (and thus needn't be reserved).
+ if (srcAddrOrFill != nullptr)
+ {
+ srcAddrOrFill->gtLsraInfo.setSrcCandidates(l, RBM_WRITE_BARRIER_SRC_BYREF);
+ }
+
+#endif // _TARGET_ARM64_
+ }
+ else
+ {
+ // CopyBlk
+ short internalIntCount = 0;
+ regMaskTP internalIntCandidates = RBM_NONE;
+
+ if (blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindUnroll)
+ {
+ // TODO-ARM-CQ: cpblk loop unrolling is currently not implemented.
+ // In case of a CpBlk with a constant size and less than CPBLK_UNROLL_LIMIT size
+ // we should unroll the loop to improve CQ.
+ // For reference see the code in lsraxarch.cpp.
+ NYI_ARM("cpblk loop unrolling is currently not implemented.");
+
+#ifdef _TARGET_ARM64_
+
+ internalIntCount = 1;
+ internalIntCandidates = RBM_ALLINT;
+
+ if (size >= 2 * REGSIZE_BYTES)
+ {
+ // Use ldp/stp to reduce code size and improve performance
+ internalIntCount++;
+ }
+
+#endif // _TARGET_ARM64_
+ }
+ else
+ {
+ assert(blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindHelper);
+ dstAddr->gtLsraInfo.setSrcCandidates(l, RBM_ARG_0);
+ // The srcAddr goes in arg1.
+ if (srcAddrOrFill != nullptr)
+ {
+ srcAddrOrFill->gtLsraInfo.setSrcCandidates(l, RBM_ARG_1);
+ }
+ if (size != 0)
+ {
+ // Reserve a temp register for the block size argument.
+ internalIntCandidates |= RBM_ARG_2;
+ internalIntCount++;
+ }
+ else
+ {
+ // The block size argument is a third argument to GT_STORE_DYN_BLK
+ noway_assert(blkNode->gtOper == GT_STORE_DYN_BLK);
+ blkNode->gtLsraInfo.setSrcCount(3);
+ GenTree* blockSize = blkNode->AsDynBlk()->gtDynamicSize;
+ blockSize->gtLsraInfo.setSrcCandidates(l, RBM_ARG_2);
+ }
+ }
+ if (internalIntCount != 0)
+ {
+ blkNode->gtLsraInfo.internalIntCount = internalIntCount;
+ blkNode->gtLsraInfo.setInternalCandidates(l, internalIntCandidates);
+ }
+ }
+ }
+}
+
+#endif // _TARGET_ARMARCH_
+
+#endif // !LEGACY_BACKEND
diff --git a/src/mscorlib/Resources/Strings.resx b/src/mscorlib/Resources/Strings.resx
index 0ff48147fd..cb9474fe00 100644
--- a/src/mscorlib/Resources/Strings.resx
+++ b/src/mscorlib/Resources/Strings.resx
@@ -679,7 +679,7 @@
<data name="Arg_SecurityException" xml:space="preserve">
<value>Security error.</value>
</data>
- <data name="Arg_SerializationException" xml:space="preserve">
+ <data name="SerializationException" xml:space="preserve">
<value>Serialization error.</value>
</data>
<data name="Arg_SetMethNotFnd" xml:space="preserve">
@@ -3566,4 +3566,37 @@
   <data name="DebugAssertShortMessage" xml:space="preserve">
     <value>---- Assert Short Message ----</value>
   </data>
+  <data name="LockRecursionException_ReadAfterWriteNotAllowed" xml:space="preserve">
+    <value>A read lock may not be acquired with the write lock held in this mode.</value>
+  </data>
+  <data name="LockRecursionException_RecursiveReadNotAllowed" xml:space="preserve">
+    <value>Recursive read lock acquisitions not allowed in this mode.</value>
+  </data>
+  <data name="LockRecursionException_RecursiveWriteNotAllowed" xml:space="preserve">
+    <value>Recursive write lock acquisitions not allowed in this mode.</value>
+  </data>
+  <data name="LockRecursionException_RecursiveUpgradeNotAllowed" xml:space="preserve">
+    <value>Recursive upgradeable lock acquisitions not allowed in this mode.</value>
+  </data>
+  <data name="LockRecursionException_WriteAfterReadNotAllowed" xml:space="preserve">
+    <value>Write lock may not be acquired with read lock held. This pattern is prone to deadlocks. Please ensure that read locks are released before taking a write lock. If an upgrade is necessary, use an upgrade lock in place of the read lock.</value>
+  </data>
+ <data name="SynchronizationLockException_MisMatchedUpgrade" xml:space="preserve">
+ <value>The upgradeable lock is being released without being held.</value>
+ </data>
+ <data name="SynchronizationLockException_MisMatchedRead" xml:space="preserve">
+ <value>The read lock is being released without being held.</value>
+ </data>
+ <data name="SynchronizationLockException_IncorrectDispose" xml:space="preserve">
+ <value>The lock is being disposed while still being used. It either is being held by a thread and/or has active waiters waiting to acquire the lock.</value>
+ </data>
+ <data name="LockRecursionException_UpgradeAfterReadNotAllowed" xml:space="preserve">
+ <value>Upgradeable lock may not be acquired with read lock held.</value>
+ </data>
+ <data name="LockRecursionException_UpgradeAfterWriteNotAllowed" xml:space="preserve">
+ <value>Upgradeable lock may not be acquired with write lock held in this mode. Acquiring Upgradeable lock gives the ability to read along with an option to upgrade to a writer.</value>
+ </data>
+ <data name="SynchronizationLockException_MisMatchedWrite" xml:space="preserve">
+ <value>The write lock is being released without being held.</value>
+ </data>
</root>
diff --git a/src/mscorlib/System.Private.CoreLib.csproj b/src/mscorlib/System.Private.CoreLib.csproj
index 76027f8d92..3a0917f75e 100644
--- a/src/mscorlib/System.Private.CoreLib.csproj
+++ b/src/mscorlib/System.Private.CoreLib.csproj
@@ -493,41 +493,24 @@
<Compile Include="$(BclSourcesRoot)\System\Globalization\GlobalizationAssembly.cs" />
</ItemGroup>
<ItemGroup>
- <Compile Include="$(BclSourcesRoot)\System\Threading\AbandonedMutexException.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Threading\AsyncLocal.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Threading\AutoResetEvent.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Threading\SendOrPostCallback.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\SynchronizationContext.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Threading\EventResetMode.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\EventWaitHandle.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Threading\ExecutionContext.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\Interlocked.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Threading\LockRecursionException.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Threading\ManualResetEvent.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\Monitor.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\Mutex.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\Overlapped.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Threading\ParameterizedThreadStart.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\Semaphore.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Threading\SemaphoreFullException.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Threading\SynchronizationLockException.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\Thread.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\ThreadInterruptedException.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\ThreadPool.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Threading\ThreadPriority.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Threading\ThreadStart.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Threading\ThreadState.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Threading\ThreadStateException.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Threading\ThreadStartException.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\Timer.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\Volatile.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\WaitHandle.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Threading\WaitHandleCannotBeOpenedException.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Threading\ApartmentState.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\SpinLock.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\SpinWait.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\LazyInitializer.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\ThreadLocal.cs" />
+ <Compile Include="$(BclSourcesRoot)\System\Threading\ReaderWriterLockSlim.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\SemaphoreSlim.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\ManualResetEventSlim.cs" />
<Compile Include="$(BclSourcesRoot)\System\Threading\CancellationTokenRegistration.cs" />
@@ -616,15 +599,7 @@
<ItemGroup>
<Compile Include="$(BclSourcesRoot)\System\Runtime\Serialization\FormatterConverter.cs" />
<Compile Include="$(BclSourcesRoot)\System\Runtime\Serialization\FormatterServices.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Runtime\Serialization\IDeserializationCallback.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Runtime\Serialization\SerializationAttributes.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Runtime\Serialization\SerializationException.cs" />
<Compile Include="$(BclSourcesRoot)\System\Runtime\Serialization\SerializationInfo.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Runtime\Serialization\SerializationInfoEnumerator.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Runtime\Serialization\StreamingContext.cs" />
- </ItemGroup>
- <ItemGroup>
- <Compile Include="$(BclSourcesRoot)\System\Runtime\Versioning\NonVersionableAttribute.cs" />
</ItemGroup>
<ItemGroup>
<Compile Include="$(BclSourcesRoot)\System\Runtime\Remoting\ObjectHandle.cs" />
diff --git a/src/mscorlib/shared/Microsoft/Win32/SafeHandles/SafeFileHandle.Windows.cs b/src/mscorlib/shared/Microsoft/Win32/SafeHandles/SafeFileHandle.Windows.cs
index a1abdd0ef1..4eabe8f08c 100644
--- a/src/mscorlib/shared/Microsoft/Win32/SafeHandles/SafeFileHandle.Windows.cs
+++ b/src/mscorlib/shared/Microsoft/Win32/SafeHandles/SafeFileHandle.Windows.cs
@@ -10,9 +10,6 @@ using Microsoft.Win32;
namespace Microsoft.Win32.SafeHandles
{
-#if PROJECTN
- [Internal.Runtime.CompilerServices.RelocatedTypeAttribute("System.IO.FileSystem")]
-#endif
public sealed class SafeFileHandle : SafeHandleZeroOrMinusOneIsInvalid
{
private bool? _isAsync;
diff --git a/src/mscorlib/shared/System.Private.CoreLib.Shared.projitems b/src/mscorlib/shared/System.Private.CoreLib.Shared.projitems
index 01ad69f2f8..6ef7fc1681 100644
--- a/src/mscorlib/shared/System.Private.CoreLib.Shared.projitems
+++ b/src/mscorlib/shared/System.Private.CoreLib.Shared.projitems
@@ -57,6 +57,7 @@
<Compile Include="$(MSBuildThisFileDirectory)System\Collections\IList.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Collections\IStructuralComparable.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Collections\IStructuralEquatable.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\ComponentModel\DefaultValueAttribute.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\ComponentModel\EditorBrowsableAttribute.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Configuration\Assemblies\AssemblyHashAlgorithm.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Configuration\Assemblies\AssemblyVersionCompatibility.cs"/>
@@ -274,6 +275,7 @@
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\CompilerServices\IndexerNameAttribute.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\CompilerServices\InternalsVisibleToAttribute.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\CompilerServices\INotifyCompletion.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Runtime\CompilerServices\IsConst.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\CompilerServices\IsVolatile.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\CompilerServices\IteratorStateMachineAttribute.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\CompilerServices\ITuple.cs"/>
@@ -284,8 +286,10 @@
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\CompilerServices\ReferenceAssemblyAttribute.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\CompilerServices\RuntimeCompatibilityAttribute.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\CompilerServices\RuntimeFeature.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Runtime\CompilerServices\SpecialNameAttribute.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\CompilerServices\StateMachineAttribute.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\CompilerServices\StringFreezingAttribute.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Runtime\CompilerServices\StrongBox.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\CompilerServices\SuppressIldasmAttribute.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\CompilerServices\TupleElementNamesAttribute.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\CompilerServices\TypeForwardedFromAttribute.cs"/>
@@ -303,11 +307,21 @@
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\InteropServices\UnmanagedFunctionPointerAttribute.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\InteropServices\UnmanagedType.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\InteropServices\VarEnum.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Runtime\Serialization\IDeserializationCallback.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\Serialization\IFormatterConverter.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\Serialization\IObjectReference.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\Serialization\ISafeSerializationData.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\Serialization\ISerializable.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Runtime\Serialization\OnDeserializedAttribute.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Runtime\Serialization\OnDeserializingAttribute.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Runtime\Serialization\OnSerializedAttribute.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Runtime\Serialization\OnSerializingAttribute.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Runtime\Serialization\OptionalFieldAttribute.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\Serialization\SafeSerializationEventArgs.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Runtime\Serialization\SerializationException.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Runtime\Serialization\SerializationInfoEnumerator.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Runtime\Serialization\StreamingContext.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Runtime\Versioning\NonVersionableAttribute.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Runtime\Versioning\TargetFrameworkAttribute.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Security\AllowPartiallyTrustedCallersAttribute.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Security\CryptographicException.cs"/>
@@ -341,12 +355,32 @@
<Compile Include="$(MSBuildThisFileDirectory)System\Text\UTF8Encoding.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Text\UnicodeEncoding.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\ThreadAttributes.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Threading\AbandonedMutexException.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Threading\ApartmentState.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Threading\AsyncLocal.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Threading\AutoResetEvent.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Threading\DeferredDisposableLifetime.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Threading\EventResetMode.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Threading\ExecutionContext.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Threading\LazyThreadSafetyMode.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Threading\LockRecursionException.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Threading\ManualResetEvent.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Threading\ParameterizedThreadStart.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Threading\SemaphoreFullException.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Threading\SendOrPostCallback.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Threading\SynchronizationLockException.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Threading\Tasks\TaskCanceledException.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Threading\Tasks\TaskExtensions.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Threading\Tasks\TaskSchedulerException.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Threading\ThreadAbortException.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Threading\ThreadPriority.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Threading\ThreadStart.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Threading\ThreadStartException.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Threading\ThreadState.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Threading\ThreadStateException.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\Threading\Timeout.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Threading\TimeoutHelper.cs"/>
+ <Compile Include="$(MSBuildThisFileDirectory)System\Threading\WaitHandleCannotBeOpenedException.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\ThreadStaticAttribute.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\TimeoutException.cs"/>
<Compile Include="$(MSBuildThisFileDirectory)System\TimeZone.cs"/>
diff --git a/src/mscorlib/shared/System/ComponentModel/DefaultValueAttribute.cs b/src/mscorlib/shared/System/ComponentModel/DefaultValueAttribute.cs
new file mode 100644
index 0000000000..3cdc907297
--- /dev/null
+++ b/src/mscorlib/shared/System/ComponentModel/DefaultValueAttribute.cs
@@ -0,0 +1,228 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+using System;
+using System.ComponentModel;
+using System.Diagnostics;
+using System.Globalization;
+using System.Runtime.InteropServices;
+
+namespace System.ComponentModel
+{
+ /// <devdoc>
+ /// <para>Specifies the default value for a property.</para>
+ /// </devdoc>
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1019:DefineAccessorsForAttributeArguments")]
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Performance", "CA1813:AvoidUnsealedAttributes")]
+ [AttributeUsage(AttributeTargets.All)]
+ public class DefaultValueAttribute : Attribute
+ {
+ /// <devdoc>
+ /// This is the default value.
+ /// </devdoc>
+ private object _value;
+
+ /// <devdoc>
+ /// <para>Initializes a new instance of the <see cref='System.ComponentModel.DefaultValueAttribute'/> class, converting the
+ /// specified value to the
+ /// specified type, and using the U.S. English culture as the
+ /// translation
+ /// context.</para>
+ /// </devdoc>
+ public DefaultValueAttribute(Type type, string value)
+ {
+ // The try/catch here is because attributes should never throw exceptions. We would fail to
+ // load an otherwise normal class.
+ try
+ {
+ if (type.IsSubclassOf(typeof(Enum)))
+ {
+ _value = Enum.Parse(type, value, true);
+ }
+ else if (type == typeof(TimeSpan))
+ {
+ _value = TimeSpan.Parse(value);
+ }
+ else
+ {
+ _value = Convert.ChangeType(value, type, CultureInfo.InvariantCulture);
+ }
+ }
+ catch
+ {
+ }
+ }
+
+ /// <devdoc>
+ /// <para>Initializes a new instance of the <see cref='System.ComponentModel.DefaultValueAttribute'/> class using a Unicode
+ /// character.</para>
+ /// </devdoc>
+ public DefaultValueAttribute(char value)
+ {
+ _value = value;
+ }
+ /// <devdoc>
+ /// <para>Initializes a new instance of the <see cref='System.ComponentModel.DefaultValueAttribute'/> class using an 8-bit unsigned
+ /// integer.</para>
+ /// </devdoc>
+ public DefaultValueAttribute(byte value)
+ {
+ _value = value;
+ }
+ /// <devdoc>
+ /// <para>Initializes a new instance of the <see cref='System.ComponentModel.DefaultValueAttribute'/> class using a 16-bit signed
+ /// integer.</para>
+ /// </devdoc>
+ public DefaultValueAttribute(short value)
+ {
+ _value = value;
+ }
+ /// <devdoc>
+ /// <para>Initializes a new instance of the <see cref='System.ComponentModel.DefaultValueAttribute'/> class using a 32-bit signed
+ /// integer.</para>
+ /// </devdoc>
+ public DefaultValueAttribute(int value)
+ {
+ _value = value;
+ }
+ /// <devdoc>
+ /// <para>Initializes a new instance of the <see cref='System.ComponentModel.DefaultValueAttribute'/> class using a 64-bit signed
+ /// integer.</para>
+ /// </devdoc>
+ public DefaultValueAttribute(long value)
+ {
+ _value = value;
+ }
+ /// <devdoc>
+ /// <para>Initializes a new instance of the <see cref='System.ComponentModel.DefaultValueAttribute'/> class using a
+ /// single-precision floating point
+ /// number.</para>
+ /// </devdoc>
+ public DefaultValueAttribute(float value)
+ {
+ _value = value;
+ }
+ /// <devdoc>
+ /// <para>Initializes a new instance of the <see cref='System.ComponentModel.DefaultValueAttribute'/> class using a
+ /// double-precision floating point
+ /// number.</para>
+ /// </devdoc>
+ public DefaultValueAttribute(double value)
+ {
+ _value = value;
+ }
+ /// <devdoc>
+ /// <para>Initializes a new instance of the <see cref='System.ComponentModel.DefaultValueAttribute'/> class using a <see cref='System.Boolean'/>
+ /// value.</para>
+ /// </devdoc>
+ public DefaultValueAttribute(bool value)
+ {
+ _value = value;
+ }
+ /// <devdoc>
+ /// <para>Initializes a new instance of the <see cref='System.ComponentModel.DefaultValueAttribute'/> class using a <see cref='System.String'/>.</para>
+ /// </devdoc>
+ public DefaultValueAttribute(string value)
+ {
+ _value = value;
+ }
+
+ /// <devdoc>
+ /// <para>Initializes a new instance of the <see cref='System.ComponentModel.DefaultValueAttribute'/>
+ /// class.</para>
+ /// </devdoc>
+ public DefaultValueAttribute(object value)
+ {
+ _value = value;
+ }
+
+ /// <devdoc>
+ /// <para>Initializes a new instance of the <see cref='System.ComponentModel.DefaultValueAttribute'/> class using a <see cref='System.SByte'/>
+ /// value.</para>
+ /// </devdoc>
+ [CLSCompliant(false)]
+ public DefaultValueAttribute(sbyte value)
+ {
+ _value = value;
+ }
+
+ /// <devdoc>
+ /// <para>Initializes a new instance of the <see cref='System.ComponentModel.DefaultValueAttribute'/> class using a <see cref='System.UInt16'/>
+ /// value.</para>
+ /// </devdoc>
+ [CLSCompliant(false)]
+ public DefaultValueAttribute(ushort value)
+ {
+ _value = value;
+ }
+
+ /// <devdoc>
+ /// <para>Initializes a new instance of the <see cref='System.ComponentModel.DefaultValueAttribute'/> class using a <see cref='System.UInt32'/>
+ /// value.</para>
+ /// </devdoc>
+ [CLSCompliant(false)]
+ public DefaultValueAttribute(uint value)
+ {
+ _value = value;
+ }
+
+ /// <devdoc>
+ /// <para>Initializes a new instance of the <see cref='System.ComponentModel.DefaultValueAttribute'/> class using a <see cref='System.UInt64'/>
+ /// value.</para>
+ /// </devdoc>
+ [CLSCompliant(false)]
+ public DefaultValueAttribute(ulong value)
+ {
+ _value = value;
+ }
+
+ /// <devdoc>
+ /// <para>
+ /// Gets the default value of the property this
+ /// attribute is
+ /// bound to.
+ /// </para>
+ /// </devdoc>
+ public virtual object Value
+ {
+ get
+ {
+ return _value;
+ }
+ }
+
+ public override bool Equals(object obj)
+ {
+ if (obj == this)
+ {
+ return true;
+ }
+
+ DefaultValueAttribute other = obj as DefaultValueAttribute;
+
+ if (other != null)
+ {
+ if (Value != null)
+ {
+ return Value.Equals(other.Value);
+ }
+ else
+ {
+ return (other.Value == null);
+ }
+ }
+ return false;
+ }
+
+ public override int GetHashCode()
+ {
+ return base.GetHashCode();
+ }
+
+ protected void SetValue(object value)
+ {
+ _value = value;
+ }
+ }
+}
diff --git a/src/mscorlib/shared/System/IO/EndOfStreamException.cs b/src/mscorlib/shared/System/IO/EndOfStreamException.cs
index 52ab22cfbb..7c4b2b744f 100644
--- a/src/mscorlib/shared/System/IO/EndOfStreamException.cs
+++ b/src/mscorlib/shared/System/IO/EndOfStreamException.cs
@@ -6,10 +6,6 @@ using System.Runtime.Serialization;
namespace System.IO
{
-#if PROJECTN
- [Internal.Runtime.CompilerServices.RelocatedType("System.IO")]
- [Internal.Runtime.CompilerServices.RelocatedType("System.Runtime.Extensions")]
-#endif
[Serializable]
public class EndOfStreamException : IOException
{
diff --git a/src/mscorlib/shared/System/IO/FileAccess.cs b/src/mscorlib/shared/System/IO/FileAccess.cs
index eaa94f3483..c6e583b34a 100644
--- a/src/mscorlib/shared/System/IO/FileAccess.cs
+++ b/src/mscorlib/shared/System/IO/FileAccess.cs
@@ -11,9 +11,6 @@ namespace System.IO
//
[Serializable]
[Flags]
-#if PROJECTN
- [Internal.Runtime.CompilerServices.RelocatedTypeAttribute("System.IO.FileSystem.Primitives")]
-#endif
public enum FileAccess
{
// Specifies read access to the file. Data can be read from the file and
diff --git a/src/mscorlib/shared/System/IO/FileMode.cs b/src/mscorlib/shared/System/IO/FileMode.cs
index 73ef68bb66..77f2fe6f20 100644
--- a/src/mscorlib/shared/System/IO/FileMode.cs
+++ b/src/mscorlib/shared/System/IO/FileMode.cs
@@ -12,9 +12,6 @@ namespace System.IO
// to the end of the file). To truncate a file or create it if it doesn't
// exist, use Create.
//
-#if PROJECTN
- [Internal.Runtime.CompilerServices.RelocatedTypeAttribute("System.IO.FileSystem.Primitives")]
-#endif
public enum FileMode
{
// Creates a new file. An exception is raised if the file already exists.
diff --git a/src/mscorlib/shared/System/IO/FileOptions.cs b/src/mscorlib/shared/System/IO/FileOptions.cs
index d9188dd44e..ae8396a588 100644
--- a/src/mscorlib/shared/System/IO/FileOptions.cs
+++ b/src/mscorlib/shared/System/IO/FileOptions.cs
@@ -11,9 +11,6 @@ namespace System.IO
// We didn't expose a number of these values because we didn't believe
// a number of them made sense in managed code, at least not yet.
[Flags]
-#if PROJECTN
- [Internal.Runtime.CompilerServices.RelocatedTypeAttribute("System.IO.FileSystem")]
-#endif
public enum FileOptions
{
// NOTE: any change to FileOptions enum needs to be
diff --git a/src/mscorlib/shared/System/IO/FileShare.cs b/src/mscorlib/shared/System/IO/FileShare.cs
index a96ae5cff7..e9b9b5e32f 100644
--- a/src/mscorlib/shared/System/IO/FileShare.cs
+++ b/src/mscorlib/shared/System/IO/FileShare.cs
@@ -14,9 +14,6 @@ namespace System.IO
// FILE_SHARE_WRITE, and FILE_SHARE_DELETE in winnt.h
//
[Flags]
-#if PROJECTN
- [Internal.Runtime.CompilerServices.RelocatedTypeAttribute("System.IO.FileSystem.Primitives")]
-#endif
public enum FileShare
{
// No sharing. Any request to open the file (by this process or another
diff --git a/src/mscorlib/shared/System/IO/FileStream.cs b/src/mscorlib/shared/System/IO/FileStream.cs
index 7545d0c696..7db8518435 100644
--- a/src/mscorlib/shared/System/IO/FileStream.cs
+++ b/src/mscorlib/shared/System/IO/FileStream.cs
@@ -9,9 +9,6 @@ using System.Diagnostics;
namespace System.IO
{
-#if PROJECTN
- [Internal.Runtime.CompilerServices.RelocatedTypeAttribute("System.IO.FileSystem")]
-#endif
public partial class FileStream : Stream
{
private const FileShare DefaultShare = FileShare.Read;
diff --git a/src/mscorlib/shared/System/IO/Path.cs b/src/mscorlib/shared/System/IO/Path.cs
index d26530b8e2..b3a8783c32 100644
--- a/src/mscorlib/shared/System/IO/Path.cs
+++ b/src/mscorlib/shared/System/IO/Path.cs
@@ -11,9 +11,6 @@ namespace System.IO
// Provides methods for processing file system strings in a cross-platform manner.
// Most of the methods don't do a complete parsing (such as examining a UNC hostname),
// but they will handle most string operations.
-#if PROJECTN
- [Internal.Runtime.CompilerServices.RelocatedTypeAttribute("System.Runtime.Extensions")]
-#endif
public static partial class Path
{
// Public static readonly variant of the separators. The Path implementation itself is using
diff --git a/src/mscorlib/shared/System/Progress.cs b/src/mscorlib/shared/System/Progress.cs
index 48a6a35e75..755e7719fe 100644
--- a/src/mscorlib/shared/System/Progress.cs
+++ b/src/mscorlib/shared/System/Progress.cs
@@ -19,9 +19,6 @@ namespace System
/// when the instance is constructed. If there is no current SynchronizationContext
/// at the time of construction, the callbacks will be invoked on the ThreadPool.
/// </remarks>
-#if PROJECTN
- [Internal.Runtime.CompilerServices.RelocatedType("System.Runtime.Extensions")]
-#endif
public class Progress<T> : IProgress<T>
{
/// <summary>The synchronization context captured upon construction. This will never be null.</summary>
diff --git a/src/mscorlib/shared/System/Runtime/CompilerServices/IsConst.cs b/src/mscorlib/shared/System/Runtime/CompilerServices/IsConst.cs
new file mode 100644
index 0000000000..7f948b608a
--- /dev/null
+++ b/src/mscorlib/shared/System/Runtime/CompilerServices/IsConst.cs
@@ -0,0 +1,10 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+namespace System.Runtime.CompilerServices
+{
+ public static partial class IsConst
+ {
+ }
+}
diff --git a/src/mscorlib/shared/System/Runtime/CompilerServices/SpecialNameAttribute.cs b/src/mscorlib/shared/System/Runtime/CompilerServices/SpecialNameAttribute.cs
new file mode 100644
index 0000000000..b18e62895f
--- /dev/null
+++ b/src/mscorlib/shared/System/Runtime/CompilerServices/SpecialNameAttribute.cs
@@ -0,0 +1,12 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+namespace System.Runtime.CompilerServices
+{
+ [AttributeUsage(AttributeTargets.Class | AttributeTargets.Method | AttributeTargets.Property | AttributeTargets.Field | AttributeTargets.Event | AttributeTargets.Struct)]
+ public sealed class SpecialNameAttribute : Attribute
+ {
+ public SpecialNameAttribute() { }
+ }
+}
diff --git a/src/mscorlib/shared/System/Runtime/CompilerServices/StrongBox.cs b/src/mscorlib/shared/System/Runtime/CompilerServices/StrongBox.cs
new file mode 100644
index 0000000000..0a1a565f54
--- /dev/null
+++ b/src/mscorlib/shared/System/Runtime/CompilerServices/StrongBox.cs
@@ -0,0 +1,59 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+namespace System.Runtime.CompilerServices
+{
+ /// <summary>
+ /// Holds a reference to a value.
+ /// </summary>
+ /// <typeparam name="T">The type of the value that the <see cref = "StrongBox{T}"></see> references.</typeparam>
+ public class StrongBox<T> : IStrongBox
+ {
+ /// <summary>
+ /// Gets the strongly typed value associated with the <see cref = "StrongBox{T}"></see>
+ /// <remarks>This is explicitly exposed as a field instead of a property to enable loading the address of the field.</remarks>
+ /// </summary>
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1051:DoNotDeclareVisibleInstanceFields")]
+ public T Value;
+
+ /// <summary>
+ /// Initializes a new StrongBox which can receive a value when used in a reference call.
+ /// </summary>
+ public StrongBox()
+ {
+ }
+
+ /// <summary>
+ /// Initializes a new <see cref = "StrongBox{T}"></see> with the specified value.
+ /// </summary>
+ /// <param name="value">A value that the <see cref = "StrongBox{T}"></see> will reference.</param>
+ public StrongBox(T value)
+ {
+ Value = value;
+ }
+
+ object IStrongBox.Value
+ {
+ get
+ {
+ return Value;
+ }
+ set
+ {
+ Value = (T)value;
+ }
+ }
+ }
+
+ /// <summary>
+ /// Defines a property for accessing the value that an object references.
+ /// </summary>
+ public interface IStrongBox
+ {
+ /// <summary>
+ /// Gets or sets the value the object references.
+ /// </summary>
+ object Value { get; set; }
+ }
+}
diff --git a/src/mscorlib/shared/System/Runtime/Serialization/IDeserializationCallback.cs b/src/mscorlib/shared/System/Runtime/Serialization/IDeserializationCallback.cs
new file mode 100644
index 0000000000..a1c1671a8b
--- /dev/null
+++ b/src/mscorlib/shared/System/Runtime/Serialization/IDeserializationCallback.cs
@@ -0,0 +1,11 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+namespace System.Runtime.Serialization
+{
+ public interface IDeserializationCallback
+ {
+ void OnDeserialization(object sender);
+ }
+}
diff --git a/src/mscorlib/shared/System/Runtime/Serialization/OnDeserializedAttribute.cs b/src/mscorlib/shared/System/Runtime/Serialization/OnDeserializedAttribute.cs
new file mode 100644
index 0000000000..408a55ccf9
--- /dev/null
+++ b/src/mscorlib/shared/System/Runtime/Serialization/OnDeserializedAttribute.cs
@@ -0,0 +1,11 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+namespace System.Runtime.Serialization
+{
+ [AttributeUsage(AttributeTargets.Method, Inherited = false)]
+ public sealed class OnDeserializedAttribute : Attribute
+ {
+ }
+}
diff --git a/src/mscorlib/shared/System/Runtime/Serialization/OnDeserializingAttribute.cs b/src/mscorlib/shared/System/Runtime/Serialization/OnDeserializingAttribute.cs
new file mode 100644
index 0000000000..162857e8d3
--- /dev/null
+++ b/src/mscorlib/shared/System/Runtime/Serialization/OnDeserializingAttribute.cs
@@ -0,0 +1,11 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+namespace System.Runtime.Serialization
+{
+ [AttributeUsage(AttributeTargets.Method, Inherited = false)]
+ public sealed class OnDeserializingAttribute : Attribute
+ {
+ }
+}
diff --git a/src/mscorlib/shared/System/Runtime/Serialization/OnSerializedAttribute.cs b/src/mscorlib/shared/System/Runtime/Serialization/OnSerializedAttribute.cs
new file mode 100644
index 0000000000..020dd0257c
--- /dev/null
+++ b/src/mscorlib/shared/System/Runtime/Serialization/OnSerializedAttribute.cs
@@ -0,0 +1,11 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+namespace System.Runtime.Serialization
+{
+ [AttributeUsage(AttributeTargets.Method, Inherited = false)]
+ public sealed class OnSerializedAttribute : Attribute
+ {
+ }
+}
diff --git a/src/mscorlib/shared/System/Runtime/Serialization/OnSerializingAttribute.cs b/src/mscorlib/shared/System/Runtime/Serialization/OnSerializingAttribute.cs
new file mode 100644
index 0000000000..8dc8af3f23
--- /dev/null
+++ b/src/mscorlib/shared/System/Runtime/Serialization/OnSerializingAttribute.cs
@@ -0,0 +1,11 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+namespace System.Runtime.Serialization
+{
+ [AttributeUsage(AttributeTargets.Method, Inherited = false)]
+ public sealed class OnSerializingAttribute : Attribute
+ {
+ }
+}
diff --git a/src/mscorlib/shared/System/Runtime/Serialization/OptionalFieldAttribute.cs b/src/mscorlib/shared/System/Runtime/Serialization/OptionalFieldAttribute.cs
new file mode 100644
index 0000000000..84daa539be
--- /dev/null
+++ b/src/mscorlib/shared/System/Runtime/Serialization/OptionalFieldAttribute.cs
@@ -0,0 +1,25 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+namespace System.Runtime.Serialization
+{
+ [AttributeUsage(AttributeTargets.Field, Inherited = false)]
+ public sealed class OptionalFieldAttribute : Attribute
+ {
+ private int _versionAdded = 1;
+
+ public int VersionAdded
+ {
+ get { return _versionAdded; }
+ set
+ {
+ if (value < 1)
+ {
+ throw new ArgumentException(SR.Serialization_OptionalFieldVersionValue);
+ }
+ _versionAdded = value;
+ }
+ }
+ }
+}
diff --git a/src/mscorlib/src/System/Runtime/Serialization/SerializationException.cs b/src/mscorlib/shared/System/Runtime/Serialization/SerializationException.cs
index 48de8a7b5b..a359daf4f9 100644
--- a/src/mscorlib/src/System/Runtime/Serialization/SerializationException.cs
+++ b/src/mscorlib/shared/System/Runtime/Serialization/SerializationException.cs
@@ -2,18 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-/*=============================================================================
-**
-**
-**
-** Purpose: Thrown when something goes wrong during serialization or
-** deserialization.
-**
-**
-=============================================================================*/
-
-
-using System;
using System.Runtime.Serialization;
namespace System.Runtime.Serialization
@@ -21,12 +9,12 @@ namespace System.Runtime.Serialization
[Serializable]
public class SerializationException : SystemException
{
- private static String _nullMessage = SR.Arg_SerializationException;
+ private static String s_nullMessage = SR.SerializationException;
// Creates a new SerializationException with its message
// string set to a default message.
public SerializationException()
- : base(_nullMessage)
+ : base(s_nullMessage)
{
HResult = __HResults.COR_E_SERIALIZATION;
}
@@ -37,12 +25,14 @@ namespace System.Runtime.Serialization
HResult = __HResults.COR_E_SERIALIZATION;
}
- public SerializationException(String message, Exception innerException) : base(message, innerException)
+ public SerializationException(String message, Exception innerException)
+ : base(message, innerException)
{
HResult = __HResults.COR_E_SERIALIZATION;
}
- protected SerializationException(SerializationInfo info, StreamingContext context) : base(info, context)
+ protected SerializationException(SerializationInfo info, StreamingContext context)
+ : base(info, context)
{
}
}
diff --git a/src/mscorlib/shared/System/Runtime/Serialization/SerializationInfoEnumerator.cs b/src/mscorlib/shared/System/Runtime/Serialization/SerializationInfoEnumerator.cs
new file mode 100644
index 0000000000..6399510736
--- /dev/null
+++ b/src/mscorlib/shared/System/Runtime/Serialization/SerializationInfoEnumerator.cs
@@ -0,0 +1,127 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+using System.Collections;
+using System.Diagnostics;
+
+namespace System.Runtime.Serialization
+{
+ public struct SerializationEntry
+ {
+ private string _name;
+ private object _value;
+ private Type _type;
+
+ internal SerializationEntry(string entryName, object entryValue, Type entryType)
+ {
+ _name = entryName;
+ _value = entryValue;
+ _type = entryType;
+ }
+
+ public object Value => _value;
+ public string Name => _name;
+ public Type ObjectType => _type;
+ }
+
+ public sealed class SerializationInfoEnumerator : IEnumerator
+ {
+ private readonly string[] _members;
+ private readonly object[] _data;
+ private readonly Type[] _types;
+ private readonly int _numItems;
+ private int _currItem;
+ private bool _current;
+
+ internal SerializationInfoEnumerator(string[] members, object[] info, Type[] types, int numItems)
+ {
+ Debug.Assert(members != null, "[SerializationInfoEnumerator.ctor]members!=null");
+ Debug.Assert(info != null, "[SerializationInfoEnumerator.ctor]info!=null");
+ Debug.Assert(types != null, "[SerializationInfoEnumerator.ctor]types!=null");
+ Debug.Assert(numItems >= 0, "[SerializationInfoEnumerator.ctor]numItems>=0");
+ Debug.Assert(members.Length >= numItems, "[SerializationInfoEnumerator.ctor]members.Length>=numItems");
+ Debug.Assert(info.Length >= numItems, "[SerializationInfoEnumerator.ctor]info.Length>=numItems");
+ Debug.Assert(types.Length >= numItems, "[SerializationInfoEnumerator.ctor]types.Length>=numItems");
+
+ _members = members;
+ _data = info;
+ _types = types;
+
+ //The MoveNext semantic is much easier if we enforce that [0.._numItems] are valid entries
+ //in the enumerator, hence we subtract 1.
+ _numItems = numItems - 1;
+ _currItem = -1;
+ _current = false;
+ }
+
+ public bool MoveNext()
+ {
+ if (_currItem < _numItems)
+ {
+ _currItem++;
+ _current = true;
+ }
+ else
+ {
+ _current = false;
+ }
+
+ return _current;
+ }
+
+ object IEnumerator.Current => Current;
+
+ public SerializationEntry Current
+ {
+ get
+ {
+ if (_current == false)
+ {
+ throw new InvalidOperationException(SR.InvalidOperation_EnumOpCantHappen);
+ }
+ return new SerializationEntry(_members[_currItem], _data[_currItem], _types[_currItem]);
+ }
+ }
+
+ public void Reset()
+ {
+ _currItem = -1;
+ _current = false;
+ }
+
+ public string Name
+ {
+ get
+ {
+ if (_current == false)
+ {
+ throw new InvalidOperationException(SR.InvalidOperation_EnumOpCantHappen);
+ }
+ return _members[_currItem];
+ }
+ }
+ public object Value
+ {
+ get
+ {
+ if (_current == false)
+ {
+ throw new InvalidOperationException(SR.InvalidOperation_EnumOpCantHappen);
+ }
+ return _data[_currItem];
+ }
+ }
+ public Type ObjectType
+ {
+ get
+ {
+ if (_current == false)
+ {
+ throw new InvalidOperationException(SR.InvalidOperation_EnumOpCantHappen);
+ }
+ return _types[_currItem];
+ }
+ }
+ }
+}
diff --git a/src/mscorlib/shared/System/Runtime/Serialization/StreamingContext.cs b/src/mscorlib/shared/System/Runtime/Serialization/StreamingContext.cs
new file mode 100644
index 0000000000..1026a87d1e
--- /dev/null
+++ b/src/mscorlib/shared/System/Runtime/Serialization/StreamingContext.cs
@@ -0,0 +1,53 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+namespace System.Runtime.Serialization
+{
+ [Serializable]
+ public struct StreamingContext
+ {
+ private readonly object _additionalContext;
+ private readonly StreamingContextStates _state;
+
+ public StreamingContext(StreamingContextStates state) : this(state, null)
+ {
+ }
+
+ public StreamingContext(StreamingContextStates state, object additional)
+ {
+ _state = state;
+ _additionalContext = additional;
+ }
+
+ public override bool Equals(object obj)
+ {
+ if (!(obj is StreamingContext))
+ {
+ return false;
+ }
+ StreamingContext ctx = (StreamingContext)obj;
+ return ctx._additionalContext == _additionalContext && ctx._state == _state;
+ }
+
+ public override int GetHashCode() => (int)_state;
+
+ public StreamingContextStates State => _state;
+
+ public object Context => _additionalContext;
+ }
+
+ [Flags]
+ public enum StreamingContextStates
+ {
+ CrossProcess = 0x01,
+ CrossMachine = 0x02,
+ File = 0x04,
+ Persistence = 0x08,
+ Remoting = 0x10,
+ Other = 0x20,
+ Clone = 0x40,
+ CrossAppDomain = 0x80,
+ All = 0xFF,
+ }
+}
diff --git a/src/mscorlib/src/System/Runtime/Versioning/NonVersionableAttribute.cs b/src/mscorlib/shared/System/Runtime/Versioning/NonVersionableAttribute.cs
index e4809953bc..e4809953bc 100644
--- a/src/mscorlib/src/System/Runtime/Versioning/NonVersionableAttribute.cs
+++ b/src/mscorlib/shared/System/Runtime/Versioning/NonVersionableAttribute.cs
diff --git a/src/mscorlib/src/System/Threading/AbandonedMutexException.cs b/src/mscorlib/shared/System/Threading/AbandonedMutexException.cs
index 60808ad18f..8056a3b330 100644
--- a/src/mscorlib/src/System/Threading/AbandonedMutexException.cs
+++ b/src/mscorlib/shared/System/Threading/AbandonedMutexException.cs
@@ -1,28 +1,24 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
-//
//
// AbandonedMutexException
// Thrown when a wait completes because one or more mutexes was abandoned.
// AbandonedMutexs indicate serious error in user code or machine state.
////////////////////////////////////////////////////////////////////////////////
-
using System;
-using System.Runtime.Serialization;
using System.Threading;
using System.Runtime.InteropServices;
+using System.Runtime.Serialization;
namespace System.Threading
{
[Serializable]
- [ComVisibleAttribute(false)]
public class AbandonedMutexException : SystemException
{
- private int m_MutexIndex = -1;
- private Mutex m_Mutex = null;
+ private int _mutexIndex = -1;
+ private Mutex _mutex = null;
public AbandonedMutexException()
: base(SR.Threading_AbandonedMutexException)
@@ -63,32 +59,19 @@ namespace System.Threading
SetupException(location, handle);
}
- private void SetupException(int location, WaitHandle handle)
+ protected AbandonedMutexException(SerializationInfo info, StreamingContext context)
+ : base(info, context)
{
- m_MutexIndex = location;
- if (handle != null)
- m_Mutex = handle as Mutex;
}
- protected AbandonedMutexException(SerializationInfo info, StreamingContext context) : base(info, context)
- {
- }
-
- public Mutex Mutex
+ private void SetupException(int location, WaitHandle handle)
{
- get
- {
- return m_Mutex;
- }
+ _mutexIndex = location;
+ if (handle != null)
+ _mutex = handle as Mutex;
}
- public int MutexIndex
- {
- get
- {
- return m_MutexIndex;
- }
- }
+ public Mutex Mutex => _mutex;
+ public int MutexIndex => _mutexIndex;
}
}
-
diff --git a/src/mscorlib/src/System/Threading/ApartmentState.cs b/src/mscorlib/shared/System/Threading/ApartmentState.cs
index 445d70f1c6..47c1677cb5 100644
--- a/src/mscorlib/src/System/Threading/ApartmentState.cs
+++ b/src/mscorlib/shared/System/Threading/ApartmentState.cs
@@ -2,19 +2,8 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-//
-/*=============================================================================
-**
-**
-**
-** Purpose: Enum to represent the different threading models
-**
-**
-=============================================================================*/
-
namespace System.Threading
{
- [Serializable]
public enum ApartmentState
{
/*=========================================================================
diff --git a/src/mscorlib/src/System/Threading/AsyncLocal.cs b/src/mscorlib/shared/System/Threading/AsyncLocal.cs
index b1d90b3983..59c8fb3c88 100644
--- a/src/mscorlib/src/System/Threading/AsyncLocal.cs
+++ b/src/mscorlib/shared/System/Threading/AsyncLocal.cs
@@ -2,11 +2,8 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-using System;
using System.Collections.Generic;
using System.Diagnostics;
-using System.Diagnostics.Contracts;
-using System.Security;
namespace System.Threading
{
diff --git a/src/mscorlib/src/System/Threading/AutoResetEvent.cs b/src/mscorlib/shared/System/Threading/AutoResetEvent.cs
index 41a012cf58..8320d7ad5a 100644
--- a/src/mscorlib/src/System/Threading/AutoResetEvent.cs
+++ b/src/mscorlib/shared/System/Threading/AutoResetEvent.cs
@@ -2,19 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-//
-/*=============================================================================
-**
-**
-**
-** Purpose: An example of a WaitHandle class
-**
-**
-=============================================================================*/
-
-using System;
-using System.Runtime.InteropServices;
-
namespace System.Threading
{
public sealed class AutoResetEvent : EventWaitHandle
diff --git a/src/mscorlib/src/System/Threading/EventResetMode.cs b/src/mscorlib/shared/System/Threading/EventResetMode.cs
index 89bf7a769e..7aac0f51eb 100644
--- a/src/mscorlib/src/System/Threading/EventResetMode.cs
+++ b/src/mscorlib/shared/System/Threading/EventResetMode.cs
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-//
/*=============================================================================
**
** Enum: EventResetMode
@@ -13,12 +12,8 @@
**
=============================================================================*/
-
-using System.Runtime.InteropServices;
-
namespace System.Threading
{
- [ComVisibleAttribute(false)]
public enum EventResetMode
{
AutoReset = 0,
diff --git a/src/mscorlib/src/System/Threading/ExecutionContext.cs b/src/mscorlib/shared/System/Threading/ExecutionContext.cs
index ec125ad8d2..67857e9b11 100644
--- a/src/mscorlib/src/System/Threading/ExecutionContext.cs
+++ b/src/mscorlib/shared/System/Threading/ExecutionContext.cs
@@ -1,29 +1,22 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
+
/*============================================================
**
**
**
** Purpose: Capture execution context for a thread
**
-**
+**
===========================================================*/
-using System;
-using System.Security;
-using System.Runtime.Remoting;
-using System.Collections;
-using System.Collections.Generic;
-using System.Reflection;
-using System.Runtime.ExceptionServices;
-using System.Runtime.Serialization;
-using System.Runtime.InteropServices;
-using System.Runtime.CompilerServices;
-using System.Runtime.ConstrainedExecution;
using System.Diagnostics;
using System.Diagnostics.Contracts;
-using System.Diagnostics.CodeAnalysis;
+using System.Runtime.ExceptionServices;
+using System.Runtime.Serialization;
+
+using Thread = Internal.Runtime.Augments.RuntimeThread;
namespace System.Threading
{
@@ -147,7 +140,6 @@ namespace System.Threading
return executionContext != null && executionContext.m_isFlowSuppressed;
}
- [HandleProcessCorruptedStateExceptions]
public static void Run(ExecutionContext executionContext, ContextCallback callback, Object state)
{
if (executionContext == null)
@@ -165,7 +157,7 @@ namespace System.Threading
{
// Note: we have a "catch" rather than a "finally" because we want
// to stop the first pass of EH here. That way we can restore the previous
- // context before any of our callers' EH filters run. That means we need to
+ // context before any of our callers' EH filters run. That means we need to
// end the scope separately in the non-exceptional case below.
ecsw.Undo(currentThread);
throw;
@@ -190,7 +182,7 @@ namespace System.Threading
}
}
- static internal void EstablishCopyOnWriteScope(Thread currentThread, ref ExecutionContextSwitcher ecsw)
+ internal static void EstablishCopyOnWriteScope(Thread currentThread, ref ExecutionContextSwitcher ecsw)
{
Debug.Assert(currentThread == Thread.CurrentThread);
@@ -198,7 +190,6 @@ namespace System.Threading
ecsw.m_sc = currentThread.SynchronizationContext;
}
- [HandleProcessCorruptedStateExceptions]
private static void OnContextChanged(ExecutionContext previous, ExecutionContext current)
{
Debug.Assert(previous != null);
@@ -377,5 +368,3 @@ namespace System.Threading
}
}
}
-
-
diff --git a/src/mscorlib/shared/System/Threading/LazyThreadSafetyMode.cs b/src/mscorlib/shared/System/Threading/LazyThreadSafetyMode.cs
new file mode 100644
index 0000000000..2d13f23762
--- /dev/null
+++ b/src/mscorlib/shared/System/Threading/LazyThreadSafetyMode.cs
@@ -0,0 +1,44 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+//
+// a set of lightweight static helpers for lazy initialization.
+//
+// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+
+namespace System.Threading
+{
+ /// <summary>
+ /// Specifies how a <see cref="T:System.Threading.Lazy{T}"/> instance should synchronize access among multiple threads.
+ /// </summary>
+ public enum LazyThreadSafetyMode
+ {
+ /// <summary>
+ /// This mode makes no guarantees around the thread-safety of the <see cref="T:System.Threading.Lazy{T}"/> instance. If used from multiple threads, the behavior of the <see cref="T:System.Threading.Lazy{T}"/> is undefined.
+ /// This mode should be used when a <see cref="T:System.Threading.Lazy{T}"/> is guaranteed to never be initialized from more than one thread simultaneously and high performance is crucial.
+ /// If valueFactory throws an exception when the <see cref="T:System.Threading.Lazy{T}"/> is initialized, the exception will be cached and returned on subsequent accesses to Value. Also, if valueFactory recursively
+ /// accesses Value on this <see cref="T:System.Threading.Lazy{T}"/> instance, a <see cref="T:System.InvalidOperationException"/> will be thrown.
+ /// </summary>
+ None,
+
+ /// <summary>
+ /// When multiple threads attempt to simultaneously initialize a <see cref="T:System.Threading.Lazy{T}"/> instance, this mode allows each thread to execute the
+ /// valueFactory but only the first thread to complete initialization will be allowed to set the final value of the <see cref="T:System.Threading.Lazy{T}"/>.
+ /// Once initialized successfully, any future calls to Value will return the cached result. If valueFactory throws an exception on any thread, that exception will be
+ /// propagated out of Value. If any thread executes valueFactory without throwing an exception and, therefore, successfully sets the value, that value will be returned on
+ /// subsequent accesses to Value from any thread. If no thread succeeds in setting the value, IsValueCreated will remain false and subsequent accesses to Value will result in
+ /// the valueFactory delegate re-executing. Also, if valueFactory recursively accesses Value on this <see cref="T:System.Threading.Lazy{T}"/> instance, an exception will NOT be thrown.
+ /// </summary>
+ PublicationOnly,
+
+ /// <summary>
+ /// This mode uses locks to ensure that only a single thread can initialize a <see cref="T:System.Threading.Lazy{T}"/> instance in a thread-safe manner. In general,
+ /// if this mode is used in conjunction with a <see cref="T:System.Threading.Lazy{T}"/> valueFactory delegate that uses locks internally, a deadlock can occur if not
+ /// handled carefully. If valueFactory throws an exception when the <see cref="T:System.Threading.Lazy{T}"/> is initialized, the exception will be cached and returned on
+ /// subsequent accesses to Value. Also, if valueFactory recursively accesses Value on this <see cref="T:System.Threading.Lazy{T}"/> instance, a <see cref="T:System.InvalidOperationException"/> will be thrown.
+ /// </summary>
+ ExecutionAndPublication
+ }
+}
diff --git a/src/mscorlib/src/System/Threading/LockRecursionException.cs b/src/mscorlib/shared/System/Threading/LockRecursionException.cs
index 5624282a40..2f296cb14e 100644
--- a/src/mscorlib/src/System/Threading/LockRecursionException.cs
+++ b/src/mscorlib/shared/System/Threading/LockRecursionException.cs
@@ -2,28 +2,28 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-/*============================================================
-//
-//
-//
-// Purpose:
-// This exception represents a failed attempt to recursively
-// acquire a lock, because the particular lock kind doesn't
-// support it in its current state.
-============================================================*/
-
using System;
using System.Runtime.Serialization;
-using System.Runtime.CompilerServices;
namespace System.Threading
{
[Serializable]
public class LockRecursionException : System.Exception
{
- public LockRecursionException() { }
- public LockRecursionException(string message) : base(message) { }
+ public LockRecursionException()
+ {
+ }
+
+ public LockRecursionException(string message)
+ : base(message)
+ {
+ }
+
+ public LockRecursionException(string message, Exception innerException)
+ : base(message, innerException)
+ {
+ }
+
protected LockRecursionException(SerializationInfo info, StreamingContext context) : base(info, context) { }
- public LockRecursionException(string message, Exception innerException) : base(message, innerException) { }
}
}
diff --git a/src/mscorlib/src/System/Threading/ManualResetEvent.cs b/src/mscorlib/shared/System/Threading/ManualResetEvent.cs
index bb875c4f49..4b8d61f960 100644
--- a/src/mscorlib/src/System/Threading/ManualResetEvent.cs
+++ b/src/mscorlib/shared/System/Threading/ManualResetEvent.cs
@@ -2,19 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-//
-/*=============================================================================
-**
-**
-**
-** Purpose: An example of a WaitHandle class
-**
-**
-=============================================================================*/
-
-using System;
-using System.Runtime.InteropServices;
-
namespace System.Threading
{
public sealed class ManualResetEvent : EventWaitHandle
diff --git a/src/mscorlib/src/System/Threading/ParameterizedThreadStart.cs b/src/mscorlib/shared/System/Threading/ParameterizedThreadStart.cs
index 74f17a5d75..c0f29e8e80 100644
--- a/src/mscorlib/src/System/Threading/ParameterizedThreadStart.cs
+++ b/src/mscorlib/shared/System/Threading/ParameterizedThreadStart.cs
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-//
/*=============================================================================
**
**
@@ -13,12 +12,7 @@
**
=============================================================================*/
-
-using System.Threading;
-using System.Runtime.InteropServices;
-
namespace System.Threading
{
- [ComVisibleAttribute(false)]
public delegate void ParameterizedThreadStart(object obj);
}
diff --git a/src/mscorlib/src/System/Threading/SemaphoreFullException.cs b/src/mscorlib/shared/System/Threading/SemaphoreFullException.cs
index bbcc226f95..19ac19d6e0 100644
--- a/src/mscorlib/src/System/Threading/SemaphoreFullException.cs
+++ b/src/mscorlib/shared/System/Threading/SemaphoreFullException.cs
@@ -4,12 +4,10 @@
using System;
using System.Runtime.Serialization;
-using System.Runtime.InteropServices;
namespace System.Threading
{
[Serializable]
- [ComVisibleAttribute(false)]
public class SemaphoreFullException : SystemException
{
public SemaphoreFullException() : base(SR.Threading_SemaphoreFullException)
@@ -29,4 +27,3 @@ namespace System.Threading
}
}
}
-
diff --git a/src/mscorlib/src/System/Threading/SendOrPostCallback.cs b/src/mscorlib/shared/System/Threading/SendOrPostCallback.cs
index c08b1aba6e..6692d35ab2 100644
--- a/src/mscorlib/src/System/Threading/SendOrPostCallback.cs
+++ b/src/mscorlib/shared/System/Threading/SendOrPostCallback.cs
@@ -1,14 +1,6 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-/*============================================================
-**
-**
-**
-** Purpose: Represents a method to be called when a message is to be dispatched to a synchronization context.
-**
-**
-===========================================================*/
namespace System.Threading
{
diff --git a/src/mscorlib/src/System/Threading/SynchronizationLockException.cs b/src/mscorlib/shared/System/Threading/SynchronizationLockException.cs
index 42892fe282..120577fdcf 100644
--- a/src/mscorlib/src/System/Threading/SynchronizationLockException.cs
+++ b/src/mscorlib/shared/System/Threading/SynchronizationLockException.cs
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-//
/*=============================================================================
**
**
@@ -13,8 +12,6 @@
**
=============================================================================*/
-
-using System;
using System.Runtime.Serialization;
namespace System.Threading
@@ -45,5 +42,3 @@ namespace System.Threading
}
}
}
-
-
diff --git a/src/mscorlib/src/System/Threading/ThreadPriority.cs b/src/mscorlib/shared/System/Threading/ThreadPriority.cs
index 64b77ea89d..3b34bd5eac 100644
--- a/src/mscorlib/src/System/Threading/ThreadPriority.cs
+++ b/src/mscorlib/shared/System/Threading/ThreadPriority.cs
@@ -2,21 +2,8 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-//
-/*=============================================================================
-**
-**
-**
-** Purpose: Enums for the priorities of a Thread
-**
-**
-=============================================================================*/
-
-using System.Threading;
-
namespace System.Threading
{
- [Serializable]
public enum ThreadPriority
{
/*=========================================================================
diff --git a/src/mscorlib/src/System/Threading/ThreadStart.cs b/src/mscorlib/shared/System/Threading/ThreadStart.cs
index c065fddbcf..5532539fc7 100644
--- a/src/mscorlib/src/System/Threading/ThreadStart.cs
+++ b/src/mscorlib/shared/System/Threading/ThreadStart.cs
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-//
/*=============================================================================
**
**
@@ -13,12 +12,7 @@
**
=============================================================================*/
-using System.Threading;
-
namespace System.Threading
{
- // Define the delegate
- // NOTE: If you change the signature here, there is code in COMSynchronization
- // that invokes this delegate in native.
public delegate void ThreadStart();
}
diff --git a/src/mscorlib/src/System/Threading/ThreadStartException.cs b/src/mscorlib/shared/System/Threading/ThreadStartException.cs
index 54ec4ba535..2ff77bc5fd 100644
--- a/src/mscorlib/src/System/Threading/ThreadStartException.cs
+++ b/src/mscorlib/shared/System/Threading/ThreadStartException.cs
@@ -2,35 +2,28 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-//
-
-using System;
using System.Runtime.Serialization;
-using System.Runtime.InteropServices;
namespace System.Threading
{
[Serializable]
public sealed class ThreadStartException : SystemException
{
- private ThreadStartException()
+ internal ThreadStartException()
: base(SR.Arg_ThreadStartException)
{
HResult = __HResults.COR_E_THREADSTART;
}
- private ThreadStartException(Exception reason)
+ internal ThreadStartException(Exception reason)
: base(SR.Arg_ThreadStartException, reason)
{
HResult = __HResults.COR_E_THREADSTART;
}
- //required for serialization
internal ThreadStartException(SerializationInfo info, StreamingContext context)
: base(info, context)
{
}
}
}
-
-
diff --git a/src/mscorlib/src/System/Threading/ThreadState.cs b/src/mscorlib/shared/System/Threading/ThreadState.cs
index c95772e9a7..4bf3b5184d 100644
--- a/src/mscorlib/src/System/Threading/ThreadState.cs
+++ b/src/mscorlib/shared/System/Threading/ThreadState.cs
@@ -2,19 +2,8 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-//
-/*=============================================================================
-**
-**
-**
-** Purpose: Enum to represent the different thread states
-**
-**
-=============================================================================*/
-
namespace System.Threading
{
- [Serializable]
[Flags]
public enum ThreadState
{
diff --git a/src/mscorlib/src/System/Threading/ThreadStateException.cs b/src/mscorlib/shared/System/Threading/ThreadStateException.cs
index a2b8b15bd1..33bc8baee6 100644
--- a/src/mscorlib/src/System/Threading/ThreadStateException.cs
+++ b/src/mscorlib/shared/System/Threading/ThreadStateException.cs
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-//
/*=============================================================================
**
**
@@ -13,7 +12,6 @@
**
=============================================================================*/
-using System;
using System.Runtime.Serialization;
namespace System.Threading
@@ -39,7 +37,8 @@ namespace System.Threading
HResult = __HResults.COR_E_THREADSTATE;
}
- protected ThreadStateException(SerializationInfo info, StreamingContext context) : base(info, context)
+ protected ThreadStateException(SerializationInfo info, StreamingContext context)
+ : base(info, context)
{
}
}
diff --git a/src/mscorlib/shared/System/Threading/TimeoutHelper.cs b/src/mscorlib/shared/System/Threading/TimeoutHelper.cs
new file mode 100644
index 0000000000..c66c9add92
--- /dev/null
+++ b/src/mscorlib/shared/System/Threading/TimeoutHelper.cs
@@ -0,0 +1,54 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+using System.Diagnostics;
+
+namespace System.Threading
+{
+ /// <summary>
+    /// A helper class to capture a start time using Environment.TickCount as a time in milliseconds; it also updates a given timeout by subtracting the current time from
+    /// the start time
+ /// </summary>
+ internal static class TimeoutHelper
+ {
+ /// <summary>
+        /// Returns the Environment.TickCount as a start time in milliseconds as a uint. TickCount rolls over from positive to negative every ~25 days,
+        /// then ~25 days later back to positive again; uint is used to ignore the sign and double the range to 50 days
+ /// </summary>
+ /// <returns></returns>
+ public static uint GetTime()
+ {
+ return (uint)Environment.TickCount;
+ }
+
+ /// <summary>
+ /// Helper function to measure and update the elapsed time
+ /// </summary>
+ /// <param name="startTime"> The first time (in milliseconds) observed when the wait started</param>
+        /// <param name="originalWaitMillisecondsTimeout">The original wait timeout in milliseconds</param>
+ /// <returns>The new wait time in milliseconds, -1 if the time expired</returns>
+ public static int UpdateTimeOut(uint startTime, int originalWaitMillisecondsTimeout)
+ {
+ // The function must be called in case the time out is not infinite
+ Debug.Assert(originalWaitMillisecondsTimeout != Timeout.Infinite);
+
+ uint elapsedMilliseconds = (GetTime() - startTime);
+
+            // Check whether the elapsed milliseconds exceeds int.MaxValue, which is possible because elapsedMilliseconds is a uint
+ if (elapsedMilliseconds > int.MaxValue)
+ {
+ return 0;
+ }
+
+ // Subtract the elapsed time from the current wait time
+ int currentWaitTimeout = originalWaitMillisecondsTimeout - (int)elapsedMilliseconds; ;
+ if (currentWaitTimeout <= 0)
+ {
+ return 0;
+ }
+
+ return currentWaitTimeout;
+ }
+ }
+}
diff --git a/src/mscorlib/src/System/Threading/WaitHandleCannotBeOpenedException.cs b/src/mscorlib/shared/System/Threading/WaitHandleCannotBeOpenedException.cs
index a6f0267aeb..e44946a669 100644
--- a/src/mscorlib/src/System/Threading/WaitHandleCannotBeOpenedException.cs
+++ b/src/mscorlib/shared/System/Threading/WaitHandleCannotBeOpenedException.cs
@@ -2,17 +2,11 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-//
-
-using System;
using System.Runtime.Serialization;
-using System.Runtime.InteropServices;
namespace System.Threading
{
[Serializable]
- [ComVisibleAttribute(false)]
-
public class WaitHandleCannotBeOpenedException : ApplicationException
{
public WaitHandleCannotBeOpenedException() : base(SR.Threading_WaitHandleCannotBeOpenedException)
@@ -35,4 +29,3 @@ namespace System.Threading
}
}
}
-
diff --git a/src/mscorlib/src/System/Collections/Generic/Dictionary.cs b/src/mscorlib/src/System/Collections/Generic/Dictionary.cs
index 6759ab2c1c..409b23b541 100644
--- a/src/mscorlib/src/System/Collections/Generic/Dictionary.cs
+++ b/src/mscorlib/src/System/Collections/Generic/Dictionary.cs
@@ -584,6 +584,9 @@ namespace System.Collections.Generic
entries = newEntries;
}
+ // The overload Remove(TKey key, out TValue value) is a copy of this method with one additional
+ // statement to copy the value for entry being removed into the output parameter.
+ // Code has been intentionally duplicated for performance reasons.
public bool Remove(TKey key)
{
if (key == null)
@@ -622,6 +625,51 @@ namespace System.Collections.Generic
return false;
}
+ // This overload is a copy of the overload Remove(TKey key) with one additional
+ // statement to copy the value for entry being removed into the output parameter.
+ // Code has been intentionally duplicated for performance reasons.
+ public bool Remove(TKey key, out TValue value)
+ {
+ if (key == null)
+ {
+ ThrowHelper.ThrowArgumentNullException(ExceptionArgument.key);
+ }
+
+ if (buckets != null)
+ {
+ int hashCode = comparer.GetHashCode(key) & 0x7FFFFFFF;
+ int bucket = hashCode % buckets.Length;
+ int last = -1;
+ for (int i = buckets[bucket]; i >= 0; last = i, i = entries[i].next)
+ {
+ if (entries[i].hashCode == hashCode && comparer.Equals(entries[i].key, key))
+ {
+ if (last < 0)
+ {
+ buckets[bucket] = entries[i].next;
+ }
+ else
+ {
+ entries[last].next = entries[i].next;
+ }
+
+ value = entries[i].value;
+
+ entries[i].hashCode = -1;
+ entries[i].next = freeList;
+ entries[i].key = default(TKey);
+ entries[i].value = default(TValue);
+ freeList = i;
+ freeCount++;
+ version++;
+ return true;
+ }
+ }
+ }
+ value = default(TValue);
+ return false;
+ }
+
public bool TryGetValue(TKey key, out TValue value)
{
int i = FindEntry(key);
diff --git a/src/mscorlib/src/System/DelegateSerializationHolder.cs b/src/mscorlib/src/System/DelegateSerializationHolder.cs
index 5f8f0ef3f0..d7ad827673 100644
--- a/src/mscorlib/src/System/DelegateSerializationHolder.cs
+++ b/src/mscorlib/src/System/DelegateSerializationHolder.cs
@@ -206,26 +206,34 @@ namespace System
// We cannot use Type.GetType directly, because of AppCompat - assembly names starting with '[' would fail to load.
RuntimeType type = (RuntimeType)Assembly.GetType_Compat(de.assembly, de.type);
- RuntimeType targetType = (RuntimeType)Assembly.GetType_Compat(de.targetTypeAssembly, de.targetTypeName);
+
+ // {de.targetTypeAssembly, de.targetTypeName} do not actually refer to the type of the target, but the reflected
+ // type of the method. Those types may be the same when the method is on the target's type or on a type in its
+ // inheritance chain, but those types may not be the same when the method is an extension method for the
+ // target's type or a type in its inheritance chain.
// If we received the new style delegate encoding we already have the target MethodInfo in hand.
if (m_methods != null)
{
- if (de.target != null && !targetType.IsInstanceOfType(de.target))
- throw new InvalidCastException();
- Object target = de.target;
- d = Delegate.CreateDelegateNoSecurityCheck(type, target, m_methods[index]);
+ // The method info is serialized, so the target type info is redundant. The desktop framework does no
+ // additional verification on the target type info.
+ d = Delegate.CreateDelegateNoSecurityCheck(type, de.target, m_methods[index]);
}
else
{
if (de.target != null)
{
- if (!targetType.IsInstanceOfType(de.target))
- throw new InvalidCastException();
+ // For consistency with the desktop framework, when the method info is not serialized for a closed
+ // delegate, the method is assumed to be on the target's type or in its inheritance chain. An extension
+ // method would not work on this path for the above reason and also because the delegate binds to
+ // instance methods only. The desktop framework does no additional verification on the target type info.
d = Delegate.CreateDelegate(type, de.target, de.methodName);
}
else
+ {
+ RuntimeType targetType = (RuntimeType)Assembly.GetType_Compat(de.targetTypeAssembly, de.targetTypeName);
d = Delegate.CreateDelegate(type, targetType, de.methodName);
+ }
}
}
catch (Exception e)
diff --git a/src/mscorlib/src/System/Reflection/AssemblyName.cs b/src/mscorlib/src/System/Reflection/AssemblyName.cs
index a1b9034391..80fdf5d162 100644
--- a/src/mscorlib/src/System/Reflection/AssemblyName.cs
+++ b/src/mscorlib/src/System/Reflection/AssemblyName.cs
@@ -372,26 +372,26 @@ namespace System.Reflection
nInit();
}
- static public bool ReferenceMatchesDefinition(AssemblyName reference,
- AssemblyName definition)
- {
- // Optimization for common use case
- if (Object.ReferenceEquals(reference, definition))
- {
+ /// <summary>
+ /// Compares the simple names disregarding Version, Culture and PKT. While this clearly does not
+ /// match the intent of this api, this api has been broken this way since its debut and we cannot
+ /// change its behavior now.
+ /// </summary>
+ public static bool ReferenceMatchesDefinition(AssemblyName reference, AssemblyName definition)
+ {
+ if (object.ReferenceEquals(reference, definition))
return true;
- }
- return ReferenceMatchesDefinitionInternal(reference, definition, true);
- }
+ if (reference == null)
+ throw new ArgumentNullException(nameof(reference));
- /// "parse" tells us to parse the simple name of the assembly as if it was the full name
- /// almost never the right thing to do, but needed for compat
- [MethodImplAttribute(MethodImplOptions.InternalCall)]
- static internal extern bool ReferenceMatchesDefinitionInternal(AssemblyName reference,
- AssemblyName definition,
- bool parse);
-
+ if (definition == null)
+ throw new ArgumentNullException(nameof(definition));
+ string refName = reference.Name ?? string.Empty;
+ string defName = definition.Name ?? string.Empty;
+ return refName.Equals(defName, StringComparison.OrdinalIgnoreCase);
+ }
[MethodImplAttribute(MethodImplOptions.InternalCall)]
internal extern void nInit(out RuntimeAssembly assembly, bool forIntrospection, bool raiseResolveEvent);
diff --git a/src/mscorlib/src/System/Runtime/Serialization/IDeserializationCallback.cs b/src/mscorlib/src/System/Runtime/Serialization/IDeserializationCallback.cs
deleted file mode 100644
index 2b07ce9c38..0000000000
--- a/src/mscorlib/src/System/Runtime/Serialization/IDeserializationCallback.cs
+++ /dev/null
@@ -1,25 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-
-/*============================================================
-**
-** Interface: IDeserializationEventListener
-**
-**
-** Purpose: Implemented by any class that wants to indicate that
-** it wishes to receive deserialization events.
-**
-**
-===========================================================*/
-
-using System;
-
-namespace System.Runtime.Serialization
-{
- // Interface does not need to be marked with the serializable attribute
- public interface IDeserializationCallback
- {
- void OnDeserialization(Object sender);
- }
-}
diff --git a/src/mscorlib/src/System/Runtime/Serialization/SerializationAttributes.cs b/src/mscorlib/src/System/Runtime/Serialization/SerializationAttributes.cs
deleted file mode 100644
index be9f4aefee..0000000000
--- a/src/mscorlib/src/System/Runtime/Serialization/SerializationAttributes.cs
+++ /dev/null
@@ -1,61 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-
-/*============================================================
-**
-**
-**
-** Purpose: Various Attributes for Serialization
-**
-**
-============================================================*/
-
-using System;
-using System.Diagnostics.Contracts;
-using System.Reflection;
-
-namespace System.Runtime.Serialization
-{
- [AttributeUsage(AttributeTargets.Field, Inherited = false)]
- public sealed class OptionalFieldAttribute : Attribute
- {
- private int versionAdded = 1;
- public OptionalFieldAttribute() { }
-
- public int VersionAdded
- {
- get
- {
- return versionAdded;
- }
- set
- {
- if (value < 1)
- throw new ArgumentException(SR.Serialization_OptionalFieldVersionValue);
- Contract.EndContractBlock();
- versionAdded = value;
- }
- }
- }
-
- [AttributeUsage(AttributeTargets.Method, Inherited = false)]
- public sealed class OnSerializingAttribute : Attribute
- {
- }
-
- [AttributeUsage(AttributeTargets.Method, Inherited = false)]
- public sealed class OnSerializedAttribute : Attribute
- {
- }
-
- [AttributeUsage(AttributeTargets.Method, Inherited = false)]
- public sealed class OnDeserializingAttribute : Attribute
- {
- }
-
- [AttributeUsage(AttributeTargets.Method, Inherited = false)]
- public sealed class OnDeserializedAttribute : Attribute
- {
- }
-}
diff --git a/src/mscorlib/src/System/Runtime/Serialization/SerializationInfoEnumerator.cs b/src/mscorlib/src/System/Runtime/Serialization/SerializationInfoEnumerator.cs
deleted file mode 100644
index 791f7a8c73..0000000000
--- a/src/mscorlib/src/System/Runtime/Serialization/SerializationInfoEnumerator.cs
+++ /dev/null
@@ -1,176 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-
-/*============================================================
-**
-**
-**
-** Purpose: A formatter-friendly mechanism for walking all of
-** the data in a SerializationInfo. Follows the IEnumerator
-** mechanism from Collections.
-**
-**
-============================================================*/
-
-using System;
-using System.Diagnostics;
-using System.Collections;
-using System.Diagnostics.Contracts;
-
-namespace System.Runtime.Serialization
-{
- //
- // The tuple returned by SerializationInfoEnumerator.Current.
- //
- public struct SerializationEntry
- {
- private Type m_type;
- private Object m_value;
- private String m_name;
-
- public Object Value
- {
- get
- {
- return m_value;
- }
- }
-
- public String Name
- {
- get
- {
- return m_name;
- }
- }
-
- public Type ObjectType
- {
- get
- {
- return m_type;
- }
- }
-
- internal SerializationEntry(String entryName, Object entryValue, Type entryType)
- {
- m_value = entryValue;
- m_name = entryName;
- m_type = entryType;
- }
- }
-
- //
- // A simple enumerator over the values stored in the SerializationInfo.
- // This does not snapshot the values, it just keeps pointers to the
- // member variables of the SerializationInfo that created it.
- //
- public sealed class SerializationInfoEnumerator : IEnumerator
- {
- private String[] m_members;
- private Object[] m_data;
- private Type[] m_types;
- private int m_numItems;
- private int m_currItem;
- private bool m_current;
-
- internal SerializationInfoEnumerator(String[] members, Object[] info, Type[] types, int numItems)
- {
- Debug.Assert(members != null, "[SerializationInfoEnumerator.ctor]members!=null");
- Debug.Assert(info != null, "[SerializationInfoEnumerator.ctor]info!=null");
- Debug.Assert(types != null, "[SerializationInfoEnumerator.ctor]types!=null");
- Debug.Assert(numItems >= 0, "[SerializationInfoEnumerator.ctor]numItems>=0");
- Debug.Assert(members.Length >= numItems, "[SerializationInfoEnumerator.ctor]members.Length>=numItems");
- Debug.Assert(info.Length >= numItems, "[SerializationInfoEnumerator.ctor]info.Length>=numItems");
- Debug.Assert(types.Length >= numItems, "[SerializationInfoEnumerator.ctor]types.Length>=numItems");
-
- m_members = members;
- m_data = info;
- m_types = types;
- //The MoveNext semantic is much easier if we enforce that [0..m_numItems] are valid entries
- //in the enumerator, hence we subtract 1.
- m_numItems = numItems - 1;
- m_currItem = -1;
- m_current = false;
- }
-
- public bool MoveNext()
- {
- if (m_currItem < m_numItems)
- {
- m_currItem++;
- m_current = true;
- }
- else
- {
- m_current = false;
- }
- return m_current;
- }
-
- Object IEnumerator.Current
- { //Actually returns a SerializationEntry
- get
- {
- if (m_current == false)
- {
- throw new InvalidOperationException(SR.InvalidOperation_EnumOpCantHappen);
- }
- return (Object)(new SerializationEntry(m_members[m_currItem], m_data[m_currItem], m_types[m_currItem]));
- }
- }
-
- public SerializationEntry Current
- { //Actually returns a SerializationEntry
- get
- {
- if (m_current == false)
- {
- throw new InvalidOperationException(SR.InvalidOperation_EnumOpCantHappen);
- }
- return (new SerializationEntry(m_members[m_currItem], m_data[m_currItem], m_types[m_currItem]));
- }
- }
-
- public void Reset()
- {
- m_currItem = -1;
- m_current = false;
- }
-
- public String Name
- {
- get
- {
- if (m_current == false)
- {
- throw new InvalidOperationException(SR.InvalidOperation_EnumOpCantHappen);
- }
- return m_members[m_currItem];
- }
- }
- public Object Value
- {
- get
- {
- if (m_current == false)
- {
- throw new InvalidOperationException(SR.InvalidOperation_EnumOpCantHappen);
- }
- return m_data[m_currItem];
- }
- }
- public Type ObjectType
- {
- get
- {
- if (m_current == false)
- {
- throw new InvalidOperationException(SR.InvalidOperation_EnumOpCantHappen);
- }
- return m_types[m_currItem];
- }
- }
- }
-}
diff --git a/src/mscorlib/src/System/Runtime/Serialization/StreamingContext.cs b/src/mscorlib/src/System/Runtime/Serialization/StreamingContext.cs
deleted file mode 100644
index 12a7631714..0000000000
--- a/src/mscorlib/src/System/Runtime/Serialization/StreamingContext.cs
+++ /dev/null
@@ -1,84 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-
-/*============================================================
-**
-** ValueType: StreamingContext
-**
-**
-** Purpose: A value type indicating the source or destination of our streaming.
-**
-**
-===========================================================*/
-
-using System.Runtime.Remoting;
-using System;
-
-namespace System.Runtime.Serialization
-{
- [Serializable]
- public struct StreamingContext
- {
- internal Object m_additionalContext;
- internal StreamingContextStates m_state;
-
- public StreamingContext(StreamingContextStates state)
- : this(state, null)
- {
- }
-
- public StreamingContext(StreamingContextStates state, Object additional)
- {
- m_state = state;
- m_additionalContext = additional;
- }
-
- public Object Context
- {
- get { return m_additionalContext; }
- }
-
- public override bool Equals(Object obj)
- {
- if (!(obj is StreamingContext))
- {
- return false;
- }
- if (((StreamingContext)obj).m_additionalContext == m_additionalContext &&
- ((StreamingContext)obj).m_state == m_state)
- {
- return true;
- }
- return false;
- }
-
- public override int GetHashCode()
- {
- return (int)m_state;
- }
-
- public StreamingContextStates State
- {
- get { return m_state; }
- }
- }
-
- // **********************************************************
- // Keep these in sync with the version in vm\runtimehandles.h
- // **********************************************************
- [Serializable]
- [Flags]
- public enum StreamingContextStates
- {
- CrossProcess = 0x01,
- CrossMachine = 0x02,
- File = 0x04,
- Persistence = 0x08,
- Remoting = 0x10,
- Other = 0x20,
- Clone = 0x40,
- CrossAppDomain = 0x80,
- All = 0xFF,
- }
-}
diff --git a/src/mscorlib/src/System/Threading/LazyInitializer.cs b/src/mscorlib/src/System/Threading/LazyInitializer.cs
index e264a8f166..d585ba6c35 100644
--- a/src/mscorlib/src/System/Threading/LazyInitializer.cs
+++ b/src/mscorlib/src/System/Threading/LazyInitializer.cs
@@ -17,37 +17,6 @@ using System.Diagnostics.Contracts;
namespace System.Threading
{
/// <summary>
- /// Specifies how a <see cref="T:System.Threading.Lazy{T}"/> instance should synchronize access among multiple threads.
- /// </summary>
- public enum LazyThreadSafetyMode
- {
- /// <summary>
- /// This mode makes no guarantees around the thread-safety of the <see cref="T:System.Threading.Lazy{T}"/> instance. If used from multiple threads, the behavior of the <see cref="T:System.Threading.Lazy{T}"/> is undefined.
- /// This mode should be used when a <see cref="T:System.Threading.Lazy{T}"/> is guaranteed to never be initialized from more than one thread simultaneously and high performance is crucial.
- /// If valueFactory throws an exception when the <see cref="T:System.Threading.Lazy{T}"/> is initialized, the exception will be cached and returned on subsequent accesses to Value. Also, if valueFactory recursively
- /// accesses Value on this <see cref="T:System.Threading.Lazy{T}"/> instance, a <see cref="T:System.InvalidOperationException"/> will be thrown.
- /// </summary>
- None,
-
- /// <summary>
- /// When multiple threads attempt to simultaneously initialize a <see cref="T:System.Threading.Lazy{T}"/> instance, this mode allows each thread to execute the
- /// valueFactory but only the first thread to complete initialization will be allowed to set the final value of the <see cref="T:System.Threading.Lazy{T}"/>.
- /// Once initialized successfully, any future calls to Value will return the cached result. If valueFactory throws an exception on any thread, that exception will be
- /// propagated out of Value. If any thread executes valueFactory without throwing an exception and, therefore, successfully sets the value, that value will be returned on
- /// subsequent accesses to Value from any thread. If no thread succeeds in setting the value, IsValueCreated will remain false and subsequent accesses to Value will result in
- /// the valueFactory delegate re-executing. Also, if valueFactory recursively accesses Value on this <see cref="T:System.Threading.Lazy{T}"/> instance, an exception will NOT be thrown.
- /// </summary>
- PublicationOnly,
-
- /// <summary>
- /// This mode uses locks to ensure that only a single thread can initialize a <see cref="T:System.Threading.Lazy{T}"/> instance in a thread-safe manner. In general,
- /// taken if this mode is used in conjunction with a <see cref="T:System.Threading.Lazy{T}"/> valueFactory delegate that uses locks internally, a deadlock can occur if not
- /// handled carefully. If valueFactory throws an exception when the<see cref="T:System.Threading.Lazy{T}"/> is initialized, the exception will be cached and returned on
- /// subsequent accesses to Value. Also, if valueFactory recursively accesses Value on this <see cref="T:System.Threading.Lazy{T}"/> instance, a <see cref="T:System.InvalidOperationException"/> will be thrown.
- /// </summary>
- ExecutionAndPublication
- }
- /// <summary>
/// Provides lazy initialization routines.
/// </summary>
/// <remarks>
diff --git a/src/mscorlib/src/System/Threading/ReaderWriterLockSlim.cs b/src/mscorlib/src/System/Threading/ReaderWriterLockSlim.cs
new file mode 100644
index 0000000000..98517ad85f
--- /dev/null
+++ b/src/mscorlib/src/System/Threading/ReaderWriterLockSlim.cs
@@ -0,0 +1,1311 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+using Internal.Runtime.Augments;
+using System.Diagnostics; // for TraceInformation
+using System.Threading;
+using System.Runtime.CompilerServices;
+
+namespace System.Threading
+{
+ public enum LockRecursionPolicy
+ {
+ NoRecursion = 0,
+ SupportsRecursion = 1,
+ }
+
+ //
+ // ReaderWriterCount tracks how many of each kind of lock is held by each thread.
+ // We keep a linked list for each thread, attached to a ThreadStatic field.
+ // These are reused wherever possible, so that a given thread will only
+ // allocate N of these, where N is the maximum number of locks held simultaneously
+ // by that thread.
+ //
+ internal class ReaderWriterCount
+ {
+ // Which lock does this object belong to? This is a numeric ID for two reasons:
+ // 1) We don't want this field to keep the lock object alive, and a WeakReference would
+ // be too expensive.
+ // 2) Setting the value of a long is faster than setting the value of a reference.
+ // The "hot" paths in ReaderWriterLockSlim are short enough that this actually
+ // matters.
+ public long lockID;
+
+ // How many reader locks does this thread hold on this ReaderWriterLockSlim instance?
+ public int readercount;
+
+ // Ditto for writer/upgrader counts. These are only used if the lock allows recursion.
+ // But we have to have the fields on every ReaderWriterCount instance, because
+ // we reuse it for different locks.
+ public int writercount;
+ public int upgradecount;
+
+ // Next RWC in this thread's list.
+ public ReaderWriterCount next;
+ }
+
+ /// <summary>
+ /// A reader-writer lock implementation that is intended to be simple, yet very
+ /// efficient. In particular only 1 interlocked operation is taken for any lock
+ /// operation (we use spin locks to achieve this). The spin lock is never held
+ /// for more than a few instructions (in particular, we never call event APIs
+ /// or in fact any non-trivial API while holding the spin lock).
+ /// </summary>
+ public class ReaderWriterLockSlim : IDisposable
+ {
+ //Specifying if locked can be reacquired recursively.
+ private bool _fIsReentrant;
+
+ // Lock specification for myLock: This lock protects exactly the local fields associated with this
+ // instance of ReaderWriterLockSlim. It does NOT protect the memory associated with
+ // the events that hang off this lock (eg writeEvent, readEvent upgradeEvent).
+ private int _myLock;
+
+ //The variables controlling spinning behavior of Mylock(which is a spin-lock)
+
+ private const int LockSpinCycles = 20;
+ private const int LockSpinCount = 10;
+ private const int LockSleep0Count = 5;
+
+ // These variables allow use to avoid Setting events (which is expensive) if we don't have to.
+ private uint _numWriteWaiters; // maximum number of threads that can be doing a WaitOne on the writeEvent
+ private uint _numReadWaiters; // maximum number of threads that can be doing a WaitOne on the readEvent
+ private uint _numWriteUpgradeWaiters; // maximum number of threads that can be doing a WaitOne on the upgradeEvent (at most 1).
+ private uint _numUpgradeWaiters;
+
+ //Variable used for quick check when there are no waiters.
+ private bool _fNoWaiters;
+
+ private int _upgradeLockOwnerId;
+ private int _writeLockOwnerId;
+
+ // conditions we wait on.
+ private EventWaitHandle _writeEvent; // threads waiting to acquire a write lock go here.
+ private EventWaitHandle _readEvent; // threads waiting to acquire a read lock go here (will be released in bulk)
+ private EventWaitHandle _upgradeEvent; // thread waiting to acquire the upgrade lock
+ private EventWaitHandle _waitUpgradeEvent; // thread waiting to upgrade from the upgrade lock to a write lock go here (at most one)
+
+ // Every lock instance has a unique ID, which is used by ReaderWriterCount to associate itself with the lock
+ // without holding a reference to it.
+ private static long s_nextLockID;
+ private long _lockID;
+
+ // See comments on ReaderWriterCount.
+ [ThreadStatic]
+ private static ReaderWriterCount t_rwc;
+
+ private bool _fUpgradeThreadHoldingRead;
+
+ private const int MaxSpinCount = 20;
+
+ //The uint, that contains info like if the writer lock is held, num of
+ //readers etc.
+ private uint _owners;
+
+ //Various R/W masks
+ //Note:
+ //The Uint is divided as follows:
+ //
+ //Writer-Owned Waiting-Writers Waiting Upgraders Num-Readers
+ // 31 30 29 28.......0
+ //
+ //Dividing the uint, allows to vastly simplify logic for checking if a
+ //reader should go in etc. Setting the writer bit will automatically
+ //make the value of the uint much larger than the max num of readers
+ //allowed, thus causing the check for max_readers to fail.
+
+ private const uint WRITER_HELD = 0x80000000;
+ private const uint WAITING_WRITERS = 0x40000000;
+ private const uint WAITING_UPGRADER = 0x20000000;
+
+ //The max readers is actually one less then its theoretical max.
+ //This is done in order to prevent reader count overflows. If the reader
+ //count reaches max, other readers will wait.
+ private const uint MAX_READER = 0x10000000 - 2;
+
+ private const uint READER_MASK = 0x10000000 - 1;
+
+ private bool _fDisposed;
+
+ private void InitializeThreadCounts()
+ {
+ _upgradeLockOwnerId = -1;
+ _writeLockOwnerId = -1;
+ }
+
+ public ReaderWriterLockSlim()
+ : this(LockRecursionPolicy.NoRecursion)
+ {
+ }
+
+ public ReaderWriterLockSlim(LockRecursionPolicy recursionPolicy)
+ {
+ if (recursionPolicy == LockRecursionPolicy.SupportsRecursion)
+ {
+ _fIsReentrant = true;
+ }
+ InitializeThreadCounts();
+ _fNoWaiters = true;
+ _lockID = Interlocked.Increment(ref s_nextLockID);
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private static bool IsRWEntryEmpty(ReaderWriterCount rwc)
+ {
+ if (rwc.lockID == 0)
+ return true;
+ else if (rwc.readercount == 0 && rwc.writercount == 0 && rwc.upgradecount == 0)
+ return true;
+ else
+ return false;
+ }
+
+ private bool IsRwHashEntryChanged(ReaderWriterCount lrwc)
+ {
+ return lrwc.lockID != _lockID;
+ }
+
+ /// <summary>
+ /// This routine retrieves/sets the per-thread counts needed to enforce the
+ /// various rules related to acquiring the lock.
+ ///
+ /// DontAllocate is set to true if the caller just wants to get an existing
+ /// entry for this thread, but doesn't want to add one if an existing one
+ /// could not be found.
+ /// </summary>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private ReaderWriterCount GetThreadRWCount(bool dontAllocate)
+ {
+ ReaderWriterCount rwc = t_rwc;
+ ReaderWriterCount empty = null;
+ while (rwc != null)
+ {
+ if (rwc.lockID == _lockID)
+ return rwc;
+
+ if (!dontAllocate && empty == null && IsRWEntryEmpty(rwc))
+ empty = rwc;
+
+ rwc = rwc.next;
+ }
+
+ if (dontAllocate)
+ return null;
+
+ if (empty == null)
+ {
+ empty = new ReaderWriterCount();
+ empty.next = t_rwc;
+ t_rwc = empty;
+ }
+
+ empty.lockID = _lockID;
+ return empty;
+ }
+
+ public void EnterReadLock()
+ {
+ TryEnterReadLock(-1);
+ }
+
+ //
+ // Common timeout support
+ //
+ private struct TimeoutTracker
+ {
+ private int _total;
+ private int _start;
+
+ public TimeoutTracker(TimeSpan timeout)
+ {
+ long ltm = (long)timeout.TotalMilliseconds;
+ if (ltm < -1 || ltm > (long)Int32.MaxValue)
+ throw new ArgumentOutOfRangeException(nameof(timeout));
+ _total = (int)ltm;
+ if (_total != -1 && _total != 0)
+ _start = Environment.TickCount;
+ else
+ _start = 0;
+ }
+
+ public TimeoutTracker(int millisecondsTimeout)
+ {
+ if (millisecondsTimeout < -1)
+ throw new ArgumentOutOfRangeException(nameof(millisecondsTimeout));
+ _total = millisecondsTimeout;
+ if (_total != -1 && _total != 0)
+ _start = Environment.TickCount;
+ else
+ _start = 0;
+ }
+
+ public int RemainingMilliseconds
+ {
+ get
+ {
+ if (_total == -1 || _total == 0)
+ return _total;
+
+ int elapsed = Environment.TickCount - _start;
+ // elapsed may be negative if TickCount has overflowed by 2^31 milliseconds.
+ if (elapsed < 0 || elapsed >= _total)
+ return 0;
+
+ return _total - elapsed;
+ }
+ }
+
+ public bool IsExpired
+ {
+ get
+ {
+ return RemainingMilliseconds == 0;
+ }
+ }
+ }
+
+ public bool TryEnterReadLock(TimeSpan timeout)
+ {
+ return TryEnterReadLock(new TimeoutTracker(timeout));
+ }
+
+ public bool TryEnterReadLock(int millisecondsTimeout)
+ {
+ return TryEnterReadLock(new TimeoutTracker(millisecondsTimeout));
+ }
+
+ private bool TryEnterReadLock(TimeoutTracker timeout)
+ {
+ return TryEnterReadLockCore(timeout);
+ }
+
+ // Core read-lock acquisition. Returns false when the timeout expires;
+ // throws ObjectDisposedException after Dispose, or LockRecursionException
+ // for recursion the current policy forbids. On success the global owner
+ // count and the per-thread readercount are both incremented.
+ private bool TryEnterReadLockCore(TimeoutTracker timeout)
+ {
+ if (_fDisposed)
+ throw new ObjectDisposedException(null);
+
+ ReaderWriterCount lrwc = null;
+ int id = Environment.CurrentManagedThreadId;
+
+ if (!_fIsReentrant)
+ {
+ if (id == _writeLockOwnerId)
+ {
+ //Check for AW->AR
+ throw new LockRecursionException(SR.LockRecursionException_ReadAfterWriteNotAllowed);
+ }
+
+ EnterMyLock();
+
+ lrwc = GetThreadRWCount(false);
+
+ //Check if the reader lock is already acquired. Note, we could
+ //check the presence of a reader by not allocating rwc (But that
+ //would lead to two lookups in the common case. It's better to keep
+ //a count in the structure).
+ if (lrwc.readercount > 0)
+ {
+ ExitMyLock();
+ throw new LockRecursionException(SR.LockRecursionException_RecursiveReadNotAllowed);
+ }
+ else if (id == _upgradeLockOwnerId)
+ {
+ //The upgrade lock is already held.
+ //Update the global read counts and exit.
+
+ lrwc.readercount++;
+ _owners++;
+ ExitMyLock();
+ return true;
+ }
+ }
+ else
+ {
+ // Reentrant mode: recursive reads, upgrade->read and write->read
+ // are all allowed and only bump the counts.
+ EnterMyLock();
+ lrwc = GetThreadRWCount(false);
+ if (lrwc.readercount > 0)
+ {
+ lrwc.readercount++;
+ ExitMyLock();
+ return true;
+ }
+ else if (id == _upgradeLockOwnerId)
+ {
+ //The upgrade lock is already held.
+ //Update the global read counts and exit.
+ lrwc.readercount++;
+ _owners++;
+ ExitMyLock();
+ _fUpgradeThreadHoldingRead = true;
+ return true;
+ }
+ else if (id == _writeLockOwnerId)
+ {
+ //The write lock is already held.
+ //Update global read counts here,
+ lrwc.readercount++;
+ _owners++;
+ ExitMyLock();
+ return true;
+ }
+ }
+
+ bool retVal = true;
+
+ int spincount = 0;
+
+ // Spin first, then fall back to blocking on _readEvent. The spin lock
+ // (myLock) is held at the top of every iteration.
+ for (; ;)
+ {
+ // We can enter a read lock if there are only read-locks have been given out
+ // and a writer is not trying to get in.
+
+ if (_owners < MAX_READER)
+ {
+ // Good case, there is no contention, we are basically done
+ _owners++; // Indicate we have another reader
+ lrwc.readercount++;
+ break;
+ }
+
+ if (spincount < MaxSpinCount)
+ {
+ ExitMyLock();
+ if (timeout.IsExpired)
+ return false;
+ spincount++;
+ SpinWait(spincount);
+ EnterMyLock();
+ //The per-thread structure may have been recycled as the lock is acquired (due to message pumping), load again.
+ if (IsRwHashEntryChanged(lrwc))
+ lrwc = GetThreadRWCount(false);
+ continue;
+ }
+
+ // Drat, we need to wait. Mark that we have waiters and wait.
+ if (_readEvent == null) // Create the needed event
+ {
+ LazyCreateEvent(ref _readEvent, false);
+ if (IsRwHashEntryChanged(lrwc))
+ lrwc = GetThreadRWCount(false);
+ continue; // since we left the lock, start over.
+ }
+
+ retVal = WaitOnEvent(_readEvent, ref _numReadWaiters, timeout, isWriteWaiter: false);
+ if (!retVal)
+ {
+ // WaitOnEvent already released myLock on failure, so just bail.
+ return false;
+ }
+ if (IsRwHashEntryChanged(lrwc))
+ lrwc = GetThreadRWCount(false);
+ }
+
+ ExitMyLock();
+ return retVal;
+ }
+
+ // Blocking write-lock entry: -1 means wait forever (Timeout.Infinite).
+ public void EnterWriteLock()
+ {
+ TryEnterWriteLock(-1);
+ }
+
+ // Public write-lock entry points; both normalize into a TimeoutTracker.
+ public bool TryEnterWriteLock(TimeSpan timeout)
+ {
+ return TryEnterWriteLock(new TimeoutTracker(timeout));
+ }
+
+ public bool TryEnterWriteLock(int millisecondsTimeout)
+ {
+ return TryEnterWriteLock(new TimeoutTracker(millisecondsTimeout));
+ }
+
+ private bool TryEnterWriteLock(TimeoutTracker timeout)
+ {
+ return TryEnterWriteLockCore(timeout);
+ }
+
+ // Core write-lock acquisition. Also handles the upgrade->write transition
+ // (the upgrade holder waits on the dedicated _waitUpgradeEvent so it cannot
+ // be starved by other writers). Returns false when the timeout expires.
+ private bool TryEnterWriteLockCore(TimeoutTracker timeout)
+ {
+ if (_fDisposed)
+ throw new ObjectDisposedException(null);
+
+ int id = Environment.CurrentManagedThreadId;
+ ReaderWriterCount lrwc;
+ bool upgradingToWrite = false;
+
+ if (!_fIsReentrant)
+ {
+ if (id == _writeLockOwnerId)
+ {
+ //Check for AW->AW
+ throw new LockRecursionException(SR.LockRecursionException_RecursiveWriteNotAllowed);
+ }
+ else if (id == _upgradeLockOwnerId)
+ {
+ //AU->AW case is allowed once.
+ upgradingToWrite = true;
+ }
+
+ EnterMyLock();
+ lrwc = GetThreadRWCount(true);
+
+ //Can't acquire write lock with reader lock held.
+ if (lrwc != null && lrwc.readercount > 0)
+ {
+ ExitMyLock();
+ throw new LockRecursionException(SR.LockRecursionException_WriteAfterReadNotAllowed);
+ }
+ }
+ else
+ {
+ EnterMyLock();
+ lrwc = GetThreadRWCount(false);
+
+ if (id == _writeLockOwnerId)
+ {
+ // Recursive write: just bump the per-thread count.
+ lrwc.writercount++;
+ ExitMyLock();
+ return true;
+ }
+ else if (id == _upgradeLockOwnerId)
+ {
+ upgradingToWrite = true;
+ }
+ else if (lrwc.readercount > 0)
+ {
+ //Write locks may not be acquired if only read locks have been
+ //acquired.
+ ExitMyLock();
+ throw new LockRecursionException(SR.LockRecursionException_WriteAfterReadNotAllowed);
+ }
+ }
+
+ int spincount = 0;
+ bool retVal = true;
+
+ // Spin first, then block. myLock is held at the top of every iteration.
+ for (; ;)
+ {
+ if (IsWriterAcquired())
+ {
+ // Good case, there is no contention, we are basically done
+ SetWriterAcquired();
+ break;
+ }
+
+ //Check if there is just one upgrader, and no readers.
+ //Assumption: Only one thread can have the upgrade lock, so the
+ //following check will fail for all other threads that may sneak in
+ //when the upgrading thread is waiting.
+
+ if (upgradingToWrite)
+ {
+ uint readercount = GetNumReaders();
+
+ if (readercount == 1)
+ {
+ //Good case again, there is just one upgrader, and no readers.
+ SetWriterAcquired(); // indicate we have a writer.
+ break;
+ }
+ else if (readercount == 2)
+ {
+ if (lrwc != null)
+ {
+ if (IsRwHashEntryChanged(lrwc))
+ lrwc = GetThreadRWCount(false);
+
+ if (lrwc.readercount > 0)
+ {
+ //This check is needed for EU->ER->EW case, as the owner count will be two.
+ Debug.Assert(_fIsReentrant);
+ Debug.Assert(_fUpgradeThreadHoldingRead);
+
+ //Good case again, there is just one upgrader, and no readers.
+ SetWriterAcquired(); // indicate we have a writer.
+ break;
+ }
+ }
+ }
+ }
+
+ if (spincount < MaxSpinCount)
+ {
+ ExitMyLock();
+ if (timeout.IsExpired)
+ return false;
+ spincount++;
+ SpinWait(spincount);
+ EnterMyLock();
+ continue;
+ }
+
+ if (upgradingToWrite)
+ {
+ if (_waitUpgradeEvent == null) // Create the needed event
+ {
+ LazyCreateEvent(ref _waitUpgradeEvent, true);
+ continue; // since we left the lock, start over.
+ }
+
+ Debug.Assert(_numWriteUpgradeWaiters == 0, "There can be at most one thread with the upgrade lock held.");
+
+ retVal = WaitOnEvent(_waitUpgradeEvent, ref _numWriteUpgradeWaiters, timeout, isWriteWaiter: true);
+
+ //The lock is not held in case of failure.
+ if (!retVal)
+ return false;
+ }
+ else
+ {
+ // Drat, we need to wait. Mark that we have waiters and wait.
+ if (_writeEvent == null) // create the needed event.
+ {
+ LazyCreateEvent(ref _writeEvent, true);
+ continue; // since we left the lock, start over.
+ }
+
+ retVal = WaitOnEvent(_writeEvent, ref _numWriteWaiters, timeout, isWriteWaiter: true);
+ //The lock is not held in case of failure.
+ if (!retVal)
+ return false;
+ }
+ }
+
+ Debug.Assert((_owners & WRITER_HELD) > 0);
+
+ if (_fIsReentrant)
+ {
+ if (IsRwHashEntryChanged(lrwc))
+ lrwc = GetThreadRWCount(false);
+ lrwc.writercount++;
+ }
+
+ ExitMyLock();
+
+ // Safe to publish outside myLock: only this thread just acquired the write lock.
+ _writeLockOwnerId = id;
+
+ return true;
+ }
+
+ // Blocking upgradeable-read entry: -1 means wait forever (Timeout.Infinite).
+ public void EnterUpgradeableReadLock()
+ {
+ TryEnterUpgradeableReadLock(-1);
+ }
+
+ // Public upgradeable-read entry points; both normalize into a TimeoutTracker.
+ public bool TryEnterUpgradeableReadLock(TimeSpan timeout)
+ {
+ return TryEnterUpgradeableReadLock(new TimeoutTracker(timeout));
+ }
+
+ public bool TryEnterUpgradeableReadLock(int millisecondsTimeout)
+ {
+ return TryEnterUpgradeableReadLock(new TimeoutTracker(millisecondsTimeout));
+ }
+
+ private bool TryEnterUpgradeableReadLock(TimeoutTracker timeout)
+ {
+ return TryEnterUpgradeableReadLockCore(timeout);
+ }
+
+ // Core upgradeable-read acquisition. At most one thread holds the upgrade
+ // lock at a time (_upgradeLockOwnerId == -1 means unowned). Returns false
+ // when the timeout expires.
+ private bool TryEnterUpgradeableReadLockCore(TimeoutTracker timeout)
+ {
+ if (_fDisposed)
+ throw new ObjectDisposedException(null);
+
+ int id = Environment.CurrentManagedThreadId;
+ ReaderWriterCount lrwc;
+
+ if (!_fIsReentrant)
+ {
+ if (id == _upgradeLockOwnerId)
+ {
+ //Check for AU->AU
+ throw new LockRecursionException(SR.LockRecursionException_RecursiveUpgradeNotAllowed);
+ }
+ else if (id == _writeLockOwnerId)
+ {
+ //Check for AU->AW
+ throw new LockRecursionException(SR.LockRecursionException_UpgradeAfterWriteNotAllowed);
+ }
+
+ EnterMyLock();
+ lrwc = GetThreadRWCount(true);
+ //Can't acquire upgrade lock with reader lock held.
+ if (lrwc != null && lrwc.readercount > 0)
+ {
+ ExitMyLock();
+ throw new LockRecursionException(SR.LockRecursionException_UpgradeAfterReadNotAllowed);
+ }
+ }
+ else
+ {
+ EnterMyLock();
+ lrwc = GetThreadRWCount(false);
+
+ if (id == _upgradeLockOwnerId)
+ {
+ // Recursive upgrade: just bump the per-thread count.
+ lrwc.upgradecount++;
+ ExitMyLock();
+ return true;
+ }
+ else if (id == _writeLockOwnerId)
+ {
+ //Write lock is already held, Just update the global state
+ //to show presence of upgrader.
+ Debug.Assert((_owners & WRITER_HELD) > 0);
+ _owners++;
+ _upgradeLockOwnerId = id;
+ lrwc.upgradecount++;
+ if (lrwc.readercount > 0)
+ _fUpgradeThreadHoldingRead = true;
+ ExitMyLock();
+ return true;
+ }
+ else if (lrwc.readercount > 0)
+ {
+ //Upgrade locks may not be acquired if only read locks have been
+ //acquired.
+ ExitMyLock();
+ throw new LockRecursionException(SR.LockRecursionException_UpgradeAfterReadNotAllowed);
+ }
+ }
+
+ bool retVal = true;
+
+ int spincount = 0;
+
+ // Spin first, then block on _upgradeEvent. myLock is held at the top
+ // of every iteration.
+ for (; ;)
+ {
+ //Once an upgrade lock is taken, it's like having a reader lock held
+ //until upgrade or downgrade operations are performed.
+
+ if ((_upgradeLockOwnerId == -1) && (_owners < MAX_READER))
+ {
+ _owners++;
+ _upgradeLockOwnerId = id;
+ break;
+ }
+
+ if (spincount < MaxSpinCount)
+ {
+ ExitMyLock();
+ if (timeout.IsExpired)
+ return false;
+ spincount++;
+ SpinWait(spincount);
+ EnterMyLock();
+ continue;
+ }
+
+ // Drat, we need to wait. Mark that we have waiters and wait.
+ if (_upgradeEvent == null) // Create the needed event
+ {
+ LazyCreateEvent(ref _upgradeEvent, true);
+ continue; // since we left the lock, start over.
+ }
+
+ //Only one thread with the upgrade lock held can proceed.
+ retVal = WaitOnEvent(_upgradeEvent, ref _numUpgradeWaiters, timeout, isWriteWaiter: false);
+ if (!retVal)
+ return false;
+ }
+
+ if (_fIsReentrant)
+ {
+ //The lock may have been dropped getting here, so make a quick check to see whether some other
+ //thread did not grab the entry.
+ if (IsRwHashEntryChanged(lrwc))
+ lrwc = GetThreadRWCount(false);
+ lrwc.upgradecount++;
+ }
+
+ ExitMyLock();
+
+ return true;
+ }
+
+ // Releases one level of read lock held by the calling thread. Throws
+ // SynchronizationLockException if the thread holds no read lock. When the
+ // last recursion level is released, the global owner count is decremented
+ // and appropriate waiters are woken.
+ public void ExitReadLock()
+ {
+ ReaderWriterCount lrwc = null;
+
+ EnterMyLock();
+
+ lrwc = GetThreadRWCount(true);
+
+ if (lrwc == null || lrwc.readercount < 1)
+ {
+ //You have to be holding the read lock to make this call.
+ ExitMyLock();
+ throw new SynchronizationLockException(SR.SynchronizationLockException_MisMatchedRead);
+ }
+
+ if (_fIsReentrant)
+ {
+ if (lrwc.readercount > 1)
+ {
+ // Still recursively held: only the per-thread count changes.
+ lrwc.readercount--;
+ ExitMyLock();
+ return;
+ }
+
+ if (Environment.CurrentManagedThreadId == _upgradeLockOwnerId)
+ {
+ _fUpgradeThreadHoldingRead = false;
+ }
+ }
+
+ Debug.Assert(_owners > 0, "ReleasingReaderLock: releasing lock and no read lock taken");
+
+ --_owners;
+
+ Debug.Assert(lrwc.readercount == 1);
+ lrwc.readercount--;
+
+ // Releases myLock and signals writers/upgraders/readers as appropriate.
+ ExitAndWakeUpAppropriateWaiters();
+ }
+
+ // Releases one level of write lock held by the calling thread. Throws
+ // SynchronizationLockException on a mismatched release. When the last
+ // recursion level is released, WRITER_HELD is cleared and waiters woken.
+ public void ExitWriteLock()
+ {
+ ReaderWriterCount lrwc;
+ if (!_fIsReentrant)
+ {
+ // Non-reentrant: ownership check is a cheap thread-id compare,
+ // done before taking myLock.
+ if (Environment.CurrentManagedThreadId != _writeLockOwnerId)
+ {
+ //You have to be holding the write lock to make this call.
+ throw new SynchronizationLockException(SR.SynchronizationLockException_MisMatchedWrite);
+ }
+ EnterMyLock();
+ }
+ else
+ {
+ EnterMyLock();
+ lrwc = GetThreadRWCount(false);
+
+ if (lrwc == null)
+ {
+ ExitMyLock();
+ throw new SynchronizationLockException(SR.SynchronizationLockException_MisMatchedWrite);
+ }
+
+ if (lrwc.writercount < 1)
+ {
+ ExitMyLock();
+ throw new SynchronizationLockException(SR.SynchronizationLockException_MisMatchedWrite);
+ }
+
+ lrwc.writercount--;
+
+ if (lrwc.writercount > 0)
+ {
+ // Still recursively held: keep WRITER_HELD set.
+ ExitMyLock();
+ return;
+ }
+ }
+
+ Debug.Assert((_owners & WRITER_HELD) > 0, "Calling ReleaseWriterLock when no write lock is held");
+
+ ClearWriterAcquired();
+
+ _writeLockOwnerId = -1;
+
+ ExitAndWakeUpAppropriateWaiters();
+ }
+
+ // Releases one level of upgradeable-read lock held by the calling thread.
+ // Throws SynchronizationLockException on a mismatched release. When the
+ // last recursion level is released, the upgrade slot is freed and waiters
+ // woken.
+ public void ExitUpgradeableReadLock()
+ {
+ ReaderWriterCount lrwc;
+ if (!_fIsReentrant)
+ {
+ if (Environment.CurrentManagedThreadId != _upgradeLockOwnerId)
+ {
+ //You have to be holding the upgrade lock to make this call.
+ throw new SynchronizationLockException(SR.SynchronizationLockException_MisMatchedUpgrade);
+ }
+ EnterMyLock();
+ }
+ else
+ {
+ EnterMyLock();
+ lrwc = GetThreadRWCount(true);
+
+ if (lrwc == null)
+ {
+ ExitMyLock();
+ throw new SynchronizationLockException(SR.SynchronizationLockException_MisMatchedUpgrade);
+ }
+
+ if (lrwc.upgradecount < 1)
+ {
+ ExitMyLock();
+ throw new SynchronizationLockException(SR.SynchronizationLockException_MisMatchedUpgrade);
+ }
+
+ lrwc.upgradecount--;
+
+ if (lrwc.upgradecount > 0)
+ {
+ // Still recursively held.
+ ExitMyLock();
+ return;
+ }
+
+ _fUpgradeThreadHoldingRead = false;
+ }
+
+ _owners--;
+ _upgradeLockOwnerId = -1;
+
+ ExitAndWakeUpAppropriateWaiters();
+ }
+
+ /// <summary>
+ /// A routine for lazily creating a event outside the lock (so if errors
+ /// happen they are outside the lock and that we don't do much work
+ /// while holding a spin lock). If all goes well, reenter the lock and
+ /// set 'waitEvent'. Callers must hold myLock on entry and still hold it
+ /// on return; the lock is dropped only around the event allocation.
+ /// </summary>
+ private void LazyCreateEvent(ref EventWaitHandle waitEvent, bool makeAutoResetEvent)
+ {
+#if DEBUG
+ Debug.Assert(MyLockHeld);
+ Debug.Assert(waitEvent == null);
+#endif
+ ExitMyLock();
+ EventWaitHandle newEvent;
+ if (makeAutoResetEvent)
+ newEvent = new AutoResetEvent(false);
+ else
+ newEvent = new ManualResetEvent(false);
+ EnterMyLock();
+ if (waitEvent == null) // maybe someone snuck in.
+ waitEvent = newEvent;
+ else
+ newEvent.Dispose(); // lost the race: discard our event, keep theirs.
+ }
+
+ /// <summary>
+ /// Waits on 'waitEvent' with a timeout
+ /// Before the wait 'numWaiters' is incremented and is restored before leaving this routine.
+ /// Called with myLock held; myLock is released during the wait. On success
+ /// myLock is held again on return; on failure (timeout) myLock has been
+ /// released before returning.
+ /// </summary>
+ private bool WaitOnEvent(
+ EventWaitHandle waitEvent,
+ ref uint numWaiters,
+ TimeoutTracker timeout,
+ bool isWriteWaiter)
+ {
+#if DEBUG
+ Debug.Assert(MyLockHeld);
+#endif
+ waitEvent.Reset();
+ numWaiters++;
+ _fNoWaiters = false;
+
+ //Setting these bits will prevent new readers from getting in.
+ if (_numWriteWaiters == 1)
+ SetWritersWaiting();
+ if (_numWriteUpgradeWaiters == 1)
+ SetUpgraderWaiting();
+
+ bool waitSuccessful = false;
+ ExitMyLock(); // Do the wait outside of any lock
+
+ try
+ {
+ waitSuccessful = waitEvent.WaitOne(timeout.RemainingMilliseconds);
+ }
+ finally
+ {
+ // Runs even if WaitOne throws, so the waiter counts and flags
+ // are always restored under myLock.
+ EnterMyLock();
+ --numWaiters;
+
+ if (_numWriteWaiters == 0 && _numWriteUpgradeWaiters == 0 && _numUpgradeWaiters == 0 && _numReadWaiters == 0)
+ _fNoWaiters = true;
+
+ if (_numWriteWaiters == 0)
+ ClearWritersWaiting();
+ if (_numWriteUpgradeWaiters == 0)
+ ClearUpgraderWaiting();
+
+ if (!waitSuccessful) // We may also be about to throw for some reason. Exit myLock.
+ {
+ if (isWriteWaiter)
+ {
+ // Write waiters block read waiters from acquiring the lock. Since this was the last write waiter, try
+ // to wake up the appropriate read waiters.
+ ExitAndWakeUpAppropriateReadWaiters();
+ }
+ else
+ ExitMyLock();
+ }
+ }
+ return waitSuccessful;
+ }
+
+ /// <summary>
+ /// Determines the appropriate events to set, leaves the locks, and sets the events.
+ /// Called with myLock held; myLock is always released before returning.
+ /// </summary>
+ private void ExitAndWakeUpAppropriateWaiters()
+ {
+#if DEBUG
+ Debug.Assert(MyLockHeld);
+#endif
+ // Fast path: nothing to wake.
+ if (_fNoWaiters)
+ {
+ ExitMyLock();
+ return;
+ }
+
+ ExitAndWakeUpAppropriateWaitersPreferringWriters();
+ }
+
+ // Wakes waiters with writer preference: a pending upgrade-to-write waiter
+ // is signaled first (when only the upgrader remains), then writers (when
+ // no readers remain), and only then readers/upgraders. Called with myLock
+ // held; always releases it before signaling.
+ private void ExitAndWakeUpAppropriateWaitersPreferringWriters()
+ {
+ uint readercount = GetNumReaders();
+
+ //We need this case for EU->ER->EW case, as the read count will be 2 in
+ //that scenario.
+ if (_fIsReentrant)
+ {
+ if (_numWriteUpgradeWaiters > 0 && _fUpgradeThreadHoldingRead && readercount == 2)
+ {
+ ExitMyLock(); // Exit before signaling to improve efficiency (wakee will need the lock)
+ _waitUpgradeEvent.Set(); // release all upgraders (however there can be at most one).
+ return;
+ }
+ }
+
+ if (readercount == 1 && _numWriteUpgradeWaiters > 0)
+ {
+ //We have to be careful now, as we are dropping the lock.
+ //No new writes should be allowed to sneak in if an upgrade
+ //was pending.
+
+ ExitMyLock(); // Exit before signaling to improve efficiency (wakee will need the lock)
+ _waitUpgradeEvent.Set(); // release all upgraders (however there can be at most one).
+ }
+ else if (readercount == 0 && _numWriteWaiters > 0)
+ {
+ ExitMyLock(); // Exit before signaling to improve efficiency (wakee will need the lock)
+ _writeEvent.Set(); // release one writer.
+ }
+ else
+ {
+ ExitAndWakeUpAppropriateReadWaiters();
+ }
+ }
+
+ // Wakes read and upgrade waiters, but only when no writer or
+ // upgrade-to-write waiter is pending (writers keep priority). Called with
+ // myLock held; always releases it before signaling.
+ private void ExitAndWakeUpAppropriateReadWaiters()
+ {
+#if DEBUG
+ Debug.Assert(MyLockHeld);
+#endif
+
+ if (_numWriteWaiters != 0 || _numWriteUpgradeWaiters != 0 || _fNoWaiters)
+ {
+ ExitMyLock();
+ return;
+ }
+
+ Debug.Assert(_numReadWaiters != 0 || _numUpgradeWaiters != 0);
+
+ bool setReadEvent = _numReadWaiters != 0;
+ // Only wake an upgrader if the upgrade slot is currently free.
+ bool setUpgradeEvent = _numUpgradeWaiters != 0 && _upgradeLockOwnerId == -1;
+
+ ExitMyLock(); // Exit before signaling to improve efficiency (wakee will need the lock)
+
+ if (setReadEvent)
+ _readEvent.Set(); // release all readers.
+
+ if (setUpgradeEvent)
+ _upgradeEvent.Set(); //release one upgrader.
+ }
+
+ // Helpers that manipulate the packed _owners word: the low bits
+ // (READER_MASK) hold the reader count, while WRITER_HELD,
+ // WAITING_WRITERS and WAITING_UPGRADER are individual flag bits.
+
+ // True when no readers are active and no writer holds the lock
+ // (the WAITING_WRITERS bit alone does not block a new writer).
+ private bool IsWriterAcquired()
+ {
+ return (_owners & ~WAITING_WRITERS) == 0;
+ }
+
+ private void SetWriterAcquired()
+ {
+ _owners |= WRITER_HELD; // indicate we have a writer.
+ }
+
+ private void ClearWriterAcquired()
+ {
+ _owners &= ~WRITER_HELD;
+ }
+
+ private void SetWritersWaiting()
+ {
+ _owners |= WAITING_WRITERS;
+ }
+
+ private void ClearWritersWaiting()
+ {
+ _owners &= ~WAITING_WRITERS;
+ }
+
+ private void SetUpgraderWaiting()
+ {
+ _owners |= WAITING_UPGRADER;
+ }
+
+ private void ClearUpgraderWaiting()
+ {
+ _owners &= ~WAITING_UPGRADER;
+ }
+
+ // Number of active readers (includes the upgrade-lock holder, which is
+ // counted in _owners like a reader).
+ private uint GetNumReaders()
+ {
+ return _owners & READER_MASK;
+ }
+
+ // myLock is a hand-rolled spin lock (_myLock: 0 = free, 1 = held) that
+ // guards all of this instance's internal state. The fast path is inlined;
+ // contention falls into EnterMyLockSpin.
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private void EnterMyLock()
+ {
+ if (Interlocked.CompareExchange(ref _myLock, 1, 0) != 0)
+ EnterMyLockSpin();
+ }
+
+ // Contended path: spin briefly (multiprocessor only), then yield with
+ // Sleep(0), finally Sleep(1) so lower-priority threads can run.
+ private void EnterMyLockSpin()
+ {
+ int pc = Environment.ProcessorCount;
+ for (int i = 0; ; i++)
+ {
+ if (i < LockSpinCount && pc > 1)
+ {
+ RuntimeThread.SpinWait(LockSpinCycles * (i + 1)); // Wait a few dozen instructions to let another processor release lock.
+ }
+ else if (i < (LockSpinCount + LockSleep0Count))
+ {
+ RuntimeThread.Sleep(0); // Give up my quantum.
+ }
+ else
+ {
+ RuntimeThread.Sleep(1); // Give up my quantum.
+ }
+
+ // Cheap read first to avoid hammering the cache line with CAS.
+ if (_myLock == 0 && Interlocked.CompareExchange(ref _myLock, 1, 0) == 0)
+ return;
+ }
+ }
+
+ private void ExitMyLock()
+ {
+ Debug.Assert(_myLock != 0, "Exiting spin lock that is not held");
+ // Volatile write publishes all state changes made under the lock.
+ Volatile.Write(ref _myLock, 0);
+ }
+
+#if DEBUG
+ // Debug-only probe: true while the internal spin lock is held.
+ private bool MyLockHeld { get { return _myLock != 0; } }
+#endif
+
+ // Back-off used between acquisition attempts in the TryEnter* loops:
+ // brief spinning on multiprocessor machines, otherwise Sleep(0).
+ private static void SpinWait(int SpinCount)
+ {
+ //Exponential back-off
+ if ((SpinCount < 5) && (Environment.ProcessorCount > 1))
+ {
+ RuntimeThread.SpinWait(LockSpinCycles * SpinCount);
+ }
+ else
+ {
+ RuntimeThread.Sleep(0);
+ }
+
+ // Don't want to Sleep(1) in this spin wait:
+ // - Don't want to spin for that long, since a proper wait will follow when the spin wait fails. The artificial
+ // delay introduced by Sleep(1) will in some cases be much longer than desired.
+ // - Sleep(1) would put the thread into a wait state, and a proper wait will follow when the spin wait fails
+ // anyway, so it's preferable to put the thread into the proper wait state
+ }
+
+ public void Dispose()
+ {
+ Dispose(true);
+ }
+
+ // Disposes the lazily created wait events. Throws
+ // SynchronizationLockException if any thread is waiting on, or the caller
+ // still holds, any of the locks. Idempotent once _fDisposed is set.
+ private void Dispose(bool disposing)
+ {
+ if (disposing && !_fDisposed)
+ {
+ if (WaitingReadCount > 0 || WaitingUpgradeCount > 0 || WaitingWriteCount > 0)
+ throw new SynchronizationLockException(SR.SynchronizationLockException_IncorrectDispose);
+
+ if (IsReadLockHeld || IsUpgradeableReadLockHeld || IsWriteLockHeld)
+ throw new SynchronizationLockException(SR.SynchronizationLockException_IncorrectDispose);
+
+ if (_writeEvent != null)
+ {
+ _writeEvent.Dispose();
+ _writeEvent = null;
+ }
+
+ if (_readEvent != null)
+ {
+ _readEvent.Dispose();
+ _readEvent = null;
+ }
+
+ if (_upgradeEvent != null)
+ {
+ _upgradeEvent.Dispose();
+ _upgradeEvent = null;
+ }
+
+ if (_waitUpgradeEvent != null)
+ {
+ _waitUpgradeEvent.Dispose();
+ _waitUpgradeEvent = null;
+ }
+
+ _fDisposed = true;
+ }
+ }
+
+ // True when the calling thread holds a read lock (recursion count > 0).
+ public bool IsReadLockHeld
+ {
+ get
+ {
+ if (RecursiveReadCount > 0)
+ return true;
+ else
+ return false;
+ }
+ }
+
+ // True when the calling thread holds the upgradeable read lock.
+ public bool IsUpgradeableReadLockHeld
+ {
+ get
+ {
+ if (RecursiveUpgradeCount > 0)
+ return true;
+ else
+ return false;
+ }
+ }
+
+ // True when the calling thread holds the write lock.
+ public bool IsWriteLockHeld
+ {
+ get
+ {
+ if (RecursiveWriteCount > 0)
+ return true;
+ else
+ return false;
+ }
+ }
+
+ // Recursion policy chosen at construction (_fIsReentrant is immutable
+ // after the constructor).
+ public LockRecursionPolicy RecursionPolicy
+ {
+ get
+ {
+ if (_fIsReentrant)
+ {
+ return LockRecursionPolicy.SupportsRecursion;
+ }
+ else
+ {
+ return LockRecursionPolicy.NoRecursion;
+ }
+ }
+ }
+
+ // Total number of threads currently holding a read lock. The upgrade-lock
+ // holder is counted in _owners like a reader, so it is subtracted out here.
+ public int CurrentReadCount
+ {
+ get
+ {
+ int numreaders = (int)GetNumReaders();
+
+ if (_upgradeLockOwnerId != -1)
+ return numreaders - 1;
+ else
+ return numreaders;
+ }
+ }
+
+
+ // The calling thread's read-lock recursion depth (0 if not held).
+ public int RecursiveReadCount
+ {
+ get
+ {
+ int count = 0;
+ ReaderWriterCount lrwc = GetThreadRWCount(true);
+ if (lrwc != null)
+ count = lrwc.readercount;
+
+ return count;
+ }
+ }
+
+ // The calling thread's upgrade-lock recursion depth. In non-reentrant
+ // mode the depth can only be 0 or 1, so a thread-id compare suffices.
+ public int RecursiveUpgradeCount
+ {
+ get
+ {
+ if (_fIsReentrant)
+ {
+ int count = 0;
+
+ ReaderWriterCount lrwc = GetThreadRWCount(true);
+ if (lrwc != null)
+ count = lrwc.upgradecount;
+
+ return count;
+ }
+ else
+ {
+ if (Environment.CurrentManagedThreadId == _upgradeLockOwnerId)
+ return 1;
+ else
+ return 0;
+ }
+ }
+ }
+
+ // The calling thread's write-lock recursion depth. In non-reentrant mode
+ // the depth can only be 0 or 1, so a thread-id compare suffices.
+ public int RecursiveWriteCount
+ {
+ get
+ {
+ if (_fIsReentrant)
+ {
+ int count = 0;
+
+ ReaderWriterCount lrwc = GetThreadRWCount(true);
+ if (lrwc != null)
+ count = lrwc.writercount;
+
+ return count;
+ }
+ else
+ {
+ if (Environment.CurrentManagedThreadId == _writeLockOwnerId)
+ return 1;
+ else
+ return 0;
+ }
+ }
+ }
+
+ // Number of threads currently blocked waiting to enter a read lock.
+ public int WaitingReadCount
+ {
+ get
+ {
+ return (int)_numReadWaiters;
+ }
+ }
+
+ // Number of threads currently blocked waiting to enter the upgradeable lock.
+ public int WaitingUpgradeCount
+ {
+ get
+ {
+ return (int)_numUpgradeWaiters;
+ }
+ }
+
+ // Number of threads currently blocked waiting to enter the write lock.
+ public int WaitingWriteCount
+ {
+ get
+ {
+ return (int)_numWriteWaiters;
+ }
+ }
+ }
+}
diff --git a/src/mscorlib/src/System/Threading/SpinWait.cs b/src/mscorlib/src/System/Threading/SpinWait.cs
index ae490e8f24..30d7aa679c 100644
--- a/src/mscorlib/src/System/Threading/SpinWait.cs
+++ b/src/mscorlib/src/System/Threading/SpinWait.cs
@@ -51,7 +51,7 @@ namespace System.Threading
/// <remarks>
/// <para>
/// <see cref="SpinWait"/> encapsulates common spinning logic. On single-processor machines, yields are
- /// always used instead of busy waits, and on computers with Intel� processors employing Hyper-Threading�
+ /// always used instead of busy waits, and on computers with Intel processors employing Hyper-Threading
/// technology, it helps to prevent hardware thread starvation. SpinWait encapsulates a good mixture of
/// spinning and true yielding.
/// </para>
@@ -312,50 +312,4 @@ namespace System.Threading
get { return ProcessorCount == 1; }
}
}
-
- /// <summary>
- /// A helper class to capture a start time using Environment.TickCout as a time in milliseconds, also updates a given timeout bu subtracting the current time from
- /// the start time
- /// </summary>
- internal static class TimeoutHelper
- {
- /// <summary>
- /// Returns the Environment.TickCount as a start time in milliseconds as a uint, TickCount tools over from postive to negative every ~ 25 days
- /// then ~25 days to back to positive again, uint is sued to ignore the sign and double the range to 50 days
- /// </summary>
- /// <returns></returns>
- public static uint GetTime()
- {
- return (uint)Environment.TickCount;
- }
-
- /// <summary>
- /// Helper function to measure and update the elapsed time
- /// </summary>
- /// <param name="startTime"> The first time (in milliseconds) observed when the wait started</param>
- /// <param name="originalWaitMillisecondsTimeout">The orginal wait timeoutout in milliseconds</param>
- /// <returns>The new wait time in milliseconds, -1 if the time expired</returns>
- public static int UpdateTimeOut(uint startTime, int originalWaitMillisecondsTimeout)
- {
- // The function must be called in case the time out is not infinite
- Debug.Assert(originalWaitMillisecondsTimeout != Timeout.Infinite);
-
- uint elapsedMilliseconds = (GetTime() - startTime);
-
- // Check the elapsed milliseconds is greater than max int because this property is uint
- if (elapsedMilliseconds > int.MaxValue)
- {
- return 0;
- }
-
- // Subtract the elapsed time from the current wait time
- int currentWaitTimeout = originalWaitMillisecondsTimeout - (int)elapsedMilliseconds; ;
- if (currentWaitTimeout <= 0)
- {
- return 0;
- }
-
- return currentWaitTimeout;
- }
- }
}
diff --git a/src/pal/CMakeLists.txt b/src/pal/CMakeLists.txt
index 8e3228b622..453bddde6c 100644
--- a/src/pal/CMakeLists.txt
+++ b/src/pal/CMakeLists.txt
@@ -5,8 +5,14 @@ project(COREPAL)
if (WIN32)
set(FEATURE_EVENT_TRACE 1)
endif()
-if(CLR_CMAKE_PLATFORM_LINUX AND CLR_CMAKE_TARGET_ARCH_AMD64)
- set(FEATURE_EVENT_TRACE 1)
+if(CLR_CMAKE_PLATFORM_LINUX)
+ if(CLR_CMAKE_TARGET_ARCH_AMD64)
+ set(FEATURE_EVENT_TRACE 1)
+ elseif(CLR_CMAKE_TARGET_ARCH_ARM)
+ if(NOT(CLR_CMAKE_TARGET_TIZEN_LINUX))
+ set(FEATURE_EVENT_TRACE 1)
+ endif()
+ endif()
endif()
include_directories(${COREPAL_SOURCE_DIR}/inc)
diff --git a/src/pal/inc/pal.h b/src/pal/inc/pal.h
index 8430ea9899..5d2739e7b8 100644
--- a/src/pal/inc/pal.h
+++ b/src/pal/inc/pal.h
@@ -487,9 +487,12 @@ PAL_NotifyRuntimeStarted(VOID);
static const int MAX_DEBUGGER_TRANSPORT_PIPE_NAME_LENGTH = 64;
PALIMPORT
-void
+VOID
PALAPI
-PAL_GetTransportPipeName(char *name, DWORD id, const char *suffix);
+PAL_GetTransportPipeName(
+ OUT char *name,
+ IN DWORD id,
+ IN const char *suffix);
PALIMPORT
void
diff --git a/src/pal/prebuilt/inc/mscoree.h b/src/pal/prebuilt/inc/mscoree.h
index 12d2172a85..29f7b261d3 100644
--- a/src/pal/prebuilt/inc/mscoree.h
+++ b/src/pal/prebuilt/inc/mscoree.h
@@ -112,8 +112,13 @@ typedef interface ICLRRuntimeHost ICLRRuntimeHost;
#define __ICLRRuntimeHost2_FWD_DEFINED__
typedef interface ICLRRuntimeHost2 ICLRRuntimeHost2;
-#endif /* __ICLRRuntimeHost2_FWD_DEFINED__ */
+#endif /* __ICLRRuntimeHost4_FWD_DEFINED__ */
+#ifndef __ICLRRuntimeHost4_FWD_DEFINED__
+#define __ICLRRuntimeHost4_FWD_DEFINED__
+typedef interface ICLRRuntimeHost4 ICLRRuntimeHost4;
+
+#endif /* __ICLRRuntimeHost4_FWD_DEFINED__ */
#ifndef __ICLRExecutionManager_FWD_DEFINED__
#define __ICLRExecutionManager_FWD_DEFINED__
@@ -254,6 +259,7 @@ EXTERN_GUID(IID_ICLRErrorReportingManager, 0x980d2f1a, 0xbf79, 0x4c08, 0x81, 0x2
EXTERN_GUID(IID_ICLRErrorReportingManager2, 0xc68f63b1, 0x4d8b, 0x4e0b, 0x95, 0x64, 0x9d, 0x2e, 0xfe, 0x2f, 0xa1, 0x8c);
EXTERN_GUID(IID_ICLRRuntimeHost, 0x90F1A06C, 0x7712, 0x4762, 0x86, 0xB5, 0x7A, 0x5E, 0xBA, 0x6B, 0xDB, 0x02);
EXTERN_GUID(IID_ICLRRuntimeHost2, 0x712AB73F, 0x2C22, 0x4807, 0xAD, 0x7E, 0xF5, 0x01, 0xD7, 0xb7, 0x2C, 0x2D);
+EXTERN_GUID(IID_ICLRRuntimeHost4, 0x64F6D366, 0xD7C2, 0x4F1F, 0xB4, 0xB2, 0xE8, 0x16, 0x0C, 0xAC, 0x43, 0xAF);
EXTERN_GUID(IID_ICLRExecutionManager, 0x1000A3E7, 0xB420, 0x4620, 0xAE, 0x30, 0xFB, 0x19, 0xB5, 0x87, 0xAD, 0x1D);
EXTERN_GUID(IID_ITypeName, 0xB81FF171, 0x20F3, 0x11d2, 0x8d, 0xcc, 0x00, 0xa0, 0xc9, 0xb0, 0x05, 0x22);
EXTERN_GUID(IID_ITypeNameBuilder, 0xB81FF171, 0x20F3, 0x11d2, 0x8d, 0xcc, 0x00, 0xa0, 0xc9, 0xb0, 0x05, 0x23);
@@ -1819,6 +1825,14 @@ EXTERN_C const IID IID_ICLRRuntimeHost2;
};
+ MIDL_INTERFACE("64F6D366-D7C2-4F1F-B4B2-E8160CAC43AF")
+ ICLRRuntimeHost4 : public ICLRRuntimeHost2
+ {
+ virtual HRESULT STDMETHODCALLTYPE UnloadAppDomain2(
+ /* [in] */ DWORD dwAppDomainId,
+ /* [in] */ BOOL fWaitUntilDone,
+ /* [out] */ int *pLatchedExitCode) = 0;
+ };
#else /* C style interface */
diff --git a/src/pal/src/config.h.in b/src/pal/src/config.h.in
index ab5fa0341d..c2939f3011 100644
--- a/src/pal/src/config.h.in
+++ b/src/pal/src/config.h.in
@@ -20,6 +20,7 @@
#cmakedefine01 HAVE_RUNETYPE_H
#cmakedefine01 HAVE_SYS_SYSCTL_H
#cmakedefine01 HAVE_GNU_LIBNAMES_H
+#cmakedefine01 HAVE_PRCTL_H
#cmakedefine01 HAVE_KQUEUE
#cmakedefine01 HAVE_GETPWUID_R
diff --git a/src/pal/src/configure.cmake b/src/pal/src/configure.cmake
index 4d78f54423..03c7343056 100644
--- a/src/pal/src/configure.cmake
+++ b/src/pal/src/configure.cmake
@@ -34,6 +34,7 @@ check_include_files(lwp.h HAVE_LWP_H)
check_include_files(libunwind.h HAVE_LIBUNWIND_H)
check_include_files(runetype.h HAVE_RUNETYPE_H)
check_include_files(semaphore.h HAVE_SEMAPHORE_H)
+check_include_files(sys/prctl.h HAVE_PRCTL_H)
if(NOT CMAKE_SYSTEM_NAME STREQUAL FreeBSD AND NOT CMAKE_SYSTEM_NAME STREQUAL NetBSD)
set(CMAKE_REQUIRED_FLAGS "-ldl")
diff --git a/src/pal/src/include/pal/process.h b/src/pal/src/include/pal/process.h
index 990aec5b21..63ef5c52ec 100644
--- a/src/pal/src/include/pal/process.h
+++ b/src/pal/src/include/pal/process.h
@@ -121,6 +121,21 @@ Abstract
VOID PROCProcessUnlock(VOID);
/*++
+Function
+ PROCAbortInitialize()
+
+Abstract
+ Initialize the process abort crash dump program file path and
+ name. Doing all of this ahead of time so nothing is allocated
+ or copied in PROCAbort/signal handler.
+
+Return
+ TRUE - succeeds, FALSE - fails
+
+--*/
+BOOL PROCAbortInitialize();
+
+/*++
Function:
PROCAbort()
@@ -130,7 +145,7 @@ Function:
Does not return
--*/
PAL_NORETURN
-void PROCAbort();
+VOID PROCAbort();
/*++
Function:
@@ -141,7 +156,7 @@ Function:
(no return value)
--*/
-void PROCNotifyProcessShutdown();
+VOID PROCNotifyProcessShutdown();
/*++
Function:
diff --git a/src/pal/src/init/pal.cpp b/src/pal/src/init/pal.cpp
index fa94922325..8b0e0f53f5 100644
--- a/src/pal/src/init/pal.cpp
+++ b/src/pal/src/init/pal.cpp
@@ -650,6 +650,12 @@ PAL_InitializeCoreCLR(const char *szExePath)
return ERROR_DLL_INIT_FAILED;
}
+ if (!PROCAbortInitialize())
+ {
+ printf("PROCAbortInitialize FAILED %d (%s)\n", errno, strerror(errno));
+ return ERROR_GEN_FAILURE;
+ }
+
if (!InitializeFlushProcessWriteBuffers())
{
return ERROR_GEN_FAILURE;
diff --git a/src/pal/src/thread/process.cpp b/src/pal/src/thread/process.cpp
index 050665ce7c..e7380ee1e1 100644
--- a/src/pal/src/thread/process.cpp
+++ b/src/pal/src/thread/process.cpp
@@ -49,12 +49,17 @@ SET_DEFAULT_DEBUG_CHANNEL(PROCESS); // some headers have code with asserts, so d
#include <sys/types.h>
#include <sys/stat.h>
#include <signal.h>
+#if HAVE_PRCTL_H
+#include <sys/prctl.h>
+#include <sys/syscall.h>
+#endif
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <debugmacrosext.h>
#include <semaphore.h>
#include <stdint.h>
+#include <dlfcn.h>
#ifdef __APPLE__
#include <sys/sysctl.h>
@@ -67,6 +72,8 @@ SET_DEFAULT_DEBUG_CHANNEL(PROCESS); // some headers have code with asserts, so d
#include <kvm.h>
#endif
+extern char *g_szCoreCLRPath;
+
using namespace CorUnix;
CObjectType CorUnix::otProcess(
@@ -87,18 +94,6 @@ CObjectType CorUnix::otProcess(
CObjectType::NoOwner
);
-static
-DWORD
-PALAPI
-StartupHelperThread(
- LPVOID p);
-
-static
-BOOL
-GetProcessIdDisambiguationKey(
- IN DWORD processId,
- OUT UINT64 *disambiguationKey);
-
//
// Helper memory page used by the FlushProcessWriteBuffers
//
@@ -153,6 +148,9 @@ DWORD gSID = (DWORD) -1;
// Function to call during PAL/process shutdown/abort
Volatile<PSHUTDOWN_CALLBACK> g_shutdownCallback = nullptr;
+// Crash dump generating program arguments. Initialized in PROCAbortInitialize().
+char *g_argvCreateDump[3] = { nullptr, nullptr, nullptr };
+
//
// Key used for associating CPalThread's with the underlying pthread
// (through pthread_setspecific)
@@ -172,22 +170,30 @@ enum FILETYPE
FILE_DIR /*Directory*/
};
+static
+DWORD
+PALAPI
+StartupHelperThread(
+ LPVOID p);
+
+static
+BOOL
+GetProcessIdDisambiguationKey(
+ IN DWORD processId,
+ OUT UINT64 *disambiguationKey);
+
PAL_ERROR
PROCGetProcessStatus(
CPalThread *pThread,
HANDLE hProcess,
PROCESS_STATE *pps,
- DWORD *pdwExitCode
- );
+ DWORD *pdwExitCode);
-static BOOL getFileName(LPCWSTR lpApplicationName, LPWSTR lpCommandLine,
- PathCharString& lpFileName);
-static char ** buildArgv(LPCWSTR lpCommandLine, PathCharString& lpAppPath,
- UINT *pnArg, BOOL prependLoader);
+static BOOL getFileName(LPCWSTR lpApplicationName, LPWSTR lpCommandLine, PathCharString& lpFileName);
+static char ** buildArgv(LPCWSTR lpCommandLine, PathCharString& lpAppPath, UINT *pnArg, BOOL prependLoader);
static BOOL getPath(PathCharString& lpFileName, PathCharString& lpPathFileName);
static int checkFileType(LPCSTR lpFileName);
-static BOOL PROCEndProcess(HANDLE hProcess, UINT uExitCode,
- BOOL bTerminateUnconditionally);
+static BOOL PROCEndProcess(HANDLE hProcess, UINT uExitCode, BOOL bTerminateUnconditionally);
ProcessModules *GetProcessModulesFromHandle(IN HANDLE hProcess, OUT LPDWORD lpCount);
ProcessModules *CreateProcessModules(IN DWORD dwProcessId, OUT LPDWORD lpCount);
@@ -1382,7 +1388,7 @@ static BOOL PROCEndProcess(HANDLE hProcess, UINT uExitCode, BOOL bTerminateUncon
// (1) it doesn't run atexit handlers
// (2) can invoke CrashReporter or produce a coredump,
// which is appropriate for TerminateProcess calls
- abort();
+ PROCAbort();
}
else
{
@@ -2081,9 +2087,12 @@ GetProcessIdDisambiguationKey(DWORD processId, UINT64 *disambiguationKey)
Builds the transport pipe names from the process id.
--*/
-void
+VOID
PALAPI
-PAL_GetTransportPipeName(char *name, DWORD id, const char *suffix)
+PAL_GetTransportPipeName(
+ OUT char *name,
+ IN DWORD id,
+ IN const char *suffix)
{
UINT64 disambiguationKey = 0;
BOOL ret = GetProcessIdDisambiguationKey(id, &disambiguationKey);
@@ -2829,7 +2838,7 @@ Return
None
--*/
-void
+VOID
DestroyProcessModules(IN ProcessModules *listHead)
{
for (ProcessModules *entry = listHead; entry != NULL; )
@@ -2841,7 +2850,7 @@ DestroyProcessModules(IN ProcessModules *listHead)
}
/*++
-Function:
+Function:
PROCNotifyProcessShutdown
Calls the abort handler to do any shutdown cleanup. Call be called
@@ -2850,7 +2859,8 @@ Function:
(no return value)
--*/
__attribute__((destructor))
-void PROCNotifyProcessShutdown()
+VOID
+PROCNotifyProcessShutdown()
{
// Call back into the coreclr to clean up the debugger transport pipes
PSHUTDOWN_CALLBACK callback = InterlockedExchangePointer(&g_shutdownCallback, NULL);
@@ -2861,6 +2871,67 @@ void PROCNotifyProcessShutdown()
}
/*++
+Function:
+ PROCAbortInitialize()
+
+Abstract
+ Initialize the process abort crash dump program file path and
+ name. Doing all of this ahead of time so nothing is allocated
+ or copied in PROCAbort/signal handler.
+
+Return
+ TRUE - succeeds, FALSE - fails
+
+--*/
+BOOL
+PROCAbortInitialize()
+{
+ char* enabled = getenv("COMPlus_DbgEnableMiniDump");
+ if (enabled != nullptr && _stricmp(enabled, "1") == 0)
+ {
+ if (g_szCoreCLRPath == nullptr)
+ {
+ return FALSE;
+ }
+ const char* DumpGeneratorName = "createdump";
+ int programLen = strlen(g_szCoreCLRPath) + strlen(DumpGeneratorName);
+ char* program = new char[programLen];
+
+ if (strcpy_s(program, programLen, g_szCoreCLRPath) != SAFECRT_SUCCESS)
+ {
+ return FALSE;
+ }
+ char *last = strrchr(program, '/');
+ if (last != nullptr)
+ {
+ *(last + 1) = '\0';
+ }
+ else
+ {
+ program[0] = '\0';
+ }
+ if (strcat_s(program, programLen, DumpGeneratorName) != SAFECRT_SUCCESS)
+ {
+ return FALSE;
+ }
+ char pidarg[128];
+ if (sprintf_s(pidarg, sizeof(pidarg), "%d", gPID) == -1)
+ {
+ return FALSE;
+ }
+ g_argvCreateDump[0] = program;
+ g_argvCreateDump[1] = _strdup(pidarg);
+ g_argvCreateDump[2] = nullptr;
+
+ if (g_argvCreateDump[0] == nullptr || g_argvCreateDump[1] == nullptr)
+ {
+ return FALSE;
+ }
+ }
+ return TRUE;
+}
+
+/*++
Function:
PROCAbort()
@@ -2870,10 +2941,51 @@ Function:
Does not return
--*/
PAL_NORETURN
-void
+VOID
PROCAbort()
{
+ // Do any shutdown cleanup before aborting or creating a core dump
PROCNotifyProcessShutdown();
+
+#if HAVE_PRCTL_H
+ // If enabled, launch the create minidump utility and wait until it completes
+ if (g_argvCreateDump[0] != nullptr)
+ {
+ // Fork the core dump child process.
+ pid_t childpid = fork();
+
+ // If error, write an error to trace log and abort
+ if (childpid == -1)
+ {
+ ERROR("PROCAbort: fork() FAILED %d (%s)\n", errno, strerror(errno));
+ }
+ else if (childpid == 0)
+ {
+ // Child process
+ if (execve(g_argvCreateDump[0], g_argvCreateDump, palEnvironment) == -1)
+ {
+ ERROR("PROCAbort: execve FAILED %d (%s)\n", errno, strerror(errno));
+ }
+ }
+ else
+ {
+ // Gives the child process permission to use /proc/<pid>/mem and ptrace
+ if (prctl(PR_SET_PTRACER, childpid, 0, 0, 0) == -1)
+ {
+ ERROR("PROCAbort: prctl() FAILED %d (%s)\n", errno, strerror(errno));
+ }
+ // Parent waits until the child process is done
+ int wstatus;
+ int result = waitpid(childpid, &wstatus, 0);
+ if (result != childpid)
+ {
+ ERROR("PROCAbort: waitpid FAILED result %d wstatus %d errno %d (%s)\n",
+ result, wstatus, errno, strerror(errno));
+ }
+ }
+ }
+#endif // HAVE_PRCTL_H
+ // Abort the process after waiting for the core dump to complete
abort();
}
@@ -2886,7 +2998,8 @@ Abstract
Return
TRUE if it succeeded, FALSE otherwise
--*/
-BOOL InitializeFlushProcessWriteBuffers()
+BOOL
+InitializeFlushProcessWriteBuffers()
{
// Verify that the s_helperPage is really aligned to the VIRTUAL_PAGE_SIZE
_ASSERTE((((SIZE_T)s_helperPage) & (VIRTUAL_PAGE_SIZE - 1)) == 0);
@@ -3273,7 +3386,7 @@ Parameter
pThread: Thread object
--*/
-void
+VOID
CorUnix::PROCAddThread(
CPalThread *pCurrentThread,
CPalThread *pTargetThread
@@ -3306,7 +3419,7 @@ Parameter
(no return value)
--*/
-void
+VOID
CorUnix::PROCRemoveThread(
CPalThread *pCurrentThread,
CPalThread *pTargetThread
@@ -3376,7 +3489,7 @@ Return
--*/
INT
CorUnix::PROCGetNumberOfThreads(
- void)
+ VOID)
{
return g_dwThreadCount;
}
@@ -3479,7 +3592,7 @@ Note:
This function is used in ExitThread and TerminateProcess
--*/
-void
+VOID
CorUnix::TerminateCurrentProcessNoExit(BOOL bTerminateUnconditionally)
{
BOOL locked;
diff --git a/src/pal/tools/gen-buildsys-clang.sh b/src/pal/tools/gen-buildsys-clang.sh
index 924a365af9..762a1996b9 100755
--- a/src/pal/tools/gen-buildsys-clang.sh
+++ b/src/pal/tools/gen-buildsys-clang.sh
@@ -155,10 +155,14 @@ if [ "$build_arch" == "armel" ]; then
cmake_extra_defines="$cmake_extra_defines -DARM_SOFTFP=1"
fi
-if [ "$build_arch" == "arm" -o "$build_arch" == "armel" ]; then
- overridefile=clang-compiler-override-arm.txt
+clang_version=$(echo $CC | awk -F- '{ print $NF }')
+# Use O1 option when the clang version is smaller than 3.9
+# Otherwise use O3 option in release build
+if [[ ( ${clang_version%.*} -eq 3 && ${clang_version#*.} -lt 9 ) &&
+ ( "$build_arch" == "arm" || "$build_arch" == "armel" ) ]]; then
+ overridefile=clang-compiler-override-arm.txt
else
- overridefile=clang-compiler-override.txt
+ overridefile=clang-compiler-override.txt
fi
cmake \
diff --git a/src/vm/CMakeLists.txt b/src/vm/CMakeLists.txt
index 26fcacf4af..da1aa8fe62 100644
--- a/src/vm/CMakeLists.txt
+++ b/src/vm/CMakeLists.txt
@@ -317,6 +317,7 @@ if(CLR_CMAKE_TARGET_ARCH_AMD64)
${ARCH_SOURCES_DIR}/JitHelpers_FastWriteBarriers.asm
${ARCH_SOURCES_DIR}/JitHelpers_InlineGetAppDomain.asm
${ARCH_SOURCES_DIR}/JitHelpers_InlineGetThread.asm
+ ${ARCH_SOURCES_DIR}/JitHelpers_SingleAppDomain.asm
${ARCH_SOURCES_DIR}/JitHelpers_Slow.asm
${ARCH_SOURCES_DIR}/PInvokeStubs.asm
${ARCH_SOURCES_DIR}/RedirectedHandledJITCase.asm
@@ -363,6 +364,7 @@ else(WIN32)
${ARCH_SOURCES_DIR}/getstate.S
${ARCH_SOURCES_DIR}/jithelpers_fast.S
${ARCH_SOURCES_DIR}/jithelpers_fastwritebarriers.S
+ ${ARCH_SOURCES_DIR}/jithelpers_singleappdomain.S
${ARCH_SOURCES_DIR}/jithelpers_slow.S
${ARCH_SOURCES_DIR}/pinvokestubs.S
${ARCH_SOURCES_DIR}/theprestubamd64.S
diff --git a/src/vm/amd64/JitHelpers_SingleAppDomain.asm b/src/vm/amd64/JitHelpers_SingleAppDomain.asm
new file mode 100644
index 0000000000..f1b267435a
--- /dev/null
+++ b/src/vm/amd64/JitHelpers_SingleAppDomain.asm
@@ -0,0 +1,64 @@
+; Licensed to the .NET Foundation under one or more agreements.
+; The .NET Foundation licenses this file to you under the MIT license.
+; See the LICENSE file in the project root for more information.
+
+; ==++==
+;
+
+;
+; ==--==
+; ***********************************************************************
+; File: JitHelpers_SingleAppDomain.asm
+;
+; Notes: JIT Static access helpers when coreclr host specifies single
+; appdomain flag
+; ***********************************************************************
+
+include AsmMacros.inc
+include asmconstants.inc
+
+; Min amount of stack space that a nested function should allocate.
+MIN_SIZE equ 28h
+
+extern JIT_GetSharedNonGCStaticBase_Helper:proc
+extern JIT_GetSharedGCStaticBase_Helper:proc
+
+LEAF_ENTRY JIT_GetSharedNonGCStaticBase_SingleAppDomain, _TEXT
+ ; If class is not initialized, bail to C++ helper
+ test byte ptr [rcx + OFFSETOF__DomainLocalModule__m_pDataBlob + rdx], 1
+ jz CallHelper
+ mov rax, rcx
+ REPRET
+
+ align 16
+ CallHelper:
+ ; Tail call JIT_GetSharedNonGCStaticBase_Helper
+ jmp JIT_GetSharedNonGCStaticBase_Helper
+LEAF_END JIT_GetSharedNonGCStaticBase_SingleAppDomain, _TEXT
+
+LEAF_ENTRY JIT_GetSharedNonGCStaticBaseNoCtor_SingleAppDomain, _TEXT
+ mov rax, rcx
+ ret
+LEAF_END JIT_GetSharedNonGCStaticBaseNoCtor_SingleAppDomain, _TEXT
+
+LEAF_ENTRY JIT_GetSharedGCStaticBase_SingleAppDomain, _TEXT
+ ; If class is not initialized, bail to C++ helper
+ test byte ptr [rcx + OFFSETOF__DomainLocalModule__m_pDataBlob + rdx], 1
+ jz CallHelper
+
+ mov rax, [rcx + OFFSETOF__DomainLocalModule__m_pGCStatics]
+ REPRET
+
+ align 16
+ CallHelper:
+ ; Tail call JIT_GetSharedGCStaticBase_Helper
+ jmp JIT_GetSharedGCStaticBase_Helper
+LEAF_END JIT_GetSharedGCStaticBase_SingleAppDomain, _TEXT
+
+LEAF_ENTRY JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain, _TEXT
+ mov rax, [rcx + OFFSETOF__DomainLocalModule__m_pGCStatics]
+ ret
+LEAF_END JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain, _TEXT
+
+ end
+
diff --git a/src/vm/amd64/jithelpers_singleappdomain.S b/src/vm/amd64/jithelpers_singleappdomain.S
new file mode 100644
index 0000000000..307d86b7fe
--- /dev/null
+++ b/src/vm/amd64/jithelpers_singleappdomain.S
@@ -0,0 +1,49 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+.intel_syntax noprefix
+#include "unixasmmacros.inc"
+#include "asmconstants.h"
+
+//
+// JIT Static access helpers when coreclr host specifies single
+// appdomain flag
+//
+
+LEAF_ENTRY JIT_GetSharedNonGCStaticBase_SingleAppDomain, _TEXT
+ // If class is not initialized, bail to C++ helper
+ test byte ptr [rdi + OFFSETOF__DomainLocalModule__m_pDataBlob + rsi], 1
+ jz CallHelper
+ mov rax, rdi
+ rep ret
+
+.balign 16
+CallHelper:
+ // Tail call JIT_GetSharedNonGCStaticBase_Helper
+ jmp C_FUNC(JIT_GetSharedNonGCStaticBase_Helper)
+LEAF_END_MARKED JIT_GetSharedNonGCStaticBase_SingleAppDomain, _TEXT
+
+LEAF_ENTRY JIT_GetSharedNonGCStaticBaseNoCtor_SingleAppDomain, _TEXT
+ mov rax, rdi
+ ret
+LEAF_END JIT_GetSharedNonGCStaticBaseNoCtor_SingleAppDomain, _TEXT
+
+LEAF_ENTRY JIT_GetSharedGCStaticBase_SingleAppDomain, _TEXT
+ // If class is not initialized, bail to C++ helper
+ test byte ptr [rdi + OFFSETOF__DomainLocalModule__m_pDataBlob + rsi], 1
+ jz CallHelper1
+
+ mov rax, [rdi + OFFSETOF__DomainLocalModule__m_pGCStatics]
+ rep ret
+
+.balign 16
+CallHelper1:
+ // Tail call JIT_GetSharedGCStaticBase_Helper
+ jmp C_FUNC(JIT_GetSharedGCStaticBase_Helper)
+LEAF_END JIT_GetSharedGCStaticBase_SingleAppDomain, _TEXT
+
+LEAF_ENTRY JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain, _TEXT
+ mov rax, [rdi + OFFSETOF__DomainLocalModule__m_pGCStatics]
+ ret
+LEAF_END JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain, _TEXT
diff --git a/src/vm/appdomain.cpp b/src/vm/appdomain.cpp
index 5664740b5d..5bfb12fc6b 100644
--- a/src/vm/appdomain.cpp
+++ b/src/vm/appdomain.cpp
@@ -740,10 +740,10 @@ BaseDomain::BaseDomain()
m_pLargeHeapHandleTable = NULL;
#ifndef CROSSGEN_COMPILE
- // Note that m_hHandleTableBucket is overridden by app domains
- m_hHandleTableBucket = g_HandleTableMap.pBuckets[0];
+ // Note that m_handleStore is overridden by app domains
+ m_handleStore = GCHandleTableUtilities::GetGCHandleTable()->GetGlobalHandleStore();
#else
- m_hHandleTableBucket = NULL;
+ m_handleStore = NULL;
#endif
m_pMarshalingData = NULL;
@@ -993,17 +993,6 @@ void BaseDomain::InitVSD()
}
#ifndef CROSSGEN_COMPILE
-BOOL BaseDomain::ContainsOBJECTHANDLE(OBJECTHANDLE handle)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- }
- CONTRACTL_END;
-
- return Ref_ContainHandle(m_hHandleTableBucket,handle);
-}
DWORD BaseDomain::AllocateContextStaticsOffset(DWORD* pOffsetSlot)
{
@@ -4053,7 +4042,7 @@ AppDomain::AppDomain()
m_pUMEntryThunkCache = NULL;
m_pAsyncPool = NULL;
- m_hHandleTableBucket = NULL;
+ m_handleStore = NULL;
m_ExposedObject = NULL;
m_pComIPForExposedObject = NULL;
@@ -4067,11 +4056,6 @@ AppDomain::AppDomain()
m_dwThreadEnterCount = 0;
m_dwThreadsStillInAppDomain = (ULONG)-1;
- m_pSecDesc = NULL;
- m_hHandleTableBucket=NULL;
-
- m_ExposedObject = NULL;
-
#ifdef FEATURE_COMINTEROP
m_pRefDispIDCache = NULL;
m_hndMissing = NULL;
@@ -4091,7 +4075,7 @@ AppDomain::AppDomain()
m_dwRefTakers=0;
m_dwCreationHolders=0;
#endif
-
+
#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
m_ullTotalProcessorUsage = 0;
m_pullAllocBytes = NULL;
@@ -4289,18 +4273,13 @@ void AppDomain::Init()
// default domain cannot be unloaded.
if (GetId().m_dwId == DefaultADID)
{
- m_hHandleTableBucket = g_HandleTableMap.pBuckets[0];
+ m_handleStore = GCHandleTableUtilities::GetGCHandleTable()->GetGlobalHandleStore();
}
else
{
- m_hHandleTableBucket = Ref_CreateHandleTableBucket(m_dwIndex);
+ m_handleStore = GCHandleTableUtilities::GetGCHandleTable()->CreateHandleStore((void*)(uintptr_t)m_dwIndex.m_dwIndex);
}
-#ifdef _DEBUG
- if (((HandleTable *)(m_hHandleTableBucket->pTable[0]))->uADIndex != m_dwIndex)
- _ASSERTE (!"AD index mismatch");
-#endif // _DEBUG
-
#endif // CROSSGEN_COMPILE
#ifdef FEATURE_TYPEEQUIVALENCE
@@ -4599,16 +4578,10 @@ void AppDomain::Terminate()
BaseDomain::Terminate();
-#ifdef _DEBUG
- if (m_hHandleTableBucket &&
- m_hHandleTableBucket->pTable &&
- ((HandleTable *)(m_hHandleTableBucket->pTable[0]))->uADIndex != m_dwIndex)
- _ASSERTE (!"AD index mismatch");
-#endif // _DEBUG
-
- if (m_hHandleTableBucket) {
- Ref_DestroyHandleTableBucket(m_hHandleTableBucket);
- m_hHandleTableBucket = NULL;
+ if (m_handleStore)
+ {
+ GCHandleTableUtilities::GetGCHandleTable()->DestroyHandleStore(m_handleStore);
+ m_handleStore = NULL;
}
#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
@@ -9225,14 +9198,7 @@ void AppDomain::ClearGCHandles()
HandleAsyncPinHandles();
// Remove our handle table as a source of GC roots
- HandleTableBucket *pBucket = m_hHandleTableBucket;
-
-#ifdef _DEBUG
- if (((HandleTable *)(pBucket->pTable[0]))->uADIndex != m_dwIndex)
- _ASSERTE (!"AD index mismatch");
-#endif // _DEBUG
-
- Ref_RemoveHandleTableBucket(pBucket);
+ GCHandleTableUtilities::GetGCHandleTable()->UprootHandleStore(m_handleStore);
}
// When an AD is unloaded, we will release all objects in this AD.
@@ -9248,13 +9214,17 @@ void AppDomain::HandleAsyncPinHandles()
}
CONTRACTL_END;
- HandleTableBucket *pBucket = m_hHandleTableBucket;
+ // TODO: Temporarily casting stuff here until Ref_RelocateAsyncPinHandles is moved to the interface.
+ HandleTableBucket *pBucket = (HandleTableBucket*)m_handleStore;
+
// IO completion port picks IO job using FIFO. Here is how we know which AsyncPinHandle can be freed.
// 1. We mark all non-pending AsyncPinHandle with READYTOCLEAN.
// 2. We queue a dump Overlapped to the IO completion as a marker.
// 3. When the Overlapped is picked up by completion port, we wait until all previous IO jobs are processed.
// 4. Then we can delete all AsyncPinHandle marked with READYTOCLEAN.
- HandleTableBucket *pBucketInDefault = SystemDomain::System()->DefaultDomain()->m_hHandleTableBucket;
+ HandleTableBucket *pBucketInDefault = (HandleTableBucket*)SystemDomain::System()->DefaultDomain()->m_handleStore;
+
+ // TODO: When this function is moved to the interface it will take void*s
Ref_RelocateAsyncPinHandles(pBucket, pBucketInDefault);
OverlappedDataObject::RequestCleanup();
@@ -9277,14 +9247,15 @@ void AppDomain::ClearGCRoots()
// this point, so only need to synchronize the preemptive mode threads.
ExecutionManager::Unload(GetLoaderAllocator());
+ IGCHandleTable* pHandleTable = GCHandleTableUtilities::GetGCHandleTable();
+
while ((pThread = ThreadStore::GetAllThreadList(pThread, 0, 0)) != NULL)
{
// Delete the thread local static store
pThread->DeleteThreadStaticData(this);
-
// <TODO>@TODO: A pre-allocated AppDomainUnloaded exception might be better.</TODO>
- if (m_hHandleTableBucket->Contains(pThread->m_LastThrownObjectHandle))
+ if (pHandleTable->ContainsHandle(m_handleStore, pThread->m_LastThrownObjectHandle))
{
// Never delete a handle to a preallocated exception object.
if (!CLRException::IsPreallocatedExceptionHandle(pThread->m_LastThrownObjectHandle))
@@ -9296,7 +9267,7 @@ void AppDomain::ClearGCRoots()
}
// Clear out the exceptions objects held by a thread.
- pThread->GetExceptionState()->ClearThrowablesForUnload(m_hHandleTableBucket);
+ pThread->GetExceptionState()->ClearThrowablesForUnload(m_handleStore);
}
//delete them while we still have the runtime suspended
diff --git a/src/vm/appdomain.hpp b/src/vm/appdomain.hpp
index 99e22d3e0a..898e50f1c2 100644
--- a/src/vm/appdomain.hpp
+++ b/src/vm/appdomain.hpp
@@ -1240,63 +1240,70 @@ public:
//****************************************************************************************
// Handles
-#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE) // needs GetCurrentThreadHomeHeapNumber
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
OBJECTHANDLE CreateTypedHandle(OBJECTREF object, int type)
{
WRAPPER_NO_CONTRACT;
IGCHandleTable *pHandleTable = GCHandleTableUtilities::GetGCHandleTable();
- return pHandleTable->CreateHandleOfType(m_hHandleTableBucket->pTable[GetCurrentThreadHomeHeapNumber()], OBJECTREFToObject(object), type);
+ return pHandleTable->CreateHandleOfType(m_handleStore, OBJECTREFToObject(object), type);
}
OBJECTHANDLE CreateHandle(OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
CONDITIONAL_CONTRACT_VIOLATION(ModeViolation, object == NULL)
- return ::CreateHandle(m_hHandleTableBucket->pTable[GetCurrentThreadHomeHeapNumber()], object);
+ return ::CreateHandle(m_handleStore, object);
}
OBJECTHANDLE CreateWeakHandle(OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
- return ::CreateWeakHandle(m_hHandleTableBucket->pTable[GetCurrentThreadHomeHeapNumber()], object);
+ return ::CreateWeakHandle(m_handleStore, object);
}
OBJECTHANDLE CreateShortWeakHandle(OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
- return ::CreateShortWeakHandle(m_hHandleTableBucket->pTable[GetCurrentThreadHomeHeapNumber()], object);
+ return ::CreateShortWeakHandle(m_handleStore, object);
}
OBJECTHANDLE CreateLongWeakHandle(OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
CONDITIONAL_CONTRACT_VIOLATION(ModeViolation, object == NULL)
- return ::CreateLongWeakHandle(m_hHandleTableBucket->pTable[GetCurrentThreadHomeHeapNumber()], object);
+ return ::CreateLongWeakHandle(m_handleStore, object);
}
OBJECTHANDLE CreateStrongHandle(OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
- return ::CreateStrongHandle(m_hHandleTableBucket->pTable[GetCurrentThreadHomeHeapNumber()], object);
+ return ::CreateStrongHandle(m_handleStore, object);
}
OBJECTHANDLE CreatePinningHandle(OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
-#if CHECK_APP_DOMAIN_LEAKS
+#if CHECK_APP_DOMAIN_LEAKS
if(IsAppDomain())
object->TryAssignAppDomain((AppDomain*)this,TRUE);
#endif
- return ::CreatePinningHandle(m_hHandleTableBucket->pTable[GetCurrentThreadHomeHeapNumber()], object);
+ return ::CreatePinningHandle(m_handleStore, object);
}
OBJECTHANDLE CreateSizedRefHandle(OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
- OBJECTHANDLE h = ::CreateSizedRefHandle(
- m_hHandleTableBucket->pTable[GCHeapUtilities::IsServerHeap() ? (m_dwSizedRefHandles % m_iNumberOfProcessors) : GetCurrentThreadHomeHeapNumber()],
- object);
+ OBJECTHANDLE h;
+ if (GCHeapUtilities::IsServerHeap())
+ {
+ h = ::CreateSizedRefHandle(m_handleStore, object, m_dwSizedRefHandles % m_iNumberOfProcessors);
+ }
+ else
+ {
+ h = ::CreateSizedRefHandle(m_handleStore, object);
+ }
+
InterlockedIncrement((LONG*)&m_dwSizedRefHandles);
return h;
}
@@ -1305,7 +1312,7 @@ public:
OBJECTHANDLE CreateRefcountedHandle(OBJECTREF object)
{
WRAPPER_NO_CONTRACT;
- return ::CreateRefcountedHandle(m_hHandleTableBucket->pTable[GetCurrentThreadHomeHeapNumber()], object);
+ return ::CreateRefcountedHandle(m_handleStore, object);
}
OBJECTHANDLE CreateWinRTWeakHandle(OBJECTREF object, IWeakReference* pWinRTWeakReference)
@@ -1318,14 +1325,14 @@ public:
}
CONTRACTL_END;
- return ::CreateWinRTWeakHandle(m_hHandleTableBucket->pTable[GetCurrentThreadHomeHeapNumber()], object, pWinRTWeakReference);
+ return ::CreateWinRTWeakHandle(m_handleStore, object, pWinRTWeakReference);
}
#endif // FEATURE_COMINTEROP
OBJECTHANDLE CreateVariableHandle(OBJECTREF object, UINT type)
{
WRAPPER_NO_CONTRACT;
- return ::CreateVariableHandle(m_hHandleTableBucket->pTable[GetCurrentThreadHomeHeapNumber()], object, type);
+ return ::CreateVariableHandle(m_handleStore, object, type);
}
OBJECTHANDLE CreateDependentHandle(OBJECTREF primary, OBJECTREF secondary)
@@ -1339,12 +1346,10 @@ public:
CONTRACTL_END;
IGCHandleTable *pHandleTable = GCHandleTableUtilities::GetGCHandleTable();
- return pHandleTable->CreateDependentHandle((void*)m_hHandleTableBucket->pTable[GetCurrentThreadHomeHeapNumber()], OBJECTREFToObject(primary), OBJECTREFToObject(secondary));
+ return pHandleTable->CreateDependentHandle(m_handleStore, OBJECTREFToObject(primary), OBJECTREFToObject(secondary));
}
#endif // DACCESS_COMPILE && !CROSSGEN_COMPILE
- BOOL ContainsOBJECTHANDLE(OBJECTHANDLE handle);
-
IUnknown *GetFusionContext() {LIMITED_METHOD_CONTRACT; return m_pFusionContext; }
CLRPrivBinderCoreCLR *GetTPABinderContext() {LIMITED_METHOD_CONTRACT; return m_pTPABinderContext; }
@@ -1397,8 +1402,7 @@ protected:
CLRPrivBinderCoreCLR *m_pTPABinderContext; // Reference to the binding context that holds TPA list details
-
- HandleTableBucket *m_hHandleTableBucket;
+ void* m_handleStore;
// The large heap handle table.
LargeHeapHandleTable *m_pLargeHeapHandleTable;
diff --git a/src/vm/arm64/asmconstants.h b/src/vm/arm64/asmconstants.h
index b0300ca324..12b72f9249 100644
--- a/src/vm/arm64/asmconstants.h
+++ b/src/vm/arm64/asmconstants.h
@@ -167,5 +167,11 @@ ASMCONSTANTS_C_ASSERT(ResolveCacheElem__target == offsetof(ResolveCacheElem, tar
ASMCONSTANTS_C_ASSERT(ResolveCacheElem__pNext == offsetof(ResolveCacheElem, pNext));
#endif // CROSSGEN_COMPILE
+#define DomainLocalModule__m_pDataBlob 0x30
+#define DomainLocalModule__m_pGCStatics 0x20
+ASMCONSTANTS_C_ASSERT(DomainLocalModule__m_pDataBlob == offsetof(DomainLocalModule, m_pDataBlob));
+ASMCONSTANTS_C_ASSERT(DomainLocalModule__m_pGCStatics == offsetof(DomainLocalModule, m_pGCStatics));
+
+
#undef ASMCONSTANTS_RUNTIME_ASSERT
#undef ASMCONSTANTS_C_ASSERT
diff --git a/src/vm/arm64/asmhelpers.S b/src/vm/arm64/asmhelpers.S
index ef6b5cfffe..5bf6b5dc96 100644
--- a/src/vm/arm64/asmhelpers.S
+++ b/src/vm/arm64/asmhelpers.S
@@ -1212,3 +1212,60 @@ NESTED_END StubDispatchFixupStub, _TEXT
fmov d0, x1
LEAF_END
#endif
+
+//
+// JIT Static access helpers when coreclr host specifies single appdomain flag
+//
+
+// ------------------------------------------------------------------
+// void* JIT_GetSharedNonGCStaticBase(SIZE_T moduleDomainID, DWORD dwClassDomainID)
+
+ LEAF_ENTRY JIT_GetSharedNonGCStaticBase_SingleAppDomain
+ // If class is not initialized, bail to C++ helper
+ add x2, x0, #DomainLocalModule__m_pDataBlob
+ ldrb w2, [x2, w1]
+ tst w2, #1
+ beq LOCAL_LABEL(CallHelper)
+
+ ret lr
+
+LOCAL_LABEL(CallHelper):
+ // Tail call JIT_GetSharedNonGCStaticBase_Helper
+ b C_FUNC(JIT_GetSharedNonGCStaticBase_Helper)
+ LEAF_END
+
+
+// ------------------------------------------------------------------
+// void* JIT_GetSharedNonGCStaticBaseNoCtor(SIZE_T moduleDomainID, DWORD dwClassDomainID)
+
+ LEAF_ENTRY JIT_GetSharedNonGCStaticBaseNoCtor_SingleAppDomain
+ ret lr
+ LEAF_END
+
+
+// ------------------------------------------------------------------
+// void* JIT_GetSharedGCStaticBase(SIZE_T moduleDomainID, DWORD dwClassDomainID)
+
+ LEAF_ENTRY JIT_GetSharedGCStaticBase_SingleAppDomain
+ // If class is not initialized, bail to C++ helper
+ add x2, x0, #DomainLocalModule__m_pDataBlob
+ ldrb w2, [x2, w1]
+ tst w2, #1
+ beq LOCAL_LABEL(CallHelper)
+
+ ldr x0, [x0, #DomainLocalModule__m_pGCStatics]
+ ret lr
+
+LOCAL_LABEL(CallHelper):
+ // Tail call Jit_GetSharedGCStaticBase_Helper
+ b C_FUNC(JIT_GetSharedGCStaticBase_Helper)
+ LEAF_END
+
+
+// ------------------------------------------------------------------
+// void* JIT_GetSharedGCStaticBaseNoCtor(SIZE_T moduleDomainID, DWORD dwClassDomainID)
+
+ LEAF_ENTRY JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain
+ ldr x0, [x0, #DomainLocalModule__m_pGCStatics]
+ ret lr
+ LEAF_END
diff --git a/src/vm/arm64/asmhelpers.asm b/src/vm/arm64/asmhelpers.asm
index e8b16ded6a..24b26eb1c9 100644
--- a/src/vm/arm64/asmhelpers.asm
+++ b/src/vm/arm64/asmhelpers.asm
@@ -52,6 +52,9 @@
IMPORT $g_GCShadowEnd
#endif // WRITE_BARRIER_CHECK
+ IMPORT JIT_GetSharedNonGCStaticBase_Helper
+ IMPORT JIT_GetSharedGCStaticBase_Helper
+
TEXTAREA
;; LPVOID __stdcall GetCurrentIP(void);
@@ -1326,5 +1329,62 @@ Fail
LEAF_END
#endif
+;
+; JIT Static access helpers when coreclr host specifies single appdomain flag
+;
+
+; ------------------------------------------------------------------
+; void* JIT_GetSharedNonGCStaticBase(SIZE_T moduleDomainID, DWORD dwClassDomainID)
+
+ LEAF_ENTRY JIT_GetSharedNonGCStaticBase_SingleAppDomain
+ ; If class is not initialized, bail to C++ helper
+ add x2, x0, #DomainLocalModule__m_pDataBlob
+ ldrb w2, [x2, w1]
+ tst w2, #1
+ beq CallHelper1
+
+ ret lr
+
+CallHelper1
+ ; Tail call JIT_GetSharedNonGCStaticBase_Helper
+ b JIT_GetSharedNonGCStaticBase_Helper
+ LEAF_END
+
+
+; ------------------------------------------------------------------
+; void* JIT_GetSharedNonGCStaticBaseNoCtor(SIZE_T moduleDomainID, DWORD dwClassDomainID)
+
+ LEAF_ENTRY JIT_GetSharedNonGCStaticBaseNoCtor_SingleAppDomain
+ ret lr
+ LEAF_END
+
+
+; ------------------------------------------------------------------
+; void* JIT_GetSharedGCStaticBase(SIZE_T moduleDomainID, DWORD dwClassDomainID)
+
+ LEAF_ENTRY JIT_GetSharedGCStaticBase_SingleAppDomain
+ ; If class is not initialized, bail to C++ helper
+ add x2, x0, #DomainLocalModule__m_pDataBlob
+ ldrb w2, [x2, w1]
+ tst w2, #1
+ beq CallHelper2
+
+ ldr x0, [x0, #DomainLocalModule__m_pGCStatics]
+ ret lr
+
+CallHelper2
+ ; Tail call JIT_GetSharedGCStaticBase_Helper
+ b JIT_GetSharedGCStaticBase_Helper
+ LEAF_END
+
+
+; ------------------------------------------------------------------
+; void* JIT_GetSharedGCStaticBaseNoCtor(SIZE_T moduleDomainID, DWORD dwClassDomainID)
+
+ LEAF_ENTRY JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain
+ ldr x0, [x0, #DomainLocalModule__m_pGCStatics]
+ ret lr
+ LEAF_END
+
; Must be at very end of file
END
diff --git a/src/vm/arm64/stubs.cpp b/src/vm/arm64/stubs.cpp
index f56f6ab625..0c7eb4dfba 100644
--- a/src/vm/arm64/stubs.cpp
+++ b/src/vm/arm64/stubs.cpp
@@ -15,6 +15,11 @@
#include "virtualcallstub.h"
#include "jitinterface.h"
+EXTERN_C void JIT_GetSharedNonGCStaticBase_SingleAppDomain();
+EXTERN_C void JIT_GetSharedNonGCStaticBaseNoCtor_SingleAppDomain();
+EXTERN_C void JIT_GetSharedGCStaticBase_SingleAppDomain();
+EXTERN_C void JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain();
+
#ifndef DACCESS_COMPILE
//-----------------------------------------------------------------------
// InstructionFormat for B.cond
@@ -1078,10 +1083,18 @@ void JIT_TailCall()
_ASSERTE(!"ARM64:NYI");
}
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
void InitJITHelpers1()
{
- return;
+ if(IsSingleAppDomain())
+ {
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_GCSTATIC_BASE, JIT_GetSharedGCStaticBase_SingleAppDomain);
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE, JIT_GetSharedNonGCStaticBase_SingleAppDomain);
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR, JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain);
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR,JIT_GetSharedNonGCStaticBaseNoCtor_SingleAppDomain);
+ }
}
+#endif // !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
EXTERN_C void __stdcall ProfileEnterNaked(UINT_PTR clientData)
{
diff --git a/src/vm/assembly.cpp b/src/vm/assembly.cpp
index 75430644c3..92c1ebd817 100644
--- a/src/vm/assembly.cpp
+++ b/src/vm/assembly.cpp
@@ -1849,10 +1849,7 @@ HRESULT RunMain(MethodDesc *pFD ,
else
{
*pParam->piRetVal = (INT32)threadStart.Call_RetArgSlot(&stackVar);
- if (pParam->stringArgs == NULL)
- {
- SetLatchedExitCode(*pParam->piRetVal);
- }
+ SetLatchedExitCode(*pParam->piRetVal);
}
GCPROTECT_END();
diff --git a/src/vm/assemblyname.cpp b/src/vm/assemblyname.cpp
index a7cf2ec250..90e2a467e1 100644
--- a/src/vm/assemblyname.cpp
+++ b/src/vm/assemblyname.cpp
@@ -202,41 +202,4 @@ FCIMPL4(void, AssemblyNameNative::Init, Object * refThisUNSAFE, OBJECTREF * pAss
}
FCIMPLEND
-/// "parse" tells us to parse the simple name of the assembly as if it was the full name
-/// almost never the right thing to do, but needed for compat
-/* static */
-FCIMPL3(FC_BOOL_RET, AssemblyNameNative::ReferenceMatchesDefinition, AssemblyNameBaseObject* refUNSAFE, AssemblyNameBaseObject* defUNSAFE, CLR_BOOL fParse)
-{
- FCALL_CONTRACT;
-
- struct _gc
- {
- ASSEMBLYNAMEREF pRef;
- ASSEMBLYNAMEREF pDef;
- } gc;
- gc.pRef = (ASSEMBLYNAMEREF)ObjectToOBJECTREF (refUNSAFE);
- gc.pDef = (ASSEMBLYNAMEREF)ObjectToOBJECTREF (defUNSAFE);
-
- BOOL result = FALSE;
- HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
-
- Thread *pThread = GetThread();
-
- CheckPointHolder cph(pThread->m_MarshalAlloc.GetCheckpoint()); //hold checkpoint for autorelease
-
- if (gc.pRef == NULL)
- COMPlusThrow(kArgumentNullException, W("ArgumentNull_AssemblyName"));
- if (gc.pDef == NULL)
- COMPlusThrow(kArgumentNullException, W("ArgumentNull_AssemblyName"));
-
- AssemblySpec refSpec;
- refSpec.InitializeSpec(&(pThread->m_MarshalAlloc), (ASSEMBLYNAMEREF*) &gc.pRef, fParse, FALSE);
- AssemblySpec defSpec;
- defSpec.InitializeSpec(&(pThread->m_MarshalAlloc), (ASSEMBLYNAMEREF*) &gc.pDef, fParse, FALSE);
-
- result=AssemblySpec::RefMatchesDef(&refSpec,&defSpec);
- HELPER_METHOD_FRAME_END();
- FC_RETURN_BOOL(result);
-}
-FCIMPLEND
diff --git a/src/vm/assemblyname.hpp b/src/vm/assemblyname.hpp
index 2d1db9bef4..41e085cb24 100644
--- a/src/vm/assemblyname.hpp
+++ b/src/vm/assemblyname.hpp
@@ -24,7 +24,6 @@ public:
static FCDECL1(Object*, GetPublicKeyToken, Object* refThisUNSAFE);
static FCDECL1(Object*, EscapeCodeBase, StringObject* filenameUNSAFE);
static FCDECL4(void, Init, Object * refThisUNSAFE, OBJECTREF * pAssemblyRef, CLR_BOOL fForIntrospection, CLR_BOOL fRaiseResolveEvent);
- static FCDECL3(FC_BOOL_RET, ReferenceMatchesDefinition, AssemblyNameBaseObject* refUNSAFE, AssemblyNameBaseObject* defUNSAFE, CLR_BOOL fParse);
};
#endif // _AssemblyName_H
diff --git a/src/vm/corhost.cpp b/src/vm/corhost.cpp
index 75adbada94..d935ddd8c8 100644
--- a/src/vm/corhost.cpp
+++ b/src/vm/corhost.cpp
@@ -1201,6 +1201,11 @@ HRESULT GetCLRRuntimeHost(REFIID riid, IUnknown **ppUnk)
STDMETHODIMP CorHost2::UnloadAppDomain(DWORD dwDomainId, BOOL fWaitUntilDone)
{
+ return UnloadAppDomain2(dwDomainId, fWaitUntilDone, nullptr);
+}
+
+STDMETHODIMP CorHost2::UnloadAppDomain2(DWORD dwDomainId, BOOL fWaitUntilDone, int *pLatchedExitCode)
+{
WRAPPER_NO_CONTRACT;
STATIC_CONTRACT_SO_TOLERANT;
@@ -1249,14 +1254,23 @@ STDMETHODIMP CorHost2::UnloadAppDomain(DWORD dwDomainId, BOOL fWaitUntilDone)
}
END_ENTRYPOINT_NOTHROW;
+ if (pLatchedExitCode)
+ {
+ *pLatchedExitCode = GetLatchedExitCode();
+ }
+
return hr;
}
- else
- return CorRuntimeHostBase::UnloadAppDomain(dwDomainId, fWaitUntilDone);
+ return CorRuntimeHostBase::UnloadAppDomain2(dwDomainId, fWaitUntilDone, pLatchedExitCode);
}
-HRESULT CorRuntimeHostBase::UnloadAppDomain(DWORD dwDomainId, BOOL fSync)
+HRESULT CorRuntimeHostBase::UnloadAppDomain(DWORD dwDomainId, BOOL fWaitUntilDone)
+{
+ return UnloadAppDomain2(dwDomainId, fWaitUntilDone, nullptr);
+}
+
+HRESULT CorRuntimeHostBase::UnloadAppDomain2(DWORD dwDomainId, BOOL fWaitUntilDone, int *pLatchedExitCode)
{
CONTRACTL
{
@@ -1282,7 +1296,7 @@ HRESULT CorRuntimeHostBase::UnloadAppDomain(DWORD dwDomainId, BOOL fSync)
//
// However, for a thread that holds the loader lock, unloading the appDomain is
// not a supported scenario. Thus, we should not be ending up in this code
- // path for the FAULT violation.
+ // path for the FAULT violation.
//
// Hence, the CONTRACT_VIOLATION below for overriding the FORBID_FAULT
// for this scope only.
@@ -1292,18 +1306,23 @@ HRESULT CorRuntimeHostBase::UnloadAppDomain(DWORD dwDomainId, BOOL fSync)
)
{
return HOST_E_CLRNOTAVAILABLE;
- }
+ }
}
-
+
BEGIN_ENTRYPOINT_NOTHROW;
// We do not use BEGIN_EXTERNAL_ENTRYPOINT here because
// we do not want to setup Thread. Process may be OOM, and we want Unload
// to work.
- hr = AppDomain::UnloadById(ADID(dwDomainId), fSync);
+ hr = AppDomain::UnloadById(ADID(dwDomainId), fWaitUntilDone);
END_ENTRYPOINT_NOTHROW;
+ if (pLatchedExitCode)
+ {
+ *pLatchedExitCode = GetLatchedExitCode();
+ }
+
return hr;
}
@@ -1404,6 +1423,14 @@ HRESULT CorHost2::QueryInterface(REFIID riid, void **ppUnk)
*ppUnk = static_cast<ICLRRuntimeHost2 *>(this);
}
+ else if (riid == IID_ICLRRuntimeHost4)
+ {
+ ULONG version = 4;
+ if (m_Version == 0)
+ FastInterlockCompareExchange((LONG*)&m_Version, version, 0);
+
+ *ppUnk = static_cast<ICLRRuntimeHost4 *>(this);
+ }
else if (riid == IID_ICLRExecutionManager)
{
ULONG version = 2;
diff --git a/src/vm/ecalllist.h b/src/vm/ecalllist.h
index 415926eafa..6ed29b8611 100644
--- a/src/vm/ecalllist.h
+++ b/src/vm/ecalllist.h
@@ -632,7 +632,6 @@ FCFuncStart(gAssemblyNameFuncs)
FCFuncElement("nInit", AssemblyNameNative::Init)
FCFuncElement("nToString", AssemblyNameNative::ToString)
FCFuncElement("nGetPublicKeyToken", AssemblyNameNative::GetPublicKeyToken)
- FCFuncElement("ReferenceMatchesDefinitionInternal", AssemblyNameNative::ReferenceMatchesDefinition)
FCFuncElement("nGetFileInformation", AssemblyNameNative::GetFileInformation)
FCFuncEnd()
diff --git a/src/vm/eetwain.cpp b/src/vm/eetwain.cpp
index 2c9059bca1..2886daa8f6 100644
--- a/src/vm/eetwain.cpp
+++ b/src/vm/eetwain.cpp
@@ -4048,6 +4048,54 @@ bool UnwindStackFrame(PREGDISPLAY pContext,
#endif // _TARGET_X86_
+#ifdef WIN64EXCEPTIONS
+#ifdef _TARGET_X86_
+size_t EECodeManager::GetResumeSp( PCONTEXT pContext )
+{
+ PCODE currentPc = PCODE(pContext->Eip);
+
+ _ASSERTE(ExecutionManager::IsManagedCode(currentPc));
+
+ EECodeInfo codeInfo(currentPc);
+
+ PTR_CBYTE methodStart = PTR_CBYTE(codeInfo.GetSavedMethodCode());
+
+ GCInfoToken gcInfoToken = codeInfo.GetGCInfoToken();
+ PTR_VOID methodInfoPtr = gcInfoToken.Info;
+ DWORD curOffs = codeInfo.GetRelOffset();
+
+ CodeManStateBuf stateBuf;
+
+ stateBuf.hdrInfoSize = (DWORD)DecodeGCHdrInfo(gcInfoToken,
+ curOffs,
+ &stateBuf.hdrInfoBody);
+
+ PTR_CBYTE table = dac_cast<PTR_CBYTE>(methodInfoPtr) + stateBuf.hdrInfoSize;
+
+ hdrInfo *info = &stateBuf.hdrInfoBody;
+
+ _ASSERTE(info->epilogOffs == hdrInfo::NOT_IN_EPILOG && info->prologOffs == hdrInfo::NOT_IN_PROLOG);
+
+ bool isESPFrame = !info->ebpFrame && !info->doubleAlign;
+
+ if (codeInfo.IsFunclet())
+ {
+ // Treat funclet's frame as ESP frame
+ isESPFrame = true;
+ }
+
+ if (isESPFrame)
+ {
+ const size_t curESP = (size_t)(pContext->Esp);
+ return curESP + GetPushedArgSize(info, table, curOffs);
+ }
+
+ const size_t curEBP = (size_t)(pContext->Ebp);
+ return GetOutermostBaseFP(curEBP, info);
+}
+#endif // _TARGET_X86_
+#endif // WIN64EXCEPTIONS
+
#ifndef CROSSGEN_COMPILE
#ifndef WIN64EXCEPTIONS
diff --git a/src/vm/exceptionhandling.cpp b/src/vm/exceptionhandling.cpp
index 7e82cce604..31b85bdb0a 100644
--- a/src/vm/exceptionhandling.cpp
+++ b/src/vm/exceptionhandling.cpp
@@ -123,6 +123,24 @@ bool FixNonvolatileRegisters(UINT_PTR uOriginalSP,
bool fAborting
);
+void FixContext(PCONTEXT pContextRecord)
+{
+#define FIXUPREG(reg, value) \
+ do { \
+ STRESS_LOG2(LF_GCROOTS, LL_INFO100, "Updating " #reg " %p to %p\n", \
+ pContextRecord->reg, \
+ (value)); \
+ pContextRecord->reg = (value); \
+ } while (0)
+
+#ifdef _TARGET_X86_
+ size_t resumeSp = EECodeManager::GetResumeSp(pContextRecord);
+ FIXUPREG(ResumeEsp, resumeSp);
+#endif // _TARGET_X86_
+
+#undef FIXUPREG
+}
+
MethodDesc * GetUserMethodForILStub(Thread * pThread, UINT_PTR uStubSP, MethodDesc * pILStubMD, Frame ** ppFrameOut);
#ifdef FEATURE_PAL
@@ -441,6 +459,7 @@ void ExceptionTracker::UpdateNonvolatileRegisters(CONTEXT *pContextRecord, REGDI
} \
} while (0)
+
#if defined(_TARGET_X86_)
UPDATEREG(Ebx);
@@ -1181,6 +1200,8 @@ ProcessCLRException(IN PEXCEPTION_RECORD pExceptionRecord
pThread->SetFrame(pLimitFrame);
+ FixContext(pContextRecord);
+
SetIP(pContextRecord, (PCODE)uResumePC);
}
diff --git a/src/vm/exstate.cpp b/src/vm/exstate.cpp
index 29c7a063f5..c598412547 100644
--- a/src/vm/exstate.cpp
+++ b/src/vm/exstate.cpp
@@ -102,7 +102,7 @@ void ThreadExceptionState::FreeAllStackTraces()
}
}
-void ThreadExceptionState::ClearThrowablesForUnload(HandleTableBucket* pHndTblBucket)
+void ThreadExceptionState::ClearThrowablesForUnload(void* handleStore)
{
WRAPPER_NO_CONTRACT;
@@ -112,11 +112,13 @@ void ThreadExceptionState::ClearThrowablesForUnload(HandleTableBucket* pHndTblBu
ExInfo* pNode = &m_currentExInfo;
#endif // WIN64EXCEPTIONS
+ IGCHandleTable *pHandleTable = GCHandleTableUtilities::GetGCHandleTable();
+
for ( ;
pNode != NULL;
pNode = pNode->m_pPrevNestedInfo)
{
- if (pHndTblBucket->Contains(pNode->m_hThrowable))
+ if (pHandleTable->ContainsHandle(handleStore, pNode->m_hThrowable))
{
pNode->DestroyExceptionHandle();
}
diff --git a/src/vm/exstate.h b/src/vm/exstate.h
index 34f6427b51..104c76c77b 100644
--- a/src/vm/exstate.h
+++ b/src/vm/exstate.h
@@ -56,7 +56,7 @@ class ThreadExceptionState
public:
void FreeAllStackTraces();
- void ClearThrowablesForUnload(HandleTableBucket* pHndTblBucket);
+ void ClearThrowablesForUnload(void* handleStore);
#ifdef _DEBUG
typedef enum
diff --git a/src/vm/gchandletableutilities.h b/src/vm/gchandletableutilities.h
index a631b55d36..6e32add8ac 100644
--- a/src/vm/gchandletableutilities.h
+++ b/src/vm/gchandletableutilities.h
@@ -45,53 +45,73 @@ inline OBJECTREF ObjectFromHandle(OBJECTHANDLE handle)
return UNCHECKED_OBJECTREF_TO_OBJECTREF(*PTR_UNCHECKED_OBJECTREF(handle));
}
+// Quick inline check for whether a handle is null
+inline BOOL IsHandleNullUnchecked(OBJECTHANDLE handle)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (handle == NULL || (*(_UNCHECKED_OBJECTREF *)handle) == NULL);
+}
+
+inline BOOL ObjectHandleIsNull(OBJECTHANDLE handle)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return *(Object **)handle == NULL;
+}
+
#ifndef DACCESS_COMPILE
// Handle creation convenience functions
-inline OBJECTHANDLE CreateHandle(HHANDLETABLE table, OBJECTREF object)
+inline OBJECTHANDLE CreateHandle(void* table, OBJECTREF object)
{
return GCHandleTableUtilities::GetGCHandleTable()->CreateHandleOfType(table, OBJECTREFToObject(object), HNDTYPE_DEFAULT);
}
-inline OBJECTHANDLE CreateWeakHandle(HHANDLETABLE table, OBJECTREF object)
+inline OBJECTHANDLE CreateWeakHandle(void* table, OBJECTREF object)
{
return GCHandleTableUtilities::GetGCHandleTable()->CreateHandleOfType(table, OBJECTREFToObject(object), HNDTYPE_WEAK_DEFAULT);
}
-inline OBJECTHANDLE CreateShortWeakHandle(HHANDLETABLE table, OBJECTREF object)
+inline OBJECTHANDLE CreateShortWeakHandle(void* table, OBJECTREF object)
{
return GCHandleTableUtilities::GetGCHandleTable()->CreateHandleOfType(table, OBJECTREFToObject(object), HNDTYPE_WEAK_SHORT);
}
-inline OBJECTHANDLE CreateLongWeakHandle(HHANDLETABLE table, OBJECTREF object)
+inline OBJECTHANDLE CreateLongWeakHandle(void* table, OBJECTREF object)
{
return GCHandleTableUtilities::GetGCHandleTable()->CreateHandleOfType(table, OBJECTREFToObject(object), HNDTYPE_WEAK_LONG);
}
-inline OBJECTHANDLE CreateStrongHandle(HHANDLETABLE table, OBJECTREF object)
+inline OBJECTHANDLE CreateStrongHandle(void* table, OBJECTREF object)
{
return GCHandleTableUtilities::GetGCHandleTable()->CreateHandleOfType(table, OBJECTREFToObject(object), HNDTYPE_STRONG);
}
-inline OBJECTHANDLE CreatePinningHandle(HHANDLETABLE table, OBJECTREF object)
+inline OBJECTHANDLE CreatePinningHandle(void* table, OBJECTREF object)
{
return GCHandleTableUtilities::GetGCHandleTable()->CreateHandleOfType(table, OBJECTREFToObject(object), HNDTYPE_PINNED);
}
-inline OBJECTHANDLE CreateSizedRefHandle(HHANDLETABLE table, OBJECTREF object)
+inline OBJECTHANDLE CreateAsyncPinningHandle(void* table, OBJECTREF object)
{
- return GCHandleTableUtilities::GetGCHandleTable()->CreateHandleOfType(table, OBJECTREFToObject(object), HNDTYPE_SIZEDREF);
+ return GCHandleTableUtilities::GetGCHandleTable()->CreateHandleOfType(table, OBJECTREFToObject(object), HNDTYPE_ASYNCPINNED);
}
-inline OBJECTHANDLE CreateAsyncPinningHandle(HHANDLETABLE table, OBJECTREF object)
+inline OBJECTHANDLE CreateRefcountedHandle(void* table, OBJECTREF object)
{
- return GCHandleTableUtilities::GetGCHandleTable()->CreateHandleOfType(table, OBJECTREFToObject(object), HNDTYPE_ASYNCPINNED);
+ return GCHandleTableUtilities::GetGCHandleTable()->CreateHandleOfType(table, OBJECTREFToObject(object), HNDTYPE_REFCOUNTED);
}
-inline OBJECTHANDLE CreateRefcountedHandle(HHANDLETABLE table, OBJECTREF object)
+inline OBJECTHANDLE CreateSizedRefHandle(void* table, OBJECTREF object)
{
- return GCHandleTableUtilities::GetGCHandleTable()->CreateHandleOfType(table, OBJECTREFToObject(object), HNDTYPE_REFCOUNTED);
+ return GCHandleTableUtilities::GetGCHandleTable()->CreateHandleOfType(table, OBJECTREFToObject(object), HNDTYPE_SIZEDREF);
+}
+
+inline OBJECTHANDLE CreateSizedRefHandle(void* table, OBJECTREF object, int heapToAffinitizeTo)
+{
+ return GCHandleTableUtilities::GetGCHandleTable()->CreateHandleOfType(table, OBJECTREFToObject(object), HNDTYPE_SIZEDREF, heapToAffinitizeTo);
}
// Global handle creation convenience functions
@@ -137,7 +157,7 @@ inline OBJECTHANDLE CreateGlobalRefcountedHandle(OBJECTREF object)
// Special handle creation convenience functions
#ifdef FEATURE_COMINTEROP
-inline OBJECTHANDLE CreateWinRTWeakHandle(HHANDLETABLE table, OBJECTREF object, IWeakReference* pWinRTWeakReference)
+inline OBJECTHANDLE CreateWinRTWeakHandle(void* table, OBJECTREF object, IWeakReference* pWinRTWeakReference)
{
return GCHandleTableUtilities::GetGCHandleTable()->CreateHandleWithExtraInfo(table,
OBJECTREFToObject(object),
@@ -147,7 +167,7 @@ inline OBJECTHANDLE CreateWinRTWeakHandle(HHANDLETABLE table, OBJECTREF object,
#endif // FEATURE_COMINTEROP
// Creates a variable-strength handle
-inline OBJECTHANDLE CreateVariableHandle(HHANDLETABLE table, OBJECTREF object, uint32_t type)
+inline OBJECTHANDLE CreateVariableHandle(void* table, OBJECTREF object, uint32_t type)
{
return GCHandleTableUtilities::GetGCHandleTable()->CreateHandleWithExtraInfo(table,
OBJECTREFToObject(object),
@@ -155,6 +175,179 @@ inline OBJECTHANDLE CreateVariableHandle(HHANDLETABLE table, OBJECTREF object, u
(void*)((uintptr_t)type));
}
+// Handle destruction convenience functions
+
+inline void DestroyHandle(OBJECTHANDLE handle)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ GCHandleTableUtilities::GetGCHandleTable()->DestroyHandleOfType(handle, HNDTYPE_DEFAULT);
+}
+
+inline void DestroyWeakHandle(OBJECTHANDLE handle)
+{
+ GCHandleTableUtilities::GetGCHandleTable()->DestroyHandleOfType(handle, HNDTYPE_WEAK_DEFAULT);
+}
+
+inline void DestroyShortWeakHandle(OBJECTHANDLE handle)
+{
+ GCHandleTableUtilities::GetGCHandleTable()->DestroyHandleOfType(handle, HNDTYPE_WEAK_SHORT);
+}
+
+inline void DestroyLongWeakHandle(OBJECTHANDLE handle)
+{
+ GCHandleTableUtilities::GetGCHandleTable()->DestroyHandleOfType(handle, HNDTYPE_WEAK_LONG);
+}
+
+inline void DestroyStrongHandle(OBJECTHANDLE handle)
+{
+ GCHandleTableUtilities::GetGCHandleTable()->DestroyHandleOfType(handle, HNDTYPE_STRONG);
+}
+
+inline void DestroyPinningHandle(OBJECTHANDLE handle)
+{
+ GCHandleTableUtilities::GetGCHandleTable()->DestroyHandleOfType(handle, HNDTYPE_PINNED);
+}
+
+inline void DestroyAsyncPinningHandle(OBJECTHANDLE handle)
+{
+ GCHandleTableUtilities::GetGCHandleTable()->DestroyHandleOfType(handle, HNDTYPE_ASYNCPINNED);
+}
+
+inline void DestroyRefcountedHandle(OBJECTHANDLE handle)
+{
+ GCHandleTableUtilities::GetGCHandleTable()->DestroyHandleOfType(handle, HNDTYPE_REFCOUNTED);
+}
+
+inline void DestroyDependentHandle(OBJECTHANDLE handle)
+{
+ GCHandleTableUtilities::GetGCHandleTable()->DestroyHandleOfType(handle, HNDTYPE_DEPENDENT);
+}
+
+inline void DestroyVariableHandle(OBJECTHANDLE handle)
+{
+ GCHandleTableUtilities::GetGCHandleTable()->DestroyHandleOfType(handle, HNDTYPE_VARIABLE);
+}
+
+inline void DestroyGlobalHandle(OBJECTHANDLE handle)
+{
+ GCHandleTableUtilities::GetGCHandleTable()->DestroyHandleOfType(handle, HNDTYPE_DEFAULT);
+}
+
+inline void DestroyGlobalWeakHandle(OBJECTHANDLE handle)
+{
+ GCHandleTableUtilities::GetGCHandleTable()->DestroyHandleOfType(handle, HNDTYPE_WEAK_DEFAULT);
+}
+
+inline void DestroyGlobalShortWeakHandle(OBJECTHANDLE handle)
+{
+ GCHandleTableUtilities::GetGCHandleTable()->DestroyHandleOfType(handle, HNDTYPE_WEAK_SHORT);
+}
+
+inline void DestroyGlobalLongWeakHandle(OBJECTHANDLE handle)
+{
+ GCHandleTableUtilities::GetGCHandleTable()->DestroyHandleOfType(handle, HNDTYPE_WEAK_LONG);
+}
+
+inline void DestroyGlobalStrongHandle(OBJECTHANDLE handle)
+{
+ GCHandleTableUtilities::GetGCHandleTable()->DestroyHandleOfType(handle, HNDTYPE_STRONG);
+}
+
+inline void DestroyGlobalPinningHandle(OBJECTHANDLE handle)
+{
+ GCHandleTableUtilities::GetGCHandleTable()->DestroyHandleOfType(handle, HNDTYPE_PINNED);
+}
+
+inline void DestroyGlobalRefcountedHandle(OBJECTHANDLE handle)
+{
+ GCHandleTableUtilities::GetGCHandleTable()->DestroyHandleOfType(handle, HNDTYPE_REFCOUNTED);
+}
+
+inline void DestroyTypedHandle(OBJECTHANDLE handle)
+{
+ GCHandleTableUtilities::GetGCHandleTable()->DestroyHandleOfUnknownType(handle);
+}
+
+#ifdef FEATURE_COMINTEROP
+inline void DestroyWinRTWeakHandle(OBJECTHANDLE handle)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // Release the WinRT weak reference if we have one. We're assuming that this will not reenter the
+ // runtime, since if we are pointing at a managed object, we should not be using HNDTYPE_WEAK_WINRT
+ // but rather HNDTYPE_WEAK_SHORT or HNDTYPE_WEAK_LONG.
+ void* pExtraInfo = GCHandleTableUtilities::GetGCHandleTable()->GetExtraInfoFromHandle(handle);
+ IWeakReference* pWinRTWeakReference = reinterpret_cast<IWeakReference*>(pExtraInfo);
+ if (pWinRTWeakReference != nullptr)
+ {
+ pWinRTWeakReference->Release();
+ }
+
+ GCHandleTableUtilities::GetGCHandleTable()->DestroyHandleOfType(handle, HNDTYPE_WEAK_WINRT);
+}
+#endif
+
+// Handle holders/wrappers
+
+#ifndef FEATURE_REDHAWK
+typedef Wrapper<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, DestroyHandle> OHWrapper;
+typedef Wrapper<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, DestroyPinningHandle, NULL> PinningHandleHolder;
+typedef Wrapper<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, DestroyAsyncPinningHandle, NULL> AsyncPinningHandleHolder;
+typedef Wrapper<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, DestroyRefcountedHandle> RefCountedOHWrapper;
+
+typedef Holder<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, DestroyLongWeakHandle> LongWeakHandleHolder;
+typedef Holder<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, DestroyGlobalStrongHandle> GlobalStrongHandleHolder;
+typedef Holder<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, DestroyGlobalShortWeakHandle> GlobalShortWeakHandleHolder;
+
+class RCOBJECTHANDLEHolder : public RefCountedOHWrapper
+{
+public:
+ FORCEINLINE RCOBJECTHANDLEHolder(OBJECTHANDLE p = NULL) : RefCountedOHWrapper(p)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+ FORCEINLINE void operator=(OBJECTHANDLE p)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ RefCountedOHWrapper::operator=(p);
+ }
+};
+
+class OBJECTHANDLEHolder : public OHWrapper
+{
+public:
+ FORCEINLINE OBJECTHANDLEHolder(OBJECTHANDLE p = NULL) : OHWrapper(p)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+ FORCEINLINE void operator=(OBJECTHANDLE p)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ OHWrapper::operator=(p);
+ }
+};
+
+#endif // !FEATURE_REDHAWK
+
#endif // !DACCESS_COMPILE
#endif // _GCHANDLETABLEUTILITIES_H_
diff --git a/src/vm/gcheaputilities.cpp b/src/vm/gcheaputilities.cpp
index e63655d363..e15558335e 100644
--- a/src/vm/gcheaputilities.cpp
+++ b/src/vm/gcheaputilities.cpp
@@ -48,8 +48,7 @@ void ValidateHandleAndAppDomain(OBJECTHANDLE handle)
IGCHandleTable *pHandleTable = GCHandleTableUtilities::GetGCHandleTable();
- void* handleTable = pHandleTable->GetHandleTableForHandle(handle);
- DWORD context = (DWORD)pHandleTable->GetHandleTableContext(handleTable);
+ DWORD context = (DWORD)pHandleTable->GetHandleContext(handle);
ADIndex appDomainIndex = ADIndex(context);
AppDomain *domain = SystemDomain::GetAppDomainAtIndex(appDomainIndex);
diff --git a/src/vm/i386/asmhelpers.S b/src/vm/i386/asmhelpers.S
index 45774fa2ce..98525aceee 100644
--- a/src/vm/i386/asmhelpers.S
+++ b/src/vm/i386/asmhelpers.S
@@ -817,16 +817,19 @@ NESTED_ENTRY DelayLoad_MethodCall, _TEXT, NoHandler
mov esi, esp
+ #define STACK_ALIGN_PADDING 4
+ sub esp, STACK_ALIGN_PADDING
+
push ecx
push edx
-
push eax
-
- // pTransitionBlock
- push esi
-
+ push esi // pTransitionBlock
+ CHECK_STACK_ALIGNMENT
call C_FUNC(ExternalMethodFixupWorker)
+ add esp, STACK_ALIGN_PADDING
+ #undef STACK_ALIGN_PADDING
+
// eax now contains replacement stub. PreStubWorker will never return
// NULL (it throws an exception if stub creation fails.)
@@ -1174,12 +1177,28 @@ NESTED_ENTRY BackPatchWorkerAsmStub, _TEXT, NoHandler
PROLOG_PUSH edx
PROLOG_END
- sub esp, 4 // for 16 bytes align
- push eax // push any indirect call address as the second arg to BackPatchWorker
- push [ebp+8] // and push return address as the first arg to BackPatchWorker
+ // Call BackPatchWorkerStaticStub
+ //
+ // Here is expected stack layout at this point:
+ // | saved edx |
+ // | saved ecx |
+ // | saved eax |
+ // +-----------+ <- ebp
+ // | saved ebp |
+ // | saved eip |
+ // +-----------+ <- CFA of BackPatchWorkerAsmStub
+ // | saved eip |
+ // +-----------+ <- CFA of ResolveStub (16-byte aligned)
+ // (Please refer to ResolveStub in vm/i386/virtualcallstubcpu.hpp for details)
+ //
+ push eax // any indirect call address as the 2nd arg
+ push DWORD PTR [ebp + 8] // return address (of ResolveStub) as the 1st arg
+ CHECK_STACK_ALIGNMENT
call C_FUNC(BackPatchWorkerStaticStub)
- add esp, 12
+
+ // Clean up arguments and alignment padding
+ add esp, 2*4
EPILOG_BEG
EPILOG_POP edx
diff --git a/src/vm/i386/excepx86.cpp b/src/vm/i386/excepx86.cpp
index 2c863b2ec3..9680e8745d 100644
--- a/src/vm/i386/excepx86.cpp
+++ b/src/vm/i386/excepx86.cpp
@@ -3703,6 +3703,13 @@ AdjustContextForVirtualStub(
pExceptionRecord->ExceptionAddress = (PVOID)callsite;
SetIP(pContext, callsite);
+#ifdef HAVE_GCCOVER
+ // Modify LastAVAddress saved in thread to distinguish between fake & real AV
+ // See comments in IsGcMarker in file excep.cpp for more details
+ pThread->SetLastAVAddress((LPVOID)GetIP(pContext));
+#endif
+
+
// put ESP back to what it was before the call.
SetSP(pContext, dac_cast<PCODE>(dac_cast<PTR_BYTE>(GetSP(pContext)) + sizeof(void*)));
diff --git a/src/vm/jitinterfacegen.cpp b/src/vm/jitinterfacegen.cpp
index ce4c1e90e3..8d1c8cdf67 100644
--- a/src/vm/jitinterfacegen.cpp
+++ b/src/vm/jitinterfacegen.cpp
@@ -61,6 +61,11 @@ extern "C" void* JIT_GetSharedNonGCStaticBaseNoCtor_Slow(SIZE_T moduleDomainID,
extern "C" void* JIT_GetSharedGCStaticBase_Slow(SIZE_T moduleDomainID, DWORD dwModuleClassID);
extern "C" void* JIT_GetSharedGCStaticBaseNoCtor_Slow(SIZE_T moduleDomainID, DWORD dwModuleClassID);
+extern "C" void* JIT_GetSharedNonGCStaticBase_SingleAppDomain(SIZE_T moduleDomainID, DWORD dwModuleClassID);
+extern "C" void* JIT_GetSharedNonGCStaticBaseNoCtor_SingleAppDomain(SIZE_T moduleDomainID, DWORD dwModuleClassID);
+extern "C" void* JIT_GetSharedGCStaticBase_SingleAppDomain(SIZE_T moduleDomainID, DWORD dwModuleClassID);
+extern "C" void* JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain(SIZE_T moduleDomainID, DWORD dwModuleClassID);
+
#ifdef _TARGET_AMD64_
extern WriteBarrierManager g_WriteBarrierManager;
#endif // _TARGET_AMD64_
@@ -283,7 +288,17 @@ void InitJITHelpers1()
SetJitHelperFunction(CORINFO_HELP_MON_ENTER_STATIC, JIT_MonEnterStatic_Slow);
SetJitHelperFunction(CORINFO_HELP_MON_EXIT_STATIC, JIT_MonExitStatic_Slow);
}
+#endif
+ if(IsSingleAppDomain())
+ {
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_GCSTATIC_BASE, JIT_GetSharedGCStaticBase_SingleAppDomain);
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE, JIT_GetSharedNonGCStaticBase_SingleAppDomain);
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR, JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain);
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR,JIT_GetSharedNonGCStaticBaseNoCtor_SingleAppDomain);
+ }
+#ifndef FEATURE_IMPLICIT_TLS
+ else
if (gAppDomainTLSIndex >= TLS_MINIMUM_AVAILABLE)
{
SetJitHelperFunction(CORINFO_HELP_GETSHARED_GCSTATIC_BASE, JIT_GetSharedGCStaticBase_Slow);
diff --git a/src/vm/marshalnative.cpp b/src/vm/marshalnative.cpp
index 8f1b23a703..7e1d63b7c1 100644
--- a/src/vm/marshalnative.cpp
+++ b/src/vm/marshalnative.cpp
@@ -183,11 +183,13 @@ FCIMPL3(VOID, MarshalNative::PtrToStructureHelper, LPVOID ptr, Object* pObjIn, C
CONTRACTL_END;
OBJECTREF pObj = ObjectToOBJECTREF(pObjIn);
+
+ HELPER_METHOD_FRAME_BEGIN_1(pObj);
if (ptr == NULL)
- FCThrowArgumentNullVoid(W("ptr"));
+ COMPlusThrowArgumentNull(W("ptr"));
if (pObj == NULL)
- FCThrowArgumentNullVoid(W("structure"));
+ COMPlusThrowArgumentNull(W("structure"));
// Code path will accept regular layout objects.
MethodTable *pMT = pObj->GetMethodTable();
@@ -195,7 +197,7 @@ FCIMPL3(VOID, MarshalNative::PtrToStructureHelper, LPVOID ptr, Object* pObjIn, C
// Validate that the object passed in is not a value class.
if (!allowValueClasses && pMT->IsValueType())
{
- FCThrowArgumentVoid(W("structure"), W("Argument_StructMustNotBeValueClass"));
+ COMPlusThrowArgumentException(W("structure"), W("Argument_StructMustNotBeValueClass"));
}
else if (pMT->IsBlittable())
{
@@ -203,14 +205,14 @@ FCIMPL3(VOID, MarshalNative::PtrToStructureHelper, LPVOID ptr, Object* pObjIn, C
}
else if (pMT->HasLayout())
{
- HELPER_METHOD_FRAME_BEGIN_1(pObj);
- LayoutUpdateCLR((LPVOID*) &(pObj), Object::GetOffsetOfFirstField(), pMT, (LPBYTE)(ptr));
- HELPER_METHOD_FRAME_END();
+ LayoutUpdateCLR((LPVOID*) &(pObj), Object::GetOffsetOfFirstField(), pMT, (LPBYTE)(ptr));
}
else
{
- FCThrowArgumentVoid(W("structure"), W("Argument_MustHaveLayoutOrBeBlittable"));
- }
+ COMPlusThrowArgumentException(W("structure"), W("Argument_MustHaveLayoutOrBeBlittable"));
+ }
+
+ HELPER_METHOD_FRAME_END();
}
FCIMPLEND
diff --git a/src/vm/object.h b/src/vm/object.h
index fad5f74f39..89019d31a9 100644
--- a/src/vm/object.h
+++ b/src/vm/object.h
@@ -3012,8 +3012,8 @@ class SafeBuffer : SafeHandle
public:
static FCDECL1(UINT, SizeOfType, ReflectClassBaseObject* typeUNSAFE);
static FCDECL1(UINT, AlignedSizeOfType, ReflectClassBaseObject* typeUNSAFE);
- static FCDECL3(void, PtrToStructure, BYTE* ptr, FC_TypedByRef structure, UINT32 sizeofT);
- static FCDECL3(void, StructureToPtr, FC_TypedByRef structure, BYTE* ptr, UINT32 sizeofT);
+ static FCDECL3_IVI(void, PtrToStructure, BYTE* ptr, FC_TypedByRef structure, UINT32 sizeofT);
+ static FCDECL3_VII(void, StructureToPtr, FC_TypedByRef structure, BYTE* ptr, UINT32 sizeofT);
};
#ifdef USE_CHECKED_OBJECTREFS
diff --git a/src/vm/safehandle.cpp b/src/vm/safehandle.cpp
index 828b221025..d79c5a749a 100644
--- a/src/vm/safehandle.cpp
+++ b/src/vm/safehandle.cpp
@@ -483,7 +483,7 @@ FCIMPL1(UINT, SafeBuffer::AlignedSizeOfType, ReflectClassBaseObject* typeUNSAFE)
}
FCIMPLEND
-FCIMPL3(void, SafeBuffer::PtrToStructure, BYTE* ptr, FC_TypedByRef structure, UINT32 sizeofT)
+FCIMPL3_IVI(void, SafeBuffer::PtrToStructure, BYTE* ptr, FC_TypedByRef structure, UINT32 sizeofT)
{
FCALL_CONTRACT;
@@ -494,7 +494,7 @@ FCIMPL3(void, SafeBuffer::PtrToStructure, BYTE* ptr, FC_TypedByRef structure, UI
}
FCIMPLEND
-FCIMPL3(void, SafeBuffer::StructureToPtr, FC_TypedByRef structure, BYTE* ptr, UINT32 sizeofT)
+FCIMPL3_VII(void, SafeBuffer::StructureToPtr, FC_TypedByRef structure, BYTE* ptr, UINT32 sizeofT)
{
FCALL_CONTRACT;
diff --git a/src/vm/stackwalk.cpp b/src/vm/stackwalk.cpp
index dacc85f1a0..31e233ceb7 100644
--- a/src/vm/stackwalk.cpp
+++ b/src/vm/stackwalk.cpp
@@ -3149,7 +3149,10 @@ void StackFrameIterator::PreProcessingForManagedFrames(void)
INDEBUG(m_crawl.pThread->DebugLogStackWalkInfo(&m_crawl, "CONSIDER", m_uFramesProcessed));
-#if defined(_DEBUG) && defined(_TARGET_X86_) && !defined(DACCESS_COMPILE)
+#if defined(_DEBUG) && !defined(WIN64EXCEPTIONS) && !defined(DACCESS_COMPILE)
+ //
+ // VM is responsible for synchronization on non-funclet EH model.
+ //
// m_crawl.GetThisPointer() requires full unwind
// In GC's relocate phase, objects is not verifiable
if ( !(m_flags & (LIGHTUNWIND | QUICKUNWIND | ALLOW_INVALID_OBJECTS)) &&
@@ -3173,7 +3176,7 @@ void StackFrameIterator::PreProcessingForManagedFrames(void)
END_GCX_ASSERT_COOP;
}
-#endif // _DEBUG && _TARGET_X86_ && !DACCESS_COMPILE
+#endif // _DEBUG && !WIN64EXCEPTIONS && !DACCESS_COMPILE
m_frameState = SFITER_FRAMELESS_METHOD;
} // StackFrameIterator::PreProcessingForManagedFrames()
diff --git a/src/vm/threads.cpp b/src/vm/threads.cpp
index 9af5aa4bcc..a126d1c816 100644
--- a/src/vm/threads.cpp
+++ b/src/vm/threads.cpp
@@ -5075,11 +5075,10 @@ void Thread::SafeUpdateLastThrownObject(void)
EX_TRY
{
IGCHandleTable *pHandleTable = GCHandleTableUtilities::GetGCHandleTable();
- void* table = pHandleTable->GetHandleTableForHandle(hThrowable);
// Creating a duplicate handle here ensures that the AD of the last thrown object
// matches the domain of the current throwable.
- OBJECTHANDLE duplicateHandle = pHandleTable->CreateHandleOfType(table, OBJECTREFToObject(ObjectFromHandle(hThrowable)), HNDTYPE_DEFAULT);
+ OBJECTHANDLE duplicateHandle = pHandleTable->CreateDuplicateHandle(hThrowable);
SetLastThrownObjectHandle(duplicateHandle);
}
EX_CATCH